Unnamed: 0
int64 0
15.9k
| cleaned_code
stringlengths 67
124k
⌀ | cleaned_prompt
stringlengths 168
30.3k
⌀ |
|---|---|---|
12,100
|
<ASSISTANT_TASK:>
Python Code:
# Import the financial library.
# The import only needs to be executed once.
import cashflows as cf
# Constant cash flow of $1000 over 10 yearly periods starting in 2016.
x = cf.cashflow(const_value=[1000]*10, start='2016')
x
# Convert the constant-money cash flow to current money using a flat 5%
# inflation per period (base date defaults to the first period).
cf.const2curr(cflo=x,
              inflation=cf.interest_rate(const_value=[5]*10, start='2016'))
# Same conversion, but expressed in money of period 9 (the last period).
cf.const2curr(cflo=x,
              inflation=cf.interest_rate(const_value=[5]*10, start='2016'),
              base_date=9)
## cash flow
x = cf.cashflow(const_value=[1000]*10, start='2016')
cf.textplot(x)
## inflation: 5% per period, jumping to 10% from period 5 onwards (chgpts)
inflation = cf.interest_rate(const_value=[5]*10, start='2016', chgpts={5:10})
cf.textplot(inflation)
cf.const2curr(cflo=x, inflation=inflation)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Inflación
Step2: Ejemplo.-- Considere el mismo ejemplo anterior. Conviertalo a dinero del período 10.
Step3: Ejemplo.-- Considere un flujo de caja constante de $ 1000. Conviértalo a dinero del período 0 usando una inflación inicial del 5% por período. La inflación cambia al 10% a partir del período 5.
|
12,101
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import time
from tensorflow.python.client import timeline
import matplotlib.pyplot as plt
%matplotlib inline
# Command-line flags controlling how much hardware the model may use.
FLAGS = tf.app.flags.FLAGS
# number of device count
tf.app.flags.DEFINE_integer('num_cpu_core', 1, 'Number of CPU cores to use')
tf.app.flags.DEFINE_integer('intra_op_parallelism_threads', 1, 'How many ops can be launched in parallel')
tf.app.flags.DEFINE_integer('num_gpu_core', 0, 'Number of GPU cores to use')
device_id = -1 # Global Variable Counter for device_id used
def next_device(use_cpu = True):
    '''Return the next available TensorFlow device string.

    Each call hands out the next unused core ('/cpu:N' or '/gpu:N') until
    the pool configured via FLAGS.num_cpu_core / FLAGS.num_gpu_core is
    exhausted; after that the most recently assigned device is reused.
    (The original left `device` unbound once the pool was exhausted, which
    raised UnboundLocalError -- e.g. on the second call with num_cpu_core=1.)

    Args:
        use_cpu: if True allocate from the CPU pool, otherwise the GPU pool.
            Uses the global device_id counter.
    Return:
        A device string such as '/cpu:0' or '/gpu:1'.
    '''
    global device_id
    pool_size = FLAGS.num_cpu_core if use_cpu else FLAGS.num_gpu_core
    template = '/cpu:%d' if use_cpu else '/gpu:%d'
    if (device_id + 1) < pool_size:
        device_id += 1
    # max() guards the first call when the pool is empty (device_id == -1).
    return template % max(device_id, 0)
def mnist_loader():
    """Load MNIST data in tensorflow readable format.

    The reader script comes from:
    https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/examples/tutorials/mnist/input_data.py

    Returns:
        (mnist, n_samples): the DataSets object with train/validation/test
        splits (labels one-hot encoded) and the number of training examples.
    """
    # The original also imported gzip/os/tempfile/numpy/urllib/xrange here,
    # none of which were used; they have been removed.
    from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
    # Downloads the data to ./MNIST_data on first use.
    mnist = read_data_sets('MNIST_data', one_hot=True)
    n_samples = mnist.train.num_examples
    return (mnist, n_samples)
# Load MNIST and report how many training samples are available.
(mnist, n_samples) = mnist_loader()
print('Number of available data: %d' % n_samples)
# Plot a few test digits as a sanity check of the loaded data.
x_sample = mnist.test.next_batch(100)[0]
plt.figure(figsize=(8, 4))
for i in range(6):
    plt.subplot(2, 3, i + 1)
    plt.imshow(x_sample[i].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
    plt.title("MNIST Data")
    plt.colorbar()
plt.tight_layout()
def xavier_init(neuron_in, neuron_out, constant=1):
    """Draw a (neuron_in, neuron_out) weight tensor with Xavier/Glorot
    initialization: uniform on [-b, b] with b = constant*sqrt(6/(fan_in+fan_out))."""
    bound = constant * np.sqrt(6 / (neuron_in + neuron_out))
    return tf.random_uniform((neuron_in, neuron_out),
                             minval=-bound, maxval=bound,
                             dtype=tf.float32)
# Sample xavier_init many times and plot a histogram of the drawn weights
# to verify the initializer produces the expected uniform distribution.
sess = tf.Session()
weights = []
for i in range(1000):
    weights.append(sess.run(xavier_init(3,3)))
weights = np.array(weights).reshape((-1,1))
n, bins, patches = plt.hist(weights, bins=20)
plt.xlabel('weight value')
plt.ylabel('counts')
plt.title('Histogram of Weights Initialized by Xavier')
plt.show()
def init_weights(config):
    """Initialize weights with specified configuration using the Xavier algorithm.

    Args:
        config: dict of layer sizes with keys 'x_in', 'encoder_1',
            'encoder_2', 'decoder_1', 'decoder_2' and 'z'.
    Returns:
        (encoder_weights, decoder_weights): dicts mapping layer names to
        tf.Variable -- Xavier-initialized weight matrices and
        zero-initialized bias vectors.
    """
    encoder_weights = dict()
    decoder_weights = dict()
    # two layers encoder; 'mu'/'sigma' project to the latent distribution
    encoder_weights['h1'] = tf.Variable(xavier_init(config['x_in'], config['encoder_1']))
    encoder_weights['h2'] = tf.Variable(xavier_init(config['encoder_1'], config['encoder_2']))
    encoder_weights['mu'] = tf.Variable(xavier_init(config['encoder_2'], config['z']))
    encoder_weights['sigma'] = tf.Variable(xavier_init(config['encoder_2'], config['z']))
    encoder_weights['b1'] = tf.Variable(tf.zeros([config['encoder_1']], dtype=tf.float32))
    encoder_weights['b2'] = tf.Variable(tf.zeros([config['encoder_2']], dtype=tf.float32))
    encoder_weights['bias_mu'] = tf.Variable(tf.zeros([config['z']], dtype=tf.float32))
    encoder_weights['bias_sigma'] = tf.Variable(tf.zeros([config['z']], dtype=tf.float32))
    # two layers decoder (mirror of the encoder, latent z back to x_in)
    decoder_weights['h1'] = tf.Variable(xavier_init(config['z'], config['decoder_1']))
    decoder_weights['h2'] = tf.Variable(xavier_init(config['decoder_1'], config['decoder_2']))
    decoder_weights['mu'] = tf.Variable(xavier_init(config['decoder_2'], config['x_in']))
    decoder_weights['sigma'] = tf.Variable(xavier_init(config['decoder_2'], config['x_in']))
    decoder_weights['b1'] = tf.Variable(tf.zeros([config['decoder_1']], dtype=tf.float32))
    decoder_weights['b2'] = tf.Variable(tf.zeros([config['decoder_2']], dtype=tf.float32))
    decoder_weights['bias_mu'] = tf.Variable(tf.zeros([config['x_in']], dtype=tf.float32))
    decoder_weights['bias_sigma'] = tf.Variable(tf.zeros([config['x_in']], dtype=tf.float32))
    return (encoder_weights, decoder_weights)
def forward_z(x, encoder_weights):
    """Encoder forward pass: compute mean and sigma of z.

    Args:
        x: input batch tensor of shape (batch, x_in).
        encoder_weights: dict produced by init_weights.
    Returns:
        (z_mean, z_sigma): latent mean and z_sigma, which is treated as a
        log-variance downstream (vae_init applies sqrt(exp(z_sigma))).
    """
    # Layers are spread over devices handed out by next_device().
    with tf.device(next_device()):
        layer_1 = tf.nn.softplus(tf.add(tf.matmul(x, encoder_weights['h1']), encoder_weights['b1']))
    with tf.device(next_device()):
        layer_2 = tf.nn.softplus(tf.add(tf.matmul(layer_1, encoder_weights['h2']), encoder_weights['b2']))
    z_mean = tf.add(tf.matmul(layer_2, encoder_weights['mu']), encoder_weights['bias_mu'])
    z_sigma = tf.add(tf.matmul(layer_2, encoder_weights['sigma']), encoder_weights['bias_sigma'])
    return (z_mean, z_sigma)
def reconstruct_x(z, decoder_weights):
    """Use z to reconstruct x.

    Args:
        z: latent sample tensor of shape (batch, z).
        decoder_weights: dict produced by init_weights.
    Returns:
        x_prime: reconstructed input in (0, 1) via a sigmoid output layer.
    """
    # Layers are spread over devices handed out by next_device().
    with tf.device(next_device()):
        layer_1 = tf.nn.softplus(tf.add(tf.matmul(z, decoder_weights['h1']), decoder_weights['b1']))
    with tf.device(next_device()):
        layer_2 = tf.nn.softplus(tf.add(tf.matmul(layer_1, decoder_weights['h2']), decoder_weights['b2']))
    x_prime = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, decoder_weights['mu']), decoder_weights['bias_mu']))
    return x_prime
def optimize_func(z, z_mean, z_sigma, x, x_prime, learn_rate):
    """Define cost and optimize function for the VAE.

    Args:
        z: sampled latent tensor (kept for interface compatibility; the KL
            term is computed from z_mean/z_sigma, not from the sample).
        z_mean, z_sigma: latent mean and log-variance from the encoder.
        x: input batch placeholder.
        x_prime: reconstruction from the decoder, values in (0, 1).
        learn_rate: Adam learning rate.
    Returns:
        (cost, optimizer): scalar loss tensor and the Adam training op.
    """
    # reconstruction loss: Bernoulli cross-entropy; 1e-10 guards log(0)
    recons_loss = -tf.reduce_sum(x * tf.log(1e-10 + x_prime) + (1-x) * tf.log(1e-10 + 1 - x_prime), 1)
    # KL distance to the unit-Gaussian prior:
    #   -0.5 * sum(1 + log(s^2) - mu^2 - s^2)
    # BUG FIX: the variance term must be exp(z_sigma) (the log-variance),
    # not exp(z) (the sampled latent), per the referenced VAE formulation.
    latent_loss = -0.5 * tf.reduce_sum(1 + z_sigma - tf.square(z_mean) - tf.exp(z_sigma), 1)
    # summing two loss terms together
    cost = tf.reduce_mean(recons_loss + latent_loss)
    # use ADAM to optimize
    optimizer = tf.train.AdamOptimizer(learning_rate=learn_rate).minimize(cost)
    return (cost, optimizer)
def vae_init(batch_size=100, learn_rate=0.001, config=None):
    """Build a variational autoencoder based on
    https://jmetzen.github.io/2015-11-27/vae.html

    In consideration of simplicity and future work on optimization, the
    class structure was removed. A tensorflow session, optimizer and cost
    function as well as input data will be returned.

    Args:
        batch_size: samples per training batch (also the number of latent
            noise draws).
        learn_rate: Adam learning rate.
        config: optional dict of layer sizes; when omitted the default
            network below is used.
    Returns:
        (sess, optimizer, cost, x, x_prime)
    """
    # default configuration of network:
    # x_in = 784, encoder_1 = encoder_2 = 500,
    # decoder_1 = decoder_2 = 500, z = 20
    # BUG FIX: config defaults to None instead of a mutable {} default,
    # which would be shared (and mutated) across calls.
    if not config:
        config = {}
        config['x_in'] = 784
        config['encoder_1'] = 500
        config['encoder_2'] = 500
        config['decoder_1'] = 500
        config['decoder_2'] = 500
        config['z'] = 20
    # input
    x = tf.placeholder(tf.float32, [None, config['x_in']])
    # initialize weights
    (encoder_weights, decoder_weights) = init_weights(config)
    # compute mean and sigma of z
    (z_mean, z_sigma) = forward_z(x, encoder_weights)
    # reparameterization trick: z = mu + sqrt(exp(log_var)) * eps
    eps = tf.random_normal((batch_size, config['z']), 0, 1, dtype=tf.float32)
    z_val = tf.add(z_mean, tf.multiply(tf.sqrt(tf.exp(z_sigma)), eps))
    # use z to reconstruct the network
    x_prime = reconstruct_x(z_val, decoder_weights)
    # define loss function
    (cost, optimizer) = optimize_func(z_val, z_mean, z_sigma, x, x_prime, learn_rate)
    # initialize all variables
    init = tf.global_variables_initializer()
    # limit the session to the configured number of cores/threads
    config_ = tf.ConfigProto(device_count={"CPU": FLAGS.num_cpu_core},
                             inter_op_parallelism_threads = 1,
                             intra_op_parallelism_threads = FLAGS.intra_op_parallelism_threads,
                             log_device_placement=True)
    # define and return the session
    sess = tf.Session(config=config_)
    sess.run(init)
    return (sess, optimizer, cost, x, x_prime)
def vae_train(sess, optimizer, cost, x, n_samples, batch_size=100, learn_rate=0.001, train_epoch=10, verb=1, verb_step=5, run_metadata=None):
    """Train the VAE with mini-batch Adam, reporting cost and timing.

    Args:
        sess, optimizer, cost, x: outputs of vae_init.
        n_samples: number of training examples (drives batches per epoch).
        batch_size: mini-batch size; must match the batch_size used in
            vae_init (the noise tensor is fixed-size).
        learn_rate: unused here (the optimizer was built in vae_init);
            kept for interface compatibility.
        train_epoch: number of passes over the data.
        verb, verb_step: print progress every verb_step epochs when verb.
        run_metadata: optional tf.RunMetadata with collected step stats;
            when provided a Chrome-trace timeline is written to
            'timeline.json' after training. (The original referenced an
            undefined `run_metadata` and raised NameError here.)
    """
    start_time = time.time()
    for epoch in range(train_epoch):
        avg_cost = 0
        total_batch = int(n_samples / batch_size)
        for i in range(total_batch):
            # NOTE(review): relies on the module-level `mnist` dataset.
            batch_x, _ = mnist.train.next_batch(batch_size)
            _, c = sess.run((optimizer, cost), feed_dict={x: batch_x})
            avg_cost += c / n_samples * batch_size
        if verb:
            if epoch % verb_step == 0:
                # average wall time per epoch (ms) since the last report
                elapsed_time = (time.time() - start_time) * 1000 / verb_step
                start_time = time.time()
                print('Epoch:%04d' % (epoch+1), 'cost=', '{:.9f}'.format(avg_cost),'Elapsed time: ','%.9f' % elapsed_time)
    # Create the Timeline object, and write it to a json (only possible
    # when the caller collected step stats into run_metadata).
    if run_metadata is not None:
        tl = timeline.Timeline(run_metadata.step_stats)
        ctf = tl.generate_chrome_trace_format()
        with open('timeline.json', 'w') as f:
            f.write(ctf)
# Build the VAE and train it for 75 epochs.
(sess, optimizer, cost, x, x_prime) = vae_init()
vae_train(sess, optimizer, cost, x, n_samples, train_epoch=75)
# Compare a batch of held-out test digits with their reconstructions.
x_sample = mnist.test.next_batch(100)[0]
x_reconstruct = sess.run(x_prime, feed_dict={x: x_sample})
plt.figure(figsize=(8, 12))
for i in range(5):
    plt.subplot(5, 2, 2*i + 1)
    plt.imshow(x_sample[i].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
    plt.title("Test input")
    plt.colorbar()
    plt.subplot(5, 2, 2*i + 2)
    plt.imshow(x_reconstruct[i].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
    plt.title("Reconstruction")
    plt.colorbar()
plt.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: mnist_loader
Step3: Test mnist data
Step4: We are generating synthetic data in this project, so all the 55000 samples can be used for training
Step5: xavier_init
Step6: Test xavier_init
Step12: vae_init
Step13: vae_train
Step14: When using only CPUs, the elapsed time is around 2ms. When using combination of CPUs and GPU, the elapsed time is reduced to around 0.9ms.
Step15: Profile
|
12,102
|
<ASSISTANT_TASK:>
Python Code:
from dx import *
# Market environment collecting all parameters of the short-rate process.
me = market_environment(name='me', pricing_date=dt.datetime(2015, 1, 1))
me.add_constant('initial_value', 0.01)  # initial short rate
me.add_constant('volatility', 0.1)
me.add_constant('kappa', 2.0)           # mean-reversion speed
me.add_constant('theta', 0.05)          # long-run mean rate
me.add_constant('paths', 1000)
me.add_constant('frequency', 'M')
me.add_constant('starting_date', me.pricing_date)
me.add_constant('final_date', dt.datetime(2015, 12, 31))
me.add_curve('discount_curve', 0.0) # dummy
me.add_constant('currency', 0.0) # dummy
# Instantiate the stochastic short-rate model from the environment.
ssr = stochastic_short_rate('sr', me)
# Example grid of dates for forward rates and discount factors.
time_list = [dt.datetime(2015, 1, 1),
             dt.datetime(2015, 4, 1),
             dt.datetime(2015, 6, 15),
             dt.datetime(2015, 10, 21)]
ssr.get_forward_rates(time_list, 10)
ssr.get_discount_factors(time_list, 10)
# Re-use the environment for a geometric Brownian motion that is
# discounted with the stochastic short rate.
me.add_constant('initial_value', 36.)
me.add_constant('volatility', 0.2)
# time horizon for the simulation
me.add_constant('currency', 'EUR')
me.add_constant('frequency', 'M')
# monthly frequency; parameter according to pandas convention
me.add_constant('paths', 10)
# number of paths for simulation
me.add_curve('discount_curve', ssr)
gbm = geometric_brownian_motion('gbm', me)
gbm.get_instrument_values()
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
%matplotlib inline
# short rate paths
plt.figure(figsize=(10, 6))
plt.plot(ssr.process.instrument_values[:, :10]);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Second, the instantiation of the class.
Step2: The following is an example list object containing datetime objects.
Step3: The call of the method get_forward_rates() yields the above time_list object and the simulated forward rates. In this case, 10 simulations.
Step4: Accordingly, the call of the get_discount_factors() method yields simulated zero-coupon bond prices for the time grid.
Step5: Stochastic Drifts
Step6: Then add the stochastic_short_rate object as discount curve.
Step7: Finally, instantiate the geometric_brownian_motion object.
Step8: We get simulated instrument values as usual via the get_instrument_values() method.
Step9: Visualization of Simulated Stochastic Short Rate
|
12,103
|
<ASSISTANT_TASK:>
Python Code:
import glob
import numpy as np
import pandas as pd
from sklearn.metrics import precision_score, recall_score, roc_auc_score
def get_data(datadir):
    """Read the data files from different subdirectories of datadir corresponding
    to different HOG configurations.

    Inputs
        datadir: top level directory in which there are subdirectories
            corresponding to different HOG configurations
    Output
        data: {hogname: list(pd.DataFrame)} where each key corresponds to a
            different subdirectory (HOG configuration) and the value is
            a list of dataframes read from each of the files in that
            subdirectory
    """
    hognames = [s.split('/')[-1] for s in glob.glob(datadir + '/*')]
    # sep=None lets pandas sniff the delimiter; this requires the python
    # engine, passed explicitly to avoid the ParserWarning fallback.
    return {hogname: [pd.read_csv(filename, sep=None, engine='python')
                      for filename in glob.glob('{}/{}/filenames_*.txt'.format(datadir, hogname))]
            for hogname in hognames}
def get_average_scores(dataframes):
    """Average the scores from several different rotations.

    Inputs
        dataframes: list(pd.DataFrame['filename', 'score', 'label'])
    Output
        df_out: pd.DataFrame['filename', 'score', 'label'] where 'score'
            is the average over all of the input dataframes and
            'label' is taken arbitrarily from the first input dataframe
    """
    from functools import reduce  # Python 3: reduce is no longer a builtin
    n_frames = len(dataframes)
    dataframes = [df.rename(columns={'score': 'score_{}'.format(idx),
                                     'label': 'label_{}'.format(idx)})
                  for idx, df in enumerate(dataframes)]
    merged_df = reduce(lambda df1, df2: pd.merge(df1, df2, on='filename'), dataframes)
    assert all(df.shape[0] == merged_df.shape[0] for df in dataframes), \
        'Not all keys are the same in the data sets'
    # BUG FIX: divide by the number of rotations -- the docstring promises
    # an average, but the original summed the scores, which shifts the 0.5
    # decision threshold applied downstream in main().
    merged_df['score'] = sum(merged_df['score_{}'.format(idx)]
                             for idx in range(n_frames)) / n_frames
    merged_df['label'] = merged_df['label_0']
    return merged_df[['filename', 'score', 'label']]
def bootstrap(df, func, num_samples, sample_size_frac=1):
    """Returns the bootstrap average and standard deviation when applying
    func to df. It is assumed that applying func to df returns a scalar.

    In each iteration, sample_size_frac*N rows are drawn from df at
    random with replacement, where N is the number of rows in df.
    In this way a DataFrame df_sample is created of the same type
    as df, with possibly a different number of rows. The calculation
    of interest is done on df_sample by applying func and returning
    a number. This number is collected into an array, and this
    process is repeated for num_samples iterations. Finally, the
    mean and standard deviation of the array of length num_samples
    is returned. The standard deviation is an estimate of the error
    (due to finite sample size) that you would get when applying
    func to the full DataFrame df to get a number.

    Inputs
        df: pd.DataFrame of any type
        func: function that takes in df and returns a scalar
        num_samples: number of bootstrap samples/iterations,
            see description above
        sample_size_frac: in each bootstrap sample, the number
            of rows sampled is this fraction of the actual
            number of rows in df
    Outputs
        mean: mean of the bootstrap values. Should be close to
            func(df) if num_samples is large enough.
        std: standard deviation of the bootstrap values. This is
            an estimate of the error (due to finite sample size)
            of func(df).
    """
    N = df.shape[0]
    sample_size = int(N*sample_size_frac)
    # iloc with a random integer index implements sampling with replacement
    bootstrap_values = [func(df.iloc[np.random.randint(N, size=sample_size)])
                        for _ in range(num_samples)]
    return np.mean(bootstrap_values), np.std(bootstrap_values)
def main(datadir, num_boot_samples, bands=None):
    """For each HOG configuration, average scores from different rotations and
    output metrics: precision, recall, AUC, and standard deviation of the AUC
    from the bootstrap analysis. Details of the bootstrap analysis described
    in the bootstrap function.

    Inputs
        datadir: directory name in which there are subdirectories
            corresponding to different HOG configurations
        num_boot_samples: number of bootstrap samples to create in the
            bootstrap analysis (see bootstrap function)
        bands: list of bands to analyze separately. If None, don't
            separate out bands.
    Output
        pd.DataFrame['HOG_config', 'Precision', 'Recall', 'AUC',
                     'AUC_boot_avg', 'AUC_boot_std']
        OR, when bands is given,
        pd.DataFrame['HOG_config', 'Band', 'Precision', 'Recall', 'AUC',
                     'AUC_boot_avg', 'AUC_boot_std']
    """
    data = get_data(datadir)
    columns = ['HOG_config',
               'Precision',
               'Recall',
               'AUC',
               'AUC_boot_avg',
               'AUC_boot_std']
    if bands is not None:
        columns = columns[:1] + ['Band'] + columns[1:]
    output = {k: [] for k in columns}
    # BUG FIX: dict.iteritems() was removed in Python 3; use items().
    for hogname, dataframes in data.items():
        scores_all_bands = get_average_scores(dataframes)
        if bands is not None:
            # band identifier is the third '_'-separated token of the filename
            scores_all_bands['band'] = scores_all_bands['filename'].apply(lambda s: s.split('_')[2])
        # filter filenames further here if needed
        for band in (bands if bands is not None else ['']):
            if bands is not None:
                scores = scores_all_bands[scores_all_bands['band'] == band]
                output['Band'].append(band)
            else:
                scores = scores_all_bands
            output['HOG_config'].append(hogname)
            output['Precision'].append(precision_score(scores['label'], scores['score'] > 0.5))
            output['Recall'].append(recall_score(scores['label'], scores['score'] > 0.5))
            output['AUC'].append(roc_auc_score(scores['label'], scores['score']))
            boot_avg, boot_std = bootstrap(scores, lambda sc: roc_auc_score(sc['label'], sc['score']),
                                           num_boot_samples)
            output['AUC_boot_avg'].append(boot_avg)
            output['AUC_boot_std'].append(boot_std)
    return pd.DataFrame(output)[columns]
# Run the analysis (replace the placeholder with the actual data directory).
main('/path/to/data/directory', 10000)
main('/path/to/data/directory', 10000)
# Same analysis, reporting metrics separately for the 435 and 814 bands.
main('/path/to/data/directory', 10000, bands=['435', '814'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step4: (1) average the scores between the _0,_1,_2,_3 directions to get average score per image in each HOG configuration.
Step5: Test on Mock
Step6: Test on SLACS
Step7: Test on SLACS separating out different bands
|
12,104
|
<ASSISTANT_TASK:>
Python Code:
import sys
# Basic sanity check that the Python runtime is alive.
print('Hello, Colaboratory from Python {}!'.format(sys.version_info[0]))
import tensorflow as tf
import numpy as np
# Add a matrix of ones to [[1..3],[4..6]] and evaluate inside a
# (TF1-style) session.
with tf.Session():
    input1 = tf.constant(1.0, shape=[2, 3])
    input2 = tf.constant(np.reshape(np.arange(1.0, 7.0, dtype=np.float32), (2, 3)))
    output = tf.add(input1, input2)
    result = output.eval()
result
import matplotlib.pyplot as plt
import numpy as np
# Fit and plot a least-squares line through noisy data.
x = np.arange(20)
y = [x_i + np.random.randn(1) for x_i in x]
a, b = np.polyfit(x, y, 1)
_ = plt.plot(x, y, 'o', np.arange(20), a*np.arange(20)+b, '-')
# Only needs to be run once at the top of the notebook.
!pip install -q matplotlib-venn
# Now the newly-installed library can be used anywhere else in the notebook.
from matplotlib_venn import venn2
_ = venn2(subsets = (3, 2, 1))
#@title Examples
text = 'value' #@param
date_input = '2018-03-22' #@param {type:"date"}
number_slider = 0.5 #@param {type:"slider", min:-1, max:1, step:0.1}
dropdown = '1st option' #@param ["1st option", "2nd option", "3rd option"]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TensorFlow execution
Step2: Visualization
Step3: Want to use a new library? pip install it. For recipes to import commonly used libraries, refer to the importing libraries example notebook
Step4: Forms
|
12,105
|
<ASSISTANT_TASK:>
Python Code:
import sklearn
from sklearn.datasets import load_iris
iris_data = load_iris()
print(iris_data.keys())
print(iris_data['target_names'])
print(iris_data['feature_names'])
print(type(iris_data['data']))
print(iris_data['data'].shape)
print(iris_data['data'][:5])
print(iris_data['target'])
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
iris_data['data'], iris_data['target'], random_state=0)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import mglearn
from IPython.display import display
iris_df = pd.DataFrame(X_train, columns=iris_data.feature_names)
grr = pd.plotting.scatter_matrix(iris_df, c=y_train, figsize=(15,15), marker='o',
hist_kwds={'bins':20}, s=60, alpha=0.8, cmap=mglearn.cm3)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
import numpy as np
X_new = np.array([[5, 2.9, 1, 0.2]])
print(X_new.shape)
prediction = knn.predict(X_new)
print("Prediction: {}".format(prediction))
print("PRedicted target name: {}".format(
iris_data['target_names'][prediction]))
y_pred = knn.predict(X_test)
print("Test set predictions:\n {}".format(y_pred))
print("Test set score: {:2f}".format(np.mean(y_pred == y_test)))
print("Test set score: {:2f}".format(knn.score(X_test, y_test)))
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
X_train, X_test, y_train, y_test = train_test_split(
iris_data['data'], iris_data['target'], random_state=0)
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
print("Test set score: {:2f}".format(knn.score(X_test, y_test)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1.1 Data is contained in target and data fields
Step2: 1.2 Target array contains Species of flowers
Step3: 2. Create Training and Test sets from dataset
Step4: 2.1 Inspect the Data
Step5: 3. Build K-Nearest Neighbors (KNN) Classifier Model
Step6: 4. Make Prediction
Step7: KNN model predicts that the new iris belongs to class 0, which is species 'Setosa'.
Step8: Model accuracy is 97%
|
12,106
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'csiro-bom', 'access-1-0', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
12,107
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from pandas import read_csv
from pandas import datetime
from matplotlib import pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from math import sqrt
import numpy as np
def parser(x):
    """Parse a truncated 'Y-MM' stamp (e.g. '1-01') as a 190Y-MM datetime."""
    stamp = '190' + x
    return datetime.strptime(stamp, '%Y-%m')
series = read_csv('data/shampoo-sales.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
print(series.head())
type(series)
series.plot()
X = series.values
train, test = X[0:-12], X[-12:]
print(train.shape, test.shape)
# Copy so the loop below never mutates `train` itself
history = [x for x in train]
predictions = list()
for i in range(len(test)):
    # Persistence model: predict the most recently observed value
    predictions.append(history[-1])
    history.append(test[i])
rmse = sqrt(mean_squared_error(test, predictions))
print('RMSE: %.3f' % rmse)
plt.plot(test)
plt.plot(predictions)
from pandas import read_csv
from pandas import datetime
from pandas import DataFrame, Series
from pandas import concat
# 1. 時系列データを (X, y)の教師ありデータの形式に変換する
# (t-1)から(t)を予測する
def timeseries_to_supervised(data, lag=1):
    """Frame a series for supervised learning.

    Produces one column per lag (t-lag ... t-1) followed by the original
    series (t); leading NaNs introduced by shifting are filled with 0.
    """
    frame = DataFrame(data)
    lagged = [frame.shift(step) for step in range(1, lag + 1)]
    lagged.append(frame)
    framed = concat(lagged, axis=1)
    framed.fillna(0, inplace=True)
    return framed
supervised = timeseries_to_supervised(X, 1)
print(supervised.head())
# 2. トレンドを除去した定常なデータに変換
# 差分を取る
def difference(dataset, interval=1):
    """Return the interval-differenced series (removes a linear trend)."""
    deltas = [dataset[i] - dataset[i - interval]
              for i in range(interval, len(dataset))]
    return Series(deltas)
def inverse_difference(history, yhat, interval=1):
    """Undo differencing by adding back the value *interval* steps ago."""
    base = history[-interval]
    return yhat + base
a = [1, 3, 8, 9, 12]
d = difference(a, 1)
print(d)
inverse = list()
for i in range(len(d)):
value = inverse_difference(a, d[i], len(a) - i)
inverse.append(value)
print(inverse)
differenced = difference(series, 1)
print(differenced.head())
inverted = list()
for i in range(len(differenced)):
value = inverse_difference(series, differenced[i], len(series) - i)
inverted.append(value)
inverted = Series(inverted)
print(inverted.head())
series.head()
# 3. 正規化
# [-1, 1]にスケーリングする
print(X.shape)
X = X.reshape(len(X), 1)
print(X.shape)
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(X)
scaled_X = scaler.transform(X)
scaled_X[:10]
inverted_X = scaler.inverse_transform(scaled_X)
print(inverted_X[:10])
def scale(train, test):
    """Fit a [-1, 1] MinMaxScaler on the train split and transform both splits.

    The fitted scaler is returned alongside the data so predictions can be
    mapped back to the original units later.
    """
    scaler = MinMaxScaler(feature_range=(-1, 1))
    scaler = scaler.fit(train)

    def _apply(split):
        # ensure a 2-D (n_samples, n_features) layout before transforming
        two_d = split.reshape(split.shape[0], split.shape[1])
        return scaler.transform(two_d)

    return scaler, _apply(train), _apply(test)
def invert_scale(scaler, X, value):
    """Map a scaled prediction back to original units.

    The scaler was fitted on full rows, so the input features and the
    predicted value are stitched into one row before inverse-transforming;
    only the last element (the prediction) is returned.
    """
    row = np.array(list(X) + [value])
    row = row.reshape(1, len(row))
    restored = scaler.inverse_transform(row)
    return restored[0, -1]
# load dataset
series = read_csv('data/shampoo-sales.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
# transform data to be stationary
raw_values = series.values
diff_values = difference(raw_values, 1)
# transform data to be supervised learning
supervised = timeseries_to_supervised(diff_values, 1)
supervised_values = supervised.values
print(supervised_values.shape)
# split data into train and test
train, test = supervised_values[0:-12], supervised_values[-12:]
print(train.shape, test.shape)
# transform the scale of the data
scaler, train_scaled, test_scaled = scale(train, test)
from keras.models import Sequential
from keras.layers import Dense, LSTM
print(train_scaled.shape, test_scaled.shape)
def fit_lstm(train, batch_size, nb_epoch, neurons):
    """Train a stateful LSTM on supervised rows (last column is the target).

    Shuffling is disabled and states are reset manually once per epoch so
    the network carries its hidden state across the time-ordered batches.
    """
    # each row is one sample; all but the last column are inputs
    features, target = train[:, 0:-1], train[:, -1]
    # Keras expects (samples, time steps, features)
    features = features.reshape(features.shape[0], 1, features.shape[1])
    model = Sequential()
    # stateful=True keeps the hidden state across consecutive batches
    model.add(LSTM(neurons,
                   batch_input_shape=(batch_size, features.shape[1], features.shape[2]),
                   stateful=True))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    for _ in range(nb_epoch):
        # samples are time-ordered, so never shuffle; one fit call == one epoch
        model.fit(features, target, epochs=1, batch_size=batch_size, verbose=1, shuffle=False)
        # a new epoch restarts the series, so clear the carried state
        model.reset_states()
    return model
lstm_model = fit_lstm(train_scaled, 1, 100, 4)
def forecast_lstm(model, batch_size, X):
    """One-step forecast: reshape a single row to (1, 1, n_features) and predict."""
    sample = X.reshape(1, 1, len(X))
    prediction = model.predict(sample, batch_size=batch_size)
    return prediction[0, 0]
train_scaled.shape
# 訓練データを(samples, time step, features)に変換
train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)
res = lstm_model.predict(train_reshaped, batch_size=1)
plt.plot(res)
plt.plot(train_scaled[:, 1])
# Walk-forward validation: forecast the test set one step at a time
print(test_scaled.shape)
predictions = list()
for i in range(len(test_scaled)):
    # X: scaled test inputs, y: scaled ground truth (unused below)
    X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
    # Predict with the LSTM, then undo the scaling and the differencing
    yhat = forecast_lstm(lstm_model, 1, X)
    yhat = invert_scale(scaler, X, yhat)
    yhat = inverse_difference(raw_values, yhat, len(test_scaled) + 1 - i)
    predictions.append(yhat)
    # Ground truth on the original (untransformed) scale
    expected = raw_values[len(train) + i + 1]
    print('Month=%d, Predicted=%f, Expected=%f' % (i + 1, yhat, expected))
rmse = sqrt(mean_squared_error(raw_values[-12:], predictions))
print('Test RMSE: %.3f' % rmse)
plt.plot(raw_values[-12:])
plt.plot(predictions)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Persistence Model Forecast
Step2: LSTM訓練データの準備
Step3: データの前処理まとめ
Step4: LSTM
Step5: Persistence ModelのRMSEは136.761なのでそれに比べるとLSTMを用いたRMSEは減少
|
12,108
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
from bs4 import BeautifulSoup
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import roc_auc_score,roc_curve
from sklearn.decomposition import TruncatedSVD
from sklearn.cross_validation import train_test_split
from sklearn.naive_bayes import MultinomialNB
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
train_data=pd.read_csv("../../../data-project1/labeledTrainData.tsv", header=0, delimiter="\t", quoting=3)
train_data.head()
train_data.tail()
train_data.dtypes
train_data.info()
train_data.review[0]
soup=BeautifulSoup(train_data.review[0]).get_text()
letters_only = re.sub("[^a-zA-Z]"," ",soup )
letters_only
tokens=nltk.word_tokenize(letters_only.lower())
tagged_words=nltk.pos_tag(tokens)
tagged_words[0:5]
porter=nltk.PorterStemmer()
def lemmatize_with_potter(token, tag):
    """Stem *token* with the Porter stemmer when its POS tag marks a verb or noun.

    Bug fix: ``tag[0].lower`` was missing the call parentheses, so the
    condition compared a bound method against the list and never matched —
    no token was ever stemmed. The corrected form matches the later
    ``stemmatize_with_potter`` cell.
    """
    if tag[0].lower() in ['v', 'n']:
        return porter.stem(token)
    return token
stemmed_text_with_potter=[lemmatize_with_potter(token,tag) for token,tag in tagged_words]
lancaster=nltk.LancasterStemmer()
def lemmatize_with_lancaster(token, tag):
    """Stem *token* with the Lancaster stemmer when its POS tag marks a verb or noun.

    Bug fix: ``tag[0].lower`` was missing the call parentheses, so the
    branch never ran and no token was ever stemmed.
    """
    if tag[0].lower() in ['v', 'n']:
        return lancaster.stem(token)
    return token
stemmed_text_with_lancaster=[lemmatize_with_lancaster(token,tag) for token,tag in tagged_words]
stemmed_text_with_potter[0:10]
stemmed_text_with_lancaster[0:10]
tagged_words_after_stem=nltk.pos_tag(stemmed_text_with_potter)
wnl = nltk.WordNetLemmatizer()
def lemmatize_with_WordNet(token, tag):
    """Lemmatize *token* with WordNet when its POS tag marks a verb or noun.

    Bug fix: ``tag[0].lower`` was missing the call parentheses, so the
    branch never ran and no token was ever lemmatized. The corrected form
    matches the later "text cleaning summary" cell.
    """
    if tag[0].lower() in ['v', 'n']:
        return wnl.lemmatize(token)
    return token
stemmed_and_lemmatized_text=[lemmatize_with_WordNet(token,tag) for token,tag in tagged_words_after_stem]
stemmed_and_lemmatized_text[0:10]
porter=nltk.PorterStemmer()
wnl = nltk.WordNetLemmatizer()
def stemmatize_with_potter(token, tag):
    """Return the Porter stem of *token* when its POS tag marks a verb or noun."""
    pos = tag[0].lower()
    return porter.stem(token) if pos in ('v', 'n') else token
def lemmatize_with_WordNet(token, tag):
    """Return the WordNet lemma of *token* when its POS tag marks a verb or noun."""
    pos = tag[0].lower()
    return wnl.lemmatize(token) if pos in ('v', 'n') else token
def corpus_preprocessing(corpus):
    """Clean every review in *corpus*: strip HTML/punctuation, stem, lemmatize."""
    return [_clean_review(sentence) for sentence in corpus]

def _clean_review(sentence):
    """Clean a single review and return it as a space-joined token string."""
    # strip HTML markup and anything that is not a letter
    text = re.sub("[^a-zA-Z]", " ", BeautifulSoup(sentence).get_text())
    # POS-tag so stemming/lemmatization only touch verbs and nouns
    tagged = nltk.pos_tag(nltk.word_tokenize(text.lower()))
    stemmed = [stemmatize_with_potter(token, tag) for token, tag in tagged]
    # re-tag after stemming, then lemmatize with WordNet
    retagged = nltk.pos_tag(stemmed)
    lemmas = [lemmatize_with_WordNet(token, tag) for token, tag in retagged]
    return " ".join(lemmas)
vectorizer=CountVectorizer(stop_words='english')
test_corpus=train_data.review[0:5]
test_corpus= corpus_preprocessing(test_corpus)
test_corpus=vectorizer.fit_transform(test_corpus)
print(test_corpus.todense())
vectorizer= TfidfVectorizer(stop_words='english')
test_corpus=train_data.review[0:5]
test_corpus= corpus_preprocessing(test_corpus)
test_corpus=vectorizer.fit_transform(test_corpus)
print (test_corpus.todense())
tsvd=TruncatedSVD(100)
tsvd.fit(test_corpus)
test_corpus=tsvd.transform(test_corpus)
test_corpus
model=MultinomialNB()
#features from train set
train_features=train_data.review
#pro-processing train features
train_features=corpus_preprocessing(train_features)
vectorizer= TfidfVectorizer(stop_words='english')
train_features=vectorizer.fit_transform(train_features)
tsvd=TruncatedSVD(100)
tsvd.fit(train_features)
train_features=tsvd.transform(train_features)
#target from train set
train_target=train_data.sentiment
#fitting the model
model.fit(train_features,train_target)
# reading test data
# fix: the original chained assignment `test_data=train_data=pd.read_csv(...)`
# silently rebound `train_data` to the TEST set as well
test_data = pd.read_csv("../../../data-project1/testData.tsv", header=0, delimiter="\t", quoting=3)
# features from test data
test_features = test_data.review
# pre-processing test features with the transformers fitted on the train set
test_features = corpus_preprocessing(test_features)
test_features = vectorizer.transform(test_features)
test_features = tsvd.transform(test_features)
# predicting the sentiment for the test set
prediction = model.predict(test_features)
# writing out the submission file
pd.DataFrame( data={"id":test_data["id"], "sentiment":prediction} ).to_csv("../../../data-project1/first_attempt.csv", index=False, quoting=3 )
# Split 80-20 train vs test data
split_train_features, split_test_features, split_train_target, split_test_target = train_test_split(train_features,
train_target,
test_size = 0.20,
random_state = 0)
#pre-processing split train
vectorizer= TfidfVectorizer(stop_words='english')
split_train_features = corpus_preprocessing(split_train_features)
split_train_features = vectorizer.fit_transform(split_train_features)
tsvd=TruncatedSVD(100)
tsvd.fit(split_train_features)
split_train_features = tsvd.transform(split_train_features)
#pre-processing split test features
split_test_features = corpus_preprocessing(split_test_features)
split_test_features = vectorizer.transform(split_test_features)
split_test_features = tsvd.transform(split_test_features)
#fit and predict using split data
model = MultinomialNB()
model.fit(split_train_features,split_train_target)
split_prediction = model.predict(split_test_features)
score = roc_auc_score(split_test_target, split_prediction)
# fix: `score` is a float, not a callable — print it directly
print(score)
false_positive_rates, recall, thresholds = roc_curve(split_test_target, split_prediction)
plt.title('Receiver Operating Characteristic')
plt.plot(false_positive_rates, recall, 'r', label='AUC = %0.2f' % score)
plt.legend(loc='lower right')
# fix: the matplotlib functions are `ylabel`/`xlabel` ("ylable"/"xlable" raise AttributeError)
plt.ylabel('Recall')
plt.xlabel('False positive rate')
plt.show()
model = MultinomialNB(alpha=0.1)
model.fit(split_train_features, split_train_target)
split_prediction = model.predict(split_test_features)
# fix: score the freshly computed `split_prediction` (the original name
# `split_predict` was never defined, raising NameError), and print the
# float score directly instead of trying to call it
score = roc_auc_score(split_test_target, split_prediction)
print(score)
# six log-spaced smoothing candidates: 1e-5 ... 1
alphas = np.logspace(-5, 0, 6)
# fix: `print alphas` is a Python 2 print statement — a SyntaxError in Python 3
print(alphas)
def evaluate_alpha(train_features, train_target, test_features, test_target,
                   model, parameter_value, parameter_name):
    """Fit *model* once per candidate hyperparameter value and score each fit.

    Fixes two defects in the original: the loop iterated an undefined name
    ``params`` (NameError) instead of ``parameter_value``, and ``scores``
    was built but never returned.

    Returns a list of ``(candidate_value, roc_auc)`` tuples.
    """
    scores = []
    for candidate in parameter_value:
        model.set_params(**{parameter_name: candidate})
        model.fit(train_features, train_target)
        prediction = model.predict(test_features)
        scores.append((candidate, roc_auc_score(test_target, prediction)))
    return scores
model=MultinomialNB()
alpha_score=evaluate_alpha(split_train_features,split_train_target,split_test_features,split_test_target,model,alphas,'alpha')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import dataset
Step2: Notice that 'sentiment' is binary
Step3: Type 'object' is a string for pandas. We shall later convert to number representation,maybe using typical bag-of-words or word2vec
Step4: Now that we already have general idea of Data Set. We next clean, transform data to create useful features for machine learning
Step5: Before we can transform text into a number representation, we need to process the raw text. Let's first remove HTML and punctuation
Step6: Now we start stemming and lemmatizing the text, but it is generally better to first create the POS tagger as we only want to lemmatize verbs and nouns
Step7: Stemming the text
Step8: Observing that the word 'going' has been stemmed with porter but not with lancaster, I'll choose porter for this task.
Step9: text cleanning summary
Step10: Transforming feature 'review'
Step11: Extending bag-of-words with TF-IDF weights
Step12: Dimensionality reduction
Step13: Training Naive Bayes
Step14: Fitting the training data
Step15: Predicting with Naive Bayes
Step16: Preparing for kaggle submission
Step17: Performance Evaluation
Step18: Evaluating model using splitted data set
Step19: Plotting ROC curve
Step20: The source code of the first attempt can be found here and evaluation script here
Step21: Let's try to generate score over a range of alpha
|
12,109
|
<ASSISTANT_TASK:>
Python Code:
clicks = Table.read_table("http://stat.columbia.edu/~rachel/datasets/nyt1.csv")
clicks
age_upper_bounds = [18, 25, 35, 45, 55, 65]

def age_range(n):
    """Bucket an age into the label '0', '1-17', '18-24', ..., or '65+'."""
    if n == 0:
        return '0'
    edges = [1] + age_upper_bounds
    for lo, hi in zip(edges, edges[1:]):
        if lo <= n < hi:
            return str(lo) + '-' + str(hi - 1)
    # 65 and above fall through every finite bucket
    return str(edges[-1]) + '+'
# a little test
np.unique([age_range(n) for n in range(100)])
clicks["Age Range"] = clicks.apply(age_range, 'Age')
clicks["Person"] = 1
clicks
clicks_by_age = clicks.group('Age Range', sum)
clicks_by_age
clicks_by_age.select(['Age Range', 'Clicks sum', 'Impressions sum', 'Person sum']).barh('Age Range')
clicks_by_age['Gender Mix'] = clicks_by_age['Gender sum'] / clicks_by_age['Person sum']
clicks_by_age["CTR"] = clicks_by_age['Clicks sum'] / clicks_by_age['Impressions sum']
clicks_by_age.select(['Age Range', 'Person sum', 'Gender Mix', 'CTR'])
# Format some columns as percent with limited precision
clicks_by_age.set_format('Gender Mix', PercentFormatter(1))
clicks_by_age.set_format('CTR', PercentFormatter(2))
clicks_by_age
impressed = clicks.where(clicks['Age'] > 0).where('Impressions')
impressed
# Impressions by age and gender
impressed.pivot(rows='Gender', columns='Age Range', values='Impressions', collect=sum)
impressed.pivot("Age Range", "Gender", "Clicks",sum)
impressed.pivot_hist('Age Range','Impressions')
distributions = impressed.pivot_bin('Age Range','Impressions')
distributions
impressed['Gen'] = [['Male','Female'][i] for i in impressed['Gender']]
impressed
# How does gender and clicks vary with age?
gi = impressed.group('Age Range', np.mean).select(['Age Range', 'Gender mean', 'Clicks mean'])
gi.set_format(['Gender mean', 'Clicks mean'], PercentFormatter)
gi
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Well. Half a million rows. That would be painful in excel.
Step2: Now we can group the table by Age Range and count how many clicks come from each range.
Step3: Now we can do some other interesting summaries of these categories
Step4: We might want to do the click rate calculation a little more carefully. We don't care about clicks where there are zero impressions or missing age/gender information. So let's filter those out of our data set.
Step5: Group returns a new table. If we want to specify formats on the columns of that table, we should assign it to a name first.
|
12,110
|
<ASSISTANT_TASK:>
Python Code:
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../../data')
import shogun as sg
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
def load_file(feat_file, label_file):
    """Read feature and label CSV files into shogun feature/label containers."""
    features = sg.create_features(sg.read_csv(feat_file))
    labels = sg.create_labels(sg.read_csv(label_file))
    return features, labels
trainfeat_file=os.path.join(SHOGUN_DATA_DIR, 'uci/letter/train_fm_letter.dat')
trainlab_file=os.path.join(SHOGUN_DATA_DIR, 'uci/letter/train_label_letter.dat')
train_feats,train_labels=load_file(trainfeat_file,trainlab_file)
def setup_random_forest(num_trees, rand_subset_size, combination_rule, feature_types):
    """Build a shogun RandomForest with the requested ensemble settings.

    The per-split random attribute subset size and the per-attribute
    nominal/continuous flags are pushed down onto the base tree learner.
    """
    forest = sg.create_machine("RandomForest",
                               num_bags=num_trees,
                               combination_rule=combination_rule)
    forest.get("machine").put("m_randsubset_size", rand_subset_size)
    forest.get("machine").put("nominal", feature_types)
    return forest
comb_rule=sg.create_combination_rule("MajorityVote")
feat_types=np.array([False]*16)
rand_forest=setup_random_forest(10,4,comb_rule,feat_types)
# train forest
rand_forest.put('labels', train_labels)
rand_forest.train(train_feats)
# load test dataset
testfeat_file= os.path.join(SHOGUN_DATA_DIR, 'uci/letter/test_fm_letter.dat')
testlab_file= os.path.join(SHOGUN_DATA_DIR, 'uci/letter/test_label_letter.dat')
test_feats,test_labels=load_file(testfeat_file,testlab_file)
# apply forest
output_rand_forest_train=rand_forest.apply_multiclass(train_feats)
output_rand_forest_test=rand_forest.apply_multiclass(test_feats)
def train_cart(train_feats, train_labels, feature_types, problem_type):
    """Train a single CART decision tree on the given data.

    feature_types -- boolean array, True where a feature is nominal
    problem_type  -- shogun problem-type string, e.g. "PT_MULTICLASS"
    Returns the trained tree machine.
    """
    tree = sg.create_machine(
        "CARTree",
        nominal=feature_types,
        mode=problem_type,
        folds=2,
        apply_cv_pruning=False,
        labels=train_labels,
    )
    tree.train(train_feats)
    return tree
# train CART
cart=train_cart(train_feats,train_labels,feat_types,"PT_MULTICLASS")
# apply CART model
output_cart_train=cart.apply_multiclass(train_feats)
output_cart_test=cart.apply_multiclass(test_feats)
accuracy=sg.create_evaluation("MulticlassAccuracy")
rf_train_accuracy=accuracy.evaluate(output_rand_forest_train,train_labels)*100
rf_test_accuracy=accuracy.evaluate(output_rand_forest_test,test_labels)*100
cart_train_accuracy=accuracy.evaluate(output_cart_train,train_labels)*100
cart_test_accuracy=accuracy.evaluate(output_cart_test,test_labels)*100
print('Random Forest training accuracy : '+str(round(rf_train_accuracy,3))+'%')
print('CART training accuracy : '+str(round(cart_train_accuracy,3))+'%')
print
print('Random Forest test accuracy : '+str(round(rf_test_accuracy,3))+'%')
print('CART test accuracy : '+str(round(cart_test_accuracy,3))+'%')
def get_rf_accuracy(num_trees, rand_subset_size):
    """Train a random forest and return its test-set multiclass accuracy.

    Uses the module-level training/test data, combination rule and feature
    types defined earlier in the notebook.
    """
    forest = setup_random_forest(num_trees, rand_subset_size, comb_rule, feat_types)
    forest.put('labels', train_labels)
    forest.train(train_feats)
    predictions = forest.apply_multiclass(test_feats)
    evaluator = sg.create_evaluation("MulticlassAccuracy")
    return evaluator.evaluate(predictions, test_labels)
num_trees4=[5,10,20,50,100]
rf_accuracy_4=[round(get_rf_accuracy(i,4)*100,3) for i in num_trees4]
print('Random Forest accuracies (as %) :' + str(rf_accuracy_4))
# plot results
x4=[1]
y4=[86.48] # accuracy for single tree-CART
x4.extend(num_trees4)
y4.extend(rf_accuracy_4)
plt.plot(x4,y4,'--bo')
plt.xlabel('Number of trees')
plt.ylabel('Multiclass Accuracy (as %)')
plt.xlim([0,110])
plt.ylim([85,100])
plt.show()
# subset size 2
num_trees2=[10,20,50,100]
rf_accuracy_2=[round(get_rf_accuracy(i,2)*100,3) for i in num_trees2]
print('Random Forest accuracies (as %) :' + str(rf_accuracy_2))
# subset size 8
num_trees8=[5,10,50,100]
rf_accuracy_8=[round(get_rf_accuracy(i,8)*100,3) for i in num_trees8]
print('Random Forest accuracies (as %) :' + str(rf_accuracy_8))
x2=[1]
y2=[86.48]
x2.extend(num_trees2)
y2.extend(rf_accuracy_2)
x8=[1]
y8=[86.48]
x8.extend(num_trees8)
y8.extend(rf_accuracy_8)
plt.plot(x2,y2,'--bo',label='Subset Size = 2')
plt.plot(x4,y4,'--r^',label='Subset Size = 4')
plt.plot(x8,y8,'--gs',label='Subset Size = 8')
plt.xlabel('Number of trees')
plt.ylabel('Multiclass Accuracy (as %) ')
plt.legend(bbox_to_anchor=(0.92,0.4))
plt.xlim([0,110])
plt.ylim([85,100])
plt.show()
rf=setup_random_forest(100,2,comb_rule,feat_types)
rf.put('labels', train_labels)
rf.train(train_feats)
# set evaluation strategy
rf.put("oob_evaluation_metric", sg.create_evaluation("MulticlassAccuracy"))
oobe=rf.get("oob_error")
print('OOB accuracy : '+str(round(oobe*100,3))+'%')
trainfeat_file= os.path.join(SHOGUN_DATA_DIR, 'uci/wine/fm_wine.dat')
trainlab_file= os.path.join(SHOGUN_DATA_DIR, 'uci/wine/label_wine.dat')
train_feats,train_labels=load_file(trainfeat_file,trainlab_file)
def get_oob_errors_wine(num_trees, rand_subset_size):
    """Train a forest on the wine data and return its out-of-bag accuracy.

    All 13 wine features are continuous, hence the all-False nominal flags.
    """
    nominal_flags = np.array([False] * 13)
    forest = setup_random_forest(
        num_trees,
        rand_subset_size,
        sg.create_combination_rule("MajorityVote"),
        nominal_flags,
    )
    forest.put('labels', train_labels)
    forest.train(train_feats)
    forest.put("oob_evaluation_metric", sg.create_evaluation("MulticlassAccuracy"))
    return forest.get("oob_error")
size=[1,2,4,6,8,10,13]
oobe=[round(get_oob_errors_wine(400,i)*100,3) for i in size]
print('Out-of-box Accuracies (as %) : '+str(oobe))
plt.plot(size,oobe,'--bo')
plt.xlim([0,14])
plt.xlabel('Random subset size')
plt.ylabel('Multiclass accuracy')
plt.show()
size=[50,100,200,400,600]
oobe=[round(get_oob_errors_wine(i,2)*100,3) for i in size]
print('Out-of-box Accuracies (as %) : '+str(oobe))
plt.plot(size,oobe,'--bo')
plt.xlim([40,650])
plt.ylim([90,100])
plt.xlabel('Number of trees')
plt.ylabel('Multiclass accuracy')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, we decide the parameters of our Random Forest.
Step2: In the above code snippet, we decided to create a forest using 10 trees in which each split in individual trees will be using a randomly chosen subset of 4 features. Note that 4 here is the square root of the total available features (16) and is hence the usually chosen value as mentioned in the introductory paragraph. The strategy for combination chosen is Majority Vote which, as the name suggests, chooses the mode of all the individual tree outputs. The given features are all continuous in nature and hence feature types are all set false (i.e. not nominal). Next, we train our Random Forest and use it to classify letters in our test dataset.
Step3: We have with us the labels predicted by our Random Forest model. Let us also get the predictions made by a single tree. For this purpose, we train a CART-flavoured decision tree.
Step4: With both results at our disposal, let us find out which one is better.
Step5: As it is clear from the results above, we see a significant improvement in the predictions. The reason for the improvement is clear when one looks at the training accuracy. The single decision tree was over-fitting on the training dataset and hence was not generic. Random Forest on the other hand appropriately trades off training accuracy for the sake of generalization of the model. Impressed already? Let us now see what happens if we increase the number of trees in our forest.
Step6: The method above takes the number of trees and subset size as inputs and returns the evaluated accuracy as output. Let us use this method to get the accuracy for different number of trees keeping the subset size constant at 4.
Step7: NOTE
Step8: NOTE
Step9: As we can see from the above plot, the subset size does not have a major impact on the saturated accuracy obtained in this particular dataset. While this is true in many datasets, this is not a generic observation. In some datasets, the random feature sample size does have a measurable impact on the test accuracy. A simple strategy to find the optimal subset size is to use cross-validation. But with Random Forest model, there is actually no need to perform cross-validation. Let us see how in the next section.
Step10: The above OOB accuracy is slightly less than the test accuracy evaluated in the previous section (see plot for num_trees=100 and rand_subset_size=2). This is because the OOB estimate depicts the expected error for any generalized set of data vectors. It is only natural that for some sets of vectors the actual accuracy is slightly greater than the OOB estimate, while in other cases the observed accuracy is a bit lower.
Step11: Next let us find out the appropriate feature subset size. For this we will make use of OOB error.
Step12: From the above plot it is clear that subset size of 2 or 3 produces maximum accuracy for wine classification. At this value of subset size, the expected classification accuracy is of the model is 98.87%. Finally, as a sanity check, let us plot the accuracy vs number of trees curve to ensure that 400 is indeed a sufficient value ie. the oob error saturates before 400.
|
12,111
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import os
from os.path import join
path_notebooks = os.path.abspath('.')
path_data = join(path_notebooks, 'data')
from marvin.tools.maps import Maps
# REMOVE FROM NOTEBOOK
filename = '/Users/andrews/hacks/galaxies-mzr/data/manga-8077-6104-MAPS-SPX-GAU-MILESHC.fits.gz'
maps = Maps(filename=filename)
# maps = Maps('8077-6104')
nii = maps.emline_gflux_nii_6585
ha = maps.emline_gflux_ha_6564
n2 = nii / ha
logn2 = np.log10(n2)
oh = 8.90 + 0.57 * logn2
masks_bpt, __, __ = maps.get_bpt()
masks_bpt['sf']['global']
n2.pixmask.schema
mask_non_sf = ~masks_bpt['sf']['global'] * n2.pixmask.labels_to_value('DONOTUSE')
mask_bad_data = n2.pixmask.get_mask(['NOCOV', 'UNRELIABLE', 'DONOTUSE'])
min_snr = 3.
mask_nii_low_snr = (np.abs(nii.value * np.sqrt(nii.ivar)) < min_snr)
mask_ha_low_snr = (np.abs(ha.value * np.sqrt(ha.ivar)) < min_snr)
mask = mask_non_sf | mask_bad_data | mask_nii_low_snr | mask_ha_low_snr
fig, ax = oh.plot(mask=mask, cblabel='12+log(O/H)')
import pandas as pd
mstar = pd.read_csv(join(path_data, 'manga-{}_mstar.csv'.format(maps.plateifu)))
fig, ax = plt.subplots()
p = ax.imshow(mstar, origin='lower')
ax.set_xlabel('spaxel')
ax.set_ylabel('spaxel')
cb = fig.colorbar(p)
cb.set_label('log(Mstar) [M$_\odot$]')
# Convert the angular spaxel size into a physical area: distance from the
# Hubble law (cz = H0 * D) and the small-angle approximation (1 rad = 206265").
spaxel_size = 0.5  # [arcsec]
# or programmatically:
# spaxel_size = float(maps.getCube().header['CD2_2']) * 3600
redshift = maps.nsa['z']
c = 299792 # speed of light [km/s]
H0 = 70 # [km s^-1 Mpc^-1]
D = c * redshift / H0 # approx. distance to galaxy [Mpc]
scale = 1 / 206265 * D * 1e6 # 1 radian = 206265 arcsec [pc / arcsec]
spaxel_area = (scale * spaxel_size)**2 # [pc^2]
# Stellar mass surface density: per-spaxel mass divided by physical area.
sigma_star = np.log10(10**mstar / spaxel_area) # [Msun / pc^2]
fig, ax = plt.subplots(figsize=(6, 6))
ax.scatter(sigma_star.values[mask == 0], oh.value[mask == 0], alpha=0.15)
ax.set_xlabel('log(Mstar) [M$_\odot$]')
ax.set_ylabel('12+log(O/H)')
ax.axis([0, 4, 8.0, 8.8])
# fitting formula
aa = 8.55
bb = 0.014
cc = 3.14
xx = np.linspace(1, 3, 1000)
yy = aa + bb * (xx - cc) * np.exp(-(xx - cc))
fig, ax = plt.subplots(figsize=(6, 6))
ax.scatter(sigma_star.values[mask == 0], oh.value[mask == 0], alpha=0.15)
ax.plot(xx, yy)
ax.set_xlabel('log(Mstar) [M$_\odot$]')
ax.set_ylabel('12+log(O/H)')
ax.axis([0, 4, 8.0, 8.8])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Maps for Galaxy
Step2: Measure Metallicity
Step3: Calculate the necessary line ratio.
Step4: Finally, calculate the metallicity.
Step5: Select Spaxels
Step6: The BPT masks are dictionaries of dictionaries of a boolean (True/False) arrays. We are interested in the spaxels that are classified as star-forming in all three BPT diagrams are designated as True, which is designated with the global key. Print this mask.
Step7: Masks
Step8: Select non-star-forming spaxels (from the BPT mask) and set their mask value to the DAP's DONOTUSE value with the n2.pixmask.labels_to_value() method. Note that we are selecting spaxels that we want from the BPT mask (i.e., True is a spaxel to keep), whereas we are using the pixmask to select spaxels that we want to exclude (i.e., True is a spaxel to ignore).
Step9: Select spaxels classified by the DAP as bad data according to the masks for spaxels with no IFU coverage, with unreliable measurements, or otherwise unfit for science. Use the n2.pixmask.get_mask method.
Step10: Select spaxels with signal-to-noise ratios (SNRs) > 3 on both [NII] 6585 and Halpha.
Step11: Do a bitwise (binary) OR to create a master mask of spaxels to ignore.
Step12: Plot the Metallicity Map
Step13: Compute Stellar Mass Surface Density
Step14: Plot stellar mass map using ax.imshow(). MaNGA maps are oriented such that you want to specify origin='lower'. Also include a labelled colorbar.
Step15: Calculate physical size of a spaxel
Step16: Get the redshift of the galaxy from the maps.nsa attribute.
Step17: We'll use the small angle approximation to estimate the physical scale
Step18: Rearrange the small angle formula to solve for the scale ($\frac{d}{\theta}$) in pc / arcsec.
Step19: Now convert the spaxel size from arcsec to parsecs and calculate the area of a spaxel.
Step20: Finally, we simply divide the stellar mass by the area to get the stellar mass surface density $\Sigma_\star$ in units of $\frac{M_\odot}{pc^2}$.
Step21: Let's plot metallicity as a function of $\Sigma_\star$! Remember to apply the mask. Also set the axis range to be [0, 4, 8, 8.8].
Step22: MaNGA Spatially-Resolved Mass-Metallicity Relation
Step23: Remake the spatially-resolved MZR plot for our galaxy showing the he best fit line from Barrera-Ballesteros et al. (2016).
|
12,112
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, unicode_literals
from dbpedia_utils import iter_entities_from
from collections import defaultdict, Counter
import pandas as pd
import numpy as np
import json
import gzip
import dbpedia_config
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
source_folder = dbpedia_config.DATA_FOLDER
target_folder = dbpedia_config.TARGET_FOLDER
main_language = dbpedia_config.MAIN_LANGUAGE
links_file = '{0}/page_links_{1}.nt.bz2'.format(source_folder, main_language)
gender_data = pd.read_csv('{0}/person_data_{1}.csv.gz'.format(target_folder, main_language), encoding='utf-8', index_col='uri').loc[:,('gender', 'birth_year')].reset_index().copy()
gender_data.head()
gender_data = gender_data[(gender_data.birth_year < 1900) & (gender_data.birth_year) > 0].reset_index(drop=True)
gender_data.shape
gender_data.head()
uri_to_i = {row.uri: idx for idx, row in gender_data.iterrows()}
i_to_gender = {idx: row.gender[0] for idx, row in gender_data.iterrows()}
import graph_tool
def build_graph(dataframe, uri_to_i):
    """Build the directed link graph between the biographies in *dataframe*.

    One vertex per dataframe row (vertex index == row index); an edge a->b is
    added for every wiki page link between two biographies found in the
    module-level *links_file*. Every vertex gets a 'gender' string property
    holding the first letter of the gender column ('m'/'f').

    Returns a dict holding the graph plus (currently unused) bookkeeping maps.
    """
    graph_data = {'graph': graph_tool.Graph(), 'id_map': {}, 'g_map': {}, 'pk_to_i': {}}
    graph = graph_data['graph']
    print('dataframe shape', dataframe.shape)
    # Pre-allocate one vertex per biography so vertex ids match row indices.
    graph.add_vertex(dataframe.shape[0])
    for entity in iter_entities_from(links_file):
        resource = entity['resource']
        if resource in uri_to_i:
            #print(resource)
            # Keep only links whose target is also a biography in our set.
            links = []
            for other in entity['wikiPageWikiLink']:
                if other in uri_to_i:
                    #print('->', other)
                    links.append(other)
            src = uri_to_i[resource]
            for link in links:
                #print(src, link)
                graph.add_edge(src, uri_to_i[link])
    # Attach the gender initial to each vertex as an internal property.
    graph_data['gender'] = graph.new_vertex_property('string')
    gender = graph_data['gender']
    print(graph.num_vertices(), graph.num_edges())
    for i, row in dataframe.iterrows():
        gender[i] = row.gender[0]
    graph.vertex_properties['gender'] = gender
    return graph_data
graph_data = build_graph(gender_data, uri_to_i)
from cytoolz import frequencies
from __future__ import division
def count_values(graph, property_name):
    """Count how often each value of a vertex property occurs in *graph*."""
    prop = graph.vertex_properties[property_name]
    return frequencies(prop[v] for v in graph.vertices())
def clean_isolated_nodes(graph_data):
    """Hide vertices with no edges behind a vertex filter and record stats.

    The graph is left with a filter selecting only connected vertices
    (isolated ones are hidden, not purged -- see the commented-out purge
    below). Gender frequencies of the kept and the discarded vertex sets are
    stored in graph_data['kept_frequencies'] / ['deleted_frequencies'].
    """
    graph = graph_data['graph']
    # Boolean per-vertex flag: True iff total (in + out) degree > 0.
    # NOTE(review): np.bool is removed in modern NumPy; plain bool is the
    # drop-in replacement if this is ever run on a recent version.
    degree_values = graph.degree_property_map('total').get_array()
    degree_flag = degree_values.astype(np.bool)
    flags = graph.new_vertex_property('bool', vals=degree_flag)
    graph.set_vertex_filter(flags)
    print('to keep', np.sum(flags.get_array()), 'from', graph.num_vertices())
    graph_data['kept_frequencies'] = count_values(graph, 'gender')
    print(graph_data['kept_frequencies'])
    graph.clear_filters()
    # Invert the filter to inspect the vertices that are being discarded.
    graph.set_vertex_filter(flags, inverted=True)
    print('to delete', np.sum(flags.get_array()), 'from', graph.num_vertices())
    graph_data['deleted_frequencies'] = count_values(graph, 'gender')
    print(graph_data['deleted_frequencies'])
    graph.clear_filters()
    # Leave the keep-filter active for all subsequent analyses.
    graph.set_vertex_filter(flags)
    #graph.purge_vertices(in_place=True)
    #graph.clear_filters()
clean_isolated_nodes(graph_data)
from collections import defaultdict
from collections import Counter
results = {}
def gender_stats(graph_data):
    """Per-source-gender linking statistics.

    For every vertex with at least one out-neighbour, records:
      * ratios[src_gender]  -- list with one (#links to women / #links to men)
        ratio per source vertex (0.0 when the vertex links to no men);
      * edge_counts[src_gender] -- total out-links going to 'm' and to 'f'.

    Returns (ratios, edge_counts) as defaultdicts.
    """
    graph = graph_data['graph']
    ratios = defaultdict(list)
    edge_counts = defaultdict(lambda: defaultdict(int))
    for src in graph.vertices():
        counts = Counter()
        # Tally the genders of every page this biography links to.
        for dst in src.out_neighbours():
            counts[graph.vertex_properties['gender'][dst]] += 1
        if not counts:
            continue  # no out-links: vertex contributes nothing
        key = graph.vertex_properties['gender'][src]
        if counts['m'] > 0:
            ratio = counts['f'] / float(counts['m'])
        else:
            ratio = 0.0  # no links to men: define the ratio as 0, not inf
        ratios[key].append(ratio)
        edge_counts[key]['m'] += counts['m']
        edge_counts[key]['f'] += counts['f']
        #print src_data['label'], counts
    return ratios, edge_counts
def build_ratios(graph_data, results):
    """Compute gender linking statistics for the graph and store them.

    Stores in *results*:
      'gender_ratios'      -- per source gender, list of f/m out-link ratios
      'gender_edge_counts' -- per source gender, total out-links to 'm'/'f'
    Also prints the expected per-gender vertex fractions (as %) for reference.
    """
    graph = graph_data['graph']
    # Removed the unused local ``genders`` list that was dead code here.
    print('edge fractions')
    expected_counts = graph_data['kept_frequencies']
    expected_freqs = [expected_counts['m'] / graph.num_vertices() * 100.0, expected_counts['f'] / graph.num_vertices() * 100.0, ]
    print('expected freqs', expected_freqs)
    ratios, edge_counts = gender_stats(graph_data)
    results['gender_ratios'] = dict(ratios)
    results['gender_edge_counts'] = dict(edge_counts)
    print('edge counts', edge_counts)
build_ratios(graph_data, results)
import graph_tool.centrality
def centrality(graph_data, results):
    """Compute PageRank and in-degree rankings and the cumulative fraction of
    women among the top-k vertices of each ranking.

    Stores in *results*:
      'f_fraction'    -- fraction of women in the top-k by PageRank, for all k
      'f_id_fraction' -- same, ranked by in-degree
      'count_w'       -- number of women counted (overwritten by the last run)
    """
    graph = graph_data['graph']
    def estimate_fraction(method, name, fraction_name):
        # Rank all vertices by the given centrality measure, descending.
        values = method(graph)
        graph.vertex_properties[name] = values
        sorted_pr = []
        for v in graph.vertices():
            sorted_pr.append((graph.vertex_properties[name][v], v))
        print(name, graph.num_vertices(), len(sorted_pr))
        sorted_pr = sorted(sorted_pr, reverse=True)
        #print(sorted_pr)
        f_fraction = []
        print(sorted_pr[:3])
        count_w = 0.0
        # Walk the ranking, recording the running fraction of women at each k.
        for i, (node_pr, node_id) in enumerate(sorted_pr, start=1):
        #for i, (node_id, node_data) in enumerate(graph.nodes_iter(data=True), start=1):
            if graph.vertex_properties['gender'][node_id] == 'f':
                count_w += 1.0
            f_fraction.append(count_w / i)
        results[fraction_name] = f_fraction
        results['count_w'] = count_w
    estimate_fraction(graph_tool.centrality.pagerank, 'pagerank', 'f_fraction')
    estimate_fraction(lambda x: x.degree_property_map('in'), 'in_degree', 'f_id_fraction')
    return
centrality(graph_data, results)
def plot_fraction(graph_data, results, label, fraction='f_fraction'):
    """Plot the cumulative fraction of women in the top-k ranking.

    The dashed horizontal line marks the overall fraction of women in the
    graph, i.e. the value an unbiased ranking would converge to.
    """
    graph = graph_data['graph']
    f_fraction = results[fraction]
    count_w = results['count_w']
    #plt.figure(figsize=(6,4))
    plt.plot(np.arange(1, len(f_fraction) + 1, 1), f_fraction, '-', alpha=0.75, label=label)
    #plt.plot(np.arange(1, len(f_fraction) + 1, 1), null_f_fraction, '-', alpha=0.75, label='Null Model')
    plt.hlines([count_w / graph.num_vertices()], 1.0, graph.num_vertices(), linestyle='dashed', alpha=0.5, linewidth=0.5)
    #plt.savefig('./results/connectivity_observedpagerank_proportion.png', dpi=100, bbox_inches='tight')
plot_fraction(graph_data, results, 'Pre-1900')
plt.xlabel('# of Biographies in the top-k results')
plt.ylabel('Fraction of Women')
plt.xscale('log')
def top_entities(dataframe, graph_data, results, name='pagerank', n=30):
    """Return the top-*n* biographies per gender by the given vertex property.

    Result: {'m': [(uri, score), ...], 'f': [...]}, sorted best-first, also
    stored under results['top_pagerank'].
    """
    graph = graph_data['graph']
    genders = ['m', 'f']
    pagerank = graph.vertex_properties[name]
    gender = graph.vertex_properties['gender']
    sorted_pr = []
    for v in graph.vertices():
        sorted_pr.append((pagerank[v], v, gender[v]))
    print(name, graph.num_vertices(), len(sorted_pr))
    sorted_pr = sorted(sorted_pr, reverse=True)
    top_pagerank = {}
    for g in genders:
        # Keep the n best-ranked vertices of this gender.
        values = list(filter(lambda x: x[2] == g, sorted_pr))[0:n]
        # x[1] is a graph_tool vertex; assumes its integer index matches the
        # dataframe row index (as built in build_graph) -- TODO confirm.
        top_pagerank[g] = [(dataframe.loc[x[1]].uri, x[0]) for x in values]
    results['top_pagerank'] = top_pagerank
    return top_pagerank
def plot_top_30(top_pagerank):
    """Dot plot comparing PageRank of the top-30 men vs the top-30 women.

    Women's names label the left axis, men's the right; a dashed line joins
    the two scores at each rank. Rank 1 is drawn at the top.
    """
    people = []
    for m, w in zip(top_pagerank['m'], top_pagerank['f']):
        people.append({'woman': w[0], 'man': m[0], 'w_pr': w[1], 'm_pr': m[1]})
    df = pd.DataFrame.from_records(people, index=range(1, 31))
    plt.figure(figsize=(9,12))
    plt.title('Top-30 Biographies per Gender')
    # y = 31 - rank flips the axis so rank 1 sits at the top of the plot.
    plt.plot(df.m_pr, -df.index + 31, 'o', label="Men")
    plt.plot(df.w_pr, -df.index + 31, '^', label="Women")
    plt.xlabel('PageRank')
    plt.yticks(np.arange(1, 31))
    #plt.yticks(df.index, [n[1]['name'] for n in graph.nodes_iter(data=True)])#, fontsize='xx-small', rotation=45)
    plt.ylim([0.5, 30.5])
    #plt.xlim([-0.1,1.1])
    #for i, (x0, x1) in enumerate(zip(bc_real, bc_base)):
    #    plt.arrow(x0, i, x1 - x0, 0, length_includes_head=True)
    plt.hlines(-df.index + 31, df.m_pr, df.w_pr, alpha=0.3, linestyle='dashed', linewidth=1)
    plt.legend(loc='lower right')
    # Mirror the y axis on the right-hand side for the men's names.
    ax1 = plt.gca()
    ax2 = ax1.twinx()
    ax2.grid(False)
    ax2.set_ylim(ax1.get_ylim())
    ax2.set_yticks(np.arange(1, 31))
    ax2.set_yticklabels(list(df.man)[::-1])
    ax1.set_yticklabels(list(df.woman)[::-1])
top_entities(gender_data, graph_data, results)
plot_top_30(results['top_pagerank'])
import graph_tool.clustering
def append_graph_stats(graph_data, results):
    """Record basic topology statistics: sizes, mean degree, clustering."""
    graph = graph_data['graph']
    results['n_nodes'] = graph.num_vertices()
    results['n_edges'] = graph.num_edges()
    print('directed', results['n_nodes'], results['n_edges'])
    # NOTE(review): the 'u_' (undirected) counts are taken from the directed
    # graph itself -- confirm whether an undirected copy was intended here.
    results['u_n_nodes'] = graph.num_vertices()
    results['u_n_edges'] = graph.num_edges()
    # Mean degree; the edge count is rounded down to even first.
    k = (results['u_n_edges'] - (results['u_n_edges'] % 2)) / results['u_n_nodes']
    results['k'] = k
    print('k', k, results['u_n_nodes'], results['u_n_edges'])
    # Average local clustering coefficient over all vertices.
    clust_prop = graph_tool.clustering.local_clustering(graph)
    clust_coeff = clust_prop.get_array().mean().tolist()
    results['clust_coeff'] = clust_coeff
    print('coeff', results['clust_coeff'])
append_graph_stats(graph_data, results)
from scipy.optimize import brenth
import networkx as nx
def estimate_beta(graph_data, results):
    """Estimate the rewiring probability beta for a Watts-Strogatz model.

    Root-finds beta in [0, 1] such that a Watts-Strogatz graph with the
    observed vertex count and mean degree reproduces the observed average
    clustering coefficient. Stores the result in results['beta'].

    Note: *graph_data* is kept for signature compatibility with the other
    analysis steps; everything needed is read from *results*.
    """
    # Removed the unused inner helper ``find_clustering`` -- it was never
    # called and wrote a temporary graphml file for nothing.
    k = int(np.round(results['k']))
    print('k', k)
    beta = brenth(lambda x: (nx.cluster.average_clustering(nx.watts_strogatz_graph(results['n_nodes'], k, x)) - results['clust_coeff']), 0.0, 1.0)
    print('beta', beta)
    results['beta'] = beta
estimate_beta(graph_data, results)
import tempfile
import random
def prepare_baseline(graph, base_data):
    """Wrap *graph* like the observed network and run the same analyses.

    Reuses the id map and kept gender frequencies from *base_data* so the
    baseline is directly comparable. Returns (graph_data, results).
    """
    wrapped = {
        'graph': graph,
        'id_map': base_data['id_map'],
        'kept_frequencies': base_data['kept_frequencies'],
    }
    stats = {}
    for analysis in (build_ratios, centrality, append_graph_stats):
        analysis(wrapped, stats)
    return wrapped, stats
def generate_small_world(source_graph_data, source_results):
    """Generate a Watts-Strogatz small-world baseline matching the observed
    network's size, mean degree and clustering (via the fitted beta), with
    genders shuffled onto vertices at the observed frequencies.

    Returns (graph_data, results) from prepare_baseline.
    """
    import os

    sw_graph = nx.watts_strogatz_graph(source_results['n_nodes'], int(np.round(source_results['k'])),
                                       source_results['beta'], seed=31072010)
    # networkx and graph_tool exchange the topology via a temporary GraphML
    # file. mkstemp returns an open fd: close it so it does not leak, and
    # remove the file once the graph has been loaded.
    handle, filename = tempfile.mkstemp(suffix='.graphml')
    os.close(handle)
    try:
        nx.write_graphml(sw_graph, filename)
        g = graph_tool.Graph()
        g.load(filename)
    finally:
        os.remove(filename)
    print(g.num_vertices(), g.num_edges())
    # Assign genders at random, preserving the observed m/f frequencies.
    freqs = source_graph_data['kept_frequencies']
    shuffled = ['m'] * freqs['m'] + ['f'] * freqs['f']
    random.shuffle(shuffled)
    g.vertex_properties['gender'] = g.new_vertex_property('string')
    for i, gender in enumerate(shuffled, start=0):
        g.vertex_properties['gender'][i] = gender
    sw_data, sw_results = prepare_baseline(g, source_graph_data)
    #save_results(sw_data, sw_results, 'sw', 'Small World')
    return sw_data, sw_results
sw, sw_results = generate_small_world(graph_data, results)
import graph_tool.generation
def random_graph(source_graph_data, source_results, model='uncorrelated', n_iter=1):
    """Randomly rewire a copy of the observed graph and analyse it as a
    baseline. *model* selects the graph_tool rewiring null model.

    Returns (graph_data, results) from prepare_baseline.
    """
    rewired = source_graph_data['graph'].copy()
    graph_tool.generation.random_rewire(rewired, model=model, n_iter=n_iter)
    base_data, base_results = prepare_baseline(rewired, source_graph_data)
    return base_data, base_results
def populate_rand(rand, rand_results, model='erdos', n_iter=1):
    """Fill *rand*/*rand_results* with a randomized baseline graph.

    NOTE(review): this body references ``pre`` and ``results['pre']``, which
    are not defined anywhere in this notebook (and the function is never
    called here). It looks copied from a variant that split the data into
    'pre'/'post' periods -- confirm and fix before using.
    """
    random_g_data, random_g_results = random_graph(pre, results['pre'], model=model, n_iter=n_iter)
    rand['pre'] = random_g_data
    rand_results.update(random_g_results)
rand, rand_results = random_graph(graph_data, results, model='erdos')
deg_seq, deg_seq_results = random_graph(graph_data, results, model='uncorrelated', n_iter=3)
def plot_distributions(loc='best'):
    """Overlay the women-fraction curves of the observed rankings and every
    baseline model (small world, random, degree sequence) on the current axes.

    Relies on the module-level graph/result pairs built in earlier cells.
    """
    plot_fraction(graph_data, results, 'PageRank (OBS)')
    plot_fraction(graph_data, results, 'In-Degree (OBS)', fraction='f_id_fraction')
    plot_fraction(sw, sw_results, 'Small World')
    plot_fraction(rand, rand_results, 'Random')
    plot_fraction(deg_seq, deg_seq_results, 'Degree Sequence')
    plt.legend(loc=loc, fontsize='x-small')
sns.set_palette('Set2')
plot_distributions()
plt.xlim([10, 1000000])
plt.ylim([0, 0.3])
plt.xscale('log')
#plt.xlabel('Sorted Top-k Biographies')
plt.ylabel('Fraction of Women')
plt.title('0 -- 1900')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, we load person data to process only biographies present in our dataset.
Step2: At this point of time you can decide whether you want to process the entire biography network, or, for instance, consider only biographies born before/after 1900, and so on. You decide
Step3: Note that instead of indexing by URI as we do on the other notebooks, this time we indexed by a number because we use numbers to identify vertices in the graph.
Step4: We use the graph-tool library to create our graph.
Step5: Not all nodes have links. In our analysis, we only considered nodes connected to at least one other node.
Step6: We can estimate how one gender is connected to itself and other genders.
Step7: Now we can estimate centrality measures. In particular, we consider PageRank.
Step8: We can plot the PageRank distribution with respect to how many women are present at each subset of the results.
Step9: We can also display the top biographies to find the most central persons according to gender.
Step10: Baseline Estimations
Step11: We want to estimate the $\beta$ parameter for the Small World graph we are going to generate. Note that we need to use networkx because graph_tool does not have a Small World generator.
Step12: We use graph_tool random module to perturb our network and build the baseline random networks.
Step13: And now we can plot the distributions of PageRank for each network. In this way, we can see how biased is the estimated centrality in comparison to unbiased networks.
|
12,113
|
<ASSISTANT_TASK:>
Python Code:
# adapted from http://scikit-learn.org/stable/auto_examples/neighbors/plot_classification.html#example-neighbors-plot-classification-py
n_neighbors = 30
# step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00'])
clf = sklearn.neighbors.KNeighborsClassifier(n_neighbors, weights='uniform')
clf.fit(d, cl)
def plot_cls_with_decision_surface(d, cl, clf, h=.25):
    """Plot clf's decision surface together with the training points.

    The decision boundary is drawn by assigning a color to each point of a
    mesh over [x_min, x_max] x [y_min, y_max].

    h -- step size of the mesh grid.
    """
    # (Fix: the description above was a bare, unquoted block of text, which
    # made this function a syntax error; it is now a proper docstring.)
    x_min, x_max = d[:, 0].min() - 1, d[:, 0].max() + 1
    y_min, y_max = d[:, 1].min() - 1, d[:, 1].max() + 1
    xx, yy = numpy.meshgrid(numpy.arange(x_min, x_max, h),
                            numpy.arange(y_min, y_max, h))
    Z = clf.predict(numpy.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
    # Plot also the training points
    plt.scatter(d[:, 0], d[:, 1], c=cl, cmap=cmap_bold)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
plot_cls_with_decision_surface(d,cl,clf)
def classify(d, cl, clf, cv):
    """Cross-validate *clf* on (d, cl) with the folds in *cv*.

    Returns (accuracy, confusion_matrix) computed over the out-of-fold
    predictions for every sample.
    """
    # Size the prediction vector from the data instead of the module-level
    # global ``n`` (fixes a latent bug when the dataset size differs from n).
    pred = numpy.zeros(len(cl))
    for train, test in cv:
        clf.fit(d[train, :], cl[train])
        pred[test] = clf.predict(d[test, :])
    return sklearn.metrics.accuracy_score(cl, pred), sklearn.metrics.confusion_matrix(cl, pred)
clf=sklearn.neighbors.KNeighborsClassifier(n_neighbors, weights='uniform')
# use stratified k-fold crossvalidation, which keeps the proportion of classes roughly
# equal across folds
cv=sklearn.cross_validation.StratifiedKFold(cl, 8)
acc,confusion=classify(d,cl,clf,cv)
print acc
print confusion
accuracy_knn=numpy.zeros(30)
for i in range(1,31):
clf=sklearn.neighbors.KNeighborsClassifier(i, weights='uniform')
accuracy_knn[i-1],_=classify(d,cl,clf,cv)
plt.plot(range(1,31),accuracy_knn)
accuracy_knn=numpy.zeros((100,30))
for x in range(100):
ds_cl,ds_x=make_class_data(multiplier=[1.1,1.1],N=n)
ds_cv=sklearn.cross_validation.StratifiedKFold(ds_cl, 8)
for i in range(1,31):
clf=sklearn.neighbors.KNeighborsClassifier(i, weights='uniform')
accuracy_knn[x,i-1],_=classify(ds_x,ds_cl,clf,ds_cv)
plt.plot(range(1,31),numpy.mean(accuracy_knn,0))
plt.xlabel('number of nearest neighbors')
plt.ylabel('accuracy')
clf=sklearn.lda.LDA()
cv=sklearn.cross_validation.LeaveOneOut(n)
acc,confusion=classify(d,cl,clf,cv)
print acc
print confusion
plot_cls_with_decision_surface(d,cl,clf)
clf=sklearn.linear_model.LogisticRegression(C=0.5)
acc,confusion=classify(d,cl,clf,cv)
print acc
print confusion
plot_cls_with_decision_surface(d,cl,clf)
clf=sklearn.svm.SVC(kernel='linear')
acc,confusion=classify(d,cl,clf,cv)
print acc
print confusion
plot_cls_with_decision_surface(d,cl,clf)
clf=sklearn.svm.SVC(kernel='rbf')
acc,confusion=classify(d,cl,clf,cv)
print acc
print confusion
plot_cls_with_decision_surface(d,cl,clf)
gammavals=numpy.arange(0.0,0.2,0.01)
accuracy_rbf=numpy.zeros(len(gammavals))
for i in range(len(gammavals)):
clf=sklearn.svm.SVC(kernel='rbf',gamma=gammavals[i])
accuracy_rbf[i],_=classify(d,cl,clf,cv)
plt.plot(gammavals,accuracy_rbf)
plt.xlabel('gamma')
plt.ylabel('accuracy')
maxgamma=gammavals[numpy.where(accuracy_rbf==numpy.max(accuracy_rbf))]
if len(maxgamma)>1:
maxgamma=maxgamma[0]
print 'Best gamma:', maxgamma
clf=sklearn.svm.SVC(kernel='rbf',gamma=maxgamma)
acc,_=classify(d,cl,clf,cv)
print 'Accuracy:',acc
plot_cls_with_decision_surface(d,cl,clf)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let's look at some classification methods.
Step2: Exercise
Step3: Exercise
Step4: Now write a loop that does this using 100 different randomly generated datasets, and plot the mean across datasets. This will take a couple of minutes to run.
Step5: Linear discriminant analysis
Step6: Logistic regression
Step7: Support vector machines
Step8: Exercise
Step9: Exercise
Step10: Plot the boundary for the classifier with the best performance
|
12,114
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
def print_sum(a, b):
    """Print the sum of *a* and *b* to stdout."""
    total = a + b
    print(total)
# YOUR CODE HERE
interact(print_sum, a=(-10.0, 10.0, 0.1), b=(-8, 8, 2));
assert True # leave this for grading the print_sum exercise
def print_string(s, length=False):
    """Print *s*; if *length* is true, also print len(s) on the next line."""
    print(s)
    if length:  # idiomatic truth test instead of `== True`
        print(len(s))
# YOUR CODE HERE
interact(print_string, Length=True, s="");
assert True # leave this for grading the print_string exercise
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Interact basics
Step2: Use the interact function to interact with the print_sum function.
Step3: Write a function named print_string that prints a string and additionally prints the length of that string if a boolean parameter is True.
Step4: Use the interact function to interact with the print_string function.
|
12,115
|
<ASSISTANT_TASK:>
Python Code:
% matplotlib inline
import pylab
import numpy as np
import pandas as pd
from hmmlearn.hmm import GaussianHMM
df = pd.read_csv("speed.csv", sep = ",")
df.head(5)
x = df.drop(["row", "corr"], axis = 1)
x["prev"] = pd.factorize(x["prev"])[0]
model = GaussianHMM(n_components=2, n_iter=10000, random_state=1).fit(x)
model.monitor_
states = model.predict(x)
pd.Series(states).value_counts()
model.means_[:, 0]
fig = pylab.figure(figsize=(20, 1))
ax = fig.add_subplot(111)
ax.grid(True)
ax.set_xlabel("Record number")
ax.set_ylabel("State")
ax.plot(states)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import data
Step2: Drop the row number and "corr" so we can focus on the influence of "prev" and "Pacc" on "rt." Also define "prev" as a factor.
Step3: Create model
Step4: Predict the hidden state for each record and get count of predicted states.
Step5: Get the mean reaction time (rt) for each of the two states.
Step6: Visualize results
|
12,116
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os
import sys
sys.path.append(os.path.join('..', '..'))
from data_models.parameters import arl_path
results_dir = arl_path('test_results')
from matplotlib import pylab
pylab.rcParams['figure.figsize'] = (10.0, 10.0)
pylab.rcParams['image.cmap'] = 'rainbow'
from matplotlib import pyplot as plt
import numpy
from astropy.coordinates import SkyCoord
from astropy.time import Time
from astropy import units as u
from astropy.wcs.utils import pixel_to_skycoord
from data_models.polarisation import PolarisationFrame
from wrappers.serial.image.iterators import image_raster_iter
from wrappers.serial.visibility.base import create_visibility
from wrappers.serial.visibility.operations import sum_visibility
from wrappers.serial.visibility.iterators import vis_timeslices, vis_wslices
from wrappers.serial.simulation.configurations import create_named_configuration
from wrappers.serial.skycomponent.operations import create_skycomponent, find_skycomponents, \
find_nearest_skycomponent, insert_skycomponent
from wrappers.serial.image.operations import show_image, export_image_to_fits, qa_image, smooth_image
from wrappers.serial.simulation.configurations import create_named_configuration
from wrappers.serial.imaging.base import advise_wide_field, create_image_from_visibility, \
predict_skycomponent_visibility
from wrappers.arlexecute.griddata.kernels import create_awterm_convolutionfunction
from wrappers.arlexecute.griddata.convolution_functions import apply_bounding_box_convolutionfunction
# Use workflows for imaging
from wrappers.arlexecute.execution_support.arlexecute import arlexecute
from workflows.shared.imaging.imaging_shared import imaging_contexts
from workflows.arlexecute.imaging.imaging_arlexecute import predict_list_arlexecute_workflow, \
invert_list_arlexecute_workflow
import logging
log = logging.getLogger()
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler(sys.stdout))
# NOTE(review): these rcParams duplicate the settings made just after the
# imports above — harmless, but one of the two could be dropped.
pylab.rcParams['figure.figsize'] = (12.0, 12.0)
pylab.rcParams['image.cmap'] = 'rainbow'
# SKA1-LOW core station layout, executed through a Dask-backed scheduler.
lowcore = create_named_configuration('LOWBD2-CORE')
arlexecute.set_client(use_dask=True)
# Seven hour-angle snapshots (radians), one 1 MHz channel at 100 MHz.
times = numpy.array([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]) * (numpy.pi / 12.0)
frequency = numpy.array([1e8])
channel_bandwidth = numpy.array([1e6])
reffrequency = numpy.max(frequency)
phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-45.0 * u.deg, frame='icrs', equinox='J2000')
# Create the visibility set: this just makes the uvw, time, antenna pairs and
# weight columns; the vis values themselves are filled further below.
vt = create_visibility(lowcore, times, frequency, channel_bandwidth=channel_bandwidth,
                       weight=1.0, phasecentre=phasecentre,
                       polarisation_frame=PolarisationFrame('stokesI'))
# NOTE(review): ``advice`` is computed but never used below — confirm whether
# the advised cellsize/npixel were meant to feed create_image_from_visibility.
advice = advise_wide_field(vt, wprojection_planes=1)
# Zero the visibilities; predict_skycomponent_visibility fills them later.
vt.data['vis'] *= 0.0
npixel=256
model = create_image_from_visibility(vt, npixel=npixel, cellsize=0.001, nchan=1,
                                     polarisation_frame=PolarisationFrame('stokesI'))
# Lay point sources on a triangular grid of pixel offsets around the centre.
centre = model.wcs.wcs.crpix-1
spacing_pixels = npixel // 8
log.info('Spacing in pixels = %s' % spacing_pixels)
spacing = model.wcs.wcs.cdelt * spacing_pixels
locations = [-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5]
original_comps = []
# We calculate the source positions in pixels and then calculate the
# world coordinates to put in the skycomponent description
for iy in locations:
    for ix in locations:
        if ix >= iy:
            p = int(round(centre[0] + ix * spacing_pixels * numpy.sign(model.wcs.wcs.cdelt[0]))), \
                int(round(centre[1] + iy * spacing_pixels * numpy.sign(model.wcs.wcs.cdelt[1])))
            sc = pixel_to_skycoord(p[0], p[1], model.wcs)
            log.info("Component at (%f, %f) [0-rel] %s" % (p[0], p[1], str(sc)))
            # Flux varies with grid position so each source is identifiable.
            flux = numpy.array([[100.0 + 2.0 * ix + iy * 20.0]])
            comp = create_skycomponent(flux=flux, frequency=frequency, direction=sc,
                                       polarisation_frame=PolarisationFrame('stokesI'))
            original_comps.append(comp)
            insert_skycomponent(model, comp)
# Fill the visibilities by direct (exact) Fourier transform of the components.
predict_skycomponent_visibility(vt, original_comps)
# Convolve with a clean beam so individual sources are visible in the plot.
cmodel = smooth_image(model)
show_image(cmodel)
plt.title("Smoothed model image")
plt.show()
# Consistency check between sky and image coordinates: recover the components
# from the smoothed image and plot recovered-vs-inserted pixel offsets.
comps = find_skycomponents(cmodel, fwhm=1.0, threshold=10.0, npixels=5)
plt.clf()
for i in range(len(comps)):
    ocomp, sep = find_nearest_skycomponent(comps[i].direction, original_comps)
    plt.plot((comps[i].direction.ra.value - ocomp.direction.ra.value)/cmodel.wcs.wcs.cdelt[0],
             (comps[i].direction.dec.value - ocomp.direction.dec.value)/cmodel.wcs.wcs.cdelt[1],
             '.', color='r')
plt.xlabel('delta RA (pixels)')
plt.ylabel('delta DEC (pixels)')
plt.title("Recovered - Original position offsets")
plt.show()
# Build the w-term (AW) convolution function used by the 'wprojection' context.
# The number of w-planes is derived from the maximum |w| (~800 wavelengths,
# with a 10% margin) and the chosen w step.  The original code computed ``nw``
# and ``wstep`` and then hard-coded the same numbers (110, 8) in the call —
# pass the variables instead so the two cannot silently drift apart.
wstep = 8
nw = int(1.1 * 800 / wstep)
gcfcf = create_awterm_convolutionfunction(model, nw=nw, wstep=wstep, oversampling=8,
                                          support=60,
                                          use_aaf=True)
cf = gcfcf[1]
print(cf.data.shape)
plt.clf()
plt.imshow(numpy.real(cf.data[0, 0, 0, 0, 0, :, :]))
plt.title(str(numpy.max(numpy.abs(cf.data[0, 0, 0, 0, 0, :, :]))))
plt.show()
# Clip the kernel support to the bounding box holding all but 1e-3 of the
# energy; this shrinks the gridding kernel (and hence gridding cost) a lot.
cf_clipped = apply_bounding_box_convolutionfunction(cf, fractional_level=1e-3)
print(cf_clipped.data.shape)
gcfcf_clipped = (gcfcf[0], cf_clipped)
plt.clf()
plt.imshow(numpy.real(cf_clipped.data[0, 0, 0, 0, 0, :, :]))
plt.title(str(numpy.max(numpy.abs(cf_clipped.data[0, 0, 0, 0, 0, :, :]))))
plt.show()
# List the available imaging contexts, then exercise a chosen subset and
# compare each gridded prediction against the exact DFT values held in ``vt``.
contexts = imaging_contexts().keys()
print(contexts)
print(gcfcf_clipped[1])
contexts = ['2d', 'facets', 'timeslice', 'wstack', 'wprojection']
for context in contexts:
    print('Processing context %s' % context)
    vtpredict_list =[create_visibility(lowcore, times, frequency, channel_bandwidth=channel_bandwidth,
        weight=1.0, phasecentre=phasecentre, polarisation_frame=PolarisationFrame('stokesI'))]
    model_list = [model]
    vtpredict_list = arlexecute.compute(vtpredict_list, sync=True)
    # Work out the time slicing while we still hold a concrete Visibility:
    # the original code referenced an undefined name ``vtpredict`` inside the
    # 'timeslice' branch (NameError), after the list had already been
    # scattered to the workers.
    ntimeslices = vis_timeslices(vtpredict_list[0], 'auto')
    vtpredict_list = arlexecute.scatter(vtpredict_list)
    if context == 'wprojection':
        # w-projection is expressed as a 2d transform with an AW kernel.
        future = predict_list_arlexecute_workflow(vtpredict_list, model_list, context='2d', gcfcf=[gcfcf_clipped])
    elif context == 'facets':
        future = predict_list_arlexecute_workflow(vtpredict_list, model_list, context=context, facets=8)
    elif context == 'timeslice':
        future = predict_list_arlexecute_workflow(vtpredict_list, model_list, context=context,
                                                  vis_slices=ntimeslices)
    elif context == 'wstack':
        future = predict_list_arlexecute_workflow(vtpredict_list, model_list, context=context, vis_slices=31)
    else:
        future = predict_list_arlexecute_workflow(vtpredict_list, model_list, context=context)
    vtpredict_list = arlexecute.compute(future, sync=True)
    vtpredict = vtpredict_list[0]
    # Plot DFT amplitude, predicted amplitude and their residual vs uv-distance.
    uvdist = numpy.sqrt(vt.data['uvw'][:, 0] ** 2 + vt.data['uvw'][:, 1] ** 2)
    plt.clf()
    plt.plot(uvdist, numpy.abs(vt.data['vis'][:]), '.', color='r', label="DFT")
    plt.plot(uvdist, numpy.abs(vtpredict.data['vis'][:]), '.', color='b', label=context)
    plt.plot(uvdist, numpy.abs(vtpredict.data['vis'][:] - vt.data['vis'][:]), '.', color='g', label="Residual")
    plt.xlabel('uvdist')
    plt.ylabel('Amp Visibility')
    plt.legend()
    plt.show()
# Make the dirty image with each imaging context and evaluate the result:
# recovered flux vs exact DFT flux, and recovered vs inserted positions.
contexts = ['2d', 'facets', 'timeslice', 'wstack', 'wprojection']
for context in contexts:
    targetimage_list = [create_image_from_visibility(vt, npixel=npixel, cellsize=0.001, nchan=1,
                                                polarisation_frame=PolarisationFrame('stokesI'))]
    vt_list = [vt]
    print('Processing context %s' % context)
    if context == 'wprojection':
        # w-projection is expressed as a 2d transform with the clipped AW kernel.
        future = invert_list_arlexecute_workflow(vt_list, targetimage_list, context='2d', gcfcf=[gcfcf_clipped])
    elif context == 'facets':
        future = invert_list_arlexecute_workflow(vt_list, targetimage_list, context=context, facets=8)
    elif context == 'timeslice':
        future = invert_list_arlexecute_workflow(vt_list, targetimage_list, context=context, vis_slices=vis_timeslices(vt, 'auto'))
    elif context == 'wstack':
        future = invert_list_arlexecute_workflow(vt_list, targetimage_list, context=context, vis_slices=31)
    else:
        future = invert_list_arlexecute_workflow(vt_list, targetimage_list, context=context)
    result = arlexecute.compute(future, sync=True)
    # invert returns (image, sum-of-weights) pairs per element; take the image.
    targetimage = result[0][0]
    show_image(targetimage)
    plt.title(context)
    plt.show()
    print("Dirty Image %s" % qa_image(targetimage, context="imaging-fits notebook, using processor %s" % context))
    export_image_to_fits(targetimage, '%s/imaging-fits_dirty_%s.fits' % (results_dir, context))
    # Fractional flux error of each recovered component relative to the DFT
    # value, as a function of distance from the phase centre.
    comps = find_skycomponents(targetimage, fwhm=1.0, threshold=10.0, npixels=5)
    plt.clf()
    for comp in comps:
        distance = comp.direction.separation(model.phasecentre)
        dft_flux = sum_visibility(vt, comp.direction)[0]
        err = (comp.flux[0, 0] - dft_flux) / dft_flux
        plt.plot(distance, err, '.', color='r')
    plt.ylabel('Fractional error of image vs DFT')
    plt.xlabel('Distance from phasecentre (deg)')
    plt.title(
        "Fractional error in %s recovered flux vs distance from phasecentre" %
        context)
    plt.show()
    # Positional offsets (in pixels) between recovered and original components.
    checkpositions = True
    if checkpositions:
        plt.clf()
        for i in range(len(comps)):
            ocomp, sep = find_nearest_skycomponent(comps[i].direction, original_comps)
            plt.plot(
                (comps[i].direction.ra.value - ocomp.direction.ra.value) /
                targetimage.wcs.wcs.cdelt[0],
                (comps[i].direction.dec.value - ocomp.direction.dec.value) /
                targetimage.wcs.wcs.cdelt[1],
                '.',
                color='r')
        plt.xlabel('delta RA (pixels)')
        plt.ylabel('delta DEC (pixels)')
        plt.title("%s: Position offsets" % context)
        plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Construct the SKA1-LOW core configuration
Step2: Use Dask
Step3: We create the visibility. This just makes the uvw, time, antenna1, antenna2, weight columns in a table
Step4: Fill in the visibility with exact calculation of a number of point sources
Step5: Check that the skycoordinate and image coordinate system are consistent by finding the point sources.
Step6: Make the convolution function
Step7: Predict the visibility using the different approaches.
Step8: Make the image using the different approaches. We will evaluate the results using a number of plots
|
12,117
|
<ASSISTANT_TASK:>
Python Code:
import warnings
import matplotlib.pyplot as plt
import pandas
import seaborn
import pycvc
warnings.simplefilter("ignore")
# NOTE(review): ``palette`` is built but never passed to any plot below —
# either pass it via ``palette=palette`` or drop the line.
palette = seaborn.color_palette('deep', n_colors=6)
seaborn.set(style='ticks', context='paper', rc={'text.usetex': False})
%matplotlib inline
# Tidy per-storm hydrologic records exported by an earlier processing step.
hydro = pandas.read_csv('output/tidy/hydro_simple.csv')
# Histograms split by site (hue) and presence of outflow (row).
fg = pycvc.viz.hydro_histogram(hydro, hue='site', row='has_outflow', save=False)
pycvc.viz.savefig(fg.fig, 'Hist_Site_Outflow', extra='HydroHistogram')
# Site (row) x outflow (column) x season (hue).
fg = pycvc.viz.hydro_histogram(hydro, row='site', col='has_outflow', hue='season',
                               hue_order=['winter', 'spring', 'summer', 'autumn'],
                               margin_titles=True, palette='BrBG_r', save=False)
pycvc.viz.savefig(fg.fig, 'Hist_Site_Outflow_Season', extra='HydroHistogram')
# Year (hue) x site (columns, wrapped two wide).
fg = pycvc.viz.hydro_histogram(hydro, col='site', hue='year', col_wrap=2, save=False)
pycvc.viz.savefig(fg.fig, 'Hist_Site_Year', extra='HydroHistogram')
# ED-1 only.
fg = pycvc.viz.hydro_histogram(hydro.query("site == 'ED-1'"), palette='Blues', save=False)
pycvc.viz.savefig(fg.fig, 'Hist_ED1only', extra='HydroHistogram')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Tidy Hydrologic Data
Step2: Split by site name (color) and presence of outflow (row)
Step3: Split by site (row), presence of outflow (column), and season (hue)
Step4: Split by years (hue) and outflow (columns, wrapped at 2 wide)
Step5: Just look at ED-1
|
12,118
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
%matplotlib inline
url = 'http://commondatastorage.googleapis.com/books1000/'
# Last percentage printed; module-level so successive hook calls can detect change.
last_percent_reported = None
def download_progress_hook(count, blockSize, totalSize):
    """Report download progress on stdout.

    Intended as the ``reporthook`` for ``urlretrieve``: prints the percentage
    at every 5% milestone and a dot for every other 1% change, so users on
    slow connections can see the download is alive.

    (Fix: the dump had lost the triple quotes around this docstring, leaving
    bare prose lines inside the function body — a SyntaxError.)
    """
    global last_percent_reported
    percent = int(count * blockSize * 100 / totalSize)
    if last_percent_reported != percent:
        if percent % 5 == 0:
            sys.stdout.write("%s%%" % percent)
            sys.stdout.flush()
        else:
            sys.stdout.write(".")
            sys.stdout.flush()
        last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
    """Download ``filename`` from ``url`` if not present, then verify its size.

    Returns the filename on success; raises ``Exception`` when the on-disk
    size does not match ``expected_bytes``.  Set ``force=True`` to re-download
    an existing file.

    (Fix: the dump had lost the triple quotes around this docstring, leaving a
    bare prose line inside the function body — a SyntaxError.)
    """
    if force or not os.path.exists(filename):
        print('Attempting to download:', filename)
        filename, _ = urlretrieve(url + filename, filename, reporthook=download_progress_hook)
        print('\nDownload Complete!')
    statinfo = os.stat(filename)
    if statinfo.st_size == expected_bytes:
        print('Found and verified', filename)
    else:
        raise Exception(
            'Failed to verify ' + filename + '. Can you get to it with a browser?')
    return filename
# Fetch (if needed) and size-verify both notMNIST archives.
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
num_classes = 10
np.random.seed(133)
def maybe_extract(filename, force=False):
    """Extract the tar.gz archive (once) and return the per-class folders."""
    root = os.path.splitext(os.path.splitext(filename)[0])[0]  # remove .tar.gz
    if os.path.isdir(root) and not force:
        # You may override by setting force=True.
        print('%s already present - Skipping extraction of %s.' % (root, filename))
    else:
        print('Extracting data for %s. This may take a while. Please wait.' % root)
        tar = tarfile.open(filename)
        sys.stdout.flush()
        tar.extractall()
        tar.close()
    # One sub-folder per letter class (A..J), sorted for stable label order.
    data_folders = [
        os.path.join(root, d) for d in sorted(os.listdir(root))
        if os.path.isdir(os.path.join(root, d))]
    if len(data_folders) != num_classes:
        raise Exception(
            'Expected %d folders, one per class. Found %d instead.' % (
                num_classes, len(data_folders)))
    print(data_folders)
    return data_folders
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
# Problem 1 sanity check: eyeball a few extracted sample images.
display(Image(filename="notMNIST_small/A/Q0NXaWxkV29yZHMtQm9sZEl0YWxpYy50dGY=.png"))
display(Image(filename="notMNIST_small/B/Q2FsaWd1bGEgUmVndWxhci50dGY=.png"))
display(Image(filename="notMNIST_small/C/QmVlc2tuZWVzQy5vdGY=.png"))
display(Image(filename="notMNIST_large/E/a2VhZ2FuLnR0Zg==.png"))
image_size = 28 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
def load_letter(folder, min_num_images):
    """Load the data for a single letter label.

    Reads every image file in ``folder`` into a float32 tensor, normalising
    pixel values to approximately [-0.5, 0.5].  Unreadable files are skipped
    with a warning; raises ``Exception`` when fewer than ``min_num_images``
    valid images remain.

    (Fix: the dump had lost the triple quotes around this docstring, leaving a
    bare prose line inside the function body — a SyntaxError.)
    """
    image_files = os.listdir(folder)
    dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
                         dtype=np.float32)
    print(folder)
    num_images = 0
    for image in image_files:
        image_file = os.path.join(folder, image)
        try:
            # NOTE(review): scipy.ndimage.imread was removed in SciPy 1.2;
            # newer environments need imageio.imread — confirm the pinned
            # SciPy version before upgrading.
            image_data = (ndimage.imread(image_file).astype(float) -
                          pixel_depth / 2) / pixel_depth
            if image_data.shape != (image_size, image_size):
                raise Exception('Unexpected image shape: %s' % str(image_data.shape))
            dataset[num_images, :, :] = image_data
            num_images = num_images + 1
        except IOError as e:
            print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
    # Trim the tensor to the number of images actually loaded.
    dataset = dataset[0:num_images, :, :]
    if num_images < min_num_images:
        raise Exception('Many fewer images than expected: %d < %d' %
                        (num_images, min_num_images))
    print('Full dataset tensor:', dataset.shape)
    print('Mean:', np.mean(dataset))
    print('Standard deviation:', np.std(dataset))
    return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
    """Pickle each class folder to ``<folder>.pickle`` (once); return the names."""
    dataset_names = []
    for folder in data_folders:
        set_filename = folder + '.pickle'
        dataset_names.append(set_filename)
        if os.path.exists(set_filename) and not force:
            # You may override by setting force=True.
            print('%s already present - Skipping pickling.' % set_filename)
        else:
            print('Pickling %s.' % set_filename)
            dataset = load_letter(folder, min_num_images_per_class)
            try:
                with open(set_filename, 'wb') as f:
                    pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
            except Exception as e:
                # Best effort: a failed pickle for one class does not abort the rest.
                print('Unable to save data to', set_filename, ':', e)
    return dataset_names
train_datasets = maybe_pickle(train_folders, 45000)
test_datasets = maybe_pickle(test_folders, 1800)
# Problem 2 sanity check: display one random sample from a couple of classes.
pickle_file = train_datasets[7] # index 0 should be all As, 1 = all Bs, etc.
with open(pickle_file, 'rb') as f:
    letter_set = pickle.load(f) # unpickle
sample_idx = np.random.randint(len(letter_set)) # pick a random image index
sample_image = letter_set[sample_idx, :, :] # extract a 2D slice
plt.figure()
plt.imshow(sample_image) # display it
pickle_file = train_datasets[9] # index 0 should be all As, 1 = all Bs, etc.
with open(pickle_file, 'rb') as f:
    letter_set = pickle.load(f) # unpickle
sample_idx = np.random.randint(len(letter_set)) # pick a random image index
sample_image = letter_set[sample_idx, :, :] # extract a 2D slice
plt.figure()
plt.imshow(sample_image) # display it
def make_arrays(nb_rows, img_size):
    """Allocate an (uninitialised) image tensor and its label vector.

    Returns ``(None, None)`` when ``nb_rows`` is zero/falsy, so callers can
    skip the validation split entirely.
    """
    if not nb_rows:
        return None, None
    dataset = np.ndarray(shape=(nb_rows, img_size, img_size), dtype=np.float32)
    labels = np.ndarray(shape=nb_rows, dtype=np.int32)
    return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
    """Merge per-class pickles into train/validation arrays with labels.

    Each class contributes the first ``valid_size // num_classes`` shuffled
    images to validation and the next ``train_size // num_classes`` to
    training.  Returns (valid_dataset, valid_labels, train_dataset,
    train_labels); the validation pair is None when valid_size is 0.
    """
    num_classes = len(pickle_files)
    valid_dataset, valid_labels = make_arrays(valid_size, image_size)
    train_dataset, train_labels = make_arrays(train_size, image_size)
    vsize_per_class = valid_size // num_classes
    tsize_per_class = train_size // num_classes
    # Running write cursors into the output arrays (one stride per class).
    start_v, start_t = 0, 0
    end_v, end_t = vsize_per_class, tsize_per_class
    end_l = vsize_per_class+tsize_per_class
    for label, pickle_file in enumerate(pickle_files):
        try:
            with open(pickle_file, 'rb') as f:
                letter_set = pickle.load(f)
                # let's shuffle the letters to have random validation and training set
                np.random.shuffle(letter_set)
                if valid_dataset is not None:
                    valid_letter = letter_set[:vsize_per_class, :, :]
                    valid_dataset[start_v:end_v, :, :] = valid_letter
                    valid_labels[start_v:end_v] = label
                    start_v += vsize_per_class
                    end_v += vsize_per_class
                train_letter = letter_set[vsize_per_class:end_l, :, :]
                train_dataset[start_t:end_t, :, :] = train_letter
                train_labels[start_t:end_t] = label
                start_t += tsize_per_class
                end_t += tsize_per_class
        except Exception as e:
            print('Unable to process data from', pickle_file, ':', e)
            raise
    return valid_dataset, valid_labels, train_dataset, train_labels
train_size = 200000
valid_size = 10000
test_size = 10000
valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
    train_datasets, train_size, valid_size)
_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
def randomize(dataset, labels):
    """Shuffle ``dataset`` and ``labels`` with one shared random permutation."""
    order = np.random.permutation(labels.shape[0])
    return dataset[order, :, :], labels[order]
# Shuffle all three splits so training and test label distributions match.
train_dataset, train_labels = randomize(train_dataset, train_labels)
test_dataset, test_labels = randomize(test_dataset, test_labels)
valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)
# Class-balance check: per-class counts in the training set...
plt.hist(train_labels);
plt.title('Number of images for each class in training set');
axes = plt.gca();
axes.set_ylim([0,22000]);
# ...and in the test set.
plt.hist(test_labels);
plt.title('Number of images for each class in test set');
axes = plt.gca();
axes.set_ylim([0,1100]);
# Persist all three splits into one pickle for later notebooks.
pickle_file = 'notMNIST.pickle'
try:
    # ``with`` guarantees the file handle is closed even if pickling fails —
    # the original opened the file and only closed it on the success path.
    with open(pickle_file, 'wb') as f:
        save = {
            'train_dataset': train_dataset,
            'train_labels': train_labels,
            'valid_dataset': valid_dataset,
            'valid_labels': valid_labels,
            'test_dataset': test_dataset,
            'test_labels': test_labels,
        }
        pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
    print('Unable to save data to', pickle_file, ':', e)
    raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
# Baseline 1: multinomial logistic regression on the flattened 28x28 images.
flatten_train_dataset = train_dataset.reshape((train_size, image_size*image_size))
clf = LogisticRegression(multi_class='multinomial', solver='lbfgs', random_state=42, max_iter=1000)
clf.fit(flatten_train_dataset, train_labels)
from sklearn import metrics
pred = clf.predict(test_dataset.reshape(test_size, image_size*image_size))
metrics.confusion_matrix(test_labels, pred)
metrics.accuracy_score(test_labels, pred)
# Baseline 2: a single-hidden-layer MLP with early stopping on a held-out slice.
from sklearn.neural_network import MLPClassifier
clf1 = MLPClassifier(activation = 'relu', solver = 'sgd', random_state = 444, early_stopping = True,
                     learning_rate_init = .01)
clf1.fit(flatten_train_dataset, train_labels)
pred1 = clf1.predict(test_dataset.reshape(test_size, image_size*image_size))
metrics.confusion_matrix(test_labels, pred1)
metrics.accuracy_score(test_labels, pred1)
image_size = 28
num_labels = 10
def reformat(dataset, labels):
    """Flatten images to 784-vectors and one-hot encode the labels (float32)."""
    dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
    # Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]
    labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
    return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
import tensorflow as tf
# One-hidden-layer network (1024 ReLU units) with dropout and L2 regularisation,
# built as a TensorFlow 1.x static graph.
num_nodes= 1024
batch_size = 128
beta = .0005
graph = tf.Graph()
with graph.as_default():
    # Input data. For the training data, we use a placeholder that will be fed
    # at run time with a training minibatch.
    tf_train_dataset = tf.placeholder(tf.float32,
                                    shape=(batch_size, image_size * image_size))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)
    # Variables.
    weights_1 = tf.Variable(
        tf.truncated_normal([image_size * image_size, num_nodes]))
    biases_1 = tf.Variable(tf.zeros([num_nodes]))
    weights_2 = tf.Variable(
        tf.truncated_normal([num_nodes, num_labels]))
    biases_2 = tf.Variable(tf.zeros([num_labels]))
    # Training computation.  Dropout (keep-prob 0.5) is applied only on the
    # training path; the validation/test paths below intentionally omit it.
    drop_layer=tf.nn.dropout(tf.nn.relu(tf.matmul(tf_train_dataset, weights_1) + biases_1),0.5)
    logits = tf.matmul(drop_layer, weights_2) + biases_2
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf_train_labels))
    # L2 penalty on both weight matrices.
    loss += beta * (tf.nn.l2_loss(weights_1) + tf.nn.l2_loss(weights_2))
    # Optimizer: SGD with exponential learning-rate decay (x0.98 every 1250 steps).
    global_step = tf.Variable(0)  # count the number of steps taken.
    learning_rate = tf.train.exponential_decay(0.5, global_step, 1250, .98)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)
    # Predictions for the training, validation, and test data.
    train_prediction = tf.nn.softmax(logits)
    valid_prediction = tf.nn.softmax(
        tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, weights_1) + biases_1), weights_2) + biases_2)
    test_prediction = tf.nn.softmax(
        tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, weights_1) + biases_1), weights_2) + biases_2)
num_steps = 15000
def accuracy(predictions, labels):
    """Percentage of rows where the arg-max prediction matches the one-hot label."""
    hits = np.argmax(predictions, 1) == np.argmax(labels, 1)
    return 100.0 * np.sum(hits) / predictions.shape[0]
# Train the fully-connected graph with minibatch SGD and report progress.
with tf.Session(graph=graph) as session:
    # NOTE(review): initialize_all_variables is the deprecated TF 1.x spelling
    # of global_variables_initializer — fine for the TF version pinned here.
    tf.initialize_all_variables().run()
    print("Initialized")
    for step in range(num_steps):
        # Pick an offset within the training data, which has been randomized.
        # Note: we could use better randomization across epochs.
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        # Generate a minibatch.
        batch_data = train_dataset[offset:(offset + batch_size), :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        # Prepare a dictionary telling the session where to feed the minibatch.
        # The key of the dictionary is the placeholder node of the graph to be fed,
        # and the value is the numpy array to feed to it.
        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
        _, l, predictions = session.run(
            [optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 1000 == 0):
            print("Minibatch loss at step %d: %f" % (step, l))
            print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
            print("Validation accuracy: %.1f%%" % accuracy(
                valid_prediction.eval(), valid_labels))
    print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
image_size = 28
num_labels = 10
num_channels = 1 # grayscale
def reformat(dataset, labels):
    """Reshape images to (N, 28, 28, 1) and one-hot encode labels (float32)."""
    dataset = dataset.reshape(
        (-1, image_size, image_size, num_channels)).astype(np.float32)
    labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
    return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
# Two-conv-layer network (TF 1.x static graph) with L2 regularisation and
# momentum SGD.  Overall spatial downsampling is 4x (stride-2 conv, then a
# 2x2 max-pool), matching the ``image_size // 4`` used for layer3_weights.
batch_size = 128
patch_size = 5
depth = 16
num_hidden = 100
beta = .0005  # L2 regularisation strength
graph = tf.Graph()
with graph.as_default():
    # Input data.
    tf_train_dataset = tf.placeholder(
        tf.float32, shape=(batch_size, image_size, image_size, num_channels))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)
    # Variables.
    layer1_weights = tf.Variable(tf.truncated_normal(
        [patch_size, patch_size, num_channels, depth], stddev=0.1))
    layer1_biases = tf.Variable(tf.zeros([depth]))
    layer2_weights = tf.Variable(tf.truncated_normal(
        [patch_size, patch_size, depth, depth], stddev=0.1))
    layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth]))
    layer3_weights = tf.Variable(tf.truncated_normal(
        [image_size // 4 * image_size // 4 * depth, num_hidden], stddev=0.1))
    layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))
    layer4_weights = tf.Variable(tf.truncated_normal(
        [num_hidden, num_labels], stddev=0.1))
    layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))
    # Model: conv(stride 1) -> relu -> conv(stride 2) -> relu -> maxpool(2x2)
    # -> fully connected -> logits.
    def model(data):
        conv = tf.nn.conv2d(data, layer1_weights, [1, 1, 1, 1], padding='SAME')
        hidden = tf.nn.relu(conv + layer1_biases)
        # (Fix: the original computed a max_pool of ``hidden`` here whose
        # result was never used — the next conv always consumed ``hidden``
        # directly.  The dead graph node has been removed; behaviour is
        # unchanged.)
        conv = tf.nn.conv2d(hidden, layer2_weights, [1, 2, 2, 1], padding='SAME')
        hidden = tf.nn.relu(conv + layer2_biases)
        pool = tf.nn.max_pool(hidden, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        shape = pool.get_shape().as_list()
        reshape = tf.reshape(pool, [shape[0], shape[1] * shape[2] * shape[3]])
        hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)
        return tf.matmul(hidden, layer4_weights) + layer4_biases
    # Training computation: cross-entropy plus L2 on all weights and biases.
    logits = model(tf_train_dataset)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf_train_labels))
    loss += beta * (tf.nn.l2_loss(layer1_weights) + tf.nn.l2_loss(layer1_biases)
                    + tf.nn.l2_loss(layer2_weights) + tf.nn.l2_loss(layer2_biases)
                    + tf.nn.l2_loss(layer3_weights) + tf.nn.l2_loss(layer3_biases)
                    + tf.nn.l2_loss(layer4_weights) + tf.nn.l2_loss(layer4_biases))
    # Optimizer: momentum SGD with exponential per-epoch learning-rate decay.
    global_step = tf.Variable(0)  # count the number of steps taken.
    learning_rate = tf.train.exponential_decay(0.01, global_step * batch_size, train_labels.shape[0], .95)
    optimizer = tf.train.MomentumOptimizer(learning_rate,.95).minimize(loss,global_step=global_step)
    # Predictions for the training, validation, and test data.
    train_prediction = tf.nn.softmax(logits)
    valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
    test_prediction = tf.nn.softmax(model(tf_test_dataset))
def accuracy(predictions, labels):
    """Return top-1 accuracy (percent) of ``predictions`` against one-hot ``labels``."""
    correct = np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
    return 100.0 * correct / predictions.shape[0]
num_steps = 3001
# Train the convolutional graph with minibatch momentum SGD.
with tf.Session(graph=graph) as session:
    # NOTE(review): initialize_all_variables is the deprecated TF 1.x spelling
    # of global_variables_initializer — fine for the TF version pinned here.
    tf.initialize_all_variables().run()
    print('Initialized')
    for step in range(num_steps):
        # Sequential minibatches over the (pre-shuffled) training set.
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
        _, l, predictions = session.run(
            [optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 100 == 0):
            print('Minibatch loss at step %d: %f' % (step, l))
            print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))
            print('Validation accuracy: %.1f%%' % accuracy(
                valid_prediction.eval(), valid_labels))
    print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Deep Learning
Step3: Problem 1
Step5: Now let's load the data in a more manageable format. Since, depending on your computer setup you might not be able to fit it all in memory, we'll load each class into a separate dataset, store them on disk and curate them independently. Later we'll merge them into a single dataset of manageable size.
Step6: Problem 2
Step7: Merge and prune the training data as needed. Depending on your computer setup, you might not be able to fit it all in memory, and you can tune train_size as needed. The labels will be stored into a separate array of integers 0 through 9.
Step8: Next, we'll randomize the data. It's important to have the labels well shuffled for the training and test distributions to match.
Step9: Another check
Step10: Let's get an idea of what an off-the-shelf classifier can give you on this data. It's always good to check that there is something to learn, and that it's a problem that is not so trivial that a canned solution solves it.
Step11: The logistic regression classifier resulted in an accuracy score of ~90% which means there is definitely something to be learned from this data. Still I think we can do better so I will move on to more complex classifiers and see if I can improve the results.
Step12: The MLPClassifier resulted in an accuracy of 94.77%. This is a significant improvement over the logistic regression classifier.
Step13: The deep neural network did not have quite as high of an accuracy score as the sklearn MLP Classifier (94.2% vs 94.8%)
|
12,119
|
<ASSISTANT_TASK:>
Python Code:
!(date +%d\ %B\ %G)
%matplotlib inline
import numpy as np
import seaborn as sns
import time
from pyspark import SparkContext
from pyspark import SparkConf
from matplotlib import pyplot as plt
from pyspark.ml.feature import StandardScaler
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.feature import OneHotEncoder
from pyspark.ml.feature import MinMaxScaler
from pyspark.ml.feature import StringIndexer
from distkeras.transformers import *
from distkeras.utils import *
# ---------------------------------------------------------------------------
# Job configuration.
# Modify these variables according to your needs.
# ---------------------------------------------------------------------------
application_name = "MNIST Preprocessing"
using_spark_2 = False          # Toggle between the Spark 2.x and 1.x APIs.
local = False                  # Run on local resources instead of YARN.
path_train = "data/mnist_train.csv"
path_test = "data/mnist_test.csv"
if local:
    # Tell master to use local resources.
    master = "local[*]"
    num_processes = 3
    num_executors = 1
else:
    # Tell master to use YARN.
    master = "yarn-client"
    num_executors = 20
    num_processes = 1
# This variable is derived from the number of cores and executors, and will be
# used to assign the number of model trainers.
num_workers = num_executors * num_processes
# FIX: the original used Python-2-only backtick repr (`x`), which is a
# SyntaxError on Python 3; str() behaves the same for these ints on both.
print("Number of desired executors: " + str(num_executors))
print("Number of desired processes / executor: " + str(num_processes))
print("Total number of workers: " + str(num_workers))
import os
# Use the DataBricks CSV reader, this has some nice functionality regarding invalid values.
os.environ['PYSPARK_SUBMIT_ARGS'] = '--packages com.databricks:spark-csv_2.10:1.4.0 pyspark-shell'
conf = SparkConf()
conf.set("spark.app.name", application_name)
conf.set("spark.master", master)
conf.set("spark.executor.cores", str(num_processes))
conf.set("spark.executor.instances", str(num_executors))
conf.set("spark.executor.memory", "20g")
# NOTE(review): memoryOverhead is specified in megabytes; "2" MB looks far too
# low next to 20g of executor memory -- confirm the intended value.
conf.set("spark.yarn.executor.memoryOverhead", "2")
conf.set("spark.locality.wait", "0")
conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
# Check if the user is running Spark 2.0 +
if using_spark_2:
    # FIX: SparkSession was referenced without ever being imported (NameError
    # when using_spark_2 is True). Import lazily here because the class only
    # exists in Spark 2.x installations.
    from pyspark.sql import SparkSession
    sc = SparkSession.builder.config(conf=conf) \
                             .appName(application_name) \
                             .getOrCreate()
else:
    # Create the Spark context.
    sc = SparkContext(conf=conf)
    # Add the missing imports
    from pyspark import SQLContext
    sqlContext = SQLContext(sc)
# Record time of starting point.
time_start = time.time()
# Check if we are using Spark 2.0: Spark 2's SparkSession reads directly,
# Spark 1.x needs the SQLContext wrapper created above.
if using_spark_2:
    reader = sc
else:
    reader = sqlContext
# Read the training set.
raw_dataset_train = reader.read.format('com.databricks.spark.csv') \
                          .options(header='true', inferSchema='true') \
                          .load(path_train)
# Read the test set.
raw_dataset_test = reader.read.format('com.databricks.spark.csv') \
                         .options(header='true', inferSchema='true') \
                         .load(path_test)
# Repartition the datasets so the data is spread over all workers.
raw_dataset_train = raw_dataset_train.repartition(num_workers)
raw_dataset_test = raw_dataset_test.repartition(num_workers)
# First, we would like to extract the desired features from the raw dataset.
# We do this by constructing a list with all desired columns (every column
# except the label).
features = raw_dataset_train.columns
features.remove('label')
# Next, we use Spark's VectorAssembler to "assemble" (create) a vector of all desired features.
# http://spark.apache.org/docs/latest/ml-features.html#vectorassembler
vector_assembler = VectorAssembler(inputCols=features, outputCol="features")
# This transformer will take all columns specified in features, and create an additional column "features" which will contain all the desired features aggregated into a single vector.
training_set = vector_assembler.transform(raw_dataset_train)
test_set = vector_assembler.transform(raw_dataset_test)
# Define the number of output classes (digits 0-9).
nb_classes = 10
# One-hot encode the integer label into a length-10 vector column.
encoder = OneHotTransformer(nb_classes, input_col="label", output_col="label_encoded")
training_set = encoder.transform(training_set)
test_set = encoder.transform(test_set)
# Clear the datasets in the case you ran this cell before.
training_set = training_set.select("features", "label", "label_encoded")
test_set = test_set.select("features", "label", "label_encoded")
# Allocate a MinMaxTransformer using Distributed Keras: rescale pixel values
# from [o_min, o_max] to [n_min, n_max].
# o_min -> original_minimum
# n_min -> new_minimum
transformer = MinMaxTransformer(n_min=0.0, n_max=1.0, \
                                o_min=0.0, o_max=250.0, \
                                input_col="features", \
                                output_col="features_normalized")
# Transform the datasets.
training_set = transformer.transform(training_set)
test_set = transformer.transform(test_set)
# Reshape the flat 784-pixel vector into a 28x28x1 matrix column (for
# convolutional models).
reshape_transformer = ReshapeTransformer("features_normalized", "matrix", (28, 28, 1))
training_set = reshape_transformer.transform(training_set)
test_set = reshape_transformer.transform(test_set)
# Densify the (possibly sparse) normalized feature vector.
dense_transformer = DenseTransformer(input_col="features_normalized", output_col="features_normalized_dense")
training_set = dense_transformer.transform(training_set)
test_set = dense_transformer.transform(test_set)
# Artificially enlarge the training set: after the loop the frame holds the
# original data plus `expansion` unions of itself (11 copies in total).
df = training_set
expansion = 10
for i in range(0, expansion):
    df = df.unionAll(training_set)
training_set = df
training_set.cache()
# Persist the preprocessed datasets to HDFS as Parquet.
training_set.write.parquet("data/mnist_train.parquet")
test_set.write.parquet("data/mnist_test.parquet")
# Record end of transformation.
time_end = time.time()
dt = time_end - time_start
print("Took " + str(dt) + " seconds.")
# IPython shell escapes: remove the Parquet output directories.
# NOTE(review): as ordered here these delete what was just written --
# presumably meant to be run *before* re-executing the write above; confirm.
!hdfs dfs -rm -r data/mnist_test.parquet
!hdfs dfs -rm -r data/mnist_train.parquet
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Preparation
Step2: In the following cell, adapt the parameters to fit your personal requirements.
Step3: As shown in the output of the cell above, we see that every pixel is associated with a separate column. In order to ensure compatibility with Apache Spark, we vectorize the columns, and add the resulting vectors as a separate column. However, in order to achieve this, we first need a list of the required columns. This is shown in the cell below.
Step4: Once we have a list of column names, we can pass this to Spark's VectorAssembler. This VectorAssembler will take a list of features, vectorize them, and place them in a column defined in outputCol.
Step5: Once we have the inputs for our Neural Network (features column) after applying the VectorAssembler, we should also define the outputs. Since we are dealing with a classification task, the output of our Neural Network should be a one-hot encoded vector with 10 elements. For this, we provide a OneHotTransformer which accomplish this exact task.
Step6: MNIST
Step7: Convolutions
Step8: Dense Transformation
Step9: Artificial Enlargement
Step10: Writing to HDFS
|
12,120
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import tensorflow as tf
import tflearn
from tflearn.data_utils import to_categorical
# Load raw reviews and their sentiment labels (one per line, no header).
reviews = pd.read_csv('reviews.txt', header=None)
labels = pd.read_csv('labels.txt', header=None)
from collections import Counter
# Count every whitespace-split token across all reviews.
total_counts = Counter(word for review in reviews.values for word in review[0].split(' '))
print("Total words in data set: ", len(total_counts))
# Keep the 10,000 most frequent words as the vocabulary.
vocab = sorted(total_counts, key=total_counts.get, reverse=True)[:10000]
print(vocab[:60])
# Sanity check: how rare is the least-frequent word we kept?
print(vocab[-1], ': ', total_counts[vocab[-1]])
# Map each vocabulary word to its index (its position in the count vector).
word2idx = {word: idx for idx,word in enumerate(vocab)}
def text_to_vector(text):
    """Convert *text* into a bag-of-words count vector over the vocabulary.

    Position ``i`` of the returned array counts how often ``vocab[i]``
    occurs in the text; tokens outside the 10,000-word vocabulary are
    ignored.  (The original body was an unimplemented ``pass``, which made
    every downstream cell fail.)
    """
    word_vector = np.zeros(len(vocab), dtype=np.int_)
    for word in text.split(' '):
        idx = word2idx.get(word)
        if idx is not None:
            word_vector[idx] += 1
    return word_vector
# Quick smoke test of the vectorizer (shows the first 65 counts).
text_to_vector('The tea is for a party to celebrate '
               'the movie so she has no time for a cake')[:65]
# Vectorize every review into one row of a (n_reviews, vocab_size) matrix.
word_vectors = np.zeros((len(reviews), len(vocab)), dtype=np.int_)
for ii, (_, text) in enumerate(reviews.iterrows()):
    word_vectors[ii] = text_to_vector(text[0])
# Printing out the first 5 word vectors
word_vectors[:5, :23]
# Binary target: 1 for 'positive' labels, 0 otherwise.
Y = (labels=='positive').astype(np.int_)
records = len(labels)
# Random permutation of row indices used to shuffle before splitting.
shuffle = np.arange(records)
np.random.shuffle(shuffle)
# NOTE(review): despite the name, this is the *training* fraction --
# 90% of the rows go to train, the remaining 10% to test.
test_fraction = 0.9
train_split, test_split = shuffle[:int(records*test_fraction)], shuffle[int(records*test_fraction):]
# One-hot encode the binary targets into two columns: [negative, positive].
trainX, trainY = word_vectors[train_split,:], to_categorical(Y.values[train_split], 2)
testX, testY = word_vectors[test_split,:], to_categorical(Y.values[test_split], 2)
trainY
# Network building
def build_model():
    """Build a 2-class sentiment classifier over 10,000-dim bag-of-words input.

    The original body was a ``#### Your code ####`` placeholder that
    referenced an undefined ``net`` (NameError); this fills in a small
    fully-connected TFLearn network matching the data prepared above.
    """
    # This resets all parameters and variables, leave this here
    tf.reset_default_graph()
    # Input layer sized to the bag-of-words vectors built from `vocab`.
    net = tflearn.input_data([None, 10000])
    # Two hidden layers with ReLU activations.
    net = tflearn.fully_connected(net, 200, activation='ReLU')
    net = tflearn.fully_connected(net, 25, activation='ReLU')
    # Softmax output over the two sentiment classes [negative, positive].
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1,
                             loss='categorical_crossentropy')
    model = tflearn.DNN(net)
    return model
model = build_model()
# Training (10% of the training data is held out as a validation set).
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=128, n_epoch=10)
# Column 0 of the softmax output is P(negative); thresholding it at 0.5
# yields a "predicted negative" indicator, which is compared against the
# negative column testY[:,0] -- so the accuracy computation is consistent.
predictions = (np.array(model.predict(testX))[:,0] >= 0.5).astype(np.int_)
test_accuracy = np.mean(predictions == testY[:,0], axis=0)
print("Test accuracy: ", test_accuracy)
# Helper function that uses your model to predict sentiment
def test_sentence(sentence):
    # Index [0][1] picks P(positive) for the single input sentence.
    positive_prob = model.predict([text_to_vector(sentence.lower())])[0][1]
    print('Sentence: {}'.format(sentence))
    print('P(positive) = {:.3f} :'.format(positive_prob),
          'Positive' if positive_prob > 0.5 else 'Negative')
sentence = "Moonlight is by far the best movie of 2016."
test_sentence(sentence)
sentence = "It's amazing anyone could be talented enough to make something this spectacularly awful"
test_sentence(sentence)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Preparing the data
Step2: Counting word frequency
Step3: Let's keep the first 10000 most frequent words. As Andrew noted, most of the words in the vocabulary are rarely used so they will have little effect on our predictions. Below, we'll sort vocab by the count value and keep the 10000 most frequent words.
Step4: What's the last word in our vocabulary? We can use this to judge if 10000 is too few. If the last word is pretty common, we probably need to keep more words.
Step5: The last word in our vocabulary shows up in 30 reviews out of 25000. I think it's fair to say this is a tiny proportion of reviews. We are probably fine with this number of words.
Step6: Text to vector function
Step7: If you do this right, the following code should return
Step8: Now, run through our entire review data set and convert each review to a word vector.
Step9: Train, Validation, Test sets
Step10: Building the network
Step11: Intializing the model
Step12: Training the network
Step13: Testing
Step14: Try out your own text!
|
12,121
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
try:
  # %tensorflow_version only exists in Colab.
  %tensorflow_version 2.x
except Exception:
  pass
import pandas as pd
import tensorflow as tf
# Download the heart-disease CSV (cached by Keras under ~/.keras/datasets).
csv_file = tf.keras.utils.get_file('heart.csv', 'https://storage.googleapis.com/applied-dl/heart.csv')
df = pd.read_csv(csv_file)
df.head()
df.dtypes
# Convert the object-typed 'thal' column to discrete integer category codes.
df['thal'] = pd.Categorical(df['thal'])
df['thal'] = df.thal.cat.codes
df.head()
# Separate the binary target from the feature columns.
target = df.pop('target')
# Build a tf.data pipeline directly from the numpy values of the frame.
dataset = tf.data.Dataset.from_tensor_slices((df.values, target.values))
for feat, targ in dataset.take(5):
  print ('Features: {}, Target: {}'.format(feat, targ))
# A pd.Series converts transparently to a tensor via the __array__ protocol.
tf.constant(df['thal'])
# Shuffle the whole dataset and emit one example per batch.
train_dataset = dataset.shuffle(len(df)).batch(1)
def get_compiled_model():
  """Assemble and compile a small fully-connected binary classifier.

  The network emits a single logit, hence the loss is configured with
  ``from_logits=True``.
  """
  hidden_and_output = [
      tf.keras.layers.Dense(10, activation='relu'),
      tf.keras.layers.Dense(10, activation='relu'),
      tf.keras.layers.Dense(1),
  ]
  classifier = tf.keras.Sequential(hidden_and_output)
  classifier.compile(optimizer='adam',
                     loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
                     metrics=['accuracy'])
  return classifier
model = get_compiled_model()
model.fit(train_dataset, epochs=15)
# Alternative: a functional model that takes one named scalar input per
# dataframe column (so the column structure is preserved).
inputs = {key: tf.keras.layers.Input(shape=(), name=key) for key in df.keys()}
# Stack the scalar inputs into a single feature vector.
x = tf.stack(list(inputs.values()), axis=-1)
x = tf.keras.layers.Dense(10, activation='relu')(x)
output = tf.keras.layers.Dense(1)(x)
model_func = tf.keras.Model(inputs=inputs, outputs=output)
model_func.compile(optimizer='adam',
                   loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
                   metrics=['accuracy'])
# Converting the frame to a dict of column lists keeps per-column structure
# when slicing into a tf.data.Dataset.
dict_slices = tf.data.Dataset.from_tensor_slices((df.to_dict('list'), target.values)).batch(16)
for dict_slice in dict_slices.take(1):
  print (dict_slice)
model_func.fit(dict_slices, epochs=15)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Carregar um pandas.DataFrame
Step2: Fazer download do arquivo csv que contém o conjunto de dados do coração.
Step3: Ler o arquivo csv usando pandas.
Step4: Converta a coluna thal, que é um objeto no dataframe para um valor numérico discreto
Step5: Carregar dados usando o tf.data.Dataset
Step6: Como um pd.Series implementa o protocolo __array__, ele pode ser usado de forma transparente em praticamente qualquer lugar que você usaria um np.array ou um tf.Tensor.
Step7: Aleatório e lote do conjunto de dados.
Step8: Crirar e treinar um modelo
Step9: Alternativa para colunas de características
Step10: A maneira mais fácil de preservar a estrutura da coluna de um pd.DataFrame quando usado com tf.data é converter o pd.DataFrame em um dict e dividir esse dicionário.
|
12,122
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import numpy as np
# Standard and magnetic isochrones at 5, 12, and 30 Myr (GAS07 abundances).
std_iso_05 = np.genfromtxt('files/dmestar_00005.0myr_z+0.00_a+0.00_gas07_t010.iso')
std_iso_12 = np.genfromtxt('files/dmestar_00012.0myr_z+0.00_a+0.00_gas07_t010.iso')
std_iso_30 = np.genfromtxt('files/dmestar_00030.0myr_z+0.00_a+0.00_gas07_t010.iso')
mag_iso_05 = np.genfromtxt('files/dmestar_00005.0myr_gas07_z+0.00_a+0.00_mag25kG.iso')
mag_iso_12 = np.genfromtxt('files/dmestar_00012.0myr_gas07_z+0.00_a+0.00_mag25kG.iso')
mag_iso_30 = np.genfromtxt('files/dmestar_00030.0myr_gas07_z+0.00_a+0.00_mag25kG.iso')
fig, ax = plt.subplots(3, 1, figsize=(8, 12), sharex=True)
ax[2].set_xlabel('Mass ($M_{\\odot}$)', fontsize=20.)
ax[0].set_ylabel('Radius ($R_{\\odot}$)', fontsize=20.)
ax[1].set_ylabel('Temperature (K)', fontsize=20.)
ax[2].set_ylabel('Luminosity ($L_{\\odot}$)', fontsize=20.)
for axis in ax:
    axis.tick_params(which='major', axis='both', length=10., labelsize=16.)
# One (standard, magnetic, color) triple per age: 5 Myr grey, 12 Myr blue,
# 30 Myr dark red.
age_tracks = ((std_iso_05, mag_iso_05, '#555555'),
              (std_iso_12, mag_iso_12, '#1e90ff'),
              (std_iso_30, mag_iso_30, '#800000'))
# Panel -> isochrone column: radius (4), Teff (1), luminosity (3).  All three
# are exponentiated with 10**, i.e. the files apparently store log10 values.
for axis, column in zip(ax, (4, 1, 3)):
    # Standard models: solid lines, one per age.
    for std, _, color in age_tracks:
        axis.plot(std[:, 0], 10**std[:, column], '-', lw=2, color=color)
    # Magnetic models: long-dashed lines, one per age.
    for _, mag, color in age_tracks:
        axis.plot(mag[:, 0], 10**mag[:, column], '--', lw=2, color=color, dashes=(20., 10.))
fig.tight_layout()
# HR diagram: effective temperature against log luminosity.
fig, ax = plt.subplots(1, 1, figsize=(8.0, 8.0))
ax.set_xlabel('Effective Temperature (K)', fontsize=20.)
ax.set_ylabel('$\\log_{10} (L / L_{\\odot})$', fontsize=20.)
# Reversed x-axis so temperature decreases to the right, as is conventional.
ax.set_xlim(5000., 2500.)
ax.tick_params(which='major', axis='both', length=10., labelsize=16.)
hr_tracks = ((std_iso_05, mag_iso_05, '#555555'),
             (std_iso_12, mag_iso_12, '#1e90ff'),
             (std_iso_30, mag_iso_30, '#800000'))
# Standard models first (solid), then magnetic models (dashed), preserving
# the original drawing order.
for std, _, color in hr_tracks:
    ax.plot(10**std[:, 1], std[:, 3], '-', lw=2, color=color)
for _, mag, color in hr_tracks:
    ax.plot(10**mag[:, 1], mag[:, 3], '--', lw=2, color=color, dashes=(20., 10.))
# Lithium abundance A(Li) (column 5) against three different abscissae:
# mass, effective temperature, and log luminosity.
fig, ax = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
ax[0].set_xlabel('Mass ($M_{\\odot}$)', fontsize=20.)
ax[1].set_xlabel('Temperature (K)', fontsize=20.)
ax[2].set_xlabel('$\\log_{10}(L/L_{\\odot})$', fontsize=20.)
ax[0].set_ylabel('A(Li)', fontsize=20.)
for axis in ax:
    axis.set_ylim(1.5, 3.5)
    axis.tick_params(which='major', axis='both', length=10., labelsize=16.)
# Reversed x-axis limits on all three panels.
ax[0].set_xlim(1.0, 0.1)
ax[1].set_xlim(4500., 2500.)
ax[2].set_xlim(0.0, -2.5)
li_tracks = ((std_iso_05, mag_iso_05, '#555555'),
             (std_iso_12, mag_iso_12, '#1e90ff'),
             (std_iso_30, mag_iso_30, '#800000'))
# Per panel: (panel index, data column for x, whether to exponentiate with
# 10** -- only the temperature column is).
panel_specs = ((0, 0, False), (1, 1, True), (2, 3, False))
for panel, column, exponentiate in panel_specs:
    # Standard models: solid lines.
    for std, _, color in li_tracks:
        xdata = 10**std[:, column] if exponentiate else std[:, column]
        ax[panel].plot(xdata, std[:, 5], '-', lw=2, color=color)
    # Magnetic models: long-dashed lines.
    for _, mag, color in li_tracks:
        xdata = 10**mag[:, column] if exponentiate else mag[:, column]
        ax[panel].plot(xdata, mag[:, 5], '--', lw=2, color=color, dashes=(20., 10.))
fig.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Magnetic isochrones were computed earlier. Details can be found in this notebook entry on a small magnetic stellar grid. I'll focus on those computed with the Grevesse, Asplund, & Sauval (2007; henceforth GAS07) solar abundance distribution. Three ages will be examined
Step2: Examining, first, the mass-radius, mass-Teff, and mass-luminosity relationships as a function of age.
Step3: Note that, in the figure above, standard stellar evolution models are shown as solid lines and magnetic stellar evolution models as dashed lines. Ages are indicated by color
Step4: Line styles and colors represent the same model combinations, as before.
|
12,123
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import KRG
# Training data: five scattered one-dimensional observations.
xt = np.array([0.0, 1.0, 2.0, 2.5, 4.0])
yt = np.array([0.0, 1.0, 1.5, 1.1, 1.0])
# Three Kriging variants: noise-free, fixed noise variance, estimated noise.
sm_noise_free = KRG()
sm_noise_fixed = KRG(noise0=[1e-6])
sm_noise_estim = KRG(noise0=[1e-6], eval_noise=True)
models = (sm_noise_free, sm_noise_fixed, sm_noise_estim)
# Train each model on the same data, in declaration order.
for surrogate in models:
    surrogate.set_training_values(xt, yt)
    surrogate.train()
# Predictive mean and variance for every model on a fine grid.
x = np.linspace(0, 4, 100).reshape(-1, 1)
preds = [(surrogate.predict_values(x), surrogate.predict_variances(x))
         for surrogate in models]
# Plot each model's mean with a +/- 3 standard deviation confidence band.
plt.rcParams['figure.figsize'] = [17, 4]
fig, axes = plt.subplots(1, 3)
titles = ('noise-free Kriging model',
          'Kriging model with fixed noise',
          'Kriging model with estimated noise')
for panel, ((mean, var), title) in enumerate(zip(preds, titles)):
    axes[panel].fill_between(np.ravel(x),
                             np.ravel(mean - 3 * np.sqrt(var)),
                             np.ravel(mean + 3 * np.sqrt(var)),
                             alpha=0.2, label='3-sd confidence intervals')
    axes[panel].scatter(xt, yt, label="training data")
    axes[panel].plot(x, mean, label='mean')
    axes[panel].set_title(title)
    if panel == 0:
        # Only the first panel carries the legend, as in the original figure.
        axes[panel].legend(loc=0)
    axes[panel].set_xlabel(r'$x$')
    axes[panel].set_ylabel(r'$y$')
plt.show()
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import KRG
# Ground-truth signal for the toy example.
def target_fun(x):
    """Return cos(5x), the noise-free target of the noisy-Kriging demo."""
    return np.cos(x * 5.0)
nobs = 50 # number of observations
np.random.seed(0) # a seed for reproducibility
xt = np.random.uniform(size=nobs) # design points
# NOTE(review): y_free_noise is never used below -- kept for reference only.
y_free_noise = target_fun(xt) # noise-free observations
# adding a random noise to observations
yt = target_fun(xt) + np.random.normal(scale=0.05, size=nobs)
# training the model (noise variance is estimated from the data)
sm = KRG(eval_noise=True)
sm.set_training_values(xt, yt)
sm.train()
# predictions
x = np.linspace(0, 1, 100).reshape(-1, 1)
y = sm.predict_values(x) # predictive mean
var = sm.predict_variances(x) # predictive variance
# plotting predictions +- 3 std confidence intervals
plt.rcParams['figure.figsize'] = [8, 4]
plt.fill_between(np.ravel(x),
                 np.ravel(y-3*np.sqrt(var)),
                 np.ravel(y+3*np.sqrt(var)),
                 alpha=0.2, label='3-sd confidence intervals')
plt.scatter(xt, yt, label="training noisy data")
plt.plot(x, y, label='mean')
# Overlay the true function for visual comparison with the posterior mean.
plt.plot(x, target_fun(x), label='target function')
plt.title('Kriging model with noise observations')
plt.legend(loc=0)
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.show()
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import KRG
# Training data: five scattered observations.
xt = np.array([0.0, 1.0, 2.0, 2.5, 4.0])
yt = np.array([0.0, 1.0, 1.5, 1.1, 1.0])
# One noise variance per observation (heteroscedastic case).  noise0 must
# have the same length as yt; a length-one list would instead apply the
# same variance everywhere (homoscedastic case).
noise0 = [0.05, 0.001, 0.01, 0.03, 0.05]
sm = KRG(noise0=noise0, use_het_noise=True)
sm.set_training_values(xt, yt)
sm.train()
# Predict the mean and variance on a fine grid.
x = np.linspace(0, 4, 100).reshape(-1, 1)
y = sm.predict_values(x)
var = sm.predict_variances(x)
# Plot the posterior mean with a +/- 3 standard deviation band.
band = 3 * np.sqrt(var)
plt.fill_between(np.ravel(x), np.ravel(y - band), np.ravel(y + band),
                 alpha=0.2, label='3-sd confidence intervals')
plt.scatter(xt, yt, label="training data")
plt.plot(x, y, label='mean')
plt.title('heteroscedastic Kriging model with given noise variances')
plt.legend(loc=0)
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.show()
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import KRG
# defining the training data
xt = np.array([0.0, 1.0, 2.0, 2.5, 4.0])
yt = np.array([0.0, 1.0, 1.5, 1.1, 1.0])
# adding noisy repetitions: four extra noisy copies of the five base points,
# each seeded differently for reproducibility
xt_full = xt.copy()
yt_full = yt.copy()
for i in range(4):
    xt_full = np.concatenate((xt_full, xt))
    np.random.seed(i)
    # NOTE(review): uniform noise here is non-zero-mean (range [0, std(yt))),
    # so the repetitions are biased upward -- presumably fine for a demo.
    yt_full = np.concatenate((yt_full,
                              yt + np.std(yt)*np.random.uniform(size=yt.shape)))
# training the model; with repetitions the per-point noise variance can be
# estimated heteroscedastically
sm = KRG(use_het_noise=True, eval_noise=True)
sm.set_training_values(xt_full, yt_full)
sm.train()
# predictions
x = np.linspace(0, 4, 100).reshape(-1, 1)
y = sm.predict_values(x)
var = sm.predict_variances(x)
# plotting the resulting Kriging model
plt.fill_between(np.ravel(x), np.ravel(y-3*np.sqrt(var)),
                 np.ravel(y+3*np.sqrt(var)), alpha=0.2,
                 label='3-sd confidence intervals')
plt.scatter(xt_full, yt_full, label="training data with repetitions")
plt.plot(x, y, label='mean')
plt.title('heteroscedastic Kriging model with repetitions')
plt.legend(loc=0)
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example 2.2
Step2: 3. Heteroscedastic Kriging example
Step3: Example 3.2
|
12,124
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_iris
import numpy as np
# Load the iris dataset; OpenCV's ML module expects float32 features.
iris = load_iris()
X = iris.data.astype(np.float32)
y = iris.target
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    X, y, random_state=37
)
# Track the best accuracy and the k that achieved it.
best_acc = 0
best_k = 0
import cv2
from sklearn.metrics import accuracy_score
# Naive grid search over k, scored directly on the TEST set (this leaks test
# information into model selection -- the validation-set version below fixes it).
for k in range(1, 20):
    knn = cv2.ml.KNearest_create()
    knn.setDefaultK(k)
    knn.train(X_train, cv2.ml.ROW_SAMPLE, y_train)
    _, y_test_hat = knn.predict(X_test)
    acc = accuracy_score(y_test, y_test_hat)
    # Strict '>' keeps the SMALLEST k among ties.
    if acc > best_acc:
        best_acc = acc
        best_k = k
best_acc, best_k
# Proper protocol: first split off a held-out test set ...
X_trainval, X_test, y_trainval, y_test = train_test_split(
    X, y, random_state=37
)
X_trainval.shape
# ... then split the remainder into training and validation sets.
X_train, X_valid, y_train, y_valid = train_test_split(
    X_trainval, y_trainval, random_state=37
)
X_train.shape
best_acc = 0.0
best_k = 0
# Grid search scored on the validation set only.
for k in range(1, 20):
    knn = cv2.ml.KNearest_create()
    knn.setDefaultK(k)
    knn.train(X_train, cv2.ml.ROW_SAMPLE, y_train)
    _, y_valid_hat = knn.predict(X_valid)
    acc = accuracy_score(y_valid, y_valid_hat)
    # NOTE(review): '>=' here (vs '>' above) keeps the LARGEST k among ties --
    # confirm whether that asymmetry is intentional.
    if acc >= best_acc:
        best_acc = acc
        best_k = k
best_acc, best_k
# Retrain on train+validation with the chosen k and score once on the test set.
knn = cv2.ml.KNearest_create()
knn.setDefaultK(best_k)
knn.train(X_trainval, cv2.ml.ROW_SAMPLE, y_trainval)
_, y_test_hat = knn.predict(X_test)
accuracy_score(y_test, y_test_hat), best_k
# Equivalent model selection with scikit-learn's 5-fold cross-validated grid search.
param_grid = {'n_neighbors': range(1, 20)}
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid, cv=5)
grid_search.fit(X_trainval, y_trainval)
grid_search.best_score_, grid_search.best_params_
grid_search.score(X_test, y_test)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then the goal is to loop over all possible values of $k$. As we do this, we want to keep track of
Step2: Grid search then looks like an outer loop around the entire train and test procedure. After calculating the accuracy on the test set (acc), we compare it to the best accuracy found
Step3: When we are done, we can have a look at the best accuracy
Step4: Turns out, we can get 97.4% accuracy using $k=1$.
Step5: Second, split X_trainval again into proper training and validation sets
Step6: Then we repeat the manual grid search from the preceding code, but this time, we will use
Step7: We now find that a 100% validation score (best_acc) can be achieved with $k=7$ (best_k)!
Step8: With this procedure, we find a formidable score of 94.7% accuracy on the test set. Because
Step9: Here, we are searching for the best $k$ in the range [1, 19].
Step10: Then we can train the classifier using the fit method. In return, scikit-learn will inform us
Step11: This will allow us to find the best validation score and the corresponding value for $k$
Step12: We thus get a validation score of 96.4% for $k=3$. Since grid search with cross-validation is
|
12,125
|
<ASSISTANT_TASK:>
Python Code:
# iPython notebook magic commands
%load_ext autoreload
%autoreload 2
%matplotlib inline
#General modules
import os
from os.path import join, basename, isdir
from os import makedirs
import pandas as pd
import matplotlib.pyplot as plt
import time
import pickle
# Supervised Modules
from pyneurovault import api
import nibabel as nb
import numpy as np
from nltools.analysis import Predict, apply_mask, Roc
# Unsupervised Modules
from neurosynth import Dataset, Clusterer, Masker, Decoder
# from neurosynth.analysis.cluster import cluster_similarity
from nilearn import plotting, datasets
from sklearn.decomposition import RandomizedPCA
# Define output folder
out_folder = "/Users/lukechang/Downloads/nv_tmp"
tic = time.time() #Start Timer
# Pain Collection (NeuroVault collection id)
collection = 504
# Will extract all collections and images in one query to work from
nv = api.NeuroVault()
# Download all images to file, resampled to the bundled MNI152 2mm template.
standard = os.path.join(os.path.dirname(api.__file__),'data','MNI152_T1_2mm_brain.nii.gz')
nv.download_images(dest_dir = out_folder,target=standard, collection_ids=[collection],resample=True)
# Create Variables
# NOTE(review): .ix is long-deprecated in pandas -- this script targets an
# old Python 2 / pandas stack (see the `print` statements below).
collection_data = nv.get_images_df().ix[nv.get_images_df().collection_id == collection,:].reset_index()
# Sort image rows by filename while remembering their original positions.
img_index = sorted((e,i) for i,e in enumerate(collection_data.file))
index = [x[1] for x in img_index]
img_file = [x[0] for x in img_index]
# Concatenate the resampled per-image NIfTI files into a single 4D image.
dat = nb.funcs.concat_images([os.path.join(out_folder,'resampled','00' + str(x) + '.nii.gz') for x in collection_data.image_id[index]])
# dat = nb.funcs.concat_images([os.path.join(out_folder,'original',str(x) + '.nii.gz') for x in collection_data.image_id[index]])
# Parse subject id and heat level out of the filenames
# (presumably of the form ..._<subject>_<HeatLevel>.nii.gz -- confirm).
holdout = [int(x.split('_')[-2]) for x in img_file]
heat_level = [x.split('_')[-1].split('.')[0] for x in img_file]
# Map the categorical heat level onto an ordinal outcome.
Y_dict = {'High':3,'Medium':2,'Low':1}
Y = np.array([Y_dict[x] for x in heat_level])
# Pickle for later use
# Saving the objects:
with open(os.path.join(out_folder,'Pain_Data.pkl'), 'w') as f:
    pickle.dump([dat,holdout,Y], f)
print 'Elapsed: %.2f seconds' % (time.time() - tic) #Stop timer
tic = time.time() #Start Timer
# Getting back the objects (4D data, subject ids, ordinal heat outcome):
with open(os.path.join(out_folder,'Pain_Data.pkl')) as f:
    dat, holdout, Y = pickle.load(f)
print 'Load Pickled File - Elapsed: %.2f seconds' % (time.time() - tic) #Stop timer
tic = time.time() #Start Timer
# Test Prediction with kfold xVal: linear support vector regression, 5 folds.
svr = Predict(dat,Y,algorithm='svr',subject_id = holdout,
            output_dir=out_folder, cv_dict = {'kfolds':5},
            **{'kernel':"linear"})
svr.predict()
print 'Elapsed: %.2f seconds' % (time.time() - tic) #Stop timer
tic = time.time() #Start Timer
# Test Prediction with LOSO xVal (leave-one-subject-out).
svr = Predict(dat,Y,algorithm='svr',subject_id = holdout,
            output_dir=out_folder, cv_dict = {'loso':holdout},
            **{'kernel':"linear"})
svr.predict()
print 'Elapsed: %.2f seconds' % (time.time() - tic) #Stop timer
tic = time.time() #Start Timer
# Test Ridge with kfold xVal + grid search over 5 regularization strengths.
ridge = Predict(dat, Y, algorithm='ridgeCV',subject_id = holdout,
                output_dir=out_folder, cv_dict = {'kfolds':5},
                **{'alphas':np.linspace(.1, 10, 5)})
ridge.predict()
print 'Total Elapsed: %.2f seconds' % (time.time() - tic) #Stop timer
tic = time.time() #Start Timer
# Principal Components Regression (slower, but scales to large feature sets).
pcr = Predict(dat,Y,algorithm='pcr',subject_id = holdout,
            output_dir=out_folder, cv_dict = {'kfolds':5})
pcr.predict()
print 'Total Elapsed: %.2f seconds' % (time.time() - tic) #Stop timer
tic = time.time() #Start Timer
# Restrict training to a subcortical anatomical mask.
mask = nb.load(os.path.join(out_folder,'Masks','subcortex_drewUpdated.nii'))
# Test Prediction with kfold xVal
svr = Predict(dat,Y,algorithm='svr',subject_id = holdout,
            output_dir=out_folder, cv_dict = {'kfolds':5},
            mask = mask, **{'kernel':"linear"})
svr.predict()
print 'Elapsed: %.2f seconds' % (time.time() - tic) #Stop timer
tic = time.time() #Start Timer
# Load the ridge weight map written by the ridgeCV run above, using nibabel.
pines = nb.load(os.path.join(out_folder, 'ridgeCV_weightmap.nii.gz'))
# Pattern expression of each image: dot product and spatial correlation
# with the weight map.
pexpd = apply_mask(data=dat, weight_map=pines, output_dir=out_folder, method='dot_product', save_output=True)
pexpc = apply_mask(data=dat, weight_map=pines, output_dir=out_folder, method='correlation', save_output=True)
plt.subplot(2, 1, 1)
plt.plot(pexpd)
plt.title('Pattern Expression')
plt.ylabel('Dot Product')
plt.subplot(2, 1, 2)
plt.plot(pexpc)
plt.xlabel('Subject')
plt.ylabel('Correlation')
print 'Elapsed: %.2f seconds' % (time.time() - tic) #Stop timer
tic = time.time() #Start Timer
# Create Variables for ROC analysis: keep only High (3) vs Low (1) trials.
# NOTE: `svr` here is the last-fitted model (the subcortex-masked SVR above).
include = (svr.Y==3) | (svr.Y==1)
input_values = svr.yfit[include]
binary_outcome = svr.Y[include]
binary_outcome = binary_outcome==3
# Single-Interval classification: find the optimal decision threshold.
roc = Roc(input_values=input_values, binary_outcome=binary_outcome)
roc.plot()
roc.summary()
print 'Total Elapsed: %.2f seconds' % (time.time() - tic) #Stop timer
tic = time.time() #Start Timer
# Forced Choice: relative classification between paired images.
roc_fc = Roc(input_values=input_values, binary_outcome=binary_outcome, forced_choice=True)
roc_fc.plot()
roc_fc.summary()
print 'Total Elapsed: %.2f seconds' % (time.time() - tic) #Stop timer
# Initialize main clustering object: use PCA with 100 components for dimensionality reduction;
# keep all voxels with minimum of 100 studies (approx. 1% base rate).
reducer = RandomizedPCA(100)
roi_mask = os.path.join(out_folder,'Clustering/Masks/ROIs/FSL_TPJ.nii.gz')
clstr = Clusterer(dataset, 'coactivation', output_dir=os.path.join(out_folder,'Clustering'),
                  min_studies_per_voxel=100, reduce_reference=reducer, roi_mask=roi_mask)
# Run k-means for every k in 2..10 and write coactivation maps per solution.
clstr.cluster(algorithm='kmeans', n_clusters=range(2,11,1),
              bundle=False, coactivation_maps=True,
              precomputed_distances=True)
# Sagittal slice montage, one row per k.
K = range(2,11,1)
fig, axes = plt.subplots(len(K), 1)
for i, k in enumerate(K):
    # NOTE(review): `range(...) + range(...)` is Python-2-only list
    # concatenation (ranges are lists there); consistent with the rest of
    # this Python 2 script.
    plotting.plot_roi(os.path.join(out_folder,'Clustering/kmeans_k%d/kmeans_k%dcluster_labels.nii.gz' % (k, k)),
                      title="Whole-brain k-means clustering (k = %d)" % k, display_mode='x',
                      cut_coords=range(-60,-45,5) + range(50,65,5), axes=axes[i])
fig.set_size_inches((15, 20))
fig.savefig(os.path.join(out_folder,'Clustering/Sagittal_Slice_Montage.png'))
# Decoding Polar Plots: decode each clustering solution against the
# Neurosynth topic database and save a polar plot per k.
K = range(2,11,1)
dcdr = Decoder(dataset, method='roi')
for i in K:
    res = dcdr.decode(os.path.join(out_folder,'Clustering/kmeans_k' + str(i) + '/kmeans_k' + str(i) + 'cluster_labels.nii.gz'),
                      save=os.path.join(out_folder,'Clustering/kmeans_k' + str(i) + '/decoding_results_z.txt'), value='r')
    _ = dcdr.plot_polar(res, overplot=True, n_top=2)
    _.savefig(os.path.join(out_folder,'Clustering/kmeans_k' + str(i) + '/Decode_PolarPlot_k' + str(i) + '.pdf'))
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prediction/Classification
Step2: Load Pickled Data
Step3: Run Prediction Analyses
Step4: <p>Run Linear Support Vector Regression with leave one subject out cross-validation (LOSO)</p>
Step5: <p>Run Ridge Regression with 5 fold Cross-Validation and a nested cross-validation to estimate shrinkage parameter</p>
Step6: <p>Run Principal Components Regression with no Cross-Validation. This pattern should be very similar to the pain pattern reported in Krishnan et al., (Under Review). Principal Components Regression is much slower than the other linear methods, but scales well when feature set is large</p>
Step7: <p>You might be interested in only training a pattern on a subset of the brain using an anatomical mask. Here we use a mask of subcortex.</p>
Step8: Apply Mask
Step9: ROC Plot
Step10: <p>The above example uses single-interval classification, which attempts to determine the optimal classification interval. However, sometimes we are intersted in directly comparing responses to two images within the same person. In this situation we should use forced-choice classification, which looks at the relative classification accuracy between two images.</p>
Step11: Coactivation Based Clustering w/ Neurosynth
Step12: Run Clustering
Step13: Plot Slice Montages
Step14: Decode with Neurosynth
|
12,126
|
<ASSISTANT_TASK:>
Python Code:
# Import a bunch of stuff
import StarData
import HelperFunctions
import Fitters
import Mamajek_Table
import SpectralTypeRelations
import matplotlib.pyplot as plt
import logging
import triangle
from astropy.io import fits
import numpy as np
import sys
import os
%matplotlib inline
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Get the HDF5 filename. Might want to change this eventually.
#home = os.environ['HOME']
#sys.path.append('{}/School/Research/CHIRON_data/Chiron-Scripts/'.format(home))
#from Search_slow import hdf5_filename
#hdf5_filename = '/Volumes/DATADRIVE/Kurucz_Grid/TS23_grid_full.hdf5'
hdf5_filename = '/media/ExtraSpace/Kurucz_FullGrid/IGRINS_grid.hdf5'
flattened_orders = HelperFunctions.ReadExtensionFits('../20140708/HIP_5131_telluric_corrected_renormalized.fits')
%matplotlib notebook
for order in flattened_orders:
plt.plot(order.x, order.y, 'k-', alpha=0.4)
# Fit the RV to the flattened spectrum
teff, logg = 13193.3528155,4.5
reload(Fitters)
flat_fitter = Fitters.RVFitter(flattened_orders, model_library=hdf5_filename,
T=teff, logg=logg, feh=0.0)
#flat_fitter.fit(backend='multinest', n_live_points=1000, basename='RVFitter/HIP109521_20131021-', overwrite=False)
flat_fitter.fit(backend='multinest', n_live_points=1000, basename='RVFitter/HIP5131_20140708-', overwrite=True)
%matplotlib inline
flat_fitter.triangle()
flat_fitter.samples.describe()
%matplotlib notebook
flat_fitter.plot()
fname = '../20141015/HIP_109056_telluric_corrected_renormalized.fits'
orders = HelperFunctions.ReadExtensionFits(fname)
total = HelperFunctions.CombineXYpoints(orders)
denoised = HelperFunctions.Denoise(total.copy())
fig, ax = plt.subplots(1, 1, figsize=(13,6))
ax.plot(total.x, total.y, 'k-', alpha=0.5)
ax.plot(denoised.x, denoised.y, 'r-', alpha=0.6)
fft = np.fft.rfft(denoised.y)
freq = np.fft.rfftfreq(denoised.size(), d=denoised.x[1] - denoised.x[0])
fig, ax = plt.subplots(1, 1, figsize=(15,6))
ax.semilogy(freq, fft.imag**2 + fft.real**2)
ax.set_xlim((0, 5))
good = freq > 0.5
pars = np.polyfit(freq[good], np.log10(fft[good]), 4)
fit = np.poly1d(pars)
fft_fit = 10**fit(freq)
plt.plot(freq, fft_fit.imag**2 + fft_fit.real**2, 'r-')
good = (freq > 1) & (freq < 2)
fft[good] = fft_fit[good]
spec = np.fft.irfft(fft)
defringed = total.copy()
defringed.y = spec
fig, ax = plt.subplots(1, 1, figsize=(13,10))
ax.plot(total.x, total.y, 'k-', alpha=0.5)
#ax.plot(denoised.x, denoised.y, 'r-', alpha=0.6)
ax.plot(defringed.x, defringed.y, 'g-', alpha=0.6)
ax.set_xlim((1635, 1645))
ax.set_ylim((0.85, 0.99))
from astropy import units as u, constants
Ryd = 10967758.306 / u.m # From Allen's Astrophysical Quantities
n_f = 4.0
n_i = np.arange(11, 16, 1, dtype=np.float)
true_waves = ((Ryd * (1./n_f**2 - 1./n_i**2))**(-1)).to(u.nm)
# Correct for index of refraction of air (use IAU standard conversion from
# Morton, D. C. 1991, ApJS, 77, 119
wave_A = true_waves.to(u.angstrom).value # Wavelength in angstroms
n = 1.0 + 2.735182e-4 + 131.4182 / wave_A ** 2 + 2.76249e8 / wave_A ** 4
true_waves /= n
print(true_waves)
def get_centroid(xypt):
    """Flux-weighted centroid of an absorption feature.

    Weights are the line depths (1 - flux), so deeper points pull the
    centroid harder.
    """
    depths = 1.0 - xypt.y
    return np.average(xypt.x, weights=depths)
def get_min_quadratic(xypt):
    """Extremum x-position of a parabola fit through the points (vertex of the fit)."""
    a2, a1, _ = np.polyfit(xypt.x, xypt.y, 2)
    return -a1 / (2.0 * a2)
def get_min_gaussian(xypt):
    """Line-center estimate from a Gaussian fit to the inverted (1 - flux) profile.

    NOTE(review): the `fit_gauss` import below is commented out, so this
    raises NameError unless fit_gauss is defined elsewhere in the session --
    confirm before use. `parameters[-2]` presumably is the Gaussian mean;
    verify against the fit_gauss model definition.
    """
    #from fitters.linear import fit_gauss
    gauss = fit_gauss(xypt.x, 1.0 - xypt.y, add_linear=False)
    return gauss.parameters[-2]
obs_waves = np.empty_like(true_waves)
N=500
for i, true_wave in enumerate(true_waves):
idx = np.searchsorted(total.x, true_wave)
segment = defringed[idx-N:idx+N]
centroid = get_centroid(segment)
#centroid = get_min_quadratic(segment)
#centroid = get_min_gaussian(segment)
obs_waves[i] = centroid*u.nm
plt.plot(segment.x, segment.y)
plt.plot((centroid, centroid), plt.ylim(), 'r--')
v = (obs_waves - true_waves) / true_waves * constants.c.to(u.km/u.s)
print(v)
print(np.mean(v), np.std(v))
import StellarModel
reload(StellarModel)
HDF5_FILENAME = '/media/ExtraSpace/Kurucz_FullGrid/IGRINS_grid_air.hdf5'
model_list = StellarModel.GetModelList(metal=0.0, logg=4.0, temperature=9250,
type='hdf5', hdf5_file=HDF5_FILENAME)
modeldict, _ = StellarModel.MakeModelDicts(model_list, type='hdf5', hdf5_file=HDF5_FILENAME,
vsini_values=150, vac2air=True, logspace=True)
model = modeldict[9250][4.0][0.0][0.0][150]
import Correlate
ccf = Correlate.GetCCF([defringed], model, vsini=150, resolution=45000, addmode='ml')
corr = ccf['CCF']
plt.plot(corr.x, corr.y)
Correlate.get_rv(corr.x, corr.y, Npix=defringed.size())
good = (corr.y > 0.9*corr.y.max())
plt.plot(corr.x[good], corr.y[good])
pars = np.polyfit(corr.x[good], corr.y[good], 2)
plt.plot(corr.x[good], np.poly1d(pars)(corr.x[good]), 'r--')
centroid = pars[1] / (2.0*pars[0])
print(centroid)
def defringe_data(xypt):
    """Remove interference fringes from a spectrum in Fourier space.

    Fits a 4th-degree polynomial to log10 of the (complex) rFFT values at
    frequencies above 0.5 cycles/nm, then replaces the FFT in the fringe band
    (1-2 cycles/nm) with the fitted values and inverse-transforms back.

    NOTE(review): np.fft.irfft returns 2*(len(fft)-1) samples, so `spec` only
    matches `xypt.y` in length for an even number of input points; the caller
    (get_rv_ccf) forces an even point count -- confirm for other callers.
    """
    fft = np.fft.rfft(xypt.y)
    freq = np.fft.rfftfreq(xypt.size(), d=xypt.x[1] - xypt.x[0])
    # Fit the smooth FFT envelope above the low-frequency continuum signal.
    good = freq > 0.5
    pars = np.polyfit(freq[good], np.log10(fft[good]), 4)
    fit = np.poly1d(pars)
    fft_fit = 10**fit(freq)
    # Overwrite the fringe band with the fitted (fringe-free) envelope.
    good = (freq > 1) & (freq < 2)
    fft[good] = fft_fit[good]
    spec = np.fft.irfft(fft)
    defringed = xypt.copy()
    defringed.y = spec
    return defringed
from astropy.io import fits
from GenericSearch import HelCorr_IRAF
HDF5_FILENAME = '/media/ExtraSpace/Kurucz_FullGrid/IGRINS_grid_air.hdf5'
def get_rv_ccf(fname, teff=9000, logg=4.0, feh=0.0, vsini=150):
    """Measure the barycentric-corrected RV of a spectrum by cross-correlation.

    Reads the file, denoises and de-fringes it, cross-correlates against a
    Kurucz model of the given (teff, logg, feh, vsini), and returns
    (rv, rv_quad, rv_err, peak_ccf) where rv_quad is a quadratic-fit estimate
    of the CCF peak. Both RVs include the barycentric correction.
    """
    # Read in and denoise the data
    orders = HelperFunctions.ReadExtensionFits(fname)
    min_xspacing = np.min([np.min(np.diff(order.x)) for order in orders])
    numpoints = round((orders[-1].x[-1] - orders[0].x[0]) / min_xspacing)
    # Force an even point count so irfft in defringe_data round-trips the length.
    if numpoints % 2 != 0:
        numpoints += 1
    total = HelperFunctions.CombineXYpoints(orders, numpoints=numpoints)
    denoised = HelperFunctions.Denoise(total.copy())
    # De-fringe the data
    defringed = defringe_data(denoised)
    # Get the appropriate model
    model_list = StellarModel.GetModelList(metal=feh, logg=logg, temperature=teff,
                                           type='hdf5', hdf5_file=HDF5_FILENAME)
    modeldict, _ = StellarModel.MakeModelDicts(model_list, type='hdf5', hdf5_file=HDF5_FILENAME,
                                               vsini_values=vsini, vac2air=True, logspace=True)
    model = modeldict[teff][logg][feh][0.0][vsini]
    # Cut out the parts with no CCF power
    #good = np.where((defringed.x > 1560) & (defringed.x < 1660))[0]
    #defringed = defringed[good]
    # Correlate
    ccf = Correlate.GetCCF([defringed], model, vsini=vsini, resolution=45000, addmode='ml')['CCF']
    rv, rv_err, peak_ccf = Correlate.get_rv(ccf.x, ccf.y, Npix=defringed.size())
    # Secondary peak estimate: quadratic fit to the top 10% of the CCF.
    good = (ccf.y > 0.9*ccf.y.max())
    rv_quad = get_min_quadratic(ccf[good])
    # Correct for barycentric velocity
    header = fits.getheader(fname)
    vbary = HelCorr_IRAF(header, observatory='mcdonald')
    return rv+vbary, rv_quad+vbary, rv_err, peak_ccf
import StellarModel
reload(StellarModel)
import Correlate
reload(Correlate)
import GenericSearch
reload(GenericSearch)
HelCorr_IRAF = GenericSearch.HelCorr_IRAF
get_rv_ccf('../20141015/HIP_109056_1_telluric_corrected_renormalized.fits',
teff=9250, logg=4.0, feh=0.0)
import glob
#file_list = glob.glob('../20141015/HIP_109056*renormalized.fits')
file_list = ['../20141015/HIP_109056_{}_telluric_corrected_renormalized.fits'.format(i) for i in range(1, 6)]
file_list.append('../20141015/HIP_109056_telluric_corrected_renormalized.fits')
rv = []
rv_quad = []
rv_err = []
for fname in file_list:
v, v_quad, v_err, ccf = get_rv_ccf(fname, teff=10000, logg=4.0, feh=0.0)
rv.append(v)
rv_quad.append(v_quad)
rv_err.append(v_err)
import pandas as pd
df = pd.DataFrame(data=dict(fname=file_list, rv=rv, rv_quad=rv_quad, rv_err=rv_err))
df.describe()
df
def fit_2dspec(xl, yl, zl, x_degree=4, y_degree=3,
               x_domain=None, y_domain=None):
    """Fit z = f(x, y) as a 2-D Chebyshev polynomial with one 3-sigma clip.

    xl, yl, zl are 1-D sample arrays; x_domain/y_domain default to the data
    range. Returns (p, m): the fitted astropy Chebyshev2D model and the
    boolean mask of points kept by the clipping pass.
    """
    from astropy.modeling import fitting
    # Fit the data using astropy.modeling
    if x_domain is None:
        x_domain = [min(xl), max(xl)]
    # more room for y_domain??
    if y_domain is None:
        #y_domain = [orders[0]-2, orders[-1]+2]
        y_domain = [min(yl), max(yl)]
    from astropy.modeling.polynomial import Chebyshev2D
    p_init = Chebyshev2D(x_degree=x_degree, y_degree=y_degree,
                         x_domain=x_domain, y_domain=y_domain)
    f = fitting.LinearLSQFitter()
    p = f(p_init, xl, yl, zl)
    # Single sigma-clipping pass: drop points further than 3 sigma from the
    # first fit and refit on the survivors.
    for i in [0]:
        dd = p(xl, yl) - zl
        m = np.abs(dd) < 3.*dd.std()
        p = f(p, xl[m], yl[m], zl[m])
    return p, m
from FittingUtilities import FindLines
def fit_wavelength(orders, ordernums, first_order=None, last_order=None, x_degree=4, y_degree=3):
    """Fit the wavelength in a whole chip, and return the 2D polynomial callable.

    Locates spectral lines in each echelle order and fits
    wavelength * order_number as a 2-D Chebyshev polynomial of
    (pixel, order_number), which is smooth for an echelle grating.

    Parameters
    ----------
    orders : sequence of spectra with a ``.x`` wavelength array
    ordernums : echelle order numbers matching `orders`
    first_order, last_order : optional inclusive index range of orders to use
    x_degree, y_degree : Chebyshev degrees in the pixel / order directions

    Returns
    -------
    Callable ``p(pixel, ordernum)`` evaluating to wavelength * ordernum.
    """
    # Fixed: the docstring above had lost its triple quotes in this copy,
    # leaving bare prose lines that were a SyntaxError.
    pixel_list = []
    ordernum_list = []
    wave_list = []
    if first_order is None:
        first_order = 0
    if last_order is None:
        last_order = len(orders) - 1
    for order, ordernum in zip(orders[first_order:last_order+1], ordernums[first_order:last_order+1]):
        lines = FindLines(order)
        pixel_list.extend(lines)
        ordernum_list.extend(np.ones_like(lines)*ordernum)
        wave_list.extend(order.x[lines])
    pixel_list = np.array(pixel_list)
    ordernum_list = np.array(ordernum_list)
    wave_list = np.array(wave_list)
    # Fit wavelength*ordernum rather than wavelength itself; the clip mask
    # returned by fit_2dspec is discarded here.
    p, m = fit_2dspec(pixel_list, ordernum_list, wave_list*ordernum_list,
                      x_degree=x_degree, y_degree=y_degree)
    return p
import DataStructures
fname = '../20141015/Corrected_HIP_109056-0.fits'
hdulist = fits.open(fname)
model_orders = [DataStructures.xypoint(x=hdu.data['wavelength'], y=hdu.data['model']) for hdu in hdulist[1:]]
data_orders = [DataStructures.xypoint(x=hdu.data['wavelength'], y=hdu.data['flux'],
cont=hdu.data['continuum'], err=hdu.data['error']) for hdu in hdulist[1:]]
# H band
model_orders_H = [o.copy() for o in model_orders if o.x[-1] < 1870]
data_orders_H = [o.copy() for o in data_orders if o.x[-1] < 1870]
ordernums_H = 121.0 - np.arange(len(model_orders_H))
p_H = fit_wavelength(model_orders_H, ordernums_H, first_order=3, last_order=len(ordernums_H) - 4)
# K band
model_orders_K = [o.copy() for o in model_orders if o.x[-1] > 1870]
data_orders_K = [o.copy() for o in data_orders if o.x[-1] > 1870]
ordernums_K = 92.0 - np.arange(len(model_orders_K))
p_K = fit_wavelength(model_orders_K, ordernums_K, first_order=7, last_order=len(ordernums_K) - 4)
new_orders = []
for i, order in enumerate(data_orders):
pixels = np.arange(order.size(), dtype=np.float)
if order.x[-1] < 1870:
# H band
ordernum = ordernums_H[i] * np.ones_like(pixels)
wave = p_H(pixels, ordernum) / ordernum
else:
# K band
ordernum = ordernums_K[i-len(ordernums_H)] * np.ones_like(pixels)
wave = p_K(pixels, ordernum) / ordernum
#ordernum = np.ones_like(pixels) * ap2ord(i)
#wave = p(pixels, ordernum) / ordernum
new_orders.append(DataStructures.xypoint(x=wave, y=order.y, cont=order.cont, err=order.err))
for original, corrected in zip(data_orders, new_orders):
plt.plot(original.x, original.y, 'k-', alpha=0.5)
plt.plot(corrected.x, corrected.y, 'r-', alpha=0.5)
import SpecFlattener
SpecFlattener.flatten_spec?
header = fits.getheader('../20141015/HIP_109056.fits')
print(header.cards)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Try something else
Step2: Fringing. Check the FFT
Step3: There is definitely something there visible in the FFTs. I will fit the fft, and replace all values with frequencies between 1 and 2 cycles/nm to the fitted value.
Step4: Compare line locations to what they should be
Step5: Try cross-correlating the whole spectrum at once.
Step6: That's closer. Lets make it a function and try in a few other stars.
Step8: Is the wavelength solution bad?
|
12,127
|
<ASSISTANT_TASK:>
Python Code:
# Spatio-temporal cluster permutation tests (1000 permutations) comparing the
# deviant vs. standard conditions for the 'bin' and 'ste' datasets; results
# are (T map, cluster masks, cluster p-values, permutation null distribution).
nperm = 1000
T_obs_bin,clusters_bin,clusters_pb_bin,H0_bin = mne.stats.spatio_temporal_cluster_test(X_bin,threshold=None,n_permutations=nperm,out_type='mask')
T_obs_ste,clusters_ste,clusters_pb_ste,H0_ste = mne.stats.spatio_temporal_cluster_test(X_ste,threshold=None,n_permutations=nperm,out_type='mask')
def extract_electrodes_times(clusters,clusters_pb,tmin_ind=500,tmax_ind=640,alpha=0.005,evoked = ev_bin_dev):
    """Collect the channels and times belonging to significant clusters.

    For each cluster whose p-value is below `alpha`, scans the cluster's
    boolean (time x channel) mask restricted to samples [tmin_ind, tmax_ind)
    and records the corresponding channel names and time points taken from
    `evoked`. Returns (unique_channel_names, unique_times).

    NOTE(review): the default ``evoked=ev_bin_dev`` is captured at definition
    time from the notebook namespace -- pass `evoked` explicitly elsewhere.
    """
    ch_list_temp = []
    time_list_temp = []
    for clust,pval in zip(clusters,clusters_pb):
        if pval < alpha:
            # clust is a (time x channel) boolean mask for this cluster.
            for j,curline in enumerate(clust[tmin_ind:tmax_ind]):
                for k,el in enumerate(curline):
                    if el:
                        ch_list_temp.append(evoked.ch_names[k])
                        time_list_temp.append(evoked.times[j+tmin_ind])
    return np.unique(ch_list_temp),np.unique(time_list_temp)
channels_deviance_ste,times_deviance_ste=extract_electrodes_times(clusters_ste,clusters_pb_ste)
channels_deviance_bin,times_deviance_bin=extract_electrodes_times(clusters_bin,clusters_pb_bin)
print(channels_deviance_bin),print(times_deviance_bin)
print(channels_deviance_ste),print(times_deviance_ste)
times_union = np.union1d(times_deviance_bin,times_deviance_ste)
ch_union = np.unique(np.hstack([channels_deviance_bin,channels_deviance_ste]))
print(ch_union)
#Selecting channels
epochs_bin_dev_ch = epochs_bin_dev.pick_channels(ch_union)
epochs_bin_std_ch = epochs_bin_std.pick_channels(ch_union)
epochs_ste_dev_ch = epochs_ste_dev.pick_channels(ch_union)
epochs_ste_std_ch = epochs_ste_std.pick_channels(ch_union)
X_diff = [epochs_bin_dev_ch.get_data().transpose(0, 2, 1) - epochs_bin_std_ch.get_data().transpose(0, 2, 1),
epochs_ste_dev_ch.get_data().transpose(0, 2, 1) - epochs_ste_std_ch.get_data().transpose(0, 2, 1)]
X_diff_ste_bin = X_diff[1]-X_diff[0]
epochs_bin_dev_ch.plot_sensors(show_names=True)
plt.show()
roi = ['E117','E116','E108','E109','E151','E139','E141','E152','E110','E131','E143','E154','E142','E153','E140','E127','E118']
roi_frontal = ['E224','E223','E2','E4','E5','E6','E13','E14','E15','E20','E21','E27','E28','E30','E36','E40','E41']
len(roi_frontal),len(roi)
from scipy.stats import ttest_1samp
from mne.stats import bonferroni_correction,fdr_correction
def ttest_amplitude(X,times_ind,ch_names,times):
    """One-sample t-test of mean amplitude per channel in a time window.

    X has shape (n_subjects, n_times, n_channels); amplitudes are averaged
    over the time samples in `times_ind`, then each channel is tested against
    zero. Channels significant after FDR and after Bonferroni correction
    (alpha = 0.05) are printed.

    Returns (T, pval, pval_fdr, pval_bonferroni), one entry per channel.
    """
    # Selecting time points and averaging over time
    amps = X[:,times_ind,:].mean(axis=1)
    T, pval = ttest_1samp(amps, 0)
    alpha = 0.05
    n_samples, n_tests= amps.shape
    threshold_uncorrected = stats.t.ppf(1.0 - alpha, n_samples - 1)
    reject_bonferroni, pval_bonferroni = bonferroni_correction(pval, alpha=alpha)
    threshold_bonferroni = stats.t.ppf(1.0 - alpha / n_tests, n_samples - 1)
    reject_fdr, pval_fdr = fdr_correction(pval, alpha=alpha, method='indep')
    mask_fdr = pval_fdr < 0.05
    mask_bonf = pval_bonferroni < 0.05
    print('FDR from %02f to %02f' % ((times[times_ind[0]]),times[times_ind[-1]]))
    for i,curi in enumerate(mask_fdr):
        if curi:
            print("Channel %s, T = %0.2f, p = %0.3f " % (ch_names[i], T[i],pval_fdr[i]))
    print('Bonferonni from %02f to %02f' % ((times[times_ind[0]]),times[times_ind[-1]]))
    for i,curi in enumerate(mask_bonf):
        if curi:
            print("Channel %s, T = %0.2f, p = %0.3f " % (ch_names[i], T[i],pval_bonferroni[i]))
    # Fixed: previously returned the misspelled name `pval_bonferronia`,
    # which raised NameError.
    return T,pval,pval_fdr,pval_bonferroni
def ttest_amplitude_roi(X,times_ind,ch_names_roi,times):
    """One-sample t-test of the ROI-mean amplitude in a time window.

    X has shape (n_subjects, n_times, n_channels); amplitudes are averaged
    over the samples in `times_ind` and over all channels (the ROI), giving
    one value per subject which is tested against zero. `ch_names_roi` is
    unused and kept only for signature parity with ttest_amplitude.

    Returns (T, pval, pval_fdr, pval_bonferroni); since a single test is
    performed, both corrected p-values equal the raw p-value.
    """
    print(X.shape)
    # Selecting time points and averaging over time
    amps = X[:,times_ind,:].mean(axis=1)
    # averaging over channels
    amps = amps.mean(axis=1)
    T, pval = ttest_1samp(amps, 0)
    # Implicit sanity check that X is 3-D.
    n_samples, _, n_tests= X.shape
    print('Uncorrected from %02f to %02f' % ((times[times_ind[0]]),times[times_ind[-1]]))
    print("T = %0.2f, p = %0.3f " % (T,pval))
    # Fixed: `pval_fdr`/`pval_bonferroni` were never computed here, so the
    # return either raised NameError or leaked stale notebook globals. With
    # one test, FDR and Bonferroni corrections reduce to the raw p-value.
    pval_fdr = pval
    pval_bonferroni = pval
    return T,pval,pval_fdr,pval_bonferroni
# Build 20 ms analysis windows with 10 ms steps over 0.28-0.44 s:
# 14 full windows plus one truncated final window, each converted to sample
# indices of the evoked object.
toi = np.arange(0.28,0.44,0.001)
toi_index = ev_bin_dev.time_as_index(toi)
wsize = 20
wstep = 10
toi
all_toi_indexes = []
for i in range(14):
    print(toi[10*i],toi[10*i + 20])
    cur_toi_ind = range(10*i+1,(10*i+21))
    all_toi_indexes.append(ev_bin_dev.time_as_index(toi[cur_toi_ind]))
print(toi[10*14],toi[10*14 + 19])
cur_toi_ind = range(10*14+1,(10*14+19))
all_toi_indexes.append(ev_bin_dev.time_as_index(toi[cur_toi_ind]))
for cur_timewindow in all_toi_indexes:
T,pval,pval_fdr,pval_bonferroni = ttest_amplitude(X_diff_ste_bin,cur_timewindow,epochs_bin_dev_ch.ch_names,times=epochs_bin_dev_ch.times)
#Selecting channels
epochs_bin_dev = _matstruc2mne_epochs(mat_bin_dev).crop(tmax=tcrop)
epochs_bin_std = _matstruc2mne_epochs(mat_bin_std).crop(tmax=tcrop)
epochs_ste_dev = _matstruc2mne_epochs(mat_ste_dev).crop(tmax=tcrop)
epochs_ste_std = _matstruc2mne_epochs(mat_ste_std).crop(tmax=tcrop)
mne.equalize_channels([epochs_bin_dev,epochs_bin_std,epochs_ste_dev,epochs_ste_std])
epochs_bin_dev_ch = epochs_bin_dev.pick_channels(roi)
epochs_bin_std_ch = epochs_bin_std.pick_channels(roi)
epochs_ste_dev_ch = epochs_ste_dev.pick_channels(roi)
epochs_ste_std_ch = epochs_ste_std.pick_channels(roi)
X_diff_roi = [epochs_bin_dev_ch.get_data().transpose(0, 2, 1) - epochs_bin_std_ch.get_data().transpose(0, 2, 1),
epochs_ste_dev_ch.get_data().transpose(0, 2, 1) - epochs_ste_std_ch.get_data().transpose(0, 2, 1)]
X_diff_ste_bin_roi = X_diff_roi[1]-X_diff_roi[0]
for cur_timewindow in all_toi_indexes:
T,pval,pval_fdr,pval_bonferroni = ttest_amplitude_roi(X_diff_ste_bin_roi,cur_timewindow,roi,times=epochs_bin_dev_ch.times)
grav_bin_dev = epochs_bin_dev_ch.average()
grav_bin_std = epochs_bin_std_ch.average()
grav_ste_dev = epochs_ste_dev_ch.average()
grav_ste_std = epochs_ste_std_ch.average()
evoked_bin = mne.combine_evoked([grav_bin_dev, -grav_bin_std],
weights='equal')
evoked_ste = mne.combine_evoked([grav_ste_dev, -grav_ste_std],
weights='equal')
mne.viz.plot_compare_evokeds([grav_bin_std,grav_bin_dev,grav_ste_std,grav_ste_dev],picks=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16])
plt.show()
mne.viz.plot_compare_evokeds([evoked_bin,evoked_ste],picks=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16])
plt.show()
#Selecting channels
epochs_bin_dev = _matstruc2mne_epochs(mat_bin_dev).crop(tmax=tcrop)
epochs_bin_std = _matstruc2mne_epochs(mat_bin_std).crop(tmax=tcrop)
epochs_ste_dev = _matstruc2mne_epochs(mat_ste_dev).crop(tmax=tcrop)
epochs_ste_std = _matstruc2mne_epochs(mat_ste_std).crop(tmax=tcrop)
mne.equalize_channels([epochs_bin_dev,epochs_bin_std,epochs_ste_dev,epochs_ste_std])
epochs_bin_dev_ch = epochs_bin_dev.pick_channels(roi_frontal)
epochs_bin_std_ch = epochs_bin_std.pick_channels(roi_frontal)
epochs_ste_dev_ch = epochs_ste_dev.pick_channels(roi_frontal)
epochs_ste_std_ch = epochs_ste_std.pick_channels(roi_frontal)
X_diff_roi = [epochs_bin_dev_ch.get_data().transpose(0, 2, 1) - epochs_bin_std_ch.get_data().transpose(0, 2, 1),
epochs_ste_dev_ch.get_data().transpose(0, 2, 1) - epochs_ste_std_ch.get_data().transpose(0, 2, 1)]
X_diff_ste_bin_roi = X_diff_roi[1]-X_diff_roi[0]
for cur_timewindow in all_toi_indexes:
T,pval,pval_fdr,pval_bonferroni = ttest_amplitude_roi(X_diff_ste_bin_roi,cur_timewindow,roi,times=epochs_bin_dev_ch.times)
grav_bin_dev = epochs_bin_dev_ch.average()
grav_bin_std = epochs_bin_std_ch.average()
grav_ste_dev = epochs_ste_dev_ch.average()
grav_ste_std = epochs_ste_std_ch.average()
evoked_bin = mne.combine_evoked([grav_bin_dev, -grav_bin_std],
weights='equal')
evoked_ste = mne.combine_evoked([grav_ste_dev, -grav_ste_std],
weights='equal')
mne.viz.plot_compare_evokeds([grav_bin_std,grav_bin_dev,grav_ste_std,grav_ste_dev],picks=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16])
plt.show()
mne.viz.plot_compare_evokeds([evoked_bin,evoked_ste],picks=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16])
plt.show()
mne.viz.plot_compare_evokeds?
from scipy import stats
from mne.stats import bonferroni_correction,fdr_correction
T, pval = ttest_1samp(X_diff_ste_bin, 0)
alpha = 0.05
n_samples, n_tests,_ = X_diff_ste_bin.shape
threshold_uncorrected = stats.t.ppf(1.0 - alpha, n_samples - 1)
reject_bonferroni, pval_bonferroni = bonferroni_correction(pval, alpha=alpha)
threshold_bonferroni = stats.t.ppf(1.0 - alpha / n_tests, n_samples - 1)
reject_fdr, pval_fdr = fdr_correction(pval, alpha=alpha, method='indep')
#threshold_fdr = np.min(np.abs(T)[reject_fdr])
masking_mat = pval<0.05
Tbis = np.zeros_like(T)
Tbis[masking_mat] = T[masking_mat]
plt.matshow(Tbis.T,cmap=plt.cm.RdBu_r)
plt.colorbar()
plt.show()
plt.matshow(-np.log10(pval).T)
plt.colorbar()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: On récupère les channels trouvés grace a l'analyse de clusters
Step2: One sample ttest FDR corrected (per electrode)
Step3: Tests de 280 a 440, par fenetres de 20 ms avec chevauchement de 10 ms
Step4: Printing and preparing all time windows
Step5: Tests on each time window
Step6: On a channel subset (ROI) - average over channels
Step7: Frontal roi
|
12,128
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import Image
import numpy as np
import math
%pylab
%matplotlib inline
Image('../Bell_2003.png')
def bell_mass_to_light(color, band, color_str):
    """Stellar mass-to-light ratio from the Bell et al. (2003) Table 7 calibration.

    Evaluates log10(M/L) = a + b * color for the photometric `band` and the
    colour index named by `color_str` (e.g. 'B-V', 'g-r'), and returns M/L.
    """
    calibrations = {'B-V' : {'B' : [-0.942, 1.737], 'V' : [-0.628, 1.305], 'R' : [-0.520, 1.094],
                             'I' : [-0.399, 0.824], 'J' : [-0.261, 0.433], 'H' : [-0.209, 0.210], 'K' : [-0.206, 0.135]},
                    'B-R' : {'B' : [-0.976, 1.111], 'V' : [-0.633, 0.816], 'R' : [-0.523, 0.683],
                             'I' : [-0.405, 0.518], 'J' : [-0.289, 0.297], 'H' : [-0.262, 0.180], 'K' : [-0.264, 0.138]},
                    'u-g' : { 'g': [-0.221, 0.485], 'r': [-0.099, 0.345], 'i': [-0.053, 0.268], 'z': [-0.105, 0.226]},
                    'u-r' : { 'g': [-0.390, 0.417], 'r': [-0.223, 0.299], 'i': [-0.151, 0.233], 'z': [-0.178, 0.192]},
                    'u-i' : { 'g': [-0.375, 0.359], 'r': [-0.212, 0.257], 'i': [-0.144, 0.201], 'z': [-0.171, 0.165]},
                    'u-z' : { 'g': [-0.400, 0.332], 'r': [-0.232, 0.239], 'i': [-0.161, 0.187], 'z': [-0.179, 0.151]},
                    'g-r' : { 'g': [-0.499, 1.519], 'r': [-0.306, 1.097], 'i': [-0.222, 0.864], 'z': [-0.223, 0.689]},
                    'g-i' : { 'g': [-0.379, 0.914], 'r': [-0.220, 0.661], 'i': [-0.152, 0.518], 'z': [-0.175, 0.421]},
                    'g-z' : { 'g': [-0.367, 0.698], 'r': [-0.215, 0.508], 'i': [-0.153, 0.402], 'z': [-0.171, 0.322]},
                    'r-i' : { 'g': [-0.106, 1.982], 'r': [-0.022, 1.431], 'i': [0.006, 1.114], 'z': [-0.052, 0.923]},
                    'r-z' : { 'g': [-0.124, 1.067], 'r': [-0.041, 0.780], 'i': [-0.018, 0.623], 'z': [-0.041, 0.463]}}
    intercept, slope = calibrations[color_str][band]
    return np.power(10, intercept + slope * color)
Image('../McGaugh_2014_table7.png')
def mcgaugh_mass_to_light(color, band) :
    """Four stellar M/L estimates for a B-V colour, per McGaugh (2014) Table 7.

    Returns an array of M/L values rounded to two decimals, in the order
    Bell 2003, Portinari 2004, Zibetti 2009, Into 2013, for `band` in
    {'V', 'I', 'mu36'}.
    """
    calibrations = (
        {'V' : (-0.628, 1.305), 'I' : (-0.275, 0.612), 'mu36' : (-0.322, -0.007)},  # Bell 2003
        {'V' : (-0.654, 1.290), 'I' : (-0.321, 0.701), 'mu36' : (-0.594, 0.467)},   # Portinari 2004
        {'V' : (-1.075, 1.837), 'I' : (-0.477, 1.004), 'mu36' : (-1.147, 1.289)},   # Zibetti 2009
        {'V' : (-0.900, 1.627), 'I' : (-0.421, 0.898), 'mu36' : (-0.861, 0.849)},   # Into 2013
    )
    ratios = [np.power(10, cal[band][0] + cal[band][1] * color) for cal in calibrations]
    return np.round(ratios, 2)
#TODO: добавить ссылку
def mu_bulge(r, mu_eff=None, r_eff=None, n=None):
    """Sersic bulge surface brightness (mag/arcsec^2) at radius r.

    mu_eff is the effective surface brightness, r_eff the effective radius
    and n the Sersic index; b_n uses the Capaccioli linear approximation.
    """
    b_n = 1.9992*n - 0.3271
    radial_term = (r/r_eff)**(1./n) - 1
    return mu_eff + 1.0857*b_n*radial_term
def mu_bulge2(r, mu_eff=None, r_eff=None, n=None):
    """Alternative Sersic bulge surface-brightness formula (mag/arcsec^2).

    Same parameters as mu_bulge; uses the higher-order series expansion for
    the Sersic constant (nu_n) instead of the linear approximation.
    """
    nu_n = 2*n - 1./3 + 4./(405*n) + 46./(25515*n**2)
    scale = 2.5*nu_n/np.log(10)
    return mu_eff + scale*((r/r_eff)**(1./n) - 1)
def mu_disc(r, mu0=None, h=None):
    """Exponential-disc surface brightness (mag/arcsec^2).

    mu0 is the central surface brightness and h the disc scale length;
    magnitudes grow linearly with r/h.
    """
    return mu0 + 1.0857*(r/h)
def total_mu_profile(*args):
    """Combine several surface-brightness profiles (same band) in magnitudes.

    Each argument is a per-radius magnitude profile (e.g. disc+bulge or
    disc1+disc2). Profiles are converted to linear flux, summed point by
    point and converted back to mag/arcsec^2.
    """
    fluxes = [np.power(10, -np.array(profile)/2.5) for profile in args]
    return -2.5*np.log10(np.sum(fluxes, axis=0))
# Absolute magnitude of the Sun per photometric band, used to convert
# surface brightness from mag/arcsec^2 to Lsun/pc^2 (see surface_brightness).
sun_abs_mags = {'U' : 5.61,
                'B' : 5.48,
                'V' : 4.83,
                'R' : 4.42,
                'I' : 4.08,
                'J' : 3.64,
                'H' : 3.32,
                'K' : 3.28,
                '3.6' : 3.24, # Oh et al. 2008
                'u' : 6.77, #SDSS bands from http://mips.as.arizona.edu/~cnaw/sun.html
                'g' : 5.36,
                'r' : 4.67,
                'i' : 4.48,
                'z' : 4.42
               }
def surface_brightness(mu=None, band=None):
    """Convert surface brightness from mag/arcsec^2 to Lsun/pc^2.

    Uses the solar absolute magnitude for `band` from the module-level
    `sun_abs_mags` table.
    """
    exponent = 8 + 0.4 * (sun_abs_mags[band] - mu)
    return 4.255 * math.pow(10, exponent)
def surf_density(mu=None, M_to_L=None, band=None):
    """Stellar surface density (Msun/pc^2) for any galaxy component.

    Converts the surface brightness `mu` to Lsun/pc^2 in the given band and
    scales it by the mass-to-light ratio.
    """
    luminosity = surface_brightness(mu=mu, band=band)
    return M_to_L * luminosity
def disc_totmag(mu0d, h, scale):
    """Total apparent magnitude of an exponential disc (Reshetnikov's formula).

    mu0d is the central surface brightness, h the scale length and `scale`
    the factor converting h to kpc.
    """
    # np.log10 is used explicitly instead of the bare `log10` provided by the
    # `%pylab` star-import, so the function also works outside pylab mode.
    return mu0d - 5*np.log10(h*scale) - 38.57
def s4g_mass_to_light(color36, color45):
    """Stellar M/L at 3.6 micron from the [3.6]-[4.5] colour (S4G calibration)."""
    exponent = -0.339*(color36 - color45) - 0.336
    return np.power(10., exponent)
def s4g_surf_density(mu=None, M_to_L=None):
    """Stellar surface density (Msun/pc^2) from a 3.6-micron surface brightness."""
    luminosity = np.power(10., 16.76 - 0.4*mu)/1000000.
    return M_to_L * luminosity
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Калибровки Bell et al. 2003
Step2: $$\log_{10}(M/L)=a_{\lambda} + b_{\lambda}\times Color$$
Step3: Самосогласованные калибровки из McGaugh 2014
Step4: Насколько я понял, последняя таблица самая точная - возьмем ее
Step5: Компоненты галактики
Step6: Абсолютная величина Солнца в нескольких полосах отсюда http
Step7: S4G калибровки и фотометрия
Step8: Хотя как хорошее приближение можно использовать 0.6, отличие будет небольшим.
|
12,129
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import pymc3 as pm
from scipy import stats
from scipy import optimize
import matplotlib.pyplot as plt
import seaborn as sns
import re
%matplotlib inline
def plot_traces(trcs, varnames=None):
    '''Plot traces with overlaid means and values'''
    # Summarize once; the posterior means drive both the overlay lines and
    # the per-axis annotations.
    names = trcs.varnames if varnames is None else varnames
    summary = pm.df_summary(trcs, varnames=varnames)
    mean_lines = {k: row['mean'] for k, row in summary.iterrows()}
    ax = pm.traceplot(trcs, varnames=varnames, figsize=(12, len(names) * 1.4),
                      lines=mean_lines)
    for i, mn in enumerate(summary['mean']):
        ax[i, 0].annotate('{:.2f}'.format(mn), xy=(mn, 0), xycoords='data',
                          xytext=(5, 10), textcoords='offset points', rotation=90,
                          va='bottom', fontsize='large', color='#AA0022')
def strip_derived_rvs(rvs):
    '''Remove PyMC3-generated RVs from a list'''
    # PyMC3 appends '_log'/'_interval' to the names of automatically created
    # transformed variables; keep only the user-defined ones.
    return [rv for rv in rvs
            if not (re.search('_log', rv.name) or re.search('_interval', rv.name))]
np.random.seed(123)
# Mean Poisson values
theta_noalcohol_meds = 1 # no alcohol, took an antihist
theta_alcohol_meds = 3 # alcohol, took an antihist
theta_noalcohol_nomeds = 6 # no alcohol, no antihist
theta_alcohol_nomeds = 36 # alcohol, no antihist
# Create samples
q = 1000
df_pois = pd.DataFrame({
'nsneeze': np.concatenate((np.random.poisson(theta_noalcohol_meds, q),
np.random.poisson(theta_alcohol_meds, q),
np.random.poisson(theta_noalcohol_nomeds, q),
np.random.poisson(theta_alcohol_nomeds, q))),
'alcohol': np.concatenate((np.repeat(False, q),
np.repeat(True, q),
np.repeat(False, q),
np.repeat(True, q))),
'nomeds': np.concatenate((np.repeat(False, q),
np.repeat(False, q),
np.repeat(True, q),
np.repeat(True, q)))})
df_pois.groupby(['nomeds', 'alcohol'])['nsneeze'].agg(['mean', 'var'])
# Gamma shape parameter
alpha = 10
def get_nb_vals(mu, alpha, size):
    """Generate negative binomially distributed samples by
    drawing a sample from a gamma distribution with mean `mu` and
    shape parameter `alpha`, then drawing from a Poisson
    distribution whose rate parameter is given by the sampled
    gamma variable.
    """
    # Fixed: the docstring above had lost its triple quotes in this copy,
    # leaving bare prose lines that were a SyntaxError.
    g = stats.gamma.rvs(alpha, scale=mu / alpha, size=size)
    return stats.poisson.rvs(g)
# Create samples
n = 1000
df = pd.DataFrame({
'nsneeze': np.concatenate((get_nb_vals(theta_noalcohol_meds, alpha, n),
get_nb_vals(theta_alcohol_meds, alpha, n),
get_nb_vals(theta_noalcohol_nomeds, alpha, n),
get_nb_vals(theta_alcohol_nomeds, alpha, n))),
'alcohol': np.concatenate((np.repeat(False, n),
np.repeat(True, n),
np.repeat(False, n),
np.repeat(True, n))),
'nomeds': np.concatenate((np.repeat(False, n),
np.repeat(False, n),
np.repeat(True, n),
np.repeat(True, n)))})
df.groupby(['nomeds', 'alcohol'])['nsneeze'].agg(['mean', 'var'])
g = sns.factorplot(x='nsneeze', row='nomeds', col='alcohol', data=df, kind='count', aspect=1.5)
# Make x-axis ticklabels less crowded
ax = g.axes[1, 0]
labels = range(len(ax.get_xticklabels(which='both')))
ax.set_xticks(labels[::5])
ax.set_xticklabels(labels[::5]);
fml = 'nsneeze ~ alcohol + nomeds + alcohol:nomeds'
with pm.Model() as model:
pm.glm.GLM.from_formula(formula=fml, data=df, family=pm.glm.families.NegativeBinomial())
# Old initialization
# start = pm.find_MAP(fmin=optimize.fmin_powell)
# C = pm.approx_hessian(start)
# trace = pm.sample(4000, step=pm.NUTS(scaling=C))
trace = pm.sample(2000, njobs=2)
rvs = [rv.name for rv in strip_derived_rvs(model.unobserved_RVs)]
plot_traces(trace[1000:], varnames=rvs);
# Transform coefficients to recover parameter values
np.exp(pm.df_summary(trace[1000:], varnames=rvs)[['mean','hpd_2.5','hpd_97.5']])
np.percentile(trace[1000:]['mu'], [25,50,75])
df.nsneeze.mean()
trace[1000:]['alpha'].mean()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Convenience Functions
Step2: Generate Data
Step4: Since the mean and variance of a Poisson distributed random variable are equal, the sample means and variances are very close.
Step5: As in the Poisson regression example, we see that drinking alcohol and/or not taking antihistamines increase the sneezing rate to varying degrees. Unlike in that example, for each combination of alcohol and nomeds, the variance of nsneeze is higher than the mean. This suggests that a Poisson distrubution would be a poor fit for the data since the mean and variance of a Poisson distribution are equal.
Step6: Negative Binomial Regression
Step7: View Results
Step8: The mean values are close to the values we specified when generating the data
|
12,130
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
a = np.array([[1,2],[3,4]])
pos = [1, 2]
element = np.array([[3, 5], [6, 6]])
# np.insert places each row *before* the given index of the original array;
# subtracting arange offsets the targets so the rows land at the requested
# positions of the final (grown) array.
pos = np.array(pos) - np.arange(len(element))
a = np.insert(a, pos, element, axis=0)
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
12,131
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
import os
import time
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
# 读取并为 py2 compat 解码
text = open(path_to_file, 'rb').read().decode(encoding='utf-8')
# 文本长度是指文本中的字符个数
print ('Length of text: {} characters'.format(len(text)))
# 看一看文本中的前 250 个字符
print(text[:250])
# 文本中的非重复字符
vocab = sorted(set(text))
print ('{} unique characters'.format(len(vocab)))
# 创建从非重复字符到索引的映射
char2idx = {u:i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
text_as_int = np.array([char2idx[c] for c in text])
print('{')
for char,_ in zip(char2idx, range(20)):
print(' {:4s}: {:3d},'.format(repr(char), char2idx[char]))
print(' ...\n}')
# 显示文本首 13 个字符的整数映射
print ('{} ---- characters mapped to int ---- > {}'.format(repr(text[:13]), text_as_int[:13]))
# 设定每个输入句子长度的最大值
seq_length = 100
examples_per_epoch = len(text)//seq_length
# 创建训练样本 / 目标
char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
for i in char_dataset.take(5):
print(idx2char[i.numpy()])
sequences = char_dataset.batch(seq_length+1, drop_remainder=True)
for item in sequences.take(5):
print(repr(''.join(idx2char[item.numpy()])))
def split_input_target(chunk):
    """Split a (seq_length+1)-long sequence into aligned (input, target) pairs.

    The target is the input shifted one position to the right, so at every
    timestep the model learns to predict the next character.
    """
    return chunk[:-1], chunk[1:]
dataset = sequences.map(split_input_target)
for input_example, target_example in dataset.take(1):
print ('Input data: ', repr(''.join(idx2char[input_example.numpy()])))
print ('Target data:', repr(''.join(idx2char[target_example.numpy()])))
for i, (input_idx, target_idx) in enumerate(zip(input_example[:5], target_example[:5])):
print("Step {:4d}".format(i))
print(" input: {} ({:s})".format(input_idx, repr(idx2char[input_idx])))
print(" expected output: {} ({:s})".format(target_idx, repr(idx2char[target_idx])))
# 批大小
BATCH_SIZE = 64
# 设定缓冲区大小,以重新排列数据集
# (TF 数据被设计为可以处理可能是无限的序列,
# 所以它不会试图在内存中重新排列整个序列。相反,
# 它维持一个缓冲区,在缓冲区重新排列元素。)
BUFFER_SIZE = 10000
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
dataset
# 词集的长度
vocab_size = len(vocab)
# 嵌入的维度
embedding_dim = 256
# RNN 的单元数量
rnn_units = 1024
def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
    """Build the character-level language model: Embedding -> GRU -> Dense.

    Args:
        vocab_size: number of distinct characters (input and output space).
        embedding_dim: dimensionality of the character embedding vectors.
        rnn_units: number of GRU units.
        batch_size: fixed batch size baked into the input shape; required
            because the GRU is stateful.

    Returns:
        An uncompiled tf.keras.Sequential model mapping character ids of
        shape (batch_size, sequence_length) to per-character logits of
        shape (batch_size, sequence_length, vocab_size).
    """
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(vocab_size, embedding_dim,
                                  batch_input_shape=[batch_size, None]),
        tf.keras.layers.GRU(rnn_units,
                            return_sequences=True,
                            stateful=True,
                            recurrent_initializer='glorot_uniform'),
        tf.keras.layers.Dense(vocab_size)
    ])
    return model
model = build_model(
vocab_size = len(vocab),
embedding_dim=embedding_dim,
rnn_units=rnn_units,
batch_size=BATCH_SIZE)
for input_example_batch, target_example_batch in dataset.take(1):
example_batch_predictions = model(input_example_batch)
print(example_batch_predictions.shape, "# (batch_size, sequence_length, vocab_size)")
model.summary()
sampled_indices = tf.random.categorical(example_batch_predictions[0], num_samples=1)
sampled_indices = tf.squeeze(sampled_indices,axis=-1).numpy()
sampled_indices
print("Input: \n", repr("".join(idx2char[input_example_batch[0]])))
print()
print("Next Char Predictions: \n", repr("".join(idx2char[sampled_indices ])))
def loss(labels, logits):
    """Per-timestep sparse categorical cross-entropy on raw logits.

    `from_logits=True` because the model's final Dense layer applies no
    softmax.
    """
    return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)
example_batch_loss = loss(target_example_batch, example_batch_predictions)
print("Prediction shape: ", example_batch_predictions.shape, " # (batch_size, sequence_length, vocab_size)")
print("scalar_loss: ", example_batch_loss.numpy().mean())
model.compile(optimizer='adam', loss=loss)
# 检查点保存至的目录
checkpoint_dir = './training_checkpoints'
# 检查点的文件名
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}")
checkpoint_callback=tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_prefix,
save_weights_only=True)
EPOCHS=10
history = model.fit(dataset, epochs=EPOCHS, callbacks=[checkpoint_callback])
tf.train.latest_checkpoint(checkpoint_dir)
model = build_model(vocab_size, embedding_dim, rnn_units, batch_size=1)
model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
model.build(tf.TensorShape([1, None]))
model.summary()
def generate_text(model, start_string):
    """Generate text from the trained model, seeded with start_string.

    Assumes the model was rebuilt with batch_size=1 (see the preceding
    cells) and that the module-level char2idx / idx2char mappings are in
    scope.
    """
    # Evaluation step (generate text with the learned model)
    # Number of characters to generate
    num_generate = 1000
    # Vectorize the start string into integer character ids
    input_eval = [char2idx[s] for s in start_string]
    input_eval = tf.expand_dims(input_eval, 0)
    # Accumulates the generated characters
    text_generated = []
    # Low temperature yields more predictable text,
    # higher temperature yields more surprising text;
    # experiment to find the best setting.
    temperature = 1.0
    # Batch size is 1 here
    model.reset_states()
    for i in range(num_generate):
        predictions = model(input_eval)
        # Remove the batch dimension
        predictions = tf.squeeze(predictions, 0)
        # Sample the next character from a categorical distribution
        # over the (temperature-scaled) output logits
        predictions = predictions / temperature
        predicted_id = tf.random.categorical(predictions, num_samples=1)[-1,0].numpy()
        # Feed the predicted character (together with the carried hidden
        # state) back in as the next model input
        input_eval = tf.expand_dims([predicted_id], 0)
        text_generated.append(idx2char[predicted_id])
    return (start_string + ''.join(text_generated))
print(generate_text(model, start_string=u"ROMEO: "))
model = build_model(
vocab_size = len(vocab),
embedding_dim=embedding_dim,
rnn_units=rnn_units,
batch_size=BATCH_SIZE)
optimizer = tf.keras.optimizers.Adam()
@tf.function
def train_step(inp, target):
    """Run one optimization step and return the mean batch loss.

    Compiled into a graph by @tf.function. Uses the module-level `model`
    and `optimizer`.
    """
    with tf.GradientTape() as tape:
        predictions = model(inp)
        loss = tf.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(
                target, predictions, from_logits=True))
    # Backpropagate and apply the gradient update.
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss
# 训练步骤
EPOCHS = 10
for epoch in range(EPOCHS):
start = time.time()
# 在每个训练周期开始时,初始化隐藏状态
# 隐藏状态最初为 None
hidden = model.reset_states()
for (batch_n, (inp, target)) in enumerate(dataset):
loss = train_step(inp, target)
if batch_n % 100 == 0:
template = 'Epoch {} Batch {} Loss {}'
print(template.format(epoch+1, batch_n, loss))
# 每 5 个训练周期,保存(检查点)1 次模型
if (epoch + 1) % 5 == 0:
model.save_weights(checkpoint_prefix.format(epoch=epoch))
print ('Epoch {} Loss {:.4f}'.format(epoch+1, loss))
print ('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
model.save_weights(checkpoint_prefix.format(epoch=epoch))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 循环神经网络(RNN)文本生成
Step2: 下载莎士比亚数据集
Step3: 读取数据
Step4: 处理文本
Step5: 现在,每个字符都有一个整数表示值。请注意,我们将字符映射至索引 0 至 len(unique).
Step6: 预测任务
Step7: batch 方法使我们能轻松把单个字符转换为所需长度的序列。
Step8: 对于每个序列,使用 map 方法先复制再顺移,以创建输入文本和目标文本。map 方法可以将一个简单的函数应用到每一个批次 (batch)。
Step9: 打印第一批样本的输入与目标值:
Step10: 这些向量的每个索引均作为一个时间步来处理。作为时间步 0 的输入,模型接收到 “F” 的索引,并尝试预测 “i” 的索引为下一个字符。在下一个时间步,模型执行相同的操作,但是 RNN 不仅考虑当前的输入字符,还会考虑上一步的信息。
Step11: 创建训练批次
Step12: 创建模型
Step13: 对于每个字符,模型会查找嵌入,把嵌入当作输入运行 GRU 一个时间步,并用密集层生成逻辑回归 (logits),预测下一个字符的对数可能性。
Step14: 在上面的例子中,输入的序列长度为 100, 但是这个模型可以在任何长度的输入上运行:
Step15: 为了获得模型的实际预测,我们需要从输出分布中抽样,以获得实际的字符索引。这个分布是根据对字符集的逻辑回归定义的。
Step16: 这使我们得到每个时间步预测的下一个字符的索引。
Step17: 解码它们,以查看此未经训练的模型预测的文本:
Step18: 训练模型
Step19: 使用 tf.keras.Model.compile 方法配置训练步骤。我们将使用 tf.keras.optimizers.Adam 并采用默认参数,以及损失函数。
Step20: 配置检查点
Step21: 执行训练
Step22: 生成文本
Step23: 预测循环
Step24: 若想改进结果,最简单的方式是延长训练时间 (试试 EPOCHS=30)。
|
12,132
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
%matplotlib inline
from pyensae.datasource import download_data
file = download_data("features_bike_chicago.zip")
file
import pandas
features = pandas.read_csv("features_bike_chicago.txt", sep="\t", encoding="utf-8", low_memory=False, header=[0,1])
features.columns = ["station_id", "station_name", "weekday"] + list(features.columns[3:])
features.head()
features.shape
names = features.columns[3:]
from sklearn.cluster import KMeans
clus = KMeans(10)
clus.fit(features[names])
pred = clus.predict(features[names])
set(pred)
features["cluster"] = pred
features[["cluster", "weekday", "station_id"]].groupby(["cluster", "weekday"]).count()
nb = features[["cluster", "weekday", "station_id"]].groupby(["cluster", "weekday"]).count()
nb = nb.reset_index()
nb[nb.cluster.isin([0, 3, 5, 6])].pivot("weekday","cluster", "station_id").plot(kind="bar");
centers = clus.cluster_centers_.T
import matplotlib.pyplot as plt
fig, ax = plt.subplots(centers.shape[1], 2, figsize=(10,10))
nbf = centers.shape[0] // 2
x = list(range(0,nbf))
col = 0
dec = 0
colors = ["red", "yellow", "gray", "green", "brown", "orange", "blue"]
for i in range(centers.shape[1]):
if 2*i == centers.shape[1]:
col += 1
dec += centers.shape[1]
color = colors[i%len(colors)]
ax[2*i-dec, col].bar (x, centers[:nbf,i], width=1.0, color=color)
ax[2*i-dec, col].set_ylabel("cluster %d - start" % i, color=color)
ax[2*i+1-dec, col].bar (x, centers[nbf:,i], width=1.0, color=color)
ax[2*i+1-dec, col].set_ylabel("cluster %d - stop" % i, color=color)
from sklearn.cluster import DBSCAN
dbs = DBSCAN(eps=0.1)
pred_dbs = dbs.fit_predict(features[names])
set(pred_dbs)
features["cluster_dbs"] = pred_dbs
nbs = features[["cluster_dbs", "weekday", "station_id"]].groupby(["cluster_dbs", "weekday"]).count()
nbs = nbs.reset_index()
nbs[nbs.cluster_dbs.isin([0, 3, 5, 6])].pivot("weekday","cluster_dbs", "station_id").plot(kind="bar");
piv = features.pivot_table(index=["station_id", "station_name"],
columns="weekday", values="cluster")
piv.head()
piv["distincts"] = piv.apply(lambda row: len(set(row[i] for i in range(0,7))), axis=1)
pivn = piv.reset_index()
pivn.head()
pivn.columns = [str(_).replace(".0", "") for _ in pivn.columns.values]
pivn.head()
from pyensae.datasource import download_data
if False:
# Provient du site de Chicago
file = download_data("Divvy_Trips_2016_Q3Q4.zip",
url="https://s3.amazonaws.com/divvy-data/tripdata/")
else:
# Copie au cas où celui-ci tomberait en panne
file = download_data("Divvy_Trips_2016_Q3.zip")
stations = pandas.read_csv("Divvy_Stations_2016_Q3.csv")
stations.head()
data = stations.merge(pivn, left_on=["id", "name"],
right_on=["station_id", "station_name"], suffixes=('_s', '_c'))
data.sort_values("id").head()
def folium_html_stations_map(stations, html_width=None, html_height=None, **kwargs):
    """Build an HTML folium map with one circle marker per station.

    Args:
        stations: iterable of ((latitude, longitude), value) pairs, where
            value is either a color string or a (popup_name, color) tuple.
        html_width, html_height: forwarded to pyensae's folium_html_map.
        **kwargs: extra folium.Map options; zoom_start defaults to 11 and
            location defaults to the first station's coordinates.

    Returns:
        The HTML rendering produced by folium_html_map.
    """
    import folium
    from pyensae.notebookhelper import folium_html_map
    map_osm = None
    for key, value in stations:
        x, y = key
        if map_osm is None:
            if "zoom_start" not in kwargs:
                kwargs["zoom_start"] = 11
            if "location" not in kwargs:
                kwargs["location"] = [x, y]
            # BUG FIX: the original passed kwargs["location"] positionally
            # while "location" was still inside **kwargs, which raises
            # "got multiple values for argument 'location'" whenever the
            # caller supplies a location. Passing **kwargs alone is enough.
            map_osm = folium.Map(**kwargs)
        if isinstance(value, tuple):
            name, value = value
            map_osm.add_child(folium.CircleMarker(
                [x, y], popup=name, radius=15, fill_color=value, color=value))
        else:
            map_osm.add_child(folium.CircleMarker(
                [x, y], radius=15, fill_color=value, color=value))
    return folium_html_map(map_osm, width=html_width, height=html_height)
colors = ["red", "yellow", "gray", "green", "brown", "orange", "blue", "black", "pink", "violet"]
for i, c in enumerate(colors):
print("Cluster {0} is {1}".format(i, c))
xy = []
for els in data.apply(lambda row: (row["latitude"], row["longitude"], row["1"], row["name"]), axis=1):
try:
cl = int(els[2])
except:
# NaN
continue
name = "%s c%d" % (els[3], cl)
color = colors[cl]
xy.append( ( (els[0], els[1]), (name, color)))
folium_html_stations_map(xy, width="80%")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Les données
Step2: Les données sont agrégées par tranche de 10 minutes soit 144 périodes durant la journée et 288 nombres pour les départs et arrivées de vélos. Cela explique les dimensions de la matrice.
Step3: Let's draw the clusters.
Step4: On a réussi à isoler plusieurs usages différents. On voit les départs les matin et les arrivées le soir, le modèle inversé, un autre... Mais les-a-t-on tous trouvés ?
Step5: L'algorithme dbscan utilise la proximité des points pour classer les observations. On se retrouve dans le cas où il n'y a pas vraiment de frontière entre les clusters et tous les points se retrouvent associés en un unique cluster excepté quelques points aberrants.
Step6: Une carte des stations un jour de semaine.
|
12,133
|
<ASSISTANT_TASK:>
Python Code:
password = input("Please enter the password: ")
if password=="Beeblebrox":
print("Welcome Zaphod. How improbable of you.")
else:
print("Get lost!")
speed = int(input("Please enter speed in mph: "))
if :
print("You are exceeding the speed limit. Please slow down.")
answer = input("Are you driving an emergency vehicle Y/N: ")
if answer=='Y':
is_emergency_vehicle = True
else:
is_emergency_vehicle = False
if is_emergency_vehicle==True:
print("Change these words")
answer = input("Are you driving an emergency vehicle Y/N: ")
if answer=='Y' or answer==:
is_emergency_vehicle = True
else:
is_emergency_vehicle = False
if is_emergency_vehicle==True:
print("You can exceed 60mph in an emergency if safe to do so")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Study the code you just ran. Hopefully you can see why getting the password right or wrong affects which print function is executed
Step2: Test your program three times with speeds of 30, 60 and 65.
Step3: Test your program twice, answering first N then Y.
|
12,134
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import re
import os
from scipy.stats import pearsonr
from datetime import datetime
from gensim.models import CoherenceModel
from gensim.corpora.dictionary import Dictionary
base_dir = os.path.join(os.path.expanduser('~'), "workshop/nlp/data/")
data_dir = os.path.join(base_dir, 'wiki-movie-subset')
if not os.path.exists(data_dir):
raise ValueError("SKIP: Please download the movie corpus.")
ref_dir = os.path.join(base_dir, 'reference')
topics_path = os.path.join(ref_dir, 'topicsMovie.txt')
human_scores_path = os.path.join(ref_dir, 'goldMovie.txt')
%%time
# Tokenize every usable article in data_dir into a list of lowercase words.
texts = []
file_num = 0
preprocessed = 0
listing = os.listdir(data_dir)
for fname in listing:
    file_num += 1
    if 'disambiguation' in fname:
        continue  # discard disambiguation and redirect pages
    elif fname.startswith('File_'):
        continue  # discard images, gifs, etc.
    elif fname.startswith('Category_'):
        continue  # discard category articles
    # Not sure how to identify portal and redirect pages,
    # as well as pages about a single year.
    # As a result, this preprocessing differs from the paper.
    with open(os.path.join(data_dir, fname)) as f:
        for line in f:
            # lower case all words
            lowered = line.lower()
            # Remove punctuation and split into separate words.
            # BUG FIX: re.LOCALE is invalid for str patterns in Python 3
            # (raises "ValueError: cannot use LOCALE flag with a str
            # pattern"); re.UNICODE is the str default and is kept only
            # for explicitness.
            words = re.findall(r'\w+', lowered, flags=re.UNICODE)
            texts.append(words)
    preprocessed += 1
    if file_num % 10000 == 0:
        print('PROGRESS: %d/%d, preprocessed %d, discarded %d' % (
            file_num, len(listing), preprocessed, (file_num - preprocessed)))
%%time
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
print(len(corpus))
print(dictionary)
topics = [] # list of 100 topics
with open(topics_path) as f:
topics = [line.split() for line in f if line]
len(topics)
human_scores = []
with open(human_scores_path) as f:
for line in f:
human_scores.append(float(line.strip()))
len(human_scores)
# We first need to filter out any topics that contain terms not in our dictionary
# These may occur as a result of preprocessing steps differing from those used to
# produce the reference topics. In this case, this only occurs in one topic.
invalid_topic_indices = set(
i for i, topic in enumerate(topics)
if any(t not in dictionary.token2id for t in topic)
)
print("Topics with out-of-vocab terms: %s" % ', '.join(map(str, invalid_topic_indices)))
usable_topics = [topic for i, topic in enumerate(topics) if i not in invalid_topic_indices]
%%time
cm = CoherenceModel(topics=usable_topics, corpus=corpus, dictionary=dictionary, coherence='u_mass')
u_mass = cm.get_coherence_per_topic()
print("Calculated u_mass coherence for %d topics" % len(u_mass))
%%time
cm = CoherenceModel(topics=usable_topics, texts=texts, dictionary=dictionary, coherence='c_v')
c_v = cm.get_coherence_per_topic()
print("Calculated c_v coherence for %d topics" % len(c_v))
%%time
cm.coherence = 'c_uci'
c_uci = cm.get_coherence_per_topic()
print("Calculated c_uci coherence for %d topics" % len(c_uci))
%%time
cm.coherence = 'c_npmi'
c_npmi = cm.get_coherence_per_topic()
print("Calculated c_npmi coherence for %d topics" % len(c_npmi))
final_scores = [
score for i, score in enumerate(human_scores)
if i not in invalid_topic_indices
]
len(final_scores)
for our_scores in (u_mass, c_v, c_uci, c_npmi):
print(pearsonr(our_scores, final_scores)[0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download the dataset (movie.zip) and gold standard data (topicsMovie.txt and goldMovie.txt) from the link and plug in the locations below.
Step2: Cross validate the numbers
Step3: Deal with any vocabulary mismatch.
Step4: Start off with u_mass coherence measure.
Step5: Start c_v coherence measure
Step6: Start c_uci and c_npmi coherence measures
Step7: The values in the paper were
|
12,135
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from math import pi
import control as ct
def vehicle_update(t, x, u, params={}):
Vehicle dynamics for cruise control system.
Parameters
----------
x : array
System state: car velocity in m/s
u : array
System input: [throttle, gear, road_slope], where throttle is
a float between 0 and 1, gear is an integer between 1 and 5,
and road_slope is in rad.
Returns
-------
float
Vehicle acceleration
from math import copysign, sin
sign = lambda x: copysign(1, x) # define the sign() function
# Set up the system parameters
m = params.get('m', 1600.) # vehicle mass, kg
g = params.get('g', 9.8) # gravitational constant, m/s^2
Cr = params.get('Cr', 0.01) # coefficient of rolling friction
Cd = params.get('Cd', 0.32) # drag coefficient
rho = params.get('rho', 1.3) # density of air, kg/m^3
A = params.get('A', 2.4) # car area, m^2
alpha = params.get(
'alpha', [40, 25, 16, 12, 10]) # gear ratio / wheel radius
# Define variables for vehicle state and inputs
v = x[0] # vehicle velocity
throttle = np.clip(u[0], 0, 1) # vehicle throttle
gear = u[1] # vehicle gear
theta = u[2] # road slope
# Force generated by the engine
omega = alpha[int(gear)-1] * v # engine angular speed
F = alpha[int(gear)-1] * motor_torque(omega, params) * throttle
# Disturbance forces
#
# The disturbance force Fd has three major components: Fg, the forces due
# to gravity; Fr, the forces due to rolling friction; and Fa, the
# aerodynamic drag.
# Letting the slope of the road be \theta (theta), gravity gives the
# force Fg = m g sin \theta.
Fg = m * g * sin(theta)
# A simple model of rolling friction is Fr = m g Cr sgn(v), where Cr is
# the coefficient of rolling friction and sgn(v) is the sign of v (±1) or
# zero if v = 0.
Fr = m * g * Cr * sign(v)
# The aerodynamic drag is proportional to the square of the speed: Fa =
# 1/2 \rho Cd A |v| v, where \rho is the density of air, Cd is the
# shape-dependent aerodynamic drag coefficient, and A is the frontal area
# of the car.
Fa = 1/2 * rho * Cd * A * abs(v) * v
# Final acceleration on the car
Fd = Fg + Fr + Fa
dv = (F - Fd) / m
return dv
def motor_torque(omega, params={}):
    """Engine torque [Nm] at angular velocity omega [rad/s].

    Quadratic torque curve peaking at omega_m, clipped so the model never
    produces negative torque outside the operating range.
    """
    peak_torque = params.get('Tm', 190.)       # engine torque constant
    peak_speed = params.get('omega_m', 420.)   # peak engine angular speed
    rolloff = params.get('beta', 0.4)          # peak engine rolloff
    speed_ratio = omega/peak_speed - 1
    torque = peak_torque * (1 - rolloff * speed_ratio**2)
    return np.clip(torque, 0, None)
# Figure 4.2a - single torque curve as function of omega
omega_range = np.linspace(0, 700, 701)
plt.subplot(2, 2, 1)
plt.plot(omega_range, [motor_torque(w) for w in omega_range])
plt.xlabel('Angular velocity $\omega$ [rad/s]')
plt.ylabel('Torque $T$ [Nm]')
plt.grid(True, linestyle='dotted')
# Figure 4.2b - torque curves in different gears, as function of velocity
plt.subplot(2, 2, 2)
v_range = np.linspace(0, 70, 71)
alpha = [40, 25, 16, 12, 10]
for gear in range(5):
omega_range = alpha[gear] * v_range
plt.plot(v_range, [motor_torque(w) for w in omega_range],
color='blue', linestyle='solid')
# Set up the axes and style
plt.axis([0, 70, 100, 200])
plt.grid(True, linestyle='dotted')
# Add labels
plt.text(11.5, 120, '$n$=1')
plt.text(24, 120, '$n$=2')
plt.text(42.5, 120, '$n$=3')
plt.text(58.5, 120, '$n$=4')
plt.text(58.5, 185, '$n$=5')
plt.xlabel('Velocity $v$ [m/s]')
plt.ylabel('Torque $T$ [Nm]')
plt.tight_layout()
plt.suptitle('Torque curves for typical car engine');
vehicle = ct.NonlinearIOSystem(
vehicle_update, None, name='vehicle',
inputs = ('u', 'gear', 'theta'), outputs = ('v'), states=('v'))
# Define a generator for creating a "standard" cruise control plot
def cruise_plot(sys, t, y, t_hill=5, vref=20, antiwindup=False, linetype='b-',
                subplots=[None, None]):
    """Plot velocity and throttle traces for a cruise-control simulation.

    Args:
        sys: I/O system whose outputs include 'v' and 'u'; used only to
            locate the corresponding rows of `y` via find_output().
        t: time vector.
        y: output trajectory array, indexed by the output indices of `sys`.
        t_hill: time at which the hill begins (dashed vertical marker).
        vref: reference velocity, used for axis limits and reference line.
        antiwindup: if True, widen the throttle axis to [0, 2] and overlay
            the saturated (applied) input on the commanded one.
        linetype: matplotlib format string for the traces.
        subplots: pair of existing axes to draw into; None entries are
            created. The default list is copied before being modified, so
            the mutable default is not shared across calls.

    Returns:
        The pair of axes used, so callers can layer further curves on them.
    """
    # Figure out the plot bounds and indices
    v_min = vref-1.2; v_max = vref+0.5; v_ind = sys.find_output('v')
    u_min = 0; u_max = 2 if antiwindup else 1; u_ind = sys.find_output('u')
    # Make sure the upper and lower bounds on v are OK
    while max(y[v_ind]) > v_max: v_max += 1
    while min(y[v_ind]) < v_min: v_min -= 1
    # Create arrays for return values
    subplot_axes = subplots.copy()
    # Velocity profile
    if subplot_axes[0] is None:
        subplot_axes[0] = plt.subplot(2, 1, 1)
    else:
        plt.sca(subplots[0])
    plt.plot(t, y[v_ind], linetype)
    plt.plot(t, vref*np.ones(t.shape), 'k-')
    plt.plot([t_hill, t_hill], [v_min, v_max], 'k--')
    plt.axis([0, t[-1], v_min, v_max])
    plt.xlabel('Time $t$ [s]')
    plt.ylabel('Velocity $v$ [m/s]')
    # Commanded input profile
    if subplot_axes[1] is None:
        subplot_axes[1] = plt.subplot(2, 1, 2)
    else:
        plt.sca(subplots[1])
    plt.plot(t, y[u_ind], 'r--' if antiwindup else linetype)
    plt.plot([t_hill, t_hill], [u_min, u_max], 'k--')
    plt.axis([0, t[-1], u_min, u_max])
    plt.xlabel('Time $t$ [s]')
    plt.ylabel('Throttle $u$')
    # Applied input profile
    if antiwindup:
        plt.plot(t, np.clip(y[u_ind], 0, 1), linetype)
        plt.legend(['Commanded', 'Applied'], frameon=False)
    return subplot_axes
def sf_update(t, z, u, params={}):
    """Integrator-state derivative for the state-feedback controller.

    The single controller state z integrates the tracking error, so its
    derivative is simply (measured output) - (reference).
    """
    measured, reference = u[1], u[2]
    return measured - reference
def sf_output(t, z, u, params={}):
    """State-feedback control law with integral action and feedforward.

    Returns ud - K (x - xd) - ki z + kf (r - yd), where (xd, yd, ud) is the
    operating point, K the state-feedback gain, ki the integral gain and kf
    the reference feedforward gain. All parameters default to zero.
    """
    # Gains and operating point, each defaulting to 0 when unspecified.
    p = {name: params.get(name, 0)
         for name in ('K', 'ki', 'kf', 'xd', 'yd', 'ud')}
    # System state, measured output and reference input.
    x, y, r = u[0], u[1], u[2]
    state_feedback = p['K'] * (x - p['xd'])
    integral_action = p['ki'] * z
    feedforward = p['kf'] * (r - p['yd'])
    return p['ud'] - state_feedback - integral_action + feedforward
# Create the input/output system for the controller
control_sf = ct.NonlinearIOSystem(
sf_update, sf_output, name='control',
inputs=('x', 'y', 'r'),
outputs=('u'),
states=('z'))
# Create the closed loop system for the state space controller
cruise_sf = ct.InterconnectedSystem(
(vehicle, control_sf), name='cruise',
connections=(
('vehicle.u', 'control.u'),
('control.x', 'vehicle.v'),
('control.y', 'vehicle.v')),
inplist=('control.r', 'vehicle.gear', 'vehicle.theta'),
outlist=('control.u', 'vehicle.v'), outputs=['u', 'v'])
# Define the time and input vectors
T = np.linspace(0, 25, 501)
vref = 20 * np.ones(T.shape)
gear = 4 * np.ones(T.shape)
theta0 = np.zeros(T.shape)
# Find the equilibrium point for the system
Xeq, Ueq = ct.find_eqpt(
vehicle, [vref[0]], [0, gear[0], theta0[0]], y0=[vref[0]], iu=[1, 2])
print("Xeq = ", Xeq)
print("Ueq = ", Ueq)
# Compute the linearized system at the eq pt
cruise_linearized = ct.linearize(vehicle, Xeq, [Ueq[0], gear[0], 0])
# Construct the gain matrices for the system
A, B, C = cruise_linearized.A, cruise_linearized.B[0, 0], cruise_linearized.C
K = 0.5
kf = -1 / (C * np.linalg.inv(A - B * K) * B)
# Compute the steady state velocity and throttle setting
xd = Xeq[0]
ud = Ueq[0]
yd = vref[-1]
# Response of the system with no integral feedback term
plt.figure()
theta_hill = [
0 if t <= 5 else
4./180. * pi * (t-5) if t <= 6 else
4./180. * pi for t in T]
t, y_sfb = ct.input_output_response(
cruise_sf, T, [vref, gear, theta_hill], [Xeq[0], 0],
params={'K':K, 'ki':0.0, 'kf':kf, 'xd':xd, 'ud':ud, 'yd':yd})
subplots = cruise_plot(cruise_sf, t, y_sfb, t_hill=5, linetype='b--')
# Response of the system with state feedback + integral action
t, y_sfb_int = ct.input_output_response(
cruise_sf, T, [vref, gear, theta_hill], [Xeq[0], 0],
params={'K':K, 'ki':0.1, 'kf':kf, 'xd':xd, 'ud':ud, 'yd':yd})
cruise_plot(cruise_sf, t, y_sfb_int, t_hill=5, linetype='b-', subplots=subplots)
# Add title and legend
plt.suptitle('Cruise control with state feedback, integral action')
import matplotlib.lines as mlines
p_line = mlines.Line2D([], [], color='blue', linestyle='--', label='State feedback')
pi_line = mlines.Line2D([], [], color='blue', linestyle='-', label='w/ integral action')
plt.legend(handles=[p_line, pi_line], frameon=False, loc='lower right');
# Get the transfer function from throttle input + hill to vehicle speed
P = ct.ss2tf(cruise_linearized[0, 0])
# Construction a controller that cancels the pole
kp = 0.5
a = -P.pole()[0]
b = np.real(P(0)) * a
ki = a * kp
C = ct.tf2ss(ct.TransferFunction([kp, ki], [1, 0]))
control_pz = ct.LinearIOSystem(C, name='control', inputs='u', outputs='y')
print("system: a = ", a, ", b = ", b)
print("pzcancel: kp =", kp, ", ki =", ki, ", 1/(kp b) = ", 1/(kp * b))
print("sfb_int: K = ", K, ", ki = 0.1")
# Construct the closed loop system and plot the response
# Create the closed loop system for the state space controller
cruise_pz = ct.InterconnectedSystem(
(vehicle, control_pz), name='cruise_pz',
connections = (
('control.u', '-vehicle.v'),
('vehicle.u', 'control.y')),
inplist = ('control.u', 'vehicle.gear', 'vehicle.theta'),
inputs = ('vref', 'gear', 'theta'),
outlist = ('vehicle.v', 'vehicle.u'),
outputs = ('v', 'u'))
# Find the equilibrium point
X0, U0 = ct.find_eqpt(
cruise_pz, [vref[0], 0], [vref[0], gear[0], theta0[0]],
iu=[1, 2], y0=[vref[0], 0], iy=[0])
# Response of the system with PI controller canceling process pole
t, y_pzcancel = ct.input_output_response(
cruise_pz, T, [vref, gear, theta_hill], X0)
subplots = cruise_plot(cruise_pz, t, y_pzcancel, t_hill=5, linetype='b-')
cruise_plot(cruise_sf, t, y_sfb_int, t_hill=5, linetype='b--', subplots=subplots);
# Values of the first order transfer function P(s) = b/(s + a) are set above
# Define the input that we want to track
T = np.linspace(0, 40, 101)
vref = 20 * np.ones(T.shape)
gear = 4 * np.ones(T.shape)
theta_hill = np.array([
0 if t <= 5 else
4./180. * pi * (t-5) if t <= 6 else
4./180. * pi for t in T])
# Fix \omega_0 and vary \zeta
w0 = 0.5
subplots = [None, None]
for zeta in [0.5, 1, 2]:
# Create the controller transfer function (as an I/O system)
kp = (2*zeta*w0 - a)/b
ki = w0**2 / b
control_tf = ct.tf2io(
ct.TransferFunction([kp, ki], [1, 0.01*ki/kp]),
name='control', inputs='u', outputs='y')
# Construct the closed loop system by interconnecting process and controller
cruise_tf = ct.InterconnectedSystem(
(vehicle, control_tf), name='cruise',
connections = [('control.u', '-vehicle.v'), ('vehicle.u', 'control.y')],
inplist = ('control.u', 'vehicle.gear', 'vehicle.theta'),
inputs = ('vref', 'gear', 'theta'),
outlist = ('vehicle.v', 'vehicle.u'), outputs = ('v', 'u'))
# Plot the velocity response
X0, U0 = ct.find_eqpt(
cruise_tf, [vref[0], 0], [vref[0], gear[0], theta_hill[0]],
iu=[1, 2], y0=[vref[0], 0], iy=[0])
t, y = ct.input_output_response(cruise_tf, T, [vref, gear, theta_hill], X0)
subplots = cruise_plot(cruise_tf, t, y, t_hill=5, subplots=subplots)
# Fix \zeta and vary \omega_0
zeta = 1
subplots = [None, None]
for w0 in [0.2, 0.5, 1]:
# Create the controller transfer function (as an I/O system)
kp = (2*zeta*w0 - a)/b
ki = w0**2 / b
control_tf = ct.tf2io(
ct.TransferFunction([kp, ki], [1, 0.01*ki/kp]),
name='control', inputs='u', outputs='y')
# Construct the closed loop system by interconnecting process and controller
cruise_tf = ct.InterconnectedSystem(
(vehicle, control_tf), name='cruise',
connections = [('control.u', '-vehicle.v'), ('vehicle.u', 'control.y')],
inplist = ('control.u', 'vehicle.gear', 'vehicle.theta'),
inputs = ('vref', 'gear', 'theta'),
outlist = ('vehicle.v', 'vehicle.u'), outputs = ('v', 'u'))
# Plot the velocity response
X0, U0 = ct.find_eqpt(
cruise_tf, [vref[0], 0], [vref[0], gear[0], theta_hill[0]],
iu=[1, 2], y0=[vref[0], 0], iy=[0])
t, y = ct.input_output_response(cruise_tf, T, [vref, gear, theta_hill], X0)
subplots = cruise_plot(cruise_tf, t, y, t_hill=5, subplots=subplots)
# Nominal controller design for remaining analyses
# Construct a PI controller with rolloff, as a transfer function
Kp = 0.5 # proportional gain
Ki = 0.1 # integral gain
control_tf = ct.tf2io(
ct.TransferFunction([Kp, Ki], [1, 0.01*Ki/Kp]),
name='control', inputs='u', outputs='y')
cruise_tf = ct.InterconnectedSystem(
(vehicle, control_tf), name='cruise',
connections = [('control.u', '-vehicle.v'), ('vehicle.u', 'control.y')],
inplist = ('control.u', 'vehicle.gear', 'vehicle.theta'), inputs = ('vref', 'gear', 'theta'),
outlist = ('vehicle.v', 'vehicle.u'), outputs = ('v', 'u'))
# Define the time and input vectors
T = np.linspace(0, 25, 101)
vref = 20 * np.ones(T.shape)
gear = 4 * np.ones(T.shape)
theta0 = np.zeros(T.shape)
# Now simulate the effect of a hill at t = 5 seconds
plt.figure()
plt.suptitle('Response to change in road slope')
theta_hill = np.array([
0 if t <= 5 else
4./180. * pi * (t-5) if t <= 6 else
4./180. * pi for t in T])
subplots = [None, None]
linecolor = ['red', 'blue', 'green']
handles = []
for i, m in enumerate([1200, 1600, 2000]):
# Compute the equilibrium state for the system
X0, U0 = ct.find_eqpt(
cruise_tf, [vref[0], 0], [vref[0], gear[0], theta0[0]],
iu=[1, 2], y0=[vref[0], 0], iy=[0], params={'m':m})
t, y = ct.input_output_response(
cruise_tf, T, [vref, gear, theta_hill], X0, params={'m':m})
subplots = cruise_plot(cruise_tf, t, y, t_hill=5, subplots=subplots,
linetype=linecolor[i][0] + '-')
handles.append(mlines.Line2D([], [], color=linecolor[i], linestyle='-',
label="m = %d" % m))
# Add labels to the plots
plt.sca(subplots[0])
plt.ylabel('Speed [m/s]')
plt.legend(handles=handles, frameon=False, loc='lower right');
plt.sca(subplots[1])
plt.ylabel('Throttle')
plt.xlabel('Time [s]');
def pi_update(t, x, u, params={}):
    """Integrator-state derivative for the PI controller with anti-windup.

    The controller state z integrates the velocity error (vref - v). When
    the nominal output pi_output(...) saturates outside [0, 1], the
    anti-windup term drives the integrator back toward the saturation
    boundary instead of letting it wind up.
    """
    # Get the controller parameters that we need
    ki = params.get('ki', 0.1)
    kaw = params.get('kaw', 2)  # anti-windup gain
    # Assign variables for inputs and states (for readability)
    v = u[0]                    # current velocity
    vref = u[1]                 # reference velocity
    z = x[0]                    # integrated error
    # Compute the nominal controller output (needed for anti-windup)
    u_a = pi_output(t, x, u, params)
    # Compute anti-windup compensation (scale by ki to account for structure);
    # the term is zero whenever u_a already lies inside [0, 1]. Guarded for
    # ki == 0 to avoid division by zero.
    u_aw = kaw/ki * (np.clip(u_a, 0, 1) - u_a) if ki != 0 else 0
    # State is the integrated error, minus anti-windup compensation
    return (vref - v) + u_aw
def pi_output(t, x, u, params={}):
    """PI control law: kp * (vref - v) + ki * z.

    u = [v, vref] carries the current and reference velocities; x = [z]
    carries the integrated error. Gains default to kp=0.5, ki=0.1.
    """
    kp, ki = params.get('kp', 0.5), params.get('ki', 0.1)
    velocity, reference = u[0], u[1]
    integrated_error = x[0]
    proportional_term = kp * (reference - velocity)
    integral_term = ki * integrated_error
    return proportional_term + integral_term
control_pi = ct.NonlinearIOSystem(
pi_update, pi_output, name='control',
inputs = ['v', 'vref'], outputs = ['u'], states = ['z'],
params = {'kp':0.5, 'ki':0.1})
# Create the closed loop system
cruise_pi = ct.InterconnectedSystem(
(vehicle, control_pi), name='cruise',
connections=(
('vehicle.u', 'control.u'),
('control.v', 'vehicle.v')),
inplist=('control.vref', 'vehicle.gear', 'vehicle.theta'),
outlist=('control.u', 'vehicle.v'), outputs=['u', 'v'])
# Compute the equilibrium throttle setting for the desired speed
X0, U0, Y0 = ct.find_eqpt(
cruise_pi, [vref[0], 0], [vref[0], gear[0], theta0[0]],
y0=[0, vref[0]], iu=[1, 2], iy=[1], return_y=True)
# Now simulate the effect of a hill at t = 5 seconds
plt.figure()
plt.suptitle('Car with cruise control encountering sloping road')
theta_hill = [
0 if t <= 5 else
4./180. * pi * (t-5) if t <= 6 else
4./180. * pi for t in T]
t, y = ct.input_output_response(
cruise_pi, T, [vref, gear, theta_hill], X0)
cruise_plot(cruise_pi, t, y);
plt.figure()
plt.suptitle('Cruise control with integrator windup')
T = np.linspace(0, 50, 101)
vref = 20 * np.ones(T.shape)
theta_hill = [
0 if t <= 5 else
6./180. * pi * (t-5) if t <= 6 else
6./180. * pi for t in T]
t, y = ct.input_output_response(
cruise_pi, T, [vref, gear, theta_hill], X0,
params={'kaw':0})
cruise_plot(cruise_pi, t, y, antiwindup=True);
plt.figure()
plt.suptitle('Cruise control with integrator anti-windup protection')
t, y = ct.input_output_response(
cruise_pi, T, [vref, gear, theta_hill], X0,
params={'kaw':2.})
cruise_plot(cruise_pi, t, y, antiwindup=True);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Process Model
Step3: Engine model
Step4: Torque curves for a typical car engine. The graph on the left shows the torque generated by the engine as a function of the angular velocity of the engine, while the curve on the right shows torque as a function of car speed for different gears.
Step5: Input/ouput model for the vehicle system
Step6: State space controller
Step7: Pole/zero cancellation
Step8: PI Controller
Step9: Robustness to change in mass
Step10: PI controller with antiwindup protection
Step11: Response to a small hill
Step12: Effect of Windup
Step13: PI controller with anti-windup compensation
|
12,136
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Time range for plotting the analytic solution
tt = np.linspace(0, 1, 100)
# Analytic solution
def y(t):
    """Analytic solution of the two-point BVP on [0, 1] (y(0)=1, y(1)=3)."""
    num = -3 * np.exp(2) + np.exp(4) - np.exp(4 * t) + 3 * np.exp(2 + 4 * t)
    return np.exp(-2 * t) * num / (-1 + np.exp(4))
# Evaluate the analytic solution on the plotting grid.
yy = y(tt)
# Finite-difference matrix, parameterized by the number of interior points n
def DiffMatrix(n, h):
    """Tridiagonal finite-difference matrix for the linear BVP.

    Diagonal entries are -4h^2 - 2; both off-diagonals are 1.
    """
    diag_value = -4.0 * h ** 2.0 - 2
    off_diag = np.ones(n - 1)
    return (np.diag(diag_value * np.ones(n))
            + np.diag(off_diag, 1)
            + np.diag(off_diag, -1))
plt.figure(figsize=(10,7))
# Solve at several resolutions to visualize convergence
for n in [1, 2, 5]:
    # Grid spacing for n interior points
    h = 1.0/(n + 1.0)
    # Assemble the right-hand-side vector (boundary contributions)
    b = np.zeros((n))
    b[0] = -1
    b[-1] = -3
    # Solve the linear system A*w = b
    # A is the finite-difference matrix
    # w is the vector of finite-difference approximations
    A = DiffMatrix(n, h)
    w = np.concatenate(([1], np.linalg.solve(A, b), [3]))
    _t = np.linspace(0, 1, n+2)
    # Plot the approximation
    plt.plot(_t, w, 'o--',lw=2, label="$n ="+str(n)+"$")
# Plot the analytic solution for comparison
plt.plot(tt, yy, 'm', lw=2, label="$y(t)$")
plt.legend(loc='best', fontsize=16)
plt.xlabel("$t$", fontsize=16)
plt.show()
print(A)
from scipy.interpolate import BarycentricInterpolator
# Convergence study: error of the FD solution at t=0.75 versus grid size n.
y_data = y(0.75)
exp = np.linspace(1, 3, 10)
N = np.round(np.power(10, exp))
err = []
for n in N:
    n = int(n)
    h = 1.0/(n + 1.0)
    A = DiffMatrix(n, h)
    b = np.zeros((n))
    b[0] = -1
    b[-1] = -3
    w = np.concatenate(([1], np.linalg.solve(A, b), [3]))
    _t = np.linspace(0, 1, n+2)
    # Interpolate the discrete solution so it can be evaluated at t=0.75
    f = BarycentricInterpolator(_t, w)
    err.append(np.abs(f(0.75) - y(0.75)))
# Log-log plot; the slope estimates the order of convergence.
logerr = np.log10(err)
plt.figure(figsize=(10,7))
plt.plot(exp, logerr, 'bo')
slope = (logerr[1] - logerr[0]) / (exp[1] - exp[0])
print("pendiente:",slope)
plt.xlabel("$n$",fontsize=20)
plt.ylabel("$|w-y|_{t=0.75}$",fontsize=20)
exp_n = [1,2,3]
plt.xticks(exp_n, ["$10^{"+str(i)+"}$" for i in exp_n], fontsize=16)
exp_error = [-7, -6, -5, -4 , -3]
plt.yticks(exp_error, ["$10^{"+str(i)+"}$" for i in exp_error], fontsize=16)
plt.show()
# Time range on [0, pi]
tt = np.linspace(0, np.pi, 100)
# Analytic solution
def y(t):
    """Analytic solution: sin(t) - cos(t)."""
    return -np.cos(t) + np.sin(t)
# Boundary values at t=0 and t=pi, and the analytic curve for plotting.
y0 = y(0)
yM = y(np.pi)
yy = y(tt)
# Finite-difference matrix, parameterized by n
def DiffMatrix(n, h):
    """Tridiagonal matrix: diagonal -h-2, sub-diagonal 1, super-diagonal 1+h."""
    m = np.zeros((n, n), dtype=float)
    np.fill_diagonal(m, -h - 2)
    rows = np.arange(n - 1)
    m[rows + 1, rows] = 1.0      # sub-diagonal
    m[rows, rows + 1] = 1 + h    # super-diagonal
    return m
plt.figure(figsize=(10,7))
# Solve on a single resolution (n=20 interior points)
for n in [20]:
    # Grid spacing on [0, pi]
    h = (np.pi - 0.0)/(n + 1.0)
    t = np.arange(0, np.pi+h, h)
    # Assemble the right-hand-side vector; first/last rows carry boundary terms.
    b = np.zeros((n))
    b[0] = 2.0*(h**2)*np.cos(t[1]) - y0
    b[-1] = 2.0*(h**2)*np.cos(t[-2]) - (1+h)*yM
    b[1:-1] = 2.0*(h**2)*np.cos(t[2:-2])
    # Solve the linear system A*w = b
    # A is the finite-difference matrix
    # w is the vector of finite-difference approximations
    A = DiffMatrix(n, h)
    w = np.concatenate(([y0], np.linalg.solve(A, b), [yM]))
    # Plot the approximation
    plt.plot(t, w, 'o--',lw=2, label="$n ="+str(n)+"$")
# Plot the analytic solution for comparison
plt.plot(tt, yy, 'm', lw=2, label="$y(t)$")
plt.legend(loc='best', fontsize=16)
plt.xlabel("$t$", fontsize=16)
plt.show()
# Boundary values for the nonlinear BVP and Newton iteration count.
ya = 1
yb = 4
n_iter = 20
# Jacobian of the finite-difference system, parameterized by n
def jacobian(n, w, h):
    """Jacobian of the discretized nonlinear system at the current iterate w."""
    diag_vals = 2.0 * h ** 2.0 * w - 2 - h ** 2   # derivative of each residual w.r.t. w_i
    ones_off = np.ones(n - 1)
    return np.diag(diag_vals) + np.diag(ones_off, 1) + np.diag(ones_off, -1)
# Función con el sistema de ecuaciones a la cual le encontraremos el cero
def f(n, w, h):
    """Residual vector of the finite-difference equations at iterate w.

    Uses the global boundary values ya (left) and yb (right).
    """
    left = np.concatenate(([ya], w[:-1]))    # w[i-1], with ya at the left boundary
    right = np.concatenate((w[1:], [yb]))    # w[i+1], with yb at the right boundary
    return left - (2 + h ** 2) * w + h ** 2 * w ** 2 + right
plt.figure(figsize=(10,7))
# Try several grid resolutions
for n in [2, 4, 40]:
    h = 1.0 / (n + 1)
    t = np.linspace(0, 1, n+2)
    w = np.zeros((n))
    # Newton's method: solve J*delta = f and update the iterate.
    for i in range(n_iter):
        # wk+1 = wk - delta w
        w = w - np.linalg.solve(jacobian(n, w, h), f(n, w, h))
    plt.plot(t, np.concatenate([[ya],w,[yb]]), 'o-', label="$n = "+str(n)+"$")
plt.legend(loc='best', fontsize=16)
plt.xlabel("$t$", fontsize=20)
plt.ylabel("$y(t)$", fontsize=20)
plt.show()
xx = np.linspace(0, 1, 200)
# Analytic solution
def y(x):
    """Closed-form solution of the third BVP; transcribed from the analytic
    derivation — treat the expression as opaque and do not reorder terms."""
    dem = (1.+np.exp(2.))*(1+4*np.pi**2.)
    num = np.exp(-x)*(3*(1+4*np.pi**2)*np.exp(x)*x +3*(1+4*np.pi**2)*np.exp(x+2)*x
                      -2*(1+6*np.pi**2)*np.exp(2*x+1)-2*(1+4*np.pi**2)*np.exp(2*x)
                      -(1+np.exp(2))*np.cos(2*np.pi*x) + np.exp(2)*(2+8*np.pi**2)
                      -2*np.e*(1+6*np.pi**2))
    return num / dem
yy = y(xx)
# Finite-difference matrix for this BVP
def DiffMatrix(n, h):
    """Tridiagonal FD matrix with the (0, 0) entry overridden to -1 - h^2
    (boundary-condition modification; the solver cell also adjusts b[0])."""
    D = np.zeros((n, n), dtype=float)
    np.fill_diagonal(D, -2 - h ** 2)
    k = np.arange(n - 1)
    D[k, k + 1] = 1.0
    D[k + 1, k] = 1.0
    D[0, 0] = -1. - h ** 2.
    return D
n = 30
h = 1/(n+1)
x = np.linspace(0, 1, n+2)
# Right-hand side from the forcing term; b[0] gets an extra h from the boundary condition.
b = (h**2)*(-3*x[1:-1]+np.cos(2*np.pi*x[1:-1]))
D = DiffMatrix(n, h)
b[0] += h
w = np.linalg.solve(D, b)
# Prepend the left boundary value (w[0]-h) and append the right value (0).
w = np.concatenate([[w[0]-h], w, [0]])
plt.figure(figsize=(10,7))
plt.plot(xx, yy, lw=2, label="$y(x)$")
plt.plot(x, w, 'ro-', lw=1.5)
plt.legend(loc='best', fontsize=16)
plt.xlabel("$x$", fontsize=16)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Los errores de éste método son dos principalmente
Step2: A continuación otro ejemplo de BVP, esta vez note que hay involucrada una función explícitamente dependiente del tiempo. Basta con evaluarla en la grilla de tiempo según sea necesario, la consecuencia directa es que el vector que solía contener sólo condiciones de borde ahora tendrá estos valores asociados a $f(t)$.
Step3: 2. Problemas de Valor de Frontera No Lineales
Step4: 3. Condiciones de Borde
|
12,137
|
<ASSISTANT_TASK:>
Python Code:
import pywt
from matplotlib import pyplot
%matplotlib inline
import numpy
from PIL import Image
import urllib.request
import io
import torch
URL = 'https://upload.wikimedia.org/wikipedia/commons/thumb/b/bc/Zuse-Z4-Totale_deutsches-museum.jpg/315px-Zuse-Z4-Totale_deutsches-museum.jpg'
print(pywt.families())
# Plot the four bior2.2 filter banks (decomposition filters shown reversed).
w=pywt.Wavelet('bior2.2')
pyplot.plot(w.dec_hi[::-1], label="dec hi")
pyplot.plot(w.dec_lo[::-1], label="dec lo")
pyplot.plot(w.rec_hi, label="rec hi")
pyplot.plot(w.rec_lo, label="rec lo")
pyplot.title("Bior 2.2 Wavelets")
pyplot.legend()
# Convert the 1D filters to torch tensors for use in conv2d.
dec_hi = torch.tensor(w.dec_hi[::-1])
dec_lo = torch.tensor(w.dec_lo[::-1])
rec_hi = torch.tensor(w.rec_hi)
rec_lo = torch.tensor(w.rec_lo)
# Download the test image, resize to 256x256 and convert to grayscale in [0, 1].
imgraw = Image.open(io.BytesIO(urllib.request.urlopen(URL).read())).resize((256,256))
img = numpy.array(imgraw).mean(2)/255
img = torch.from_numpy(img).float()
pyplot.figure()
pyplot.imshow(img, cmap=pyplot.cm.gray)
# 2D separable filter banks as outer products of the 1D filters (LL, LH, HL, HH).
filters = torch.stack([dec_lo.unsqueeze(0)*dec_lo.unsqueeze(1),
                       dec_lo.unsqueeze(0)*dec_hi.unsqueeze(1),
                       dec_hi.unsqueeze(0)*dec_lo.unsqueeze(1),
                       dec_hi.unsqueeze(0)*dec_hi.unsqueeze(1)], dim=0)
inv_filters = torch.stack([rec_lo.unsqueeze(0)*rec_lo.unsqueeze(1),
                           rec_lo.unsqueeze(0)*rec_hi.unsqueeze(1),
                           rec_hi.unsqueeze(0)*rec_lo.unsqueeze(1),
                           rec_hi.unsqueeze(0)*rec_hi.unsqueeze(1)], dim=0)
def wt(vimg, levels=1):
    """Multi-level 2D wavelet decomposition of a (N, 1, H, W) tensor.

    Applies the four analysis filters (global `filters`) with stride 2,
    recurses on the low-pass band for levels > 1, and re-packs the four
    half-resolution sub-bands into a single (N, 1, H, W) image via the
    view/transpose below.
    """
    h = vimg.size(2)
    w = vimg.size(3)
    # Pad by 2 on each side; assumes the bior2.2 filter length used above —
    # TODO(review): confirm padding for other wavelets.
    padded = torch.nn.functional.pad(vimg,(2,2,2,2))
    res = torch.nn.functional.conv2d(padded, filters[:,None],stride=2)
    if levels>1:
        # Recurse only on channel 0 (the low-pass / approximation band).
        res[:,:1] = wt(res[:,:1],levels-1)
    res = res.view(-1,2,h//2,w//2).transpose(1,2).contiguous().view(-1,1,h,w)
    return res
def iwt(vres, levels=1):
    """Inverse of wt: unpack sub-bands and reconstruct via transposed conv.

    Mirrors wt's packing with the same view/transpose, recurses on the
    approximation band first, then applies the synthesis filters
    (global `inv_filters`) and crops the padding added during decomposition.
    """
    h = vres.size(2)
    w = vres.size(3)
    res = vres.view(-1,h//2,2,w//2).transpose(1,2).contiguous().view(-1,4,h//2,w//2).clone()
    if levels>1:
        res[:,:1] = iwt(res[:,:1], levels=levels-1)
    res = torch.nn.functional.conv_transpose2d(res, inv_filters[:,None],stride=2)
    # Crop the 2-pixel border introduced by the padding in wt.
    res = res[:,:,2:-2,2:-2]
    return res
# 4-level decomposition / reconstruction round trip on the test image.
vimg = img[None,None]
res = wt(vimg,4)
pyplot.figure()
pyplot.imshow(res[0,0].data.numpy(),cmap=pyplot.cm.gray)
rec = iwt(res, levels=4)
pyplot.imshow(rec[0,0].data.numpy(),cmap=pyplot.cm.gray)
# Visualize the reconstruction error (should be near zero everywhere).
pyplot.imshow((rec-vimg).data[0,0].numpy(), cmap=pyplot.cm.gray)
pyplot.colorbar()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let us see what wavelets are available
Step2: For this demo we will use the Biorthogonal 2.2 Wavelets. As we will not properly deal with boundaries, this is a compromise between not using the (almost trivial) Haar wavelet and using more elaborate but larger wavelets.
Step3: Let us have a black and white picture
Step4: We define the tensor product filter banks, i.e. we multiply filters for the two coordinates.
Step5: We can now define the wavelet transform and its inverse using pytorch conv2d and conv_transpose2d.
Step6: Similar, we do the reconstruction (Inverse Wavelet Transform) using the conv_transpose2d function. We drop the excess coefficients.
Step7: We can do this on our image. First the decomposition
Step8: And then the reconstruction.
Step9: We can see where the reconstruction errors are
|
12,138
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function
%matplotlib inline
from qinfer import ScoreMixin, SimplePrecessionModel, RandomizedBenchmarkingModel
import numpy as np
import matplotlib.pyplot as plt
# Use the ggplot style when available; older matplotlib versions lack it.
try:
    plt.style.use('ggplot')
except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt/SystemExit
    pass
class NumericalSimplePrecessionModel(ScoreMixin, SimplePrecessionModel):
    """SimplePrecessionModel whose score is supplied numerically by ScoreMixin
    (overriding the analytic implementation), for the comparison below."""
    pass
# Compare the analytic and numerical score / Fisher information side by side.
analytic_model = SimplePrecessionModel()
numerical_model = NumericalSimplePrecessionModel()
expparams = np.linspace(1, 10, 50)
modelparams = np.linspace(.1,1,50)[:, np.newaxis]
analytic_score = analytic_model.score(np.array([0],dtype=int),modelparams, expparams)[0,0,...]
print(analytic_score.shape)
numerical_score = numerical_model.score(np.array([0],dtype=int),modelparams, expparams)[0,0,...]
print(numerical_score.shape)
plt.subplot(1,2,1)
plt.imshow(analytic_score)
plt.subplot(1,2,2)
plt.imshow(numerical_score)
analytic_fisher_info = analytic_model.fisher_information(modelparams, expparams)[0,0,...]
numerical_fisher_info = numerical_model.fisher_information(modelparams, expparams)[0,0,...]
plt.subplot(1,2,1)
plt.imshow(analytic_fisher_info)
plt.subplot(1,2,2)
plt.imshow(numerical_fisher_info)
class NumericalRandomizedBenchmarkingModel(ScoreMixin, RandomizedBenchmarkingModel):
    """RandomizedBenchmarkingModel with ScoreMixin's numerical score and
    Fisher information, for comparison against the analytic model."""
    pass
analytic_model = RandomizedBenchmarkingModel()
numerical_model = NumericalRandomizedBenchmarkingModel()
# Sequence lengths m = 1..150; 500 model-parameter vectors with varying p.
expparams = np.empty((150,), dtype=analytic_model.expparams_dtype)
expparams['m'] = np.arange(1, 151)
modelparams = np.empty((500, 3))
modelparams[:, 0] = np.linspace(0.1, 0.999, 500)
modelparams[:, 1] = 0.5
modelparams[:, 2] = 0.5
afi = analytic_model.fisher_information(modelparams, expparams)
assert afi.shape == (3, 3, modelparams.shape[0], expparams.shape[0])
nfi = numerical_model.fisher_information(modelparams, expparams)
assert nfi.shape == (3, 3, modelparams.shape[0], expparams.shape[0])
# Relative error between the analytic and numerical Fisher information.
np.linalg.norm(afi - nfi) / np.linalg.norm(afi)
def tr_inv(arr):
    """Trace of the inverse of a flattened 3x3 matrix; inf if singular.

    Args:
        arr: array-like with 9 elements, reshaped to (3, 3).

    Returns:
        float: Tr(arr^-1), or float('inf') when the matrix is singular.
    """
    try:
        return np.trace(np.linalg.inv(arr.reshape(3, 3)))
    # Fixed: the original caught bare `LinAlgError`, which is undefined here
    # (numpy is imported only as `np`), so a singular matrix raised NameError.
    except np.linalg.LinAlgError:
        return float('inf')
def crb(fi):
    """Cramer-Rao bound curve: Tr(F^-1) of the Fisher information summed over
    experiments, evaluated per model-parameter vector (uses the globals
    modelparams and expparams)."""
    return np.apply_along_axis(tr_inv, 0, np.sum(fi.reshape((9, modelparams.shape[0], expparams.shape[0])), axis=-1))
# Plot the CRB for the analytic and numerical Fisher information side by side.
plt.figure(figsize=(15, 6))
for idx, fi in enumerate([afi, nfi]):
    plt.subplot(1,2, 1 + idx)
    plt.semilogy(modelparams[:, 0], crb(fi))
    plt.ylabel(r'$\operatorname{Tr}\left(\left(\sum_m F(p, m)\right)^{-1}\right)$')
    plt.xlabel('$p$')
# Timing comparison of the two Fisher-information implementations.
%timeit analytic_model.fisher_information(modelparams, expparams)
%timeit numerical_model.fisher_information(modelparams, expparams)
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simple Precession Model Test
Step2: We verify that both models compute the same score by plotting the score for a range of experiment and model parameters. Since this is a single-parameter model, the score is a scalar.
Step3: Next, we verify that both models give the same Fisher information.
Step4: Randomized Benchmarking Model
Step5: We now make experiment and parameters to test with.
Step6: Let's make sure that the returned Fisher information has the right shape. Note that the Fisher information is a four-index tensor here, with the two indices for the information matrix itself, plus two indices that vary over the input model parameters and experiment parameters.
Step7: We check that each Fisher information matrix has errors that are small compared to the analytic FI alone.
Step8: Next, we plot the trace-inverse of each to check that we get the same Cramer-Rao bounds.
Step9: Finally, we note that the numerical FI calculations are not much slower than the analytic calculations.
|
12,139
|
<ASSISTANT_TASK:>
Python Code:
import SimpleITK as sitk
import registration_utilities as ru
import registration_callbacks as rc
from __future__ import print_function
import matplotlib.pyplot as plt
%matplotlib inline
from ipywidgets import interact, fixed
#utility method that either downloads data from the MIDAS repository or
#if already downloaded returns the file name for reading from disk (cached data)
from downloaddata import fetch_data as fdata
%run popi_utilities_setup.py
# Load all ten phases of the POPI 4D-CT data: image, air/body/lung mask, landmarks.
images = []
masks = []
points = []
for i in range(0,10):
    image_file_name = 'POPI/meta/{0}0-P.mhd'.format(i)
    mask_file_name = 'POPI/masks/{0}0-air-body-lungs.mhd'.format(i)
    points_file_name = 'POPI/landmarks/{0}0-Landmarks.pts'.format(i)
    images.append(sitk.ReadImage(fdata(image_file_name), sitk.sitkFloat32)) #read and cast to format required for registration
    masks.append(sitk.ReadImage(fdata(mask_file_name)))
    points.append(read_POPI_points(fdata(points_file_name)))
# Interactive browser over temporal phase and coronal slice.
interact(display_coronal_with_overlay, temporal_slice=(0,len(images)-1),
         coronal_slice = (0, images[0].GetSize()[1]-1),
         images = fixed(images), masks = fixed(masks),
         label=fixed(lung_label), window_min = fixed(-1024), window_max=fixed(976));
# Report lung volume per phase from the label-shape statistics.
label_shape_statistics_filter = sitk.LabelShapeStatisticsImageFilter()
for i, mask in enumerate(masks):
    label_shape_statistics_filter.Execute(mask)
    print('Lung volume in image {0} is {1} liters.'.format(i,0.000001*label_shape_statistics_filter.GetPhysicalSize(lung_label)))
def bspline_intra_modal_registration(fixed_image, moving_image, fixed_image_mask=None, fixed_points=None, moving_points=None):
    """Register moving_image to fixed_image with a multi-resolution BSpline FFD.

    Args:
        fixed_image, moving_image: sitk.Image volumes of the same modality
            (mean-squares metric).
        fixed_image_mask: optional mask restricting metric sampling.
        fixed_points, moving_points: optional corresponding landmark lists;
            when both are given, the metric and TRE are plotted during
            optimization via the registration_callbacks module.

    Returns:
        The optimized transform from registration_method.Execute
        (maps fixed-image points to moving-image space — SimpleITK convention).
    """
    registration_method = sitk.ImageRegistrationMethod()
    # Determine the number of Bspline control points using the physical spacing we want for the control grid.
    grid_physical_spacing = [50.0, 50.0, 50.0] # A control point every 50mm
    image_physical_size = [size*spacing for size,spacing in zip(fixed_image.GetSize(), fixed_image.GetSpacing())]
    mesh_size = [int(image_size/grid_spacing + 0.5) \
                 for image_size,grid_spacing in zip(image_physical_size,grid_physical_spacing)]
    initial_transform = sitk.BSplineTransformInitializer(image1 = fixed_image,
                                                         transformDomainMeshSize = mesh_size, order=3)
    registration_method.SetInitialTransform(initial_transform)
    registration_method.SetMetricAsMeanSquares()
    # Settings for metric sampling, usage of a mask is optional. When given a mask the sample points will be
    # generated inside that region. Also, this implicitly speeds things up as the mask is smaller than the
    # whole image.
    registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
    registration_method.SetMetricSamplingPercentage(0.01)
    if fixed_image_mask:
        registration_method.SetMetricFixedMask(fixed_image_mask)
    # Multi-resolution framework.
    registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])
    registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2,1,0])
    registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
    registration_method.SetInterpolator(sitk.sitkLinear)
    registration_method.SetOptimizerAsLBFGSB(gradientConvergenceTolerance=1e-5, numberOfIterations=100)
    # If corresponding points in the fixed and moving image are given then we display the similarity metric
    # and the TRE during the registration.
    if fixed_points and moving_points:
        registration_method.AddCommand(sitk.sitkStartEvent, rc.metric_and_reference_start_plot)
        registration_method.AddCommand(sitk.sitkEndEvent, rc.metric_and_reference_end_plot)
        registration_method.AddCommand(sitk.sitkIterationEvent, lambda: rc.metric_and_reference_plot_values(registration_method, fixed_points, moving_points))
    return registration_method.Execute(fixed_image, moving_image)
#%%timeit -r1 -n1
# Select the fixed and moving images, valid entries are in [0,9].
fixed_image_index = 0
moving_image_index = 7
tx = bspline_intra_modal_registration(fixed_image = images[fixed_image_index],
                                      moving_image = images[moving_image_index],
                                      fixed_image_mask = (masks[fixed_image_index] == lung_label),
                                      fixed_points = points[fixed_image_index],
                                      moving_points = points[moving_image_index]
                                     )
# Target registration error (TRE) before (identity) and after registration.
initial_errors_mean, initial_errors_std, _, initial_errors_max, initial_errors = ru.registration_errors(sitk.Euler3DTransform(), points[fixed_image_index], points[moving_image_index])
final_errors_mean, final_errors_std, _, final_errors_max, final_errors = ru.registration_errors(tx, points[fixed_image_index], points[moving_image_index])
plt.hist(initial_errors, bins=20, alpha=0.5, label='before registration', color='blue')
plt.hist(final_errors, bins=20, alpha=0.5, label='after registration', color='green')
plt.legend()
plt.title('TRE histogram');
print('Initial alignment errors in millimeters, mean(std): {:.2f}({:.2f}), max: {:.2f}'.format(initial_errors_mean, initial_errors_std, initial_errors_max))
print('Final alignment errors in millimeters, mean(std): {:.2f}({:.2f}), max: {:.2f}'.format(final_errors_mean, final_errors_std, final_errors_max))
# Transfer the segmentation via the estimated transformation. Use Nearest Neighbor interpolation to retain the labels.
transformed_labels = sitk.Resample(masks[moving_image_index],
                                   images[fixed_image_index],
                                   tx,
                                   sitk.sitkNearestNeighbor,
                                   0.0,
                                   masks[moving_image_index].GetPixelIDValue())
segmentations_before_and_after = [masks[moving_image_index], transformed_labels]
# Visual comparison: original vs. transformed segmentation overlaid on the fixed image.
interact(display_coronal_with_label_maps_overlay, coronal_slice = (0, images[0].GetSize()[1]-1),
         mask_index=(0,len(segmentations_before_and_after)-1),
         image = fixed(images[fixed_image_index]), masks = fixed(segmentations_before_and_after),
         label=fixed(lung_label), window_min = fixed(-1024), window_max=fixed(976));
# Compute the Dice coefficient and Hausdorf distance between the segmentations before, and after registration.
ground_truth = masks[fixed_image_index] == lung_label
before_registration = masks[moving_image_index] == lung_label
after_registration = transformed_labels == lung_label
label_overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()
label_overlap_measures_filter.Execute(ground_truth, before_registration)
print("Dice coefficient before registration: {:.2f}".format(label_overlap_measures_filter.GetDiceCoefficient()))
label_overlap_measures_filter.Execute(ground_truth, after_registration)
print("Dice coefficient after registration: {:.2f}".format(label_overlap_measures_filter.GetDiceCoefficient()))
hausdorff_distance_image_filter = sitk.HausdorffDistanceImageFilter()
hausdorff_distance_image_filter.Execute(ground_truth, before_registration)
print("Hausdorff distance before registration: {:.2f}".format(hausdorff_distance_image_filter.GetHausdorffDistance()))
hausdorff_distance_image_filter.Execute(ground_truth, after_registration)
print("Hausdorff distance after registration: {:.2f}".format(hausdorff_distance_image_filter.GetHausdorffDistance()))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Utilities
Step2: Loading Data
Step3: Geting to know your data
Step4: Free Form Deformation
Step5: Perform Registration
Step6: Another option for evaluating the registration is to use segmentation. In this case, we transfer the segmentation from one image to the other and compare the overlaps, both visually, and quantitatively.
|
12,140
|
<ASSISTANT_TASK:>
Python Code:
# # TODO: add putty connection too.
# #read SSH connection parameters
# with open('ssh_settings.json') as settings_file:
# settings = json.load(settings_file)
# hostname = settings['hostname']
# username = settings['username']
# password = settings['password']
# local_key_dir = settings['local_key_dir']
# census_dir = 'synthetic_population/'
# Remote directory with census data
# results_dir = 'craigslist_census/'
# Remote directory for results
# # estbalish SSH connection
# ssh = paramiko.SSHClient()
# ssh.load_host_keys(local_key_dir)
# ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# ssh.connect(hostname,username=username, password=password)
# sftp = ssh.open_sftp()
# def read_listings_file(fname):
# Read csv file via SFTP and return as dataframe.
# with sftp.open(os.path.join(listings_dir,fname)) as f:
# df = pd.read_csv(f, delimiter=',', dtype={'fips_block':str,'state':str,'mpo_id':str}, date_parser=['date'])
# # TODO: parse dates.
# return df
def log_var(x):
    """Return log(x), or NaN when x is zero (avoids -inf from log(0)).

    Restores the docstring delimiters that were stripped, which left a bare
    prose line in the function body (a SyntaxError).
    """
    if x == 0:
        return np.nan
    else:
        return np.log(x)
def create_census_vars(df):
    """Add derived demographic share/ratio columns to df and return it.

    Mutates and returns the same DataFrame. Requires the raw census count
    columns (race_of_head_1..9, recent_mover_1, tenure_1, hhs_tot,
    persons_tot, cars_tot) plus 'rent' and 'income_med'. Uses the sibling
    log_var helper for the log transforms.

    (Restores the docstring delimiters that were stripped, which left a bare
    prose line in the function body — a SyntaxError.)
    """
    # Race-of-head counts expressed as shares of total households.
    df['pct_white'] = df['race_of_head_1']/df['hhs_tot']
    df['pct_black'] = df['race_of_head_2']/df['hhs_tot']
    df['pct_amer_native'] = df['race_of_head_3']/df['hhs_tot']
    df['pct_alaska_native'] = df['race_of_head_4']/df['hhs_tot']
    df['pct_any_native'] = df['race_of_head_5']/df['hhs_tot']
    df['pct_asian'] = df['race_of_head_6']/df['hhs_tot']
    df['pct_pacific'] = df['race_of_head_7']/df['hhs_tot']
    df['pct_other_race'] = df['race_of_head_8']/df['hhs_tot']
    df['pct_mixed_race'] = df['race_of_head_9']/df['hhs_tot']
    df['pct_mover'] = df['recent_mover_1']/df['hhs_tot']
    df['pct_owner'] = df['tenure_1']/df['hhs_tot']
    # Household-level averages.
    df['avg_hh_size'] = df['persons_tot']/df['hhs_tot']
    df['cars_per_hh'] = df['cars_tot']/df['hhs_tot']
    # Log transforms (NaN for zero values via log_var).
    df['ln_rent'] = df['rent'].apply(log_var)
    df['ln_income'] = df.income_med.apply(log_var)
    return df
def filter_outliers(df, rent_range=(100,10000), sqft_range=(10,5000)):
    """Drop outliers from listings dataframe; filters rent and sq ft.

    Args:
        df: DataFrame with listings. Column names include ['rent','sqft'].
        rent_range (tuple): min (inclusive) and max (exclusive) rent.
        sqft_range (tuple): min (inclusive) and max (exclusive) sqft.

    Returns:
        DataFrame: listings data without outliers.

    (Restores the docstring delimiters that were stripped, which left bare
    prose lines in the function body — a SyntaxError.)
    """
    n0 = len(df)
    df = df[(df.rent >= rent_range[0]) & (df.rent < rent_range[1])]
    n1 = len(df)
    print('Dropped {} outside rent range ${}-${}'.format(n0-n1,rent_range[0],rent_range[1]))
    df = df[(df.sqft >= sqft_range[0]) & (df.sqft < sqft_range[1])]
    n2 = len(df)
    print('Dropped {} outside sqft range {}-{} sqft. {} rows remaining'.format(n1-n2,sqft_range[0],sqft_range[1],len(df)))
    return(df)
# get list of files and load.
# for remotely stored data by state (just do one state for now)
state='CA'
infile='cl_census_{}.csv'.format(state)
#data = read_listings_file(infile) # uncomment to get remote data.
# for local data:
data_dir = '../data/'
data_file = r'..\data\sfbay_listings_03172017.csv'
# Explicit dtypes keep FIPS/ID columns as strings (leading zeros) and force
# the numeric columns to float.
data = pd.read_csv(os.path.join(data_file),parse_dates=[1],dtype={'listing_id':str, 'rent':float, 'bedrooms':float, 'bathrooms':float, 'sqft':float,
       'rent_sqft':float, 'fips_block':str, 'state':str, 'region':str, 'mpo_id':str, 'lng':float, 'lat':float,
       'cars_tot':float, 'children_tot':float, 'persons_tot':float, 'workers_tot':float,
       'age_of_head_med':float, 'income_med':float, 'hhs_tot':float, 'race_of_head_1':float,
       'race_of_head_2':float, 'race_of_head_3':float, 'race_of_head_4':float, 'race_of_head_5':float,
       'race_of_head_6':float, 'race_of_head_7':float, 'race_of_head_8':float, 'race_of_head_9':float,
       'recent_mover_0':float, 'recent_mover_1':float, 'tenure_1':float, 'tenure_2':float})
print(len(data))
data.head()
# for census vars, NA really means 0...
census_cols = ['cars_tot', 'children_tot','persons_tot', 'workers_tot', 'age_of_head_med', 'income_med','hhs_tot', 'race_of_head_1', 'race_of_head_2', 'race_of_head_3','race_of_head_4', 'race_of_head_5', 'race_of_head_6', 'race_of_head_7','race_of_head_8', 'race_of_head_9', 'recent_mover_0', 'recent_mover_1','tenure_1', 'tenure_2']
for col in census_cols:
    data[col] = data[col].fillna(0)
# create useful variables
data = create_census_vars(data)
# define some feature to include in the model.
features_to_examine = ['rent','ln_rent', 'bedrooms','bathrooms','sqft','pct_white', 'pct_black','pct_asian','pct_mover','pct_owner','income_med','age_of_head_med','avg_hh_size','cars_per_hh']
data[features_to_examine].describe()
# I've already identified these ranges as good at excluding outliers
rent_range=(100,10000)
sqft_range=(10,5000)
data = filter_outliers(data, rent_range=rent_range, sqft_range=sqft_range)
# Use this to explore outliers yourself.
g=sns.distplot(data['rent'], kde=False)
g.set_xlim(0,10000)
g=sns.distplot(data['sqft'], kde=False)
g.set_xlim(0,10000)
# examine NA's
print('Total rows:',len(data))
print('Rows with any NA:',len(data[pd.isnull(data).any(axis=1)]))
print('Rows with bathroom NA:',len(data[pd.isnull(data.bathrooms)]))
print('% rows missing bathroom col:',len(data[pd.isnull(data.bathrooms)])/len(data))
#for d in range(1,31):
#    print(d,'% rows missing bathroom col:',len(data[pd.isnull(data.bathrooms)&((data.date.dt.month==12)&(data.date.dt.day==d))])/len(data[(data.date.dt.month==12)&(data.date.dt.day==d)]))
# uncommon to only use data after Dec 21.
#data=data[(data.date.dt.month>=12)&(data.date.dt.day>=22)]
#data.shape
# Uncomment to drop NA's
#data = data.dropna()
#print('Dropped {} rows with NAs'.format(n0-len(data)))
# Distribution of rent and log-rent (log is closer to normal).
p=sns.distplot(data.rent, kde=False)
p.set_title('rent')
p=sns.distplot(data.ln_rent, kde=False)
p.set_title('ln rent')
# Small-multiples histogram grid over every candidate feature.
plot_rows = math.ceil(len(features_to_examine)/2)
f, axes = plt.subplots(plot_rows,2, figsize=(8,15))
sns.despine(left=True)
for i,col in enumerate(features_to_examine):
    row_position = math.floor(i/2)
    col_position = i%2
    data_notnull = data[pd.notnull(data[col])] # exclude NA values from plot
    sns.distplot(data_notnull[col], ax=axes[row_position, col_position],kde=False)
    axes[row_position, col_position].set_title('{}'.format(col))
plt.tight_layout()
plt.show()
data_notnull = data[pd.notnull(data['ln_income'])]
p=sns.distplot(data_notnull['ln_income'],kde=False)
p.set_title('ln med income')
# ln med income is not more normal.. use med income instead.
# correlation heatmap
corrmat=data[features_to_examine].corr()
corrmat.head()
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(corrmat, vmax=.8, square=True)
f.tight_layout()
print(data.columns)
#'pct_amer_native','pct_alaska_native',
# Features (x_cols) and target (y_col) for the regression models below.
x_cols = ['bedrooms','bathrooms', 'sqft','age_of_head_med', 'income_med','pct_white', 'pct_black', 'pct_any_native', 'pct_asian', 'pct_pacific',
          'pct_other_race', 'pct_mixed_race', 'pct_mover', 'pct_owner', 'avg_hh_size', 'cars_per_hh']
y_col = 'ln_rent'
print(len(data))
# exclude missing values
data_notnull= data[(pd.notnull(data[x_cols])).all(axis=1)]
data_notnull= data_notnull[(pd.notnull(data_notnull[y_col]))]
print('using {} rows of {} total'.format(len(data_notnull),len(data)))
data_notnull.head()
data_notnull.to_csv(r'..\data\sfbay_listings_03172017_notnull.csv',index=False)
from sklearn import linear_model, cross_validation
# create training and testing datasets.
# this creates a test set that is 30% of total obs.
X_train, X_test, y_train, y_test = cross_validation.train_test_split(data_notnull[x_cols],data_notnull[y_col], test_size = .3, random_state = 201)
regr = linear_model.LinearRegression()
regr.fit(X_train, y_train)
# Intercept
print('Intercept:', regr.intercept_)
# The coefficients
print('Coefficients:')
pd.Series(regr.coef_, index=x_cols)
# See mean square error, using test data
print("Mean squared error: %.2f" % np.mean((regr.predict(X_test) - y_test) ** 2))
print("RMSE:", np.sqrt(np.mean((regr.predict(X_test) - y_test) ** 2)))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(X_test, y_test))
# Plot predicted values vs. observed
plt.scatter(regr.predict(X_train),y_train, color='blue',s=1, alpha=.5)
plt.show()
# plot residuals vs predicted values (train = blue, test = green)
plt.scatter(regr.predict(X_train), regr.predict(X_train)- y_train, color='blue',s=1, alpha=.5)
plt.scatter(regr.predict(X_test), regr.predict(X_test)- y_test, color='green',s=1, alpha=.5)
plt.show()
print("Training set. Mean squared error: %.5f" % np.mean((regr.predict(X_train) - y_train) ** 2), '| Variance score: %.5f' % regr.score(X_train, y_train))
print("Test set. Mean squared error: %.5f" % np.mean((regr.predict(X_test) - y_test) ** 2), '| Variance score: %.5f' % regr.score(X_test, y_test))
from sklearn.linear_model import Ridge
# try a range of different regularization terms.
for a in [10,1,0.1,.01,.001,.00001]:
    ridgereg = Ridge(alpha=a)
    ridgereg.fit(X_train, y_train)
    print('\n alpha:',a)
    print("Mean squared error: %.5f" % np.mean((ridgereg.predict(X_test) - y_test) ** 2),'| Variance score: %.5f' % ridgereg.score(X_test, y_test))
# Intercept (of the last fitted model, alpha=1e-5)
print('Intercept:', ridgereg.intercept_)
# The coefficients
print('Coefficients:')
pd.Series(ridgereg.coef_, index=x_cols)
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
def RMSE(y_actual, y_predicted):
    """Root-mean-squared error between two equal-length array-likes.

    Computed directly with numpy so the helper does not depend on sklearn's
    mean_squared_error for a one-line calculation; numerically equivalent.
    """
    diff = np.asarray(y_actual, dtype=float) - np.asarray(y_predicted, dtype=float)
    return np.sqrt(np.mean(diff ** 2))
def cross_val_rf(X, y, max_f='auto', n_trees=50, cv_method='kfold', k=5):
    """Estimate a random forest model using cross-validation and return the average error across the folds.

    Args:
        X (DataFrame): features data
        y (Series): target data
        max_f (str or int): how to select max features to consider for the best split.
            'auto' = n_features, 'sqrt', 'log2', or an int for a fixed count.
        n_trees (int): number of trees to build
        cv_method (str): how to split the data (only 'kfold' is implemented)
        k (int): number of folds (default=5)

    Returns:
        float: mean error (RMSE) across all training/test sets.

    (Restores the docstring delimiters that were stripped — the bare Args/
    Returns lines were a SyntaxError.)
    """
    if cv_method == 'kfold':
        kf = KFold(n_splits=k, shuffle=True, random_state=2012016)  # use random seed for reproducibility.
    else:
        # Previously this fell through and raised NameError on `kf`; fail explicitly.
        raise ValueError("unsupported cv_method: {!r}".format(cv_method))
    E = np.ones(k)  # this array will hold the errors.
    i = 0
    for train, test in kf.split(X, y):
        train_data_x = X.iloc[train]
        train_data_y = y.iloc[train]
        test_data_x = X.iloc[test]
        test_data_y = y.iloc[test]
        # n_estimators is number of trees to build.
        # max_features = 'auto' means the max_features = n_features. This is a parameter we should tune.
        random_forest = RandomForestRegressor(n_estimators=n_trees, max_features=max_f, criterion='mse', max_depth=None)
        random_forest.fit(train_data_x, train_data_y)
        predict_y = random_forest.predict(test_data_x)
        E[i] = RMSE(test_data_y, predict_y)
        i += 1
    return np.mean(E)
def optimize_rf(df_X, df_y, max_n_trees=100, n_step=20, cv_method='kfold', k=5):
    """Optimize hyperparameters for a random forest regressor.

    Args:
        df_X (DataFrame): features data
        df_y (Series): target data
        max_n_trees (int): max number of trees to generate
        n_step (int): intervals to use for max_n_trees
        cv_method (str): how to split the data ('kfold' (default) or 'timeseries')
        k (int): number of folds (default=5)

    Returns:
        DataFrame: cross-validated RMSE indexed by n_trees, one column per
        max_features method.

    (Restores the docstring delimiters that were stripped — the bare Args
    lines were a SyntaxError.)
    """
    max_features_methods = ['auto', 'sqrt', 'log2']  # methods of defining max_features to try.
    # create a place to store the results, for easy plotting later.
    results = pd.DataFrame(columns=max_features_methods, index=[x for x in range(10, max_n_trees + n_step, n_step)])
    for m in max_features_methods:
        print('max_features:', m)
        for n in results.index:
            error = cross_val_rf(df_X, df_y, max_f=m, n_trees=n)
            print('n_trees:', n, ' error:', error)
            # .loc replaces the removed .ix indexer (label-based; index labels are ints).
            results.loc[n, m] = error
    return results
# data to use - exclude nulls
df_X = data_notnull[x_cols]
df_y = data_notnull[y_col]
print(df_X.shape, df_y.shape)
#df_all = pd.concat([data_notnull[x_cols],data_notnull[y_col]], axis=1)
#df_all.shape
# basic model to make sure it works
random_forest = RandomForestRegressor(n_estimators=10, criterion='mse', max_depth=None)
random_forest.fit(df_X,df_y)
y_predict = random_forest.predict(df_X)
RMSE(df_y,y_predict)
# without parameter tuning
cross_val_rf(df_X,df_y)
# tune the parameters
rf_results = optimize_rf(df_X,df_y, max_n_trees = 100, n_step = 20) # this is sufficient; very little improvement after n_trees=100.
#rf_results2 = optimize_rf(df_X,df_y, max_n_trees = 500, n_step=100)
rf_results
# Plot RMSE vs. number of trees for each max_features strategy.
ax = rf_results.plot()
ax.set_xlabel('number of trees')
ax.set_ylabel('RMSE')
#rf_results2.plot()
# Refit with the chosen hyperparameters on all rows.
random_forest = RandomForestRegressor(n_estimators=100, max_features='sqrt', criterion='mse', max_depth=None)
random_forest.fit(df_X,df_y)
predict_y=random_forest.predict(df_X)
# plot the importances
rf_o = pd.DataFrame({'features':x_cols,'importance':random_forest.feature_importances_})
rf_o= rf_o.sort_values(by='importance',ascending=False)
plt.figure(1,figsize=(12, 6))
plt.xticks(range(len(rf_o)), rf_o.features,rotation=45)
plt.plot(range(len(rf_o)),rf_o.importance,"o")
plt.title('Feature importances')
plt.show()
from sklearn.model_selection import TimeSeriesSplit
tscv = TimeSeriesSplit(n_splits=5)
from sklearn.ensemble import GradientBoostingRegressor
def cross_val_gb(X, y, cv_method='kfold', k=5, **params):
    """Estimate gradient boosting regressor error using cross validation.

    Args:
        X (DataFrame): features data
        y (Series): target data
        cv_method (str): how to split the data (only 'kfold' is implemented)
        k (int): number of folds (default=5)
        **params: keyword arguments forwarded to GradientBoostingRegressor

    Returns:
        float: mean error (RMSE) across all training/test folds.

    Raises:
        ValueError: if cv_method is not 'kfold'. (Previously any other value
        fell through to a NameError on the undefined splitter.)
    """
    if cv_method == 'kfold':
        kf = KFold(n_splits=k, shuffle=True, random_state=2012016)  # fixed seed for reproducibility
    else:
        raise ValueError("unsupported cv_method: %r" % (cv_method,))
    E = np.ones(k)  # per-fold errors; every slot is overwritten below
    i = 0
    for train, test in kf.split(X, y):
        train_data_x = X.iloc[train]
        train_data_y = y.iloc[train]
        test_data_x = X.iloc[test]
        test_data_y = y.iloc[test]
        # n_estimators (number of trees) and the other hyperparameters
        # arrive through **params.
        grad_boost = GradientBoostingRegressor(loss='ls', criterion='mse', **params)
        grad_boost.fit(train_data_x, train_data_y)
        predict_y = grad_boost.predict(test_data_x)
        E[i] = RMSE(test_data_y, predict_y)
        i += 1
    return np.mean(E)
params = {'n_estimators':100,
'learning_rate':0.1,
'max_depth':1,
'min_samples_leaf':4
}
grad_boost = GradientBoostingRegressor(loss='ls',criterion='mse', **params)
grad_boost.fit(df_X,df_y)
RMSE(y_test, grad_boost.predict(X_test))
# Hyperparameters for a quick gradient-boosting cross-validation run.
n_trees = 100
l_rate = 0.1
max_d = 1
# BUG FIX: these values were previously passed positionally, landing in the
# cv_method/k parameters of cross_val_gb (crashing on the undefined splitter).
# Forward them as the estimator keyword arguments collected by **params.
cross_val_gb(df_X, df_y, n_estimators=n_trees, learning_rate=l_rate, max_depth=max_d)
from sklearn.model_selection import GridSearchCV
param_grid = {'learning_rate':[.1, .05, .02, .01],
'max_depth':[2,4,6],
'min_samples_leaf': [3,5,9,17],
'max_features': [1, .3, .1]
}
est= GradientBoostingRegressor(n_estimators = 1000)
gs_cv = GridSearchCV(est,param_grid).fit(df_X,df_y)
print(gs_cv.best_params_)
print(gs_cv.best_score_)
# best parameters
params = {'n_estimators':1000,
'learning_rate':0.05,
'max_depth':6,
'min_samples_leaf':3
}
grad_boost = GradientBoostingRegressor(loss='ls',criterion='mse', **params)
grad_boost.fit(df_X,df_y)
RMSE(y_test, grad_boost.predict(X_test))
# plot the importances
gb_o = pd.DataFrame({'features':x_cols,'importance':grad_boost.feature_importances_})
gb_o= gb_o.sort_values(by='importance',ascending=False)
plt.figure(1,figsize=(12, 6))
plt.xticks(range(len(gb_o)), gb_o.features,rotation=45)
plt.plot(range(len(gb_o)),gb_o.importance,"o")
plt.title('Feature importances')
plt.show()
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
df_X.columns
features = [2, 15, 4, 12,(4,2), (4,15)]
names = df_X.columns
fig, axs = plot_partial_dependence(grad_boost, df_X, features,feature_names=names, grid_resolution=50, figsize = (9,6))
fig.suptitle('Partial dependence of rental price features')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: This notebook explores merged craigslist listings/census data and fits some initial models
Step7: Data Preparation
Step8: create variables
Step9: Filter outliers
Step10: Examine missing data
Step11: uh oh, 74% are missing bathrooms feature. Might have to omit that one. Only 0.02% of rows have other missing values, so that should be ok.
Step12: Bathrooms were added on Dec 21. After that, if bathrooms aren't in the listing, the listing is thrown out. Let's try to find the date when the bathrooms column was added. So if need to use bathrooms feature, can use listings Dec 22 and after.
Step13: Look at distributions
Step14: look at correlations
Step15: The correlations appear as expected, except for cars_per_hh. Maybe this is because cars_per_hh is reflecting the size of the household more than income. Might want to try cars per adult instead..
Step16: Comparison of models
Step17: The residuals look pretty normally distributed.
Step18: Try Ridge Regression (linear regression with regularization )
Step21: As expected, Ridge regression doesn't help much.
Step22: We can use k-fold validation if we believe the samples are independently and identically distributed. That's probably fine right now because we have only 1.5 months of data, but later we may have some time-dependent processes in these timeseries data. If we do use k-fold, I think we should shuffle the samples, because they do not come in a non-random sequence.
Step23: Using m=sqrt(n_features) and log2(n_features) gives similar performance, and a slight improvement over m = n_features. After about 100 trees the error levels off. One of the nice things about random forest is that using additional trees doesn't lead to overfitting, so we could use more, but it's not necessary. Now we can fit the model using n_trees = 100 and m = sqrt.
Step24: The 'importance' score provides an ordered qualitative ranking of the importance of each feature. It is calculated from the improvement in MSE provided by each feature when it is used to split the tree.
Step25: It's not surprising that sqft is the most important predictor, although it is strange that cars_per_hh is the second most important. I would have expected income to be higher in the list.
Step27: Try Boosted Forest
Step28: tune parameters
Step29: Wow, that's a big improvement on the random forest model!
Step30: Let's use partial_dependence to look at feature interactions. Look at the four most important features.
|
12,141
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
medians_df = pd.read_csv('assets/median_age.csv')
medians_df.head()
medians_df.loc[[0, 1, 2, 5], 'County']
medians_df.iloc[10:15, :4]
medians_df[['Median_age', 'Avg_MonthlyIncome']].head()
from arcgis.features import GeoAccessor, GeoSeriesAccessor
counties_fc_path = r'C:\Users\jdadams\AppData\Roaming\Esri\ArcGISPro\Favorites\opensgid.agrc.utah.gov.sde\opensgid.boundaries.county_boundaries'
counties_df = pd.DataFrame.spatial.from_featureclass(counties_fc_path)
counties_df.head()
counties_df.loc[counties_df['stateplane'] == 'Central', ['name', 'stateplane', 'fips_str']]
counties_df['emperor'] = 'Jake'
counties_df.head()
counties_df.groupby('stateplane').count()
counties_df['acres'] = counties_df['SHAPE'].apply(lambda shape: shape.area / 4046.8564)
counties_df.groupby('stateplane')['acres'].sum()
counties_df.loc[(counties_df['pop_lastcensus'] < 100000) & (counties_df['stateplane'] == 'North'), 'emperor'] = 'Erik'
counties_df[['name', 'pop_lastcensus', 'stateplane', 'emperor']].sort_values('name').head()
census_fc_path = r'C:\Users\jdadams\AppData\Roaming\Esri\ArcGISPro\Favorites\opensgid.agrc.utah.gov.sde\opensgid.demographic.census_counties_2020'
census_df = pd.DataFrame.spatial.from_featureclass(census_fc_path)
counties_with_census_df = counties_df.merge(census_df[['geoid20', 'aland20']], left_on='fips_str', right_on='geoid20')
counties_with_census_df.head()
renames = {
'name': 'County Name',
'pop_lastcensus': 'Last Census Population',
'emperor': 'Benevolent Dictator for Life',
'acres': 'Acres',
'aland20': 'Land Area',
}
counties_with_census_df.rename(columns=renames, inplace=True)
counties_with_census_df.head()
field_order = [
'County Name',
'Benevolent Dictator for Life',
'Acres',
'Land Area',
'Last Census Population',
'SHAPE'
]
final_counties_df = counties_with_census_df.reindex(columns=field_order)
final_counties_df.head()
final_counties_df.spatial.to_featureclass(r'C:\gis\Projects\HousingInventory\HousingInventory.gdb\counties_ugic')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can access individual rows and columns using .loc (with index labels) or .iloc (with indices)
Step2: We can also get just a few columns from all rows
Step3: Extending pandas Spatially
Step4: pandas lets you work on rows that meet a certain condition
Step5: You can easily add new columns
Step6: pandas provides powerful built in grouping and aggregation tools, along with Spatially Enabled DataFrames' geometry operations
Step7: pandas Solutions to our Arcpy Problems
Step9: Joined Tables Solution
Step10: Renaming/Reordering Fields Solution
Step11: Now that we've got it all looking good, let's reorder the fields and get rid of the ones we don't want
Step12: Intermediate Feature Classes
|
12,142
|
<ASSISTANT_TASK:>
Python Code:
print('Hello IPython World!')
!pip list
result1 = 1+1
result2 = 2*3
result2
result1
print('2nd = ' + str(result2))
print('1st = ' + str(result1))
print('2nd = ' + str(result2))
print('1st = ' + str(result1))
!ls -alh
!ifconfig
# Import NetworkX library, which is already installed in your Docker container
import networkx as nx
# Create a ranom graph with 100 nodes using Barabashi-Albert Model ()
ba=nx.barabasi_albert_graph(100,5)
# Check the performance of a NetworkX function (calculate betweenness centrality) by running 10 times
%timeit -n 10 nx.betweenness_centrality(ba)
%%timeit -n 10
# Or, check performance of the entire cell
ws = nx.watts_strogatz_graph(100,3,0.1)
btw = nx.betweenness_centrality(ws)
%%writefile data/small_network.sif
node1 is_a node2
node2 child_of node3
node3 child_of node1
!cat data/small_network.sif
%%bash
export FOO='Env var 1'
echo $FOO
%%javascript
var foo = function(a) {
return a+1;
};
console.log(foo(2));
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Run System Command
Step2: Magic!
Step3: Create file manually with %%writefile
Step4: Execute under other interpreters
|
12,143
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import tensorflow as tf
import helper
from tensorflow.examples.tutorials.mnist import input_data
print('Getting MNIST Dataset...')
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print('Data Extracted.')
# Save the shapes of weights for each layer
layer_1_weight_shape = (mnist.train.images.shape[1], 256)
layer_2_weight_shape = (256, 128)
layer_3_weight_shape = (128, mnist.train.labels.shape[1])
all_zero_weights = [
tf.Variable(tf.zeros(layer_1_weight_shape)),
tf.Variable(tf.zeros(layer_2_weight_shape)),
tf.Variable(tf.zeros(layer_3_weight_shape))
]
all_one_weights = [
tf.Variable(tf.ones(layer_1_weight_shape)),
tf.Variable(tf.ones(layer_2_weight_shape)),
tf.Variable(tf.ones(layer_3_weight_shape))
]
helper.compare_init_weights(
mnist,
'All Zeros vs All Ones',
[
(all_zero_weights, 'All Zeros'),
(all_one_weights, 'All Ones')])
helper.hist_dist('Random Uniform (minval=-3, maxval=3)', tf.random_uniform([1000], -3, 3))
# Default for tf.random_uniform is minval=0 and maxval=1
basline_weights = [
tf.Variable(tf.random_uniform(layer_1_weight_shape)),
tf.Variable(tf.random_uniform(layer_2_weight_shape)),
tf.Variable(tf.random_uniform(layer_3_weight_shape))
]
helper.compare_init_weights(
mnist,
'Baseline',
[(basline_weights, 'tf.random_uniform [0, 1)')])
uniform_neg1to1_weights = [
tf.Variable(tf.random_uniform(layer_1_weight_shape, -1, 1)),
tf.Variable(tf.random_uniform(layer_2_weight_shape, -1, 1)),
tf.Variable(tf.random_uniform(layer_3_weight_shape, -1, 1))
]
helper.compare_init_weights(
mnist,
'[0, 1) vs [-1, 1)',
[
(basline_weights, 'tf.random_uniform [0, 1)'),
(uniform_neg1to1_weights, 'tf.random_uniform [-1, 1)')])
uniform_neg01to01_weights = [
tf.Variable(tf.random_uniform(layer_1_weight_shape, -0.1, 0.1)),
tf.Variable(tf.random_uniform(layer_2_weight_shape, -0.1, 0.1)),
tf.Variable(tf.random_uniform(layer_3_weight_shape, -0.1, 0.1))
]
uniform_neg001to001_weights = [
tf.Variable(tf.random_uniform(layer_1_weight_shape, -0.01, 0.01)),
tf.Variable(tf.random_uniform(layer_2_weight_shape, -0.01, 0.01)),
tf.Variable(tf.random_uniform(layer_3_weight_shape, -0.01, 0.01))
]
uniform_neg0001to0001_weights = [
tf.Variable(tf.random_uniform(layer_1_weight_shape, -0.001, 0.001)),
tf.Variable(tf.random_uniform(layer_2_weight_shape, -0.001, 0.001)),
tf.Variable(tf.random_uniform(layer_3_weight_shape, -0.001, 0.001))
]
helper.compare_init_weights(
mnist,
'[-1, 1) vs [-0.1, 0.1) vs [-0.01, 0.01) vs [-0.001, 0.001)',
[
(uniform_neg1to1_weights, '[-1, 1)'),
(uniform_neg01to01_weights, '[-0.1, 0.1)'),
(uniform_neg001to001_weights, '[-0.01, 0.01)'),
(uniform_neg0001to0001_weights, '[-0.001, 0.001)')],
plot_n_batches=None)
import numpy as np
general_rule_weights = [
tf.Variable(tf.random_uniform(layer_1_weight_shape, -1/np.sqrt(layer_1_weight_shape[0]), 1/np.sqrt(layer_1_weight_shape[0]))),
tf.Variable(tf.random_uniform(layer_2_weight_shape, -1/np.sqrt(layer_2_weight_shape[0]), 1/np.sqrt(layer_2_weight_shape[0]))),
tf.Variable(tf.random_uniform(layer_3_weight_shape, -1/np.sqrt(layer_3_weight_shape[0]), 1/np.sqrt(layer_3_weight_shape[0])))
]
helper.compare_init_weights(
mnist,
'[-0.1, 0.1) vs General Rule',
[
(uniform_neg01to01_weights, '[-0.1, 0.1)'),
(general_rule_weights, 'General Rule')],
plot_n_batches=None)
helper.hist_dist('Random Normal (mean=0.0, stddev=1.0)', tf.random_normal([1000]))
normal_01_weights = [
tf.Variable(tf.random_normal(layer_1_weight_shape, stddev=0.1)),
tf.Variable(tf.random_normal(layer_2_weight_shape, stddev=0.1)),
tf.Variable(tf.random_normal(layer_3_weight_shape, stddev=0.1))
]
helper.compare_init_weights(
mnist,
'Uniform [-0.1, 0.1) vs Normal stddev 0.1',
[
(uniform_neg01to01_weights, 'Uniform [-0.1, 0.1)'),
(normal_01_weights, 'Normal stddev 0.1')])
helper.hist_dist('Truncated Normal (mean=0.0, stddev=1.0)', tf.truncated_normal([1000]))
trunc_normal_01_weights = [
tf.Variable(tf.truncated_normal(layer_1_weight_shape, stddev=0.1)),
tf.Variable(tf.truncated_normal(layer_2_weight_shape, stddev=0.1)),
tf.Variable(tf.truncated_normal(layer_3_weight_shape, stddev=0.1))
]
helper.compare_init_weights(
mnist,
'Normal vs Truncated Normal',
[
(normal_01_weights, 'Normal'),
(trunc_normal_01_weights, 'Truncated Normal')])
helper.compare_init_weights(
mnist,
'Baseline vs Truncated Normal',
[
(basline_weights, 'Baseline'),
(trunc_normal_01_weights, 'Truncated Normal')])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Neural Network
Step2: Initialize Weights
Step3: As you can see the accuracy is close to guessing for both zeros and ones, around 10%.
Step4: The histogram used 500 buckets for the 1000 values. Since the chance for any single bucket is the same, there should be around 2 values for each bucket. That's exactly what we see with the histogram. Some buckets have more and some have less, but they trend around 2.
Step5: The loss graph is showing the neural network is learning, which it didn't with all zeros or all ones. We're headed in the right direction.
Step6: We're going in the right direction, the accuracy and loss is better with [-1, 1). We still want smaller weights. How far can we go before it's too small?
Step7: Looks like anything [-0.01, 0.01) or smaller is too small. Let's compare this to our typical rule of using the range $y=1/\sqrt{n}$.
Step8: The range we found and $y=1/\sqrt{n}$ are really close.
Step9: Let's compare the normal distribution against the previous uniform distribution.
Step10: The normal distribution gave a slight increase in accuracy and loss. Let's move closer to 0 and drop picked numbers that are x number of standard deviations away. This distribution is called Truncated Normal Distribution.
Step11: Again, let's compare the previous results with the previous distribution.
Step12: There's no difference between the two, but that's because the neural network we're using is too small. A larger neural network will pick more points on the normal distribution, increasing the likelihood it's choices are larger than 2 standard deviations.
|
12,144
|
<ASSISTANT_TASK:>
Python Code:
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.autograd import Variable
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['image.cmap'] = 'gray'
%matplotlib inline
# input batch size for training (default: 64)
batch_size = 64
# input batch size for testing (default: 1000)
test_batch_size = 1000
# number of epochs to train (default: 10)
epochs = 10
# learning rate (default: 0.01)
lr = 0.01
# SGD momentum (default: 0.5)
momentum = 0.5
# disables CUDA training
no_cuda = True
# random seed (default: 1)
seed = 1
# how many batches to wait before logging training status
log_interval = 10
# Setting seed for reproducibility.
torch.manual_seed(seed)
cuda = not no_cuda and torch.cuda.is_available()
print("CUDA: {}".format(cuda))
if cuda:
torch.cuda.manual_seed(seed)
cudakwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
mnist_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)) # Precalcualted values.
])
train_set = datasets.MNIST(
root='data',
train=True,
transform=mnist_transform,
download=True,
)
test_set = datasets.MNIST(
root='data',
train=False,
transform=mnist_transform,
download=True,
)
train_loader = torch.utils.data.DataLoader(
dataset=train_set,
batch_size=batch_size,
shuffle=True,
**cudakwargs
)
test_loader = torch.utils.data.DataLoader(
dataset=test_set,
batch_size=test_batch_size,
shuffle=True,
**cudakwargs
)
def train(model, loader, optimizer, epoch, log_interval=100):
    """Run one training epoch of *model* over *loader*.

    The model is expected to output log-probabilities (log_softmax), hence
    the NLL criterion. Batches are moved to GPU when the module-level
    ``cuda`` flag is set.
    """
    model.train()  # enable dropout (train-mode behavior)
    for batch_idx, (data, target) in enumerate(loader):  # next mini-batch
        if cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()  # clear gradients to avoid accumulation across steps
        output = model(data)  # call the module (not .forward) so hooks run too
        loss = F.nll_loss(output, target)
        loss.backward()  # compute gradients
        optimizer.step()  # update weights
        if batch_idx % log_interval == 0:
            # BUG FIX: progress was computed from the global train_loader;
            # use the `loader` argument so the function works with any loader.
            # loss.item() replaces loss.data[0], which fails on 0-dim tensors
            # in modern PyTorch.
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(loader.dataset),
                100. * batch_idx / len(loader), loss.item()))
def test(model, loader):
    """Evaluate *model* on *loader* and print average loss and accuracy.

    The model outputs log-probabilities (log_softmax), so the loss is NLL --
    matching the criterion used in train().
    """
    model.eval()  # disable dropout for evaluation
    test_loss = 0
    correct = 0
    with torch.no_grad():  # replaces the deprecated Variable(volatile=True)
        for data, target in loader:
            if cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            # BUG FIX: this used F.cross_entropy, which applies log_softmax a
            # second time on top of the model's log_softmax output; nll_loss
            # is the correct criterion (and matches train()).
            test_loss += F.nll_loss(output, target).item()
            pred = output.data.max(1)[1]  # index of the max log-probability
            correct += pred.eq(target.data).cpu().sum().item()
    test_loss /= len(loader)  # loss function already averages over batch size
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(loader.dataset),
        100. * correct / len(loader.dataset)))
class Net(nn.Module):
    """Small convnet for 1x28x28 MNIST digits; outputs log-probabilities over 10 classes."""

    def __init__(self):
        super().__init__()
        # Attribute names are part of the checkpoint format (state_dict keys).
        self.conv2d_1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2d_2 = nn.Conv2d(32, 32, kernel_size=3)
        self.dense_1 = nn.Linear(3872, 64)  # 32 channels * 11 * 11 spatial after convs/pool
        self.dense_2 = nn.Linear(64, 10)

    def forward(self, x):
        out = self.conv2d_1(x)
        out = F.relu(F.max_pool2d(out, kernel_size=2))
        out = F.relu(self.conv2d_2(out))
        out = F.dropout(out, training=self.training)
        flat = out.view(-1, 3872)
        hidden = F.dropout(F.relu(self.dense_1(flat)), training=self.training)
        logits = self.dense_2(hidden)
        return F.log_softmax(logits)
model = Net()
if cuda:
model.cuda()
for p in model.parameters():
print(p.data.shape)
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum)
for epoch in range(1, 3):
train(model, train_loader, optimizer, epoch)
test(model, test_loader)
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one (a view, no copy)."""

    def forward(self, x):
        batch_size = x.size(0)
        return x.view(batch_size, -1)
model_seq = nn.Sequential(OrderedDict([
('conv2d_1', nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3)),
('relu_1', nn.ReLU()),
('max_pooling2d_1', nn.MaxPool2d(kernel_size=2)),
('conv2d_2', nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3)),
('relu_2', nn.ReLU()),
('dropout_1', nn.Dropout(p=0.25)),
('flatten_1', Flatten()),
('dense_1', nn.Linear(3872, 64)),
('relu_3', nn.ReLU()),
('dropout_2', nn.Dropout(p=0.5)),
('dense_2', nn.Linear(64, 10)),
('readout', nn.LogSoftmax())
]))
if cuda:
model_seq.cuda()
model.__call__
model.forward
for p in model_seq.parameters():
print(p.data.shape)
for m in model_seq.modules():
print(m)
for m in model_seq.children():
print(m)
model_seq.named_modules
for l in model_seq:
print(l)
model_seq[0]
model_seq._modules
optimizer = torch.optim.SGD(model_seq.parameters(), lr=lr)
for epoch in range(1, 3):
train(model_seq, train_loader, optimizer, epoch)
test(model_seq, test_loader)
model_file = 'example_torch_mnist_model'
if cuda: model_file += '_gpu'
name = model_file + '.pth'
torch.save(model_seq.state_dict(), name)
model2 = Net()
if cuda:
model2.cuda()
model2.load_state_dict(torch.load(name))
test(model2, test_loader)
name = model_file + '.pth.tar'
model.eval()
torch.save(model, name)
model3 = torch.load(name)
test(model3, test_loader)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setting up the data loaders
Step2: Encapsulate training and testing in functions
Step3: Implementing the test procedure
Step4: nn.Module API
Step5: Creation of the model
Step6: Sequential API (http
Step7: Training loop
Step8: Saving and loading the model
Step9: Method 1
Step10: Method 2
|
12,145
|
<ASSISTANT_TASK:>
Python Code:
import george
george.__version__
import numpy as np
import matplotlib.pyplot as plt
from george import kernels
np.random.seed(42)
N = 256
t = np.sort(np.random.uniform(0, 10, N))
theta = np.random.uniform(-np.pi, np.pi, N)
X = np.vstack((t, theta)).T
yerr = np.random.uniform(0.05, 0.25, N)
kernel1 = 2.0 * kernels.Matern32Kernel([5.0, 0.5], ndim=2)
kernel2 = 2.0 * kernels.ExpSine2Kernel(gamma=10.0, log_period=np.log(5.), ndim=2, axes=0)
kernel2 *= kernels.ExpSquaredKernel([15.0], ndim=2, axes=0)
kernel = kernel1 + kernel2
gp = george.GP(kernel)
y = gp.sample(X)
y += yerr * np.random.randn(N)
gp.compute(X, yerr)
plt.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
plt.ylim(-6.5, 6.5)
plt.xlim(0, 10)
plt.xlabel("t")
plt.ylabel("y");
from scipy.optimize import minimize
def nll(params):
    """Negative log-likelihood and its gradient for scipy.optimize.minimize (jac=True)."""
    gp.set_parameter_vector(params)
    loglike = gp.log_likelihood(y, quiet=True)
    grad = gp.grad_log_likelihood(y, quiet=True)
    return -loglike, -grad
params = gp.get_parameter_vector()
params += 0.05*np.random.randn(len(params))
soln = minimize(nll, params, jac=True)
gp.set_parameter_vector(soln.x)
print(soln.success, soln.x)
# Compute the predictive means - note the "kernel" argument
mu1 = gp.predict(y, X, return_cov=False, kernel=kernel1)
mu2 = gp.predict(y, X, return_cov=False, kernel=kernel2)
plt.plot(t, y, ".k", mec="none", alpha=0.3)
plt.plot(t, y - mu1, ".k")
plt.plot(t, mu2)
plt.ylim(-6.5, 6.5)
plt.xlim(0, 10)
plt.xlabel("t")
plt.ylabel("y");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: It can be useful to model a dataset using a mixture of GPs.
Step2: The physical (oscillatory) component is not obvious in this dataset because it is swamped by the systematics.
Step3: Now let's use the trick from above to compute the prediction of component 1 and remove it to see the periodic signal.
|
12,146
|
<ASSISTANT_TASK:>
Python Code:
train = pd.read_json("train.json")
matplotlib.style.use('ggplot')
cuisine_group = train.groupby('cuisine')
cuisine_group.size().sort_values(ascending=True).plot.barh()
plt.show()
lemmatizer = WordNetLemmatizer()
train = pd.read_json("train.json")
train['ing'] = [' '.join([lemmatizer.lemmatize(preprocess(ingr)) for ingr in recette]).strip() for recette in train['ingredients']]
tfidf = TfidfVectorizer(sublinear_tf=True,max_df=0.5,ngram_range=(1,2),stop_words='english',norm='l2',binary=False)
tfidf.fit(train['ing'])
X_train = tfidf.transform(train['ing'])
y_train = train['cuisine']
# encode string labels
lenc = LabelEncoder()
lenc.fit(y_train)
y_train_enc = lenc.transform(y_train)
#power normalization
X_train.data**=0.5
normalize(X_train,copy=False)
test = pd.read_json("test.json")
test['ing'] = [' '.join([lemmatizer.lemmatize(preprocess(ingr)) for ingr in recette]).strip() for recette in test['ingredients']]
X_test = tfidf.transform(test['ing'])
X_test.data**=0.5
normalize(X_test,copy=False)
categories = train['cuisine'].unique()
clf = LinearSVC(C=0.5,multi_class='ovr',dual=True)
crossValidateClassifier(X_train,y_train,clf)
clf = LogisticRegression(C=10.0)
crossValidateClassifier(X_train,y_train,clf)
clf = LinearSVC(C=0.5,multi_class='ovr',dual=True)
test['cuisine']=train_and_test(clf,X_train,y_train,X_test)
test[['id','cuisine']].to_csv("lr_c0.5_power_norm.csv",index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Italian and mexican categories dominate the recipes dataset. We may want later to take this into account in order to make the problem more balanced.
Step2: Note that here we user power scaling which reduces further the effect of frequent terms. After the scaling we re-normalize the data. We use the square root as default value, but one should optimize this value through random search.
Step3: We choose Support Vector Machines in order to train the model, as they provide state-of-the-art results in text classification problems. The cross-validation gives an average of 79.19% in terms of accuracy. Let's try a logistic regression model.
Step4: Accuracy is slightly smaller than SVM's. One should normally try a search (grid/random) in the parameters space for each classifier in order to select the best one.
|
12,147
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import mne
from mne.datasets import sample
from mne.preprocessing import compute_proj_ecg, compute_proj_eog
# getting some data ready
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True, add_eeg_ref=False)
raw.set_eeg_reference()
raw.pick_types(meg=True, ecg=True, eog=True, stim=True)
projs, events = compute_proj_ecg(raw, n_grad=1, n_mag=1, average=True)
print(projs)
ecg_projs = projs[-2:]
mne.viz.plot_projs_topomap(ecg_projs)
# Now for EOG
projs, events = compute_proj_eog(raw, n_grad=1, n_mag=1, average=True)
print(projs)
eog_projs = projs[-2:]
mne.viz.plot_projs_topomap(eog_projs)
raw.info['projs'] += eog_projs + ecg_projs
events = mne.find_events(raw, stim_channel='STI 014')
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
# this can be highly data dependent
event_id = {'auditory/left': 1}
epochs_no_proj = mne.Epochs(raw, events, event_id, tmin=-0.2, tmax=0.5,
proj=False, baseline=(None, 0), reject=reject)
epochs_no_proj.average().plot(spatial_colors=True)
epochs_proj = mne.Epochs(raw, events, event_id, tmin=-0.2, tmax=0.5, proj=True,
baseline=(None, 0), reject=reject)
epochs_proj.average().plot(spatial_colors=True)
evoked = mne.Epochs(raw, events, event_id, tmin=-0.2, tmax=0.5,
proj='delayed', baseline=(None, 0),
reject=reject).average()
# set time instants in seconds (from 50 to 150ms in a step of 10ms)
times = np.arange(0.05, 0.15, 0.01)
evoked.plot_topomap(times, proj='interactive')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Compute SSP projections
Step2: Apply SSP projections
Step3: Yes this was it. Now MNE will apply the projs on demand at any later stage,
Step4: Looks cool right? It is however often not clear how many components you
|
12,148
|
<ASSISTANT_TASK:>
Python Code:
doc_skill = buildDocSkillMat(jd_docs, skill_df, folder=SKILL_DIR)
with(open(SKILL_DIR + 'doc_skill.mtx', 'w')) as f:
mmwrite(f, doc_skill)
extracted_skill_df = getSkills4Docs(docs=doc_index['doc'], doc_term=doc_skill, skills=skills)
df = pd.merge(doc_index, extracted_skill_df, left_index=True, right_index=True)
print(df.shape)
df.head()
# sanity check
# df.head(3).to_csv(LDA_DIR + 'tmp/skills_3_sample_docs.csv', index=False)
df.to_csv(SKILL_DIR + 'doc_index.csv') # later no need to extract skill again
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get skills in each JD
|
12,149
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
sns.set_context('poster')
moma = pd.read_csv('Artworks.csv', index_col=12, parse_dates=[10])
moma = moma.dropna(subset=['DateAcquired'])
firsts = moma.drop_duplicates('Artist')
import sexmachine.detector as detector # requires python2
g = detector.Detector()
def infer_gender(fullname):
    """Guess the gender of the first token of *fullname* with the module-level detector ``g``.

    Returns the detector's label (e.g. 'male', 'female', 'andy') or None when
    the value cannot be split (e.g. NaN floats from pandas) or the detector fails.
    """
    try:
        return g.get_gender(fullname.split()[0])
    except Exception:  # narrowed from a bare except; explicit None instead of bare return
        return None
firsts.loc[:, 'Gender'] = firsts['Artist'].apply(infer_gender)
firsts.groupby('Gender').size()
gender_trends = (firsts
.groupby([pd.Grouper(key='DateAcquired', freq='5A'), 'Gender'])
.size()
.unstack())
gender_trends['percent female'] = 100. * gender_trends['female'] / (gender_trends['male'] + gender_trends['female'])
ax = gender_trends['percent female'].plot()
ax.set_title('Estimated percentage of new artists added to the MOMA collection who are female')
ax.set_xlabel('')
ax.plot(ax.get_xlim(), [50, 50], 'r--')
ax.set_ylim(0, 100);
fig, ax = plt.subplots()
ax.set_ylim(0, 100)
ax.set_xlim(1930, 2300)
ax = sns.regplot(x=gender_trends.reset_index()['DateAcquired'].apply(lambda x: x.year),
y=gender_trends["percent female"])
ax.set_title('Estimated percentage of new artists added to the MOMA collection who are female')
ax.set_xlabel('')
ax.set_ylabel('')
ax.plot(ax.get_xlim(), [50, 50], 'r--');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then we define a helper function that takes a full name as a string and returns sexmachine's best guess for the gender of the first word in that string (which is assumed to be the first name)
Step2: Now we apply the infer_gender function to the Artists field in the dataset of unique artists, and take a look at the distributions of inferred gender.
Step3: As expected, it's mostly male. But more importantly, we see a couple of the problems with sexmachine. Names it cannot guess are labeled andy (for androgynous?!). Names it is not confident about are labeled mostly_male or mostly_female. Among the 12921 artists in the collection, nearly a quarter have first names whose gender sexmachine is unable to guess.
Step4: Assuming you think it's desirable that the MOMA collection represents female artists, there's bad news and good news from this plot
|
12,150
|
<ASSISTANT_TASK:>
Python Code:
import random
num = [random.randint(0,10) for i in range(1000)]
hist = {}
for i in num:
hist[i] = hist.get(i, 0) + 1
hist
def count1(num):
    """Return a {value: occurrences} histogram of the items in *num*."""
    hist = {}
    for value in num:
        try:
            hist[value] += 1
        except KeyError:
            hist[value] = 1
    return hist
%timeit count1(num)
def count2(num):
hist = {}
for i in num:
if i in hist:
hist[i] += 1
else:
hist[i] = 1
return hist
%timeit count2(num)
from collections import Counter
def count3(num):
return Counter(num)
%timeit count3(num)
from cytoolz import count, groupby
import cytoolz
def count4(num):
return list(cytoolz.map(lambda x: (x[0], count(x[1])),
groupby(lambda x:x, num).items()))
%timeit count4(num)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Mesurer le temps que cela prend
Step2: Comparons avec une autre implémentation
Step3: Une version plus rapide
Step4: Une dernière avec cytoolz
|
12,151
|
<ASSISTANT_TASK:>
Python Code:
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.5
from google.cloud import bigquery
import tensorflow as tf
import numpy as np
import shutil
print(tf.__version__)
CSV_COLUMNS = ['fare_amount', 'pickuplon','pickuplat','dropofflon','dropofflat','passengers', 'key']
LABEL_COLUMN = 'fare_amount'
DEFAULTS = [[0.0], [-74.0], [40.0], [-74.0], [40.7], [1.0], ['nokey']]
def read_dataset(filename, mode, batch_size = 512):
def decode_csv(value_column):
columns = tf.compat.v1.decode_csv(value_column, record_defaults = DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
return features, label
# Create list of file names that match "glob" pattern (i.e. data_file_*.csv)
filenames_dataset = tf.data.Dataset.list_files(filename)
# Read lines from text files
textlines_dataset = filenames_dataset.flat_map(tf.data.TextLineDataset)
# Parse text lines as comma-separated values (CSV)
dataset = textlines_dataset.map(decode_csv)
# Note:
# use tf.data.Dataset.flat_map to apply one to many transformations (here: filename -> text lines)
# use tf.data.Dataset.map to apply one to one transformations (here: text line -> feature list)
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset
INPUT_COLUMNS = [
tf.feature_column.numeric_column('pickuplon'),
tf.feature_column.numeric_column('pickuplat'),
tf.feature_column.numeric_column('dropofflat'),
tf.feature_column.numeric_column('dropofflon'),
tf.feature_column.numeric_column('passengers'),
]
def add_more_features(feats):
# Nothing to add (yet!)
return feats
feature_cols = add_more_features(INPUT_COLUMNS)
## TODO: Create serving input function
def serving_input_fn():
#ADD CODE HERE
return tf.estimator.export.ServingInputReceiver(features, json_feature_placeholders)
## TODO: Create train and evaluate function using tf.estimator
def train_and_evaluate(output_dir, num_train_steps):
#ADD CODE HERE
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
OUTDIR = './taxi_trained'
shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time
tf.compat.v1.summary.FileWriterCache.clear()
train_and_evaluate(OUTDIR, num_train_steps = 2000)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h2> Input </h2>
Step2: <h2> Create features out of input data </h2>
Step3: <h2> Serving input function </h2>
Step4: <h2> tf.estimator.train_and_evaluate </h2>
Step5: <h2>Run training</h2>
|
12,152
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import shutil
import numpy as np
from os import remove
import cPickle as pkl
from os.path import expanduser, exists
import os
import sys
import logging
logging.basicConfig(level=10)
logger = logging.getLogger()
# Add the mlp python src director to the import search path
mlp_folder = "../mlp"
sys.path.append(os.path.abspath(mlp_folder))
# Now we are able to use all the packages defined in mlp_folder
# import training.acceleration_dataset
from training.acceleration_dataset import AccelerationDataset
dataset = AccelerationDataset('datasets/combined/18-09-15-triceps-biceps-lateral.zip')
print "Number of training examples:", dataset.num_train_examples
print "Number of test examples:", dataset.num_test_examples
print "Number of features:", dataset.num_features
print "Number of labels:", dataset.num_labels
from ipy_table import *
from operator import itemgetter
def calculate_dist(labels):
dist = np.zeros((dataset.num_labels, 1))
for i in range(0, len(labels)):
dist[labels[i], 0] += 1
return dist
train_dist = calculate_dist(dataset.y_train)
test_dist = calculate_dist(dataset.y_test)
train_ratio = train_dist / dataset.num_train_examples
test_ratio = test_dist / dataset.num_test_examples
# Fiddle around to get it into table shape
table = np.hstack((np.zeros((dataset.num_labels,1), dtype=int), train_dist, train_ratio, test_dist, test_ratio))
table = np.vstack((np.zeros((1, 5), dtype=int), table)).tolist()
human_labels = map(dataset.human_label_for, range(0,dataset.num_labels))
for i,s in enumerate(human_labels):
table[i + 1][0] = s
table.sort(lambda x,y: cmp(x[1], y[1]))
table[0][0] = ""
table[0][1] = "Train"
table[0][2] = "Train %"
table[0][3] = "Test"
table[0][4] = "Test %"
make_table(table)
set_global_style(float_format='%0.0f', align="center")
set_column_style(2, float_format='%0.2f%%')
set_column_style(4, float_format='%0.2f%%')
set_column_style(0, align="left")
from matplotlib import pyplot, cm
from pylab import *
# Choose some random examples to plot from the training data
number_of_examples_to_plot = 3
plot_ids = np.random.random_integers(0, dataset.num_train_examples - 1, number_of_examples_to_plot)
print "Ids of plotted examples:",plot_ids
# Retrieve a human readable label given the idx of an example
def label_of_example(i):
return dataset.human_label_for(dataset.y_train[i])
figure(figsize=(20,10))
ax1 = subplot(311)
setp(ax1.get_xticklabels(), visible=False)
ax1.set_ylabel('X - Acceleration')
ax2 = subplot(312, sharex=ax1)
setp(ax2.get_xticklabels(), visible=False)
ax2.set_ylabel('Y - Acceleration')
ax3 = subplot(313, sharex=ax1)
ax3.set_ylabel('Z - Acceleration')
for i in plot_ids:
c = np.random.random((3,))
ax1.plot(range(0, dataset.num_features / 3), dataset.X_train[i,0:400], '-o', c=c)
ax2.plot(range(0, dataset.num_features / 3), dataset.X_train[i,400:800], '-o', c=c)
ax3.plot(range(0, dataset.num_features / 3), dataset.X_train[i,800:1200], '-o', c=c)
legend(map(label_of_example, plot_ids))
suptitle('Feature values for the first three training examples', fontsize=16)
xlabel('Time')
show()
from training.mlp_model import MLPMeasurementModel
from neon.layers import Affine, Dropout, GeneralizedCost
from neon.transforms import Rectlin, Logistic
from neon.initializers import Uniform, Constant
from neon.models import Model
# Lets define our neural network
init_norm = Uniform(low=-0.1,high=0.1)
bias_init = Constant(val = 1.0)
layers = []
layers.append(Affine(
nout=250,
init=init_norm,
bias=bias_init,
activation=Rectlin()))
layers.append(Dropout(
name="do_2",
keep = 0.9))
layers.append(Affine(
nout=100,
init=init_norm,
bias=bias_init,
activation=Rectlin()))
layers.append(Dropout(
name="do_3",
keep = 0.9))
layers.append(Affine(
nout = dataset.num_labels,
init=init_norm,
bias=bias_init,
activation=Logistic()))
mlpmodel = MLPMeasurementModel('models')
model = mlpmodel.train(dataset)
import numpy as np
import math
from matplotlib import pyplot, cm
from pylab import *
from IPython.html import widgets
from IPython.html.widgets import interact
layer_names = map(lambda l: l[1].name+"_"+str(l[0]), filter(lambda l: l[1].has_params, enumerate(model.layers)))
def closestSqrt(i):
N = int(math.sqrt(i))
while True:
M = int(i / N)
if N * M == i:
return N, M
N -= 1
def plot_filters(**kwargs):
n = kwargs['n']
layer_name = kwargs['layer']
dest_path = os.path.join('models', 'workout-mlp-ep_' + str(n))
params = pkl.load(open(dest_path, 'r'))
wts = params['layer_params_states'][layer_names.index(layer_name)]['params']
nrows, ncols = closestSqrt(wts.shape[0])
fr, fc = closestSqrt(wts.shape[1])
fi = 0
W = np.zeros((fr*nrows, fc*ncols))
for row, col in [(row, col) for row in range(nrows) for col in range(ncols)]:
W[fr*row:fr*(row+1):,fc*col:fc*(col+1)] = wts[fi].reshape(fr,fc)
fi = fi + 1
matshow(W, cmap=cm.gray)
title('Visualizing weights of '+layer_name+' in epoch ' + str(n) )
show()
_i = interact(plot_filters,
layer=widgets.widget_selection.ToggleButtons(options = layer_names),
n=widgets.IntSlider(description='epochs',
min=0, max=mlpmodel.max_epochs-1, value=0, step=1))
import h5py
from neon.transforms import Misclassification
logged = h5py.File(os.path.join('models', 'workout-mlp.h5'), "r")
epochs = logged['config'].attrs['total_epochs']
minibatches = logged['config'].attrs['total_minibatches']
print('Test error = %.1f%%'
% (model.eval(dataset.test(), metric=Misclassification())*100))
print('Train error = %.1f%%'
% (model.eval(dataset.train(), metric=Misclassification())*100))
print "Minibatches per epoch: %0.2f" % (minibatches * 1.0/epochs)
pyplot.plot(range(0, minibatches), logged['cost/train'], linewidth=3, label='train')
pyplot.plot(range(minibatches / epochs, minibatches, minibatches / epochs), logged['cost/validation'], linewidth=3, label='test')
pyplot.grid()
pyplot.legend()
pyplot.xlabel("minibatch")
pyplot.ylabel("cost")
pyplot.show()
from sklearn.metrics import confusion_matrix
from ipy_table import *
def predict(model, dataset):
running_error = 0.0
nprocessed = 0
dataset.reset()
predictions = np.empty((3, 0), dtype="float32")
nprocessed = 0
for x, t in dataset:
pred = model.fprop(x, inference=True).asnumpyarray()
bsz = min(dataset.ndata - nprocessed, model.be.bsz)
nprocessed += bsz
predictions = np.hstack((predictions, pred[:,:bsz]))
return predictions
# confusion_matrix(y_true, y_pred)
predicted = predict(model, dataset.test())
y_true = dataset.y_test
y_pred = np.argmax(predicted, axis = 0)
confusion_mat = confusion_matrix(y_true, y_pred, range(0,dataset.num_labels))
# Fiddle around with cm to get it into table shape
confusion_mat = vstack((np.zeros((1,dataset.num_labels), dtype=int), confusion_mat))
confusion_mat = hstack((np.zeros((dataset.num_labels + 1, 1), dtype=int), confusion_mat))
table = confusion_mat.tolist()
human_labels = map(dataset.human_label_for, range(0,dataset.num_labels))
for i,s in enumerate(human_labels):
table[0][i+1] = s
table[i+1][0] = s
table[0][0] = "actual \ predicted"
mt = make_table(table)
set_row_style(0, color='lightGray', rotate = "315deg")
set_column_style(0, color='lightGray')
set_global_style(align='center')
for i in range(1, dataset.num_labels + 1):
for j in range(1, dataset.num_labels + 1):
if i == j:
set_cell_style(i,j, color='lightGreen', width = 80)
elif table[i][j] > 20:
set_cell_style(i,j, color='Pink')
elif table[i][j] > 0:
set_cell_style(i,j, color='lightYellow')
mt
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This time we are not going to generate the data but rather use real world annotated training examples.
Step2: At first we want to inspect the class distribution of the training and test examples.
Step3: Let's have a look at the generated data. We will plot some of the examples of the different classes.
Step4: Now we are going to create a neon model. We will start with a realy simple one layer preceptron having 500 hidden units.
Step5: To check weather the network is learning something we will plot the weight matrices of the different training epochs.
Step6: Let's visualize the error to see if the network has trained properly or if we are overfitting.
Step7: Let's also have a look at the confusion matrix for the test dataset.
|
12,153
|
<ASSISTANT_TASK:>
Python Code:
from beampy import *
# We first create a new document for our presentation
# Remove quiet=True to see Beampy compiler output
doc = document(quiet=True)
# Then we create a new slide with the title "My first new slide"
with slide('My first slide title'):
# All the slide contents are functions added inside the with statement.
# Here we add a text content using the Beampy module text
text('Hello Beampy!')
# At the end we save our presentation to an HTML file.
# The save command will launch the compilation of all slides of the
# presentation.
save('hello.html')
# If you want to save it to pdf just change the file extension.
# save('hello.pdf')
# This line is just for displaying the slide in this tutorial webpage
# Remove it in your presentation
display_matplotlib(gcs())
with slide('My first slide title'):
text('Hello Beampy!', x=0, y=0)
display_matplotlib(gcs())
with slide('My first slide title'):
text('Hello Beampy!', x=0.5, y=0.5*3/4.)
display_matplotlib(gcs())
with slide('My first slide title'):
text('Hello Beampy!', x=100, y=100)
display_matplotlib(gcs())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Change the position of the text element
Step2: When value of x and y are lower than 1.0, they are by default in percent
Step3: Now we could also set fixed position for x and y. To do so, if x and y
|
12,154
|
<ASSISTANT_TASK:>
Python Code:
# For numerical stuff
import pandas as pd
# Plotting
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 7.0)
# Some preprocessing utilities
from sklearn.cross_validation import train_test_split # Data splitting
from sklearn.utils import shuffle
# The different classifiers
from sklearn.neighbors import KNeighborsClassifier # Nearest Neighbor - Analogizer
from sklearn.naive_bayes import GaussianNB # Bayesian Classifier - Bayesian
from sklearn.neural_network import MLPClassifier # Neural Network - Connectionist
# Model result function
from sklearn.metrics import classification_report,accuracy_score
data = pd.read_csv('Data/pulsar.csv')
# Show some information
print ('Dataset has %d rows and %d columns including features and labels'%(data.shape[0],data.shape[1]))
print (data.columns.values[0:-1])
ax = plt.figure().gca(projection='3d')
ax.scatter3D(data['std_pf'], data['mean_dm'], data['mean_int_pf'],c=data['class'],alpha=.25)
ax.set_xlabel('std_pf')
ax.set_ylabel('mean_dm')
ax.set_zlabel('mean_int_pf')
# Lets shuffle the rows of the data 10 times
for i in range(10):
data = shuffle(data)
# Now split the dataset into seperate variabels for features and labels
features = data.ix[:,data.columns != 'class'].values # All columns except class
labels = data['class'].values # Class labels
# Do a 70 - 30 split of the whole data for training and testing
# The last argument specifies the fraction of samples for testing
train_data,test_data,train_labels,test_labels = train_test_split(features,labels,test_size=.3)
#Print some info
print ('Number of training data points : %d'%(train_data.shape[0]))
print ('Number of testing data points : %d'%(test_data.shape[0]))
# K nearest neighbor
knn = KNeighborsClassifier()
knn.fit(train_data,train_labels)
# Naive Bayes
nb = GaussianNB()
nb.fit(train_data,train_labels)
# MLP
mlp = MLPClassifier(solver='sgd',hidden_layer_sizes=(5, 1))
mlp.fit(train_data,train_labels)
# Pretty function to test a model and print accuracy score
def evaluate(model,modelname,test_data,test_labels):
predictions = model.predict(test_data) # Do the actual prediction
print('====================================================')
print('Classification Report for %s'%modelname)
print('===================================================')
print(classification_report(test_labels,predictions,target_names=['Non Pulsar','Pulsar']))
print('\n The model is %f accurate' %(accuracy_score(test_labels,predictions)*100))
print('====================================================\n\n')
# Making some stuff easy
models =[knn,nb,mlp]
model_names =['KNN','Naive Bayes','Neural Network']
for i in range(0,3):
evaluate(models[i],model_names[i],test_data,test_labels)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load dataset
Step2: Lets print the feature names
Step3: Do a scatter plot
Step4: Get the features and labels
Step5: Split data to training and validation sets
Step6: Lets do the training on different algorithms
Step7: Scikit Naive Bayes
Step8: Scikit MLP
Step9: Fancy function to print results for model evaluation
Step10: Now Lets test each classifier and disply their accuracy
|
12,155
|
<ASSISTANT_TASK:>
Python Code:
mdest = '../result/random_network/mixture/'
sdest = '../result/random_network/sparse/'
m_f = '%d_%.2f_%.2f_%.2f_%.2f_%.2f_%.2f.pkl'
s_f = '%d_%.2f_%.2f_%.2f.pkl'
colors = cm.rainbow(np.linspace(0, 1, 7))
np.random.shuffle(colors)
colors = itertools.cycle(colors)
def degree_dist_list(graph, ddist):
_ddict = nx.degree(graph)
_ddist = defaultdict(int)
for k, v in _ddict.items():
_ddist[v] += 1
for k, v in _ddist.items():
ddist[k].append(v)
del _ddict, _ddist
return ddist
def avg_degree_dist(path_list):
Compute average degree distribution over repeated simulations
ddist = defaultdict(list)
for path in path_list:
sample = pickle.load(open(path, 'rb'))
G = sparse_to_networkx(sample[0])
degree_dist_list(G, ddist)
del G, sample
avg_dist = dict()
for k, v in ddist.items():
avg_dist[k] = sum(ddist[k])/len(ddist[k])
return avg_dist
def scatter(_ddist, path, color=None):
print scatter plot of given degree distribution dictionary
plt.scatter(list(_ddist.keys()), list(_ddist.values()), label=os.path.basename(path), color=color)
def degree_dist(graph):
Compute digree distribution of given graph
_ddict = nx.degree(graph)
_ddist = defaultdict(int)
for k, v in _ddict.items():
_ddist[v] += 1
return _ddist
alpha = 100
sigma = 0.5
tau = 1
d_alpha = 100
d_sigma = -1
d_taus = [0.1, 1]
n_samples = 5
plt.figure(figsize=(12, 8))
for d_tau in d_taus:
path_list = [os.path.join(mdest, m_f % (i, alpha, sigma, tau, d_alpha, d_sigma, d_tau)) for i in range(n_samples)]
ddist = avg_degree_dist(path_list)
scatter(ddist, path_list[0], next(colors))
alphas = [100, 150]
for alpha in alphas:
path_list = list()
for i in range(n_samples):
path_list.append(os.path.join(sdest, s_f % (i, alpha, sigma, tau)))
ddist = avg_degree_dist(path_list)
scatter(ddist, path_list[0], next(colors))
ax = plt.subplot()
ax.set_xscale("log")
ax.set_yscale("log")
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylabel('# of nodes')
plt.xlabel('Node degree')
plt.ylim(0.5); plt.xlim(0.5); plt.show()
sigmas = [0, 0.5, 0.9]
alpha = 100
tau = 1
d_alpha = 100
d_sigma = -1
d_tau = 1
plt.figure(figsize=(12, 8))
for sigma in sigmas:
path_list = [os.path.join(mdest, m_f % (i, alpha, sigma, tau, d_alpha, d_sigma, d_tau)) for i in range(n_samples)]
ddist = avg_degree_dist(path_list)
scatter(ddist, path_list[0], next(colors))
ax = plt.subplot()
ax.set_xscale("log")
ax.set_yscale("log")
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylabel('# of nodes')
plt.xlabel('Node degree')
plt.ylim(0.5); plt.xlim(0.5); plt.show()
alpha = 100
sigma = 0.5
taus = [0.1, 0.5, 1]
d_alpha = 100
d_sigma = -1
d_tau = 1
plt.figure(figsize=(12, 8))
for tau in taus:
path_list = [os.path.join(mdest, m_f % (i, alpha, sigma, tau, d_alpha, d_sigma, d_tau)) for i in range(n_samples)]
ddist = avg_degree_dist(path_list)
scatter(ddist, path_list[0], next(colors))
ax = plt.subplot()
ax.set_xscale("log")
ax.set_yscale("log")
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylabel('# of nodes')
plt.xlabel('Node degree')
plt.ylim(0.5); plt.xlim(0.5); plt.show()
alpha = 100
sigma = 0.5
tau = 1
d_alpha = 100
d_tau = 1
sigmas = [-0.5, -1, -2]
plt.figure(figsize=(12, 8))
plt.figure(figsize=(12, 8))
for d_sigma in sigmas:
path_list = [os.path.join(mdest, m_f % (i, alpha, sigma, tau, d_alpha, d_sigma, d_tau)) for i in range(n_samples)]
ddist = avg_degree_dist(path_list)
scatter(ddist, path_list[0], next(colors))
ax = plt.subplot()
ax.set_xscale("log")
ax.set_yscale("log")
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylabel('# of nodes')
plt.xlabel('Node degree')
plt.ylim(0.5); plt.xlim(0.5); plt.show()
alpha = 100
sigma = 0.5
tau = 1
d_alpha = 100
d_sigma = -1
taus = [0.1, 0.5, 1]
plt.figure(figsize=(12, 8))
for d_tau in taus:
path_list = [os.path.join(mdest, m_f % (i, alpha, sigma, tau, d_alpha, d_sigma, d_tau)) for i in range(n_samples)]
ddist = avg_degree_dist(path_list)
scatter(ddist, path_list[0], next(colors))
# for d_tau in taus:
# mfile = os.path.join(mdest, m_f % (i, alpha, sigma, tau, d_alpha, d_sigma, d_tau))
# if os.path.exists(mfile):
# sample = pickle.load(open(mfile, 'rb'))
# G = sparse_to_networkx(sample[0])
# ddist = degree_dist(G)
# scatter(ddist, mfile, next(colors))
ax = plt.subplot()
ax.set_xscale("log")
ax.set_yscale("log")
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylabel('# of nodes')
plt.xlabel('Node degree')
plt.ylim(0.5); plt.xlim(0.5); plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Properties
Step4: Comparision bewteen sparse and mixed graph
Step5: Varying sigma in the sparse part of the mixed graph
Step6: Varying tau in the sparse part of the mixed graph
Step7: Varying sigma in the dense part of the mixed graph
Step8: Varying tau in the dense part of the mixed graph
|
12,156
|
<ASSISTANT_TASK:>
Python Code:
def sqrt(n):
"compute square root of n"
PRECISION = 0.00000001 # stop iterating when we converge with this delta
x_0 = 1.0 # pick any old initial value
x_prev = x_0
while True: # Python doesn't have repeat-until loop so fake it
#print(x_prev)
x_new = 0.5 * (x_prev + n/x_prev)
if abs(x_new - x_prev) < PRECISION:
return x_new
x_prev = x_new # x_i+1 becomes x_i (previous value)
sqrt(100)
import numpy as np
def check(n):
assert np.isclose(sqrt(n), np.sqrt(n))
def test_sqrt():
check(125348)
check(89.2342)
check(100)
check(1)
check(0)
test_sqrt()
def sqrt_with_trace(n):
"compute square root of n"
PRECISION = 0.00000001 # stop iterating when we converge with this delta
x_0 = 1.0 # pick any old initial value
x_prev = x_0
while True: # Python doesn't have repeat-until loop so fake it
print(x_prev)
x_new = 0.5 * (x_prev + n/x_prev)
if abs(x_new - x_prev) < PRECISION:
return x_new
x_prev = x_new
sqrt_with_trace(125348.000000)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To test our square root approximation, we can compare it to math.sqrt() and use numpy's isclose to do the comparison.
Step2: As you can see we can define a function within a function. It's not special in any way except that code outside of test_sqrt() cannot see function check(). On the other hand, check() can see the symbols outside of test_sqrt(), such as our sqrt().
|
12,157
|
<ASSISTANT_TASK:>
Python Code:
from folium import plugins
m = folium.Map([45, 3], zoom_start=4)
plugins.ScrollZoomToggler().add_to(m)
m.save(os.path.join('results', 'Plugins_0.html'))
m
import numpy as np
N = 100
data = np.array(
[
np.random.uniform(low=35, high=60, size=N), # Random latitudes in Europe.
np.random.uniform(low=-12, high=30, size=N), # Random longitudes in Europe.
range(N), # Popups texts are simple numbers.
]
).T
m = folium.Map([45, 3], zoom_start=4)
plugins.MarkerCluster(data).add_to(m)
m.save(os.path.join('results', 'Plugins_1.html'))
m
m = folium.Map([45, 3], zoom_start=1)
plugins.Terminator().add_to(m)
m.save(os.path.join('results', 'Plugins_2.html'))
m
m = folium.Map([30, 0], zoom_start=3)
plugins.BoatMarker(
location=(34, -43),
heading=45,
wind_heading=150,
wind_speed=45,
color='#8f8'
).add_to(m)
plugins.BoatMarker(
location=(46, -30),
heading=-20,
wind_heading=46,
wind_speed=25,
color='#88f'
).add_to(m)
m.save(os.path.join('results', 'Plugins_3.html'))
m
from folium.plugins.beautify_icon import BeautifyIcon
from folium import Marker
m = folium.Map([45.5, -122], zoom_start=3)
icon_plane = BeautifyIcon(
icon='plane',
border_color='#b3334f',
text_color='#b3334f',
icon_shape='triangle')
icon_number = BeautifyIcon(
border_color='#00ABDC',
text_color='#00ABDC',
number=10,
inner_icon_style='margin-top:0;')
Marker(
location=[46, -122],
popup='Portland, OR',
icon=icon_plane
).add_to(m)
Marker(
location=[50, -122],
popup='Portland, OR',
icon=icon_number
).add_to(m)
m.save(os.path.join('results', 'Plugins_4.html'))
m
m = folium.Map(location=[41.9, -97.3], zoom_start=4)
plugins.Fullscreen(
position='topright',
title='Expand me',
title_cancel='Exit me',
force_separate_button=True).add_to(m)
m.save(os.path.join('results', 'Plugins_5.html'))
m
from folium import plugins
m = folium.Map(
location=[35.68159659061569, 139.76451516151428],
zoom_start=16
)
# Lon, Lat order.
lines = [
{
'coordinates': [
[139.76451516151428, 35.68159659061569],
[139.75964426994324, 35.682590062684206],
],
'dates': [
'2017-06-02T00:00:00',
'2017-06-02T00:10:00'
],
'color': 'red'
},
{
'coordinates': [
[139.75964426994324, 35.682590062684206],
[139.7575843334198, 35.679505030038506],
],
'dates': [
'2017-06-02T00:10:00',
'2017-06-02T00:20:00'
],
'color': 'blue'
},
{
'coordinates': [
[139.7575843334198, 35.679505030038506],
[139.76337790489197, 35.678040905014065],
],
'dates': [
'2017-06-02T00:20:00',
'2017-06-02T00:30:00'
],
'color': 'green',
'weight': 15,
},
{
'coordinates': [
[139.76337790489197, 35.678040905014065],
[139.76451516151428, 35.68159659061569],
],
'dates': [
'2017-06-02T00:30:00',
'2017-06-02T00:40:00'
],
'color': '#FFFFFF',
},
]
features = [
{
'type': 'Feature',
'geometry': {
'type': 'LineString',
'coordinates': line['coordinates'],
},
'properties': {
'times': line['dates'],
'style': {
'color': line['color'],
'weight': line['weight'] if 'weight' in line else 5
}
}
}
for line in lines
]
plugins.TimestampedGeoJson({
'type': 'FeatureCollection',
'features': features,
}, period='PT1M', add_last_point=True).add_to(m)
m.save(os.path.join('results', 'Plugins_6.html'))
m
from folium import plugins
points = [
{"time":'2017-06-02',
"popup":"<h1>address1</h1>",
"coordinates":[-2.548828, 51.467697]},
{"time":'2017-07-02',
"popup":"<h2 style=\"color:blue;\">address2<h2>",
"coordinates":[-0.087891, 51.536086]},
{"time":'2017-08-02',
"popup":"<h2 style=\"color:orange;\">address3<h2>",
"coordinates":[-6.240234, 53.383328]},
{"time":'2017-09-02',
"popup":"<h2 style=\"color:green;\">address4<h2>",
"coordinates":[-1.40625, 60.261617]},
{"time":'2017-10-02',
"popup":<table style=\"width:100%\">
<tr>
<th>Firstname</th>
<th>Lastname</th>
<th>Age</th>
</tr>
<tr>
<td>Jill</td>
<td>Smith</td>
<td>50</td>
</tr>
<tr>
<td>Eve</td>
<td>Jackson</td>
<td>94</td>
</tr>
</table>,
"coordinates":[-1.516113, 53.800651]}
]
features = [{
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": point['coordinates'],
},
"properties": {
"time": point['time'],
"popup": point['popup'],
"id":"house",
'icon':'marker',
'iconstyle':{
'iconUrl': 'http://downloadicons.net/sites/default/files/small-house-with-a-chimney-icon-70053.png',
'iconSize': [20, 20]
}
}
} for point in points]
features.append({
'type': 'Feature',
'geometry': {
'type': 'LineString',
'coordinates': [[-2.548828, 51.467697], [-0.087891, 51.536086],
[-6.240234, 53.383328], [-1.40625, 60.261617],
[-1.516113, 53.800651]
],
},
'properties': {
'popup':'Current address',
'times': ['2017-06-02', '2017-07-02',
'2017-08-02', '2017-09-02',
'2017-10-02'],
'icon':'circle',
'iconstyle':{
'fillColor': 'green',
'fillOpacity': 0.6,
'stroke': 'false',
'radius': 13
},
'style': {'weight':0
},
'id':'man'
}
})
m = folium.Map(
location=[56.096555, -3.64746],
tiles = 'cartodbpositron',
zoom_start=5
)
plugins.TimestampedGeoJson(
{
'type': 'FeatureCollection',
'features': features
},
period='P1M',
add_last_point=True,
auto_play=False,
loop=False,
max_speed=1,
loop_button=True,
date_options='YYYY/MM/DD',
time_slider_drag_update=True,
duration='P2M'
).add_to(m)
m.save(os.path.join('results', 'Plugins_7.html'))
m
m = folium.Map(
location=[0, 0],
zoom_start=6
)
fg = folium.FeatureGroup()
m.add_child(fg)
g1 = plugins.FeatureGroupSubGroup(fg, 'g1')
m.add_child(g1)
g2 = plugins.FeatureGroupSubGroup(fg, 'g2')
m.add_child(g2)
folium.Marker([-1,-1]).add_to(g1)
folium.Marker([1,1]).add_to(g1)
folium.Marker([-1,1]).add_to(g2)
folium.Marker([1,-1]).add_to(g2)
l = folium.LayerControl().add_to(m)
m
m = folium.Map(
location=[0, 0],
zoom_start=6
)
mcg = folium.plugins.MarkerCluster(control=False)
m.add_child(mcg)
g1 = folium.plugins.FeatureGroupSubGroup(mcg, 'g1')
m.add_child(g1)
g2 = folium.plugins.FeatureGroupSubGroup(mcg, 'g2')
m.add_child(g2)
folium.Marker([-1,-1]).add_to(g1)
folium.Marker([1,1]).add_to(g1)
folium.Marker([-1,1]).add_to(g2)
folium.Marker([1,-1]).add_to(g2)
l = folium.LayerControl().add_to(m)
m
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this notebook we show a few illustrations of folium's plugin extensions.
Step2: MarkerCluster
Step3: Terminator
Step4: Leaflet.boatmarker
Step5: Leaflet.BeautifyIcon
Step6: Fullscreen
Step8: Timestamped GeoJSON
Step9: FeatureGroupSubGroup
Step10: Marker clusters across groups
|
12,158
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ncc', 'sandbox-3', 'land')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
12,159
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'inm', 'sandbox-2', 'ocean')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables
Step9: 2. Key Properties --> Seawater Properties
Step10: 2.2. Eos Functional Temp
Step11: 2.3. Eos Functional Salt
Step12: 2.4. Eos Functional Depth
Step13: 2.5. Ocean Freezing Point
Step14: 2.6. Ocean Specific Heat
Step15: 2.7. Ocean Reference Density
Step16: 3. Key Properties --> Bathymetry
Step17: 3.2. Type
Step18: 3.3. Ocean Smoothing
Step19: 3.4. Source
Step20: 4. Key Properties --> Nonoceanic Waters
Step21: 4.2. River Mouth
Step22: 5. Key Properties --> Software Properties
Step23: 5.2. Code Version
Step24: 5.3. Code Languages
Step25: 6. Key Properties --> Resolution
Step26: 6.2. Canonical Horizontal Resolution
Step27: 6.3. Range Horizontal Resolution
Step28: 6.4. Number Of Horizontal Gridpoints
Step29: 6.5. Number Of Vertical Levels
Step30: 6.6. Is Adaptive Grid
Step31: 6.7. Thickness Level 1
Step32: 7. Key Properties --> Tuning Applied
Step33: 7.2. Global Mean Metrics Used
Step34: 7.3. Regional Metrics Used
Step35: 7.4. Trend Metrics Used
Step36: 8. Key Properties --> Conservation
Step37: 8.2. Scheme
Step38: 8.3. Consistency Properties
Step39: 8.4. Corrected Conserved Prognostic Variables
Step40: 8.5. Was Flux Correction Used
Step41: 9. Grid
Step42: 10. Grid --> Discretisation --> Vertical
Step43: 10.2. Partial Steps
Step44: 11. Grid --> Discretisation --> Horizontal
Step45: 11.2. Staggering
Step46: 11.3. Scheme
Step47: 12. Timestepping Framework
Step48: 12.2. Diurnal Cycle
Step49: 13. Timestepping Framework --> Tracers
Step50: 13.2. Time Step
Step51: 14. Timestepping Framework --> Baroclinic Dynamics
Step52: 14.2. Scheme
Step53: 14.3. Time Step
Step54: 15. Timestepping Framework --> Barotropic
Step55: 15.2. Time Step
Step56: 16. Timestepping Framework --> Vertical Physics
Step57: 17. Advection
Step58: 18. Advection --> Momentum
Step59: 18.2. Scheme Name
Step60: 18.3. ALE
Step61: 19. Advection --> Lateral Tracers
Step62: 19.2. Flux Limiter
Step63: 19.3. Effective Order
Step64: 19.4. Name
Step65: 19.5. Passive Tracers
Step66: 19.6. Passive Tracers Advection
Step67: 20. Advection --> Vertical Tracers
Step68: 20.2. Flux Limiter
Step69: 21. Lateral Physics
Step70: 21.2. Scheme
Step71: 22. Lateral Physics --> Momentum --> Operator
Step72: 22.2. Order
Step73: 22.3. Discretisation
Step74: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Step75: 23.2. Constant Coefficient
Step76: 23.3. Variable Coefficient
Step77: 23.4. Coeff Background
Step78: 23.5. Coeff Backscatter
Step79: 24. Lateral Physics --> Tracers
Step80: 24.2. Submesoscale Mixing
Step81: 25. Lateral Physics --> Tracers --> Operator
Step82: 25.2. Order
Step83: 25.3. Discretisation
Step84: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Step85: 26.2. Constant Coefficient
Step86: 26.3. Variable Coefficient
Step87: 26.4. Coeff Background
Step88: 26.5. Coeff Backscatter
Step89: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Step90: 27.2. Constant Val
Step91: 27.3. Flux Type
Step92: 27.4. Added Diffusivity
Step93: 28. Vertical Physics
Step94: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Step95: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Step96: 30.2. Closure Order
Step97: 30.3. Constant
Step98: 30.4. Background
Step99: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Step100: 31.2. Closure Order
Step101: 31.3. Constant
Step102: 31.4. Background
Step103: 32. Vertical Physics --> Interior Mixing --> Details
Step104: 32.2. Tide Induced Mixing
Step105: 32.3. Double Diffusion
Step106: 32.4. Shear Mixing
Step107: 33. Vertical Physics --> Interior Mixing --> Tracers
Step108: 33.2. Constant
Step109: 33.3. Profile
Step110: 33.4. Background
Step111: 34. Vertical Physics --> Interior Mixing --> Momentum
Step112: 34.2. Constant
Step113: 34.3. Profile
Step114: 34.4. Background
Step115: 35. Uplow Boundaries --> Free Surface
Step116: 35.2. Scheme
Step117: 35.3. Embeded Seaice
Step118: 36. Uplow Boundaries --> Bottom Boundary Layer
Step119: 36.2. Type Of Bbl
Step120: 36.3. Lateral Mixing Coef
Step121: 36.4. Sill Overflow
Step122: 37. Boundary Forcing
Step123: 37.2. Surface Pressure
Step124: 37.3. Momentum Flux Correction
Step125: 37.4. Tracers Flux Correction
Step126: 37.5. Wave Effects
Step127: 37.6. River Runoff Budget
Step128: 37.7. Geothermal Heating
Step129: 38. Boundary Forcing --> Momentum --> Bottom Friction
Step130: 39. Boundary Forcing --> Momentum --> Lateral Friction
Step131: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Step132: 40.2. Ocean Colour
Step133: 40.3. Extinction Depth
Step134: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Step135: 41.2. From Sea Ice
Step136: 41.3. Forced Mode Restoring
|
12,160
|
<ASSISTANT_TASK:>
Python Code:
from lp_visu import LPVisu
from scipy.optimize import linprog
import numpy as np
# Constraint matrix and right-hand side of the LP (A x <= b), plus the
# objective coefficients c for maximizing 4*x1 + 3*x2.
A = [[1.0, 0.0], [1.0, 2.0], [2.0, 1.0]]
b = [8.0, 15.0, 18.0]
c = [4.0, 3.0]
# Variable bounds: x1 >= 0 and x2 >= 0, with no upper bound.
x1_bounds = (0, None)
x2_bounds = (0, None)
# Axis ranges used only for drawing the GUI, not for the optimization.
x1_gui_bounds = (-1, 16)
x2_gui_bounds = (-1, 10)
# Create the visualization object with a "fake" initial pivot at (1, 1)
# and draw the objective-function line for the value 40.
visu = LPVisu(A, b, c,
              x1_bounds, x2_bounds,
              x1_gui_bounds, x2_gui_bounds,
              scale = 0.8, pivot_scale = 2.0,
              xk = (1, 1), obj = 40)
def lp_simple_callback(optimizeResult):
    """A simple callback function to see what is happening: print each
    step of the algorithm and use the visualization.

    Args:
        optimizeResult: an ``scipy.optimize.OptimizeResult``-like mapping
            holding the intermediate simplex state; the keys ``"nit"``
            (iteration count), ``"slack"`` and ``"x"`` (current solution)
            are read here.
    """
    print("current iteration: " + str(optimizeResult["nit"]))
    print("current slack: " + str(optimizeResult["slack"]))
    print("current solution: " + str(optimizeResult["x"]))
    print()

    # Redraw the LP with the current point as the pivot marker.
    LPVisu(A, b, c,
           x1_bounds, x2_bounds,
           x1_gui_bounds, x2_gui_bounds,
           scale = 0.8, pivot_scale = 2.0,
           xk = optimizeResult["x"])
# Maximize c.x by minimizing its negation (linprog only minimizes); the
# callback is invoked after every simplex iteration.
res = linprog(-1.0 * np.array(c), A_ub=A, b_ub=b,
              bounds=(x1_bounds, x2_bounds),
              callback=lp_simple_callback,
              options={"disp": True})
print(res)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define the problem
Step2: Define the bounds for the two variables x1 and x2, the GUI bounds, and create the visualization object (add a "fake" pivot at (1, 1) and draw the objective function for the value 40)
Step4: Define a simple callback function to be called at each step of the linprog simplex algorithm. This callback function must take an OptimizeResult object as parameter
Step5: Solve the problem using the callback function and print the result
|
12,161
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
# NBER recessions: monthly 0/1 recession indicator from FRED, used as a
# reference when interpreting the estimated regimes.
from pandas_datareader.data import DataReader
from datetime import datetime
usrec = DataReader('USREC', 'fred', start=datetime(1947, 1, 1), end=datetime(2013, 4, 1))
# Get the federal funds rate data (quarterly, 1954Q3-2010Q4), shipped with
# the statsmodels regime-switching test suite.
from statsmodels.tsa.regime_switching.tests.test_markov_regression import fedfunds
dta_fedfunds = pd.Series(fedfunds, index=pd.date_range('1954-07-01', '2010-10-01', freq='QS'))
# Plot the data
dta_fedfunds.plot(title='Federal funds rate', figsize=(12,3))
# Fit a 2-regime Markov-switching model of the federal funds rate
# (a switching mean is the default of the MarkovRegression model).
mod_fedfunds = sm.tsa.MarkovRegression(dta_fedfunds, k_regimes=2)
res_fedfunds = mod_fedfunds.fit()
res_fedfunds.summary()
# Smoothed probability of being in regime 1 (the high-rate regime).
res_fedfunds.smoothed_marginal_probabilities[1].plot(
    title='Probability of being in the high regime', figsize=(12,3));
# Expected duration (in quarters) of each regime, derived from the
# estimated transition matrix.
print(res_fedfunds.expected_durations)
# Fit the model, now including the first lag of the federal funds rate as
# an exogenous regressor (a switching autoregression of order 1); the
# first observation is dropped to align the series with its lag.
mod_fedfunds2 = sm.tsa.MarkovRegression(
    dta_fedfunds.iloc[1:], k_regimes=2, exog=dta_fedfunds.iloc[:-1])
res_fedfunds2 = mod_fedfunds2.fit()
res_fedfunds2.summary()
res_fedfunds2.smoothed_marginal_probabilities[0].plot(
    title='Probability of being in the high regime', figsize=(12,3));
print(res_fedfunds2.expected_durations)
# Get the additional data: output gap and inflation, also from the
# statsmodels test suite.
from statsmodels.tsa.regime_switching.tests.test_markov_regression import ogap, inf
dta_ogap = pd.Series(ogap, index=pd.date_range('1954-07-01', '2010-10-01', freq='QS'))
dta_inf = pd.Series(inf, index=pd.date_range('1954-07-01', '2010-10-01', freq='QS'))
# Taylor-rule regressors: lagged federal funds rate, output gap, inflation;
# drop the first four rows so all series are aligned after lagging.
exog = pd.concat((dta_fedfunds.shift(), dta_ogap, dta_inf), axis=1).iloc[4:]
# Fit the 2-regime model
mod_fedfunds3 = sm.tsa.MarkovRegression(
    dta_fedfunds.iloc[4:], k_regimes=2, exog=exog)
res_fedfunds3 = mod_fedfunds3.fit()
# Fit the 3-regime model; seed the RNG and use multiple random starting
# points (search_reps) because the 3-regime likelihood can have several
# local maxima.
np.random.seed(12345)
mod_fedfunds4 = sm.tsa.MarkovRegression(
    dta_fedfunds.iloc[4:], k_regimes=3, exog=exog)
res_fedfunds4 = mod_fedfunds4.fit(search_reps=20)
res_fedfunds3.summary()
res_fedfunds4.summary()
# Plot the smoothed probability of each of the three regimes.
fig, axes = plt.subplots(3, figsize=(10,7))
ax = axes[0]
ax.plot(res_fedfunds4.smoothed_marginal_probabilities[0])
ax.set(title='Smoothed probability of a low-interest rate regime')
ax = axes[1]
ax.plot(res_fedfunds4.smoothed_marginal_probabilities[1])
ax.set(title='Smoothed probability of a medium-interest rate regime')
ax = axes[2]
ax.plot(res_fedfunds4.smoothed_marginal_probabilities[2])
ax.set(title='Smoothed probability of a high-interest rate regime')
fig.tight_layout()
# Get the S&P 500 absolute-returns data (weekly, 2004-2014) from the
# statsmodels test suite.
from statsmodels.tsa.regime_switching.tests.test_markov_regression import areturns
dta_areturns = pd.Series(areturns, index=pd.date_range('2004-05-04', '2014-5-03', freq='W'))
# Plot the data
dta_areturns.plot(title='Absolute returns, S&P500', figsize=(12,3))
# Fit a 2-regime model with the lagged return as regressor, where the
# error variance is also allowed to switch between regimes.
mod_areturns = sm.tsa.MarkovRegression(
    dta_areturns.iloc[1:], k_regimes=2, exog=dta_areturns.iloc[:-1], switching_variance=True)
res_areturns = mod_areturns.fit()
res_areturns.summary()
res_areturns.smoothed_marginal_probabilities[0].plot(
    title='Probability of being in a low-variance regime', figsize=(12,3));
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Federal funds rate with switching intercept
Step2: From the summary output, the mean federal funds rate in the first regime (the "low regime") is estimated to be $3.7$ whereas in the "high regime" it is $9.6$. Below we plot the smoothed probabilities of being in the high regime. The model suggests that the 1980's was a time-period in which a high federal funds rate existed.
Step3: From the estimated transition matrix we can calculate the expected duration of a low regime versus a high regime.
Step4: A low regime is expected to persist for about fourteen years, whereas the high regime is expected to persist for only about five years.
Step5: There are several things to notice from the summary output
Step6: Finally, the expected durations of each regime have decreased quite a bit.
Step7: Taylor rule with 2 or 3 regimes
Step8: Due to lower information criteria, we might prefer the 3-state model, with an interpretation of low-, medium-, and high-interest rate regimes. The smoothed probabilities of each regime are plotted below.
Step9: Switching variances
Step10: The first regime is a low-variance regime and the second regime is a high-variance regime. Below we plot the probabilities of being in the low-variance regime. Between 2008 and 2012 there does not appear to be a clear indication of one regime guiding the economy.
|
12,162
|
<ASSISTANT_TASK:>
Python Code:
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# Google Cloud Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
! pip install {USER_FLAG} --upgrade google-cloud-aiplatform
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "python-docs-samples-tests" # @param {type:"string"}
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# If on Google Cloud Notebooks, then don't execute this code
if not IS_GOOGLE_CLOUD_NOTEBOOK:
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
from datetime import datetime
from google.cloud import bigquery
# Output dataset; a timestamp suffix is appended below so repeated runs of
# the notebook never collide on the dataset name.
DESTINATION_DATA_SET = "movie_predictions" # @param {type:"string"}
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
DESTINATION_DATA_SET = "{prefix}_{timestamp}".format(
    prefix=DESTINATION_DATA_SET, timestamp=TIMESTAMP
)
# Output table. Make sure that the table does NOT already exist; the BatchReadFeatureValues API cannot overwrite an existing table
DESTINATION_TABLE_NAME = "training_data" # @param {type:"string"}
# BigQuery destination URI in the "bq://project.dataset.table" form
# expected by the BatchReadFeatureValues API.
DESTINATION_PATTERN = "bq://{project}.{dataset}.{table}"
DESTINATION_TABLE_URI = DESTINATION_PATTERN.format(
    project=PROJECT_ID, dataset=DESTINATION_DATA_SET, table=DESTINATION_TABLE_NAME
)
# Create dataset
REGION = "us-central1" # @param {type:"string"}
client = bigquery.Client(project=PROJECT_ID)
dataset_id = "{}.{}".format(client.project, DESTINATION_DATA_SET)
dataset = bigquery.Dataset(dataset_id)
dataset.location = REGION
dataset = client.create_dataset(dataset)
print("Created dataset {}.{}".format(client.project, dataset.dataset_id))
# Other than project ID and featurestore ID and endpoints needs to be set
API_ENDPOINT = "us-central1-aiplatform.googleapis.com" # @param {type:"string"}
INPUT_CSV_FILE = "gs://cloud-samples-data-us-central1/vertex-ai/feature-store/datasets/movie_prediction.csv"
from google.cloud.aiplatform_v1 import (FeaturestoreOnlineServingServiceClient,
FeaturestoreServiceClient)
from google.cloud.aiplatform_v1.types import FeatureSelector, IdMatcher
from google.cloud.aiplatform_v1.types import entity_type as entity_type_pb2
from google.cloud.aiplatform_v1.types import feature as feature_pb2
from google.cloud.aiplatform_v1.types import featurestore as featurestore_pb2
from google.cloud.aiplatform_v1.types import \
featurestore_online_service as featurestore_online_service_pb2
from google.cloud.aiplatform_v1.types import \
featurestore_service as featurestore_service_pb2
from google.cloud.aiplatform_v1.types import io as io_pb2
from google.protobuf.duration_pb2 import Duration
# Create admin_client for CRUD and data_client for reading feature values.
admin_client = FeaturestoreServiceClient(client_options={"api_endpoint": API_ENDPOINT})
data_client = FeaturestoreOnlineServingServiceClient(
client_options={"api_endpoint": API_ENDPOINT}
)
# Represents featurestore resource path.
BASE_RESOURCE_PATH = admin_client.common_location_path(PROJECT_ID, REGION)
FEATURESTORE_ID = "movie_prediction"
try:
create_lro = admin_client.create_featurestore(
featurestore_service_pb2.CreateFeaturestoreRequest(
parent=BASE_RESOURCE_PATH,
featurestore_id=FEATURESTORE_ID,
featurestore=featurestore_pb2.Featurestore(
online_serving_config=featurestore_pb2.Featurestore.OnlineServingConfig(
fixed_node_count=1
),
),
)
)
# Wait for LRO to finish and get the LRO result.
print(create_lro.result())
except Exception as e:
print(e)
admin_client.get_featurestore(
name=admin_client.featurestore_path(PROJECT_ID, REGION, FEATURESTORE_ID)
)
from google.cloud.aiplatform_v1beta1.types import \
featurestore as v1beta1_featurestore_pb2
from google.cloud.aiplatform_v1beta1.types import \
featurestore_service as v1beta1_featurestore_service_pb2
create_featurestore_request = v1beta1_featurestore_service_pb2.CreateFeaturestoreRequest(
parent=BASE_RESOURCE_PATH,
featurestore_id=FEATURESTORE_ID,
featurestore=v1beta1_featurestore_pb2.Featurestore(
online_serving_config=v1beta1_featurestore_pb2.Featurestore.OnlineServingConfig(
scaling=v1beta1_featurestore_pb2.Featurestore.OnlineServingConfig.Scaling(
min_node_count=1, max_node_count=5
)
),
),
)
try:
users_entity_type_lro = admin_client.create_entity_type(
featurestore_service_pb2.CreateEntityTypeRequest(
parent=admin_client.featurestore_path(PROJECT_ID, REGION, FEATURESTORE_ID),
entity_type_id="users",
entity_type=entity_type_pb2.EntityType(
description="Users entity",
),
)
)
# Similarly, wait for EntityType creation operation.
print(users_entity_type_lro.result())
except Exception as e:
print(e)
# Create movies entity type without a monitoring configuration.
try:
movies_entity_type_lro = admin_client.create_entity_type(
featurestore_service_pb2.CreateEntityTypeRequest(
parent=admin_client.featurestore_path(PROJECT_ID, REGION, FEATURESTORE_ID),
entity_type_id="movies",
entity_type=entity_type_pb2.EntityType(description="Movies entity"),
)
)
# Similarly, wait for EntityType creation operation.
print(movies_entity_type_lro.result())
except Exception as e:
print(e)
from google.cloud.aiplatform_v1beta1 import \
FeaturestoreServiceClient as v1beta1_FeaturestoreServiceClient
from google.cloud.aiplatform_v1beta1.types import \
entity_type as v1beta1_entity_type_pb2
from google.cloud.aiplatform_v1beta1.types import \
featurestore_monitoring as v1beta1_featurestore_monitoring_pb2
from google.cloud.aiplatform_v1beta1.types import \
featurestore_service as v1beta1_featurestore_service_pb2
v1beta1_admin_client = v1beta1_FeaturestoreServiceClient(
client_options={"api_endpoint": API_ENDPOINT}
)
# Enable monitoring for users entity type.
# All Features belonging to this EntityType will by default inherit the monitoring config.
v1beta1_admin_client.update_entity_type(
v1beta1_featurestore_service_pb2.UpdateEntityTypeRequest(
entity_type=v1beta1_entity_type_pb2.EntityType(
name=admin_client.entity_type_path(
PROJECT_ID, REGION, FEATURESTORE_ID, "users"
),
monitoring_config=v1beta1_featurestore_monitoring_pb2.FeaturestoreMonitoringConfig(
snapshot_analysis=v1beta1_featurestore_monitoring_pb2.FeaturestoreMonitoringConfig.SnapshotAnalysis(
monitoring_interval=Duration(seconds=86400), # 1 day
),
),
),
)
)
# Create features for the 'users' entity.
try:
admin_client.batch_create_features(
parent=admin_client.entity_type_path(
PROJECT_ID, REGION, FEATURESTORE_ID, "users"
),
requests=[
featurestore_service_pb2.CreateFeatureRequest(
feature=feature_pb2.Feature(
value_type=feature_pb2.Feature.ValueType.INT64,
description="User age",
),
feature_id="age",
),
featurestore_service_pb2.CreateFeatureRequest(
feature=feature_pb2.Feature(
value_type=feature_pb2.Feature.ValueType.STRING,
description="User gender",
),
feature_id="gender",
),
featurestore_service_pb2.CreateFeatureRequest(
feature=feature_pb2.Feature(
value_type=feature_pb2.Feature.ValueType.STRING_ARRAY,
description="An array of genres that this user liked",
),
feature_id="liked_genres",
),
],
).result()
except Exception as e:
print(e)
# Create features for movies type.
try:
admin_client.batch_create_features(
parent=admin_client.entity_type_path(
PROJECT_ID, REGION, FEATURESTORE_ID, "movies"
),
requests=[
featurestore_service_pb2.CreateFeatureRequest(
feature=feature_pb2.Feature(
value_type=feature_pb2.Feature.ValueType.STRING,
description="The title of the movie",
),
feature_id="title",
),
featurestore_service_pb2.CreateFeatureRequest(
feature=feature_pb2.Feature(
value_type=feature_pb2.Feature.ValueType.STRING,
description="The genres of the movie",
),
feature_id="genres",
),
featurestore_service_pb2.CreateFeatureRequest(
feature=feature_pb2.Feature(
value_type=feature_pb2.Feature.ValueType.DOUBLE,
description="The average rating for the movie, range is [1.0-5.0]",
),
feature_id="average_rating",
),
],
).result()
except Exception as e:
print(e)
# Search for all features across all featurestores.
list(admin_client.search_features(location=BASE_RESOURCE_PATH))
# Search for all features with value type `DOUBLE`
list(
admin_client.search_features(
featurestore_service_pb2.SearchFeaturesRequest(
location=BASE_RESOURCE_PATH, query="value_type=DOUBLE"
)
)
)
# Filter on feature value type and keywords.
list(
admin_client.search_features(
featurestore_service_pb2.SearchFeaturesRequest(
location=BASE_RESOURCE_PATH, query="feature_id:title AND value_type=STRING"
)
)
)
import_users_request = featurestore_service_pb2.ImportFeatureValuesRequest(
entity_type=admin_client.entity_type_path(
PROJECT_ID, REGION, FEATURESTORE_ID, "users"
),
avro_source=io_pb2.AvroSource(
# Source
gcs_source=io_pb2.GcsSource(
uris=[
"gs://cloud-samples-data-us-central1/vertex-ai/feature-store/datasets/users.avro"
]
)
),
entity_id_field="user_id",
feature_specs=[
# Features
featurestore_service_pb2.ImportFeatureValuesRequest.FeatureSpec(id="age"),
featurestore_service_pb2.ImportFeatureValuesRequest.FeatureSpec(id="gender"),
featurestore_service_pb2.ImportFeatureValuesRequest.FeatureSpec(
id="liked_genres"
),
],
feature_time_field="update_time",
worker_count=1,
)
# Start to import, will take a couple of minutes
ingestion_lro = admin_client.import_feature_values(import_users_request)
# Polls for the LRO status and prints when the LRO has completed
ingestion_lro.result()
import_movie_request = featurestore_service_pb2.ImportFeatureValuesRequest(
entity_type=admin_client.entity_type_path(
PROJECT_ID, REGION, FEATURESTORE_ID, "movies"
),
avro_source=io_pb2.AvroSource(
gcs_source=io_pb2.GcsSource(
uris=[
"gs://cloud-samples-data-us-central1/vertex-ai/feature-store/datasets/movies.avro"
]
)
),
entity_id_field="movie_id",
feature_specs=[
featurestore_service_pb2.ImportFeatureValuesRequest.FeatureSpec(id="title"),
featurestore_service_pb2.ImportFeatureValuesRequest.FeatureSpec(id="genres"),
featurestore_service_pb2.ImportFeatureValuesRequest.FeatureSpec(
id="average_rating"
),
],
feature_time_field="update_time",
worker_count=1,
)
# Start to import, will take a couple of minutes
ingestion_lro = admin_client.import_feature_values(import_movie_request)
# Polls for the LRO status and prints when the LRO has completed
ingestion_lro.result()
# Fetch the following 3 features.
feature_selector = FeatureSelector(
id_matcher=IdMatcher(ids=["age", "gender", "liked_genres"])
)
data_client.read_feature_values(
featurestore_online_service_pb2.ReadFeatureValuesRequest(
# Fetch from the following feature store/entity type
entity_type=admin_client.entity_type_path(
PROJECT_ID, REGION, FEATURESTORE_ID, "users"
),
# Fetch the user features whose ID is "alice"
entity_id="alice",
feature_selector=feature_selector,
)
)
# Read the same set of features as above, but for multiple entities.
response_stream = data_client.streaming_read_feature_values(
featurestore_online_service_pb2.StreamingReadFeatureValuesRequest(
entity_type=admin_client.entity_type_path(
PROJECT_ID, REGION, FEATURESTORE_ID, "users"
),
entity_ids=["alice", "bob"],
feature_selector=feature_selector,
)
)
# Iterate and process response. Note the first one is always the header only.
for response in response_stream:
print(response)
batch_serving_request = featurestore_service_pb2.BatchReadFeatureValuesRequest(
# featurestore info
featurestore=admin_client.featurestore_path(PROJECT_ID, REGION, FEATURESTORE_ID),
# URL for the label data, i.e., Table 1.
csv_read_instances=io_pb2.CsvSource(
gcs_source=io_pb2.GcsSource(uris=[INPUT_CSV_FILE])
),
destination=featurestore_service_pb2.FeatureValueDestination(
bigquery_destination=io_pb2.BigQueryDestination(
# Output to BigQuery table created earlier
output_uri=DESTINATION_TABLE_URI
)
),
entity_type_specs=[
featurestore_service_pb2.BatchReadFeatureValuesRequest.EntityTypeSpec(
# Read the 'age', 'gender' and 'liked_genres' features from the 'users' entity
entity_type_id="users",
feature_selector=FeatureSelector(
id_matcher=IdMatcher(
ids=[
# features, use "*" if you want to select all features within this entity type
"age",
"gender",
"liked_genres",
]
)
),
),
featurestore_service_pb2.BatchReadFeatureValuesRequest.EntityTypeSpec(
# Read the 'average_rating' and 'genres' feature values of the 'movies' entity
entity_type_id="movies",
feature_selector=FeatureSelector(
id_matcher=IdMatcher(ids=["average_rating", "genres"])
),
),
],
)
# Execute the batch read
batch_serving_lro = admin_client.batch_read_feature_values(batch_serving_request)
# This long-running operation will poll until the batch read finishes.
batch_serving_lro.result()
admin_client.delete_featurestore(
request=featurestore_service_pb2.DeleteFeaturestoreRequest(
name=admin_client.featurestore_path(PROJECT_ID, REGION, FEATURESTORE_ID),
force=True,
)
).result()
client.delete_dataset(
DESTINATION_DATA_SET, delete_contents=True, not_found_ok=True
) # Make an API request.
print("Deleted dataset '{}'.".format(DESTINATION_DATA_SET))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Restart the kernel
Step2: Before you begin
Step3: Otherwise, set your project ID here.
Step4: Authenticate your Google Cloud account
Step5: Prepare for output
Step 1. Create dataset for output
Step6: Import libraries and define constants
Step7: Terminology and Concept
Step8: You can use GetFeaturestore or ListFeaturestores to check if the Featurestore was successfully created. The following example gets the details of the Featurestore.
Step9: Auto scaling is available in v1beta1 since v1.11. Below is the example for the CreateFeaturestoreRequest with auto-scaling, use it with aiplatform_v1beta1.FeaturestoreServiceClient to create Featurestore
Step10: Create Entity Type
Step11: Feature monitoring is in preview, so you need to use v1beta1 Python. The easiest way to set this for now is using console UI. For completeness, below is example to do this using v1beta1 SDK.
Step12: Create Feature
Step13: Search created features
Step14: Now, narrow down the search to features that are of type DOUBLE
Step15: Or, limit the search results to features with specific keywords in their ID and type.
Step16: Import Feature Values
Step17: Import feature values for Movies
Step18: Online serving
Step19: Read multiple entities per request
Step20: Now that you have learned how to fetch imported feature values for online serving, the next step is learning how to use imported feature values for offline use cases.
Step21: After the LRO finishes, you should be able to see the result from the BigQuery console, in the dataset created earlier.
|
12,163
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function # Python 3
from sympy import init_printing
init_printing(use_latex='mathjax',use_unicode=False) # Affichage des résultats
for a in range(9):
for a in [1,2,3,4]:
for a in 'bonjour':
for i in liste: # ligne d'en-tête
<ligne 1 du bloc d'instruction>
<ligne 2 du bloc d'instruction>
...
<ligne n du bloc d'instruction>
<ligne exécutée après la boucle>
from sympy import factor
from sympy.abc import x
factor(x**1-1)
factor(x**2-1)
factor(x**3-1)
factor(x**4-1)
factor(x**5-1)
factor(x**6-1)
factor(x**7-1)
factor(x**8-1)
factor(x**9-1)
for k in range(1,12):
from sympy import Eq
for k in range(2, 10):
a = 5
n = 5
n = 3 * n + 1
W = x + 1
sous_total = 0
sous_total = sous_total + 1
sous_total += 1
L = [134, 13614, 73467, 1451, 134, 88]
s = 0
for a in L:
s
s = 0
for a in L:
s
sum(L)
s = 'gaston'
t = ''
for lettre in s:
t
k = 1
for _ in range(10):
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Dans ce chapitre et les suivants, nous traitons de la programmation en Python. Les notes ici présentent les grandes lignes et les éléments principaux de ce sujet. Le lecteur désirant en savoir plus sera invité à consulter les chapitres 1 à 7 du livre en français de G. Swinnen Apprendre à programmer avec Python 3 [Swinnen]_, le syllabus du cours de programmation de Thierry Massart [Massart]_ ou encore les chapitre 1 à 11 du livre en anglais de Wentworth et al. How to Think Like a Computer Scientist - Learning with Python [Thinklike]_.
Step2: La boucle for
Step3: En Python, une boucle for est identifiée par une ligne d'en-tête commançant par for se terminant par un deux-points
Step4: Le bloc d'instructions est exécuté autant de fois qu'il y a d'éléments dans la liste. Le bloc d'instruction est exécuté une fois pour chaque valeur de la variable i dans la liste.
Step5: La boucle for permet répéter une action pour toutes les valeurs d'une liste. En utilisant une boucle for, l'exemple ci-haut peut se réécrire plus facilement
Step6: Pour différencier les lignes, il est possible d'afficher plus d'informations
Step7: Affectation d'une variable
Step8: La syntaxe a == 5 est réservée pour le test d'égalité.
Step9: Ligne 2 signifie obtenir la valeur courante de n, la multiplier par trois et ajouter un, et affecter la réponse à n. Donc, après avoir exécuté les deux lignes ci-dessus, n va pointer / se référer à l'entier 16.
Step10: Avant de pouvoir mettre à jour une variable, vous devez l'initialiser à une valeur de départ, habituellement avec une valeur simple
Step11: La mise à jour d'une variable en lui ajoutant 1 à celle-ci est très commune. On appelle cela un incrément de la variable; soustraire 1 est appelé un décrément.
Step12: Quelques exemples
Step13: On écrit la même chose en utilisant le signe += pour incrémenter la variable s
Step14: On vérifie que le calcul est bon
Step15: L'exemple suivant double chacune des lettres d'une chaîne de caractères
Step16: Lorsque la variable de la boucle n'est pas utilisée dans le bloc d'instruction la convention est d'utiliser la barre de soulignement (_) pour l'indiquer. Ici, on calcule les puissances du nombre 3. On remarque que l'expression d'assignation k *= 3 est équivalente à k = k * 3
|
12,164
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
data = pd.read_csv("../../Data/2014outagesJerry.csv")
data.head()
# Select input/output data
Y_tot = data['Total_outages']
X_tot = data[['Day_length_hr','Max_temp_F','Avg_Temp_F','Min_temp_F','Max_humidity_percent','Avg_humidity_percent','Min_humidity_percent','Max_visibility_mi','Avg_visibility_mi','Min_visibility_mi','Max_windspeed_mph','Avg_windspeed_mph','Max_windgust_mph','Precipitation_in','Event_fog','Event_rain','Event_snow','Event_thunderstorm','Event_Hail']]
# Initialize lists
coefs = []
trainerror = []
# Define lambda space
lambdas = np.logspace(-6,6,200)
# Define type of regressor
regr_lasso = linear_model.Lasso()
# loop over lambda (a) values (strength of regularization)
for a in lambdas:
regr_lasso.set_params(alpha=a,normalize=True,max_iter=1e6)
regr_lasso.fit(X_tot,Y_tot)
coefs.append(regr_lasso.coef_)
trainerror.append(mean_squared_error(Y_tot,regr_lasso.predict(X_tot)))
# Plot
plt.figure(figsize=(10,3))
# figure 1: Lasso Coef. and lambda
plt.subplot(121)
plt.plot(lambdas,coefs)
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('coefs')
plt.title('LASSO coefs vs $\lambda$')
# figure 2: Error and lambda
plt.subplot(122)
plt.plot(lambdas,trainerror,label='train error')
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('error')
plt.legend(loc='lower right')
plt.title('error vs $\lambda$')
plt.show()
# pick the best alpha value
regr_lasso_best_tot = linear_model.Lasso()
regr_lasso_best_tot.set_params(alpha=1e-4,normalize=True,max_iter=1e6)
regr_lasso_best_tot.fit(X_tot,Y_tot)
Y_tot_predict = regr_lasso_best_tot.predict(X_tot)
#make parity plot
plt.figure(figsize=(4,4))
plt.scatter(Y_tot,Y_tot_predict)
plt.plot([0,10],[0,10],lw=4,color='black')
plt.show()
#calculate the test and train error
print("Train error",mean_squared_error(Y_tot,Y_tot_predict))
# Returns the coefficient of determination R^2 of the prediction.
print("R^2",regr_lasso_best_tot.score(X_tot,Y_tot))
# Select input/output data
Y_eqp = data['Equipment']
X_eqp = data[['Day_length_hr','Max_temp_F','Avg_Temp_F','Min_temp_F','Max_humidity_percent','Avg_humidity_percent','Min_humidity_percent','Max_visibility_mi','Avg_visibility_mi','Min_visibility_mi','Max_windspeed_mph','Avg_windspeed_mph','Max_windgust_mph','Precipitation_in','Event_fog','Event_rain','Event_snow','Event_thunderstorm','Event_Hail']]
# Initialize lists
coefs = []
trainerror = []
# Define lambda space
lambdas = np.logspace(-6,6,200)
# Define type of regressor
regr_lasso = linear_model.Lasso()
# loop over lambda (a) values (strength of regularization)
for a in lambdas:
regr_lasso.set_params(alpha=a,normalize=True,max_iter=1e6)
regr_lasso.fit(X_eqp,Y_eqp)
coefs.append(regr_lasso.coef_)
trainerror.append(mean_squared_error(Y_eqp,regr_lasso.predict(X_eqp)))
# Plot
plt.figure(figsize=(10,3))
# figure 1: Lasso Coef. and lambda
plt.subplot(121)
plt.plot(lambdas,coefs)
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('coefs')
plt.title('LASSO coefs vs $\lambda$')
# figure 2: Error and lambda
plt.subplot(122)
plt.plot(lambdas,trainerror,label='train error')
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('error')
plt.legend(loc='lower right')
plt.title('error vs $\lambda$')
plt.show()
# pick the best alpha value
regr_lasso_best_eqp = linear_model.Lasso()
regr_lasso_best_eqp.set_params(alpha=1e-4,normalize=True,max_iter=1e6)
regr_lasso_best_eqp.fit(X_eqp,Y_eqp)
Y_eqp_predict = regr_lasso_best_eqp.predict(X_eqp)
#make parity plot
plt.figure(figsize=(4,4))
plt.scatter(Y_eqp,Y_eqp_predict)
plt.plot([0,10],[0,10],lw=4,color='black')
plt.show()
#calculate the test and train error
print("Train error",mean_squared_error(Y_eqp,Y_eqp_predict))
# Returns the coefficient of determination R^2 of the prediction.
print("R^2",regr_lasso_best_eqp.score(X_eqp,Y_eqp))
# Select input/output data
Y_tree = data['Trees']
#X_tree = data[['Max_temp_F','Max_humidity_percent','Min_visibility_mi','Max_windspeed_mph','Precipitation_in','Event_Hail']]
X_tree = data[['Day_length_hr','Max_temp_F','Avg_Temp_F','Min_temp_F','Max_humidity_percent','Avg_humidity_percent','Min_humidity_percent','Max_visibility_mi','Avg_visibility_mi','Min_visibility_mi','Max_windspeed_mph','Avg_windspeed_mph','Max_windgust_mph','Precipitation_in','Event_fog','Event_rain','Event_snow','Event_thunderstorm','Event_Hail']]
# Initialize lists
coefs = []
trainerror = []
# Define lambda space
lambdas = np.logspace(-6,6,200)
# Define type of regressor
regr_lasso = linear_model.Lasso()
# loop over lambda (a) values (strength of regularization)
for a in lambdas:
regr_lasso.set_params(alpha=a,normalize=True,max_iter=1e6)
regr_lasso.fit(X_tree,Y_tree)
coefs.append(regr_lasso.coef_)
trainerror.append(mean_squared_error(Y_tree,regr_lasso.predict(X_tree)))
# Plot
plt.figure(figsize=(10,3))
# figure 1: Lasso Coef. and lambda
plt.subplot(121)
plt.plot(lambdas,coefs)
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('coefs')
plt.title('LASSO coefs vs $\lambda$')
# figure 2: Error and lambda
plt.subplot(122)
plt.plot(lambdas,trainerror,label='train error')
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('error')
plt.legend(loc='lower right')
plt.title('error vs $\lambda$')
plt.show()
# pick the best alpha value
regr_lasso_best_tree = linear_model.Lasso()
regr_lasso_best_tree.set_params(alpha=1e-5,normalize=True,max_iter=1e6)
regr_lasso_best_tree.fit(X_tree,Y_tree)
Y_tree_predict = regr_lasso_best_tree.predict(X_tree)
#make parity plot
plt.figure(figsize=(4,4))
plt.scatter(Y_tree,Y_tree_predict)
plt.plot([0,10],[0,10],lw=4,color='black')
plt.show()
#calculate the test and train error
print("Train error",mean_squared_error(Y_tree,Y_tree_predict))
# Returns the coefficient of determination R^2 of the prediction.
print("R^2",regr_lasso_best_tree.score(X_tree,Y_tree))
# Select input/output data
Y_ani = data['Animals']
X_ani = data[['Day_length_hr','Max_temp_F','Avg_Temp_F','Min_temp_F','Max_humidity_percent','Avg_humidity_percent','Min_humidity_percent','Max_visibility_mi','Avg_visibility_mi','Min_visibility_mi','Max_windspeed_mph','Avg_windspeed_mph','Max_windgust_mph','Precipitation_in','Event_fog','Event_rain','Event_snow','Event_thunderstorm','Event_Hail']]
# Initialize lists
coefs = []
trainerror = []
# Define lambda space
lambdas = np.logspace(-6,6,200)
# Define type of regressor
regr_lasso = linear_model.Lasso()
# loop over lambda (a) values (strength of regularization)
for a in lambdas:
regr_lasso.set_params(alpha=a,normalize=True,max_iter=1e6)
regr_lasso.fit(X_ani,Y_ani)
coefs.append(regr_lasso.coef_)
trainerror.append(mean_squared_error(Y_ani,regr_lasso.predict(X_ani)))
# Plot
plt.figure(figsize=(10,3))
# figure 1: Lasso Coef. and lambda
plt.subplot(121)
plt.plot(lambdas,coefs)
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('coefs')
plt.title('LASSO coefs vs $\lambda$')
# figure 2: Error and lambda
plt.subplot(122)
plt.plot(lambdas,trainerror,label='train error')
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('error')
plt.legend(loc='lower right')
plt.title('error vs $\lambda$')
plt.show()
# pick the best alpha value
regr_lasso_best_ani = linear_model.Lasso()
regr_lasso_best_ani.set_params(alpha=1e-4,normalize=True,max_iter=1e6)
regr_lasso_best_ani.fit(X_ani,Y_ani)
Y_ani_predict = regr_lasso_best_ani.predict(X_ani)
#make parity plot
plt.figure(figsize=(4,4))
plt.scatter(Y_ani,Y_ani_predict)
plt.plot([0,10],[0,10],lw=4,color='black')
plt.show()
#calculate the test and train error
print("Train error",mean_squared_error(Y_ani,Y_ani_predict))
# Returns the coefficient of determination R^2 of the prediction.
print("R^2",regr_lasso_best_ani.score(X_ani,Y_ani))
# Select input/output data
Y_lightening = data['Lightning']
X_lightening = data[['Day_length_hr','Max_temp_F','Avg_Temp_F','Min_temp_F','Max_humidity_percent','Avg_humidity_percent','Min_humidity_percent','Max_visibility_mi','Avg_visibility_mi','Min_visibility_mi','Max_windspeed_mph','Avg_windspeed_mph','Max_windgust_mph','Precipitation_in','Event_fog','Event_rain','Event_snow','Event_thunderstorm','Event_Hail']]
# Initialize lists
coefs = []
trainerror = []
# Define lambda space
lambdas = np.logspace(-6,6,200)
# Define type of regressor
regr_lasso = linear_model.Lasso()
# loop over lambda (a) values (strength of regularization)
for a in lambdas:
regr_lasso.set_params(alpha=a,normalize=True,max_iter=1e6)
regr_lasso.fit(X_lightening,Y_lightening)
coefs.append(regr_lasso.coef_)
trainerror.append(mean_squared_error(Y_lightening,regr_lasso.predict(X_lightening)))
# Plot
plt.figure(figsize=(10,3))
# figure 1: Lasso Coef. and lambda
plt.subplot(121)
plt.plot(lambdas,coefs)
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('coefs')
plt.title('LASSO coefs vs $\lambda$')
# figure 2: Error and lambda
plt.subplot(122)
plt.plot(lambdas,trainerror,label='train error')
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('error')
plt.legend(loc='lower right')
plt.title('error vs $\lambda$')
plt.show()
# pick the best alpha value
regr_lasso_best_lightening = linear_model.Lasso()
regr_lasso_best_lightening.set_params(alpha=1e-5,normalize=True,max_iter=1e6)
regr_lasso_best_lightening.fit(X_lightening,Y_lightening)
Y_lightening_predict = regr_lasso_best_lightening.predict(X_lightening)
#make parity plot
plt.figure(figsize=(4,4))
plt.scatter(Y_lightening,Y_lightening_predict)
plt.plot([0,10],[0,10],lw=4,color='black')
plt.show()
#calculate the test and train error
print("Train error",mean_squared_error(Y_lightening,Y_lightening_predict))
# Returns the coefficient of determination R^2 of the prediction.
print("R^2",regr_lasso_best_lightening.score(X_lightening,Y_lightening))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Total Outages
Step2: Equipment-caused Outages
Step3: Trees-caused Outages
Step4: Animals-caused Outages
Step5: Lightning-caused Outages
|
12,165
|
<ASSISTANT_TASK:>
Python Code:
def pretty_print_review_and_label(i):
print(labels[i] + "\t: " + reviews[i][:70] + "...")
g = open('reviews.txt','r') # What we know!
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()
g = open('labels.txt','r') # What we WANT to know!
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()
len(reviews)
reviews[1]
labels[1]
print("labels.txt \t : \t reviews.txt\n")
pretty_print_review_and_label(2137)
pretty_print_review_and_label(12816)
pretty_print_review_and_label(6267)
pretty_print_review_and_label(21934)
pretty_print_review_and_label(5297)
pretty_print_review_and_label(4998)
from collections import Counter
import numpy as np
c = Counter()
for review in reviews:
for word in str(review).split():
c[word] += 1
common_words = c.most_common(24)
common_words
for word in common_words:
del c[word[0]]
c.most_common(10)
negative_words = Counter()
positive_words = Counter()
total_words = Counter()
for i, review in enumerate(reviews):
for word in str(review).split():
if labels[i] == "NEGATIVE":
negative_words[word] += 1
else:
positive_words[word] += 1
total_words[word] += 1
pos_neg_ratios = Counter()
neg_pos_ratios = Counter()
for word, cnt in total_words.most_common():
if cnt > 500:
pos_neg_ratios[word] += positive_words[word] / (negative_words[word] + 1.0)
neg_pos_ratios[word] += negative_words[word] / (positive_words[word] + 1.0)
pos_neg_ratios.most_common(20)
neg_pos_ratios.most_common(20)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lesson
Step2: Counting all the words
Step3: the most common words have no predictive power
Step4: Hmmm.. it would be more useful to have two counters, one for negative reviews and the other for positive ones.
|
12,166
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.insert(0, '..')
import time
import matplotlib.pyplot as plt
%matplotlib notebook
import numpy as np
import scipy.stats
from Configuration import Configuration
from NeuralTract import NeuralTract
conf = Configuration('confNeuralTractSpikes.rmto')
t = np.arange(0.0, conf.simDuration_ms, conf.timeStep_ms)
pools = dict()
pools[0] = NeuralTract(conf, 'CMExt')
tic = time.time()
for i in xrange(0,len(t)-1):
pools[0].atualizePool(t[i], 1000/12.0, 10)
toc = time.time()
print str(toc - tic) + ' seconds'
pools[0].listSpikes()
plt.figure()
plt.plot(pools[0].poolTerminalSpikes[:, 0],
pools[0].poolTerminalSpikes[:, 1]+1, '.')
plt.xlabel('t (ms)')
plt.ylabel('Descending Command index')
plt.figure()
plt.plot(pools[0].poolTerminalSpikes[pools[0].poolTerminalSpikes[:, 0]>9000, 0],
pools[0].poolTerminalSpikes[pools[0].poolTerminalSpikes[:, 0]>9000, 1]+1, '.')
plt.xlabel('t (ms)')
plt.ylabel('Descending Command index')
ISI = np.array([])
for i in xrange(0,len(pools[0].unit)):
ISI = np.append(ISI, np.diff(np.reshape(np.array(pools[0].unit[i].terminalSpikeTrain), (-1,2))[:,0]))
plt.figure()
plt.hist(ISI)
plt.xlabel('ISI (ms)')
plt.ylabel('Counts')
SD = np.std(ISI)
M = np.mean(ISI)
SK = scipy.stats.skew(ISI)
CV = SD / M
print 'ISI Mean = ' + str(M) + ' ms'
print 'ISI Standard deviation = ' + str(SD) + ' ms'
print 'ISI CV = ' + str(CV)
M_FR = 1000.0 / M
SD_FR = np.sqrt((SD**2) * 1000 / (M**3) + 1/6.0 + (SD**4) / (2*M**4) - SK/(3*M**3))
print 'Firing rate mean = ' + str(M_FR) + ' Hz'
print 'Firing rate standard deviation = ' + str(SD_FR) + ' Hz'
CV_FR = SD_FR / M_FR
print 'CV of Firing rate = ' + str(CV_FR)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The spike times of all descending commands along the 10000 ms of simulation is shown in Fig. \ref{fig
Step2: The spike times of all descending commands during the last 1000 ms of the simulation is shown in Fig. \ref{fig
Step3: The histogram of the interspike intevals of all the descending commands is shown in Fig. \ref{fig
Step4: Below different statistics of the interspike intervals and firing rate are obtained.
|
12,167
|
<ASSISTANT_TASK:>
Python Code:
import os
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
events = mne.find_events(raw, stim_channel='STI 014')
# we'll skip the "face" and "buttonpress" conditions, to save memory:
event_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4}
epochs = mne.Epochs(raw, events, tmin=-0.3, tmax=0.7, event_id=event_dict,
preload=True)
evoked = epochs['auditory/left'].average()
del raw # reduce memory usage
print(f'Epochs baseline: {epochs.baseline}')
print(f'Evoked baseline: {evoked.baseline}')
evoked.plot()
print(evoked.data[:2, :3]) # first 2 channels, first 3 timepoints
evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)
print(evoked_eeg.ch_names)
new_order = ['EEG 002', 'MEG 2521', 'EEG 003']
evoked_subset = evoked.copy().reorder_channels(new_order)
print(evoked_subset.ch_names)
sample_data_evk_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis-ave.fif')
evokeds_list = mne.read_evokeds(sample_data_evk_file, verbose=False)
print(evokeds_list)
print(type(evokeds_list))
for evok in evokeds_list:
print(evok.comment)
right_vis = mne.read_evokeds(sample_data_evk_file, condition='Right visual')
print(right_vis)
print(type(right_vis))
evokeds_list[0].plot(picks='eeg')
# Original baseline (none set).
print(f'Baseline after loading: {evokeds_list[0].baseline}')
# Apply a custom baseline correction.
evokeds_list[0].apply_baseline((None, 0))
print(f'Baseline after calling apply_baseline(): {evokeds_list[0].baseline}')
# Visualize the evoked response.
evokeds_list[0].plot(picks='eeg')
left_right_aud = epochs['auditory'].average()
print(left_right_aud)
left_aud = epochs['auditory/left'].average()
right_aud = epochs['auditory/right'].average()
print([evok.nave for evok in (left_aud, right_aud)])
left_right_aud = mne.combine_evoked([left_aud, right_aud], weights='nave')
assert left_right_aud.nave == left_aud.nave + right_aud.nave
for ix, trial in enumerate(epochs[:3].iter_evoked()):
channel, latency, value = trial.get_peak(ch_type='eeg',
return_amplitude=True)
latency = int(round(latency * 1e3)) # convert to milliseconds
value = int(round(value * 1e6)) # convert to µV
print('Trial {}: peak of {} µV at {} ms in channel {}'
.format(ix, value, latency, channel))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating Evoked objects from Epochs
Step2: You may have noticed that MNE informed us that "baseline correction" has been
Step3: Basic visualization of Evoked objects
Step4: Like the plot() methods for
Step5: To select based on time in seconds, the
Step6: Similarities among the core data structures
Step7: Notice that
Step8: If you want to load only some of the conditions present in a .fif file,
Step9: Above, when we created an
Step10: This can be remedied by either passing a baseline parameter to
Step11: Notice that
Step12: This approach will weight each epoch equally and create a single
Step13: However, this may not always be the case; if for statistical reasons it is
Step14: Note that the nave attribute of the resulting ~mne.Evoked object will
|
12,168
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
url = 'data/bank-additional.csv'
bank = pd.read_csv(url, sep=';')
bank.head()
# list all columns (for reference)
bank.columns
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 2
|
12,169
|
<ASSISTANT_TASK:>
Python Code:
df['txcnt_second'] = df['tx_count'].values / df['blockTime'].values
df['avg_gasUsed_t_perblock'] = df.groupby('block_id')['gasUsed_t'].transform('mean')
df['avg_price_perblock'] = df.groupby('block_id')['price_gwei'].transform('mean')
def rolling_avg(window_size):
price = df[['block_id', 'avg_price_perblock']].drop_duplicates().sort_values(
'block_id', ascending=True)
gasUsed_t = df[['block_id', 'avg_gasUsed_t_perblock']].drop_duplicates().sort_values(
'block_id', ascending=True)
txcnt_second = df[['block_id', 'txcnt_second']].drop_duplicates().sort_values(
'block_id', ascending=True)
tx_count = df[['block_id', 'tx_count']].drop_duplicates().sort_values(
'block_id', ascending=True)
gasUsed_b = df[['block_id', 'gasUsed_b']].drop_duplicates().sort_values(
'block_id', ascending=True)
uncle_count = df[['block_id', 'uncle_count']].drop_duplicates().sort_values(
'block_id', ascending=True)
difficulty = df[['block_id', 'difficulty']].drop_duplicates().sort_values(
'block_id', ascending=True)
blocktime = df[['block_id', 'blockTime']].drop_duplicates().sort_values(
'block_id', ascending=True)
# create new pandas dataframe with average values
rolling_avg = pd.DataFrame()
# calculate rolling averages
rolling_avg['avg_blocktime'] = blocktime['blockTime'].rolling(window=window_size).mean()
rolling_avg['avg_gasUsed_b'] = gasUsed_b['gasUsed_b'].rolling(window=window_size).mean()
rolling_avg['avg_tx_count'] = tx_count['tx_count'].rolling(window=window_size).mean()
rolling_avg['avg_uncle_count'] = uncle_count['uncle_count'].rolling(window=window_size).mean()
rolling_avg['avg_difficulty'] = difficulty['difficulty'].rolling(window=window_size).mean()
rolling_avg['avg_txcnt_second'] = txcnt_second['txcnt_second'].rolling(window=window_size).mean()
rolling_avg['avg_gasUsed_t'] = gasUsed_t['avg_gasUsed_t_perblock'].rolling(window=window_size).mean()
rolling_avg['avg_price'] = price['avg_price_perblock'].rolling(window=window_size).mean()
# insert blockids to merge on
rolling_avg['blockids'] = df['block_id'].drop_duplicates().sort_values(ascending=True)
return rolling_avg
num_blocks = [6, 60]
for num in num_blocks:
df_rolling_avg = rolling_avg(num)
df_rolling_avg.to_csv('./../data/block_avg_{}.csv'.format(num))
df_rolling_avg_6 = rolling_avg(6)
df_rolling_avg_60 = rolling_avg(60)
merged1 = pd.merge(df, df_rolling_avg_6, left_on='block_id', right_on='blockids')
merged2 = pd.merge(merged1, df_rolling_avg_60, left_on='block_id', right_on='blockids', suffixes=('_6', '_60'))
merged2.columns
for col in merged2.columns:
print(col, merged2[col].isnull().sum())
merged2.dropna(inplace=True)
merged2['mv'] = merged2.gweiShare / merged2.gasShare
merged2['mv'].isnull().sum()
merged2['mv'].describe()
alpha = .25
mu= merged2.mv.quantile(alpha)
merged2.mv.apply(np.log10).hist(bins=100)
plt.xlim([-2,2])
ylims=plt.gca().get_ylim()
plt.vlines(np.log10(mu), ylims[0], ylims[1], 'r' )
merged2.mv.hist(bins=np.arange(0,10,.20))
ylims=plt.gca().get_ylim()
plt.vlines(mu, ylims[0], ylims[1], 'r' )
merged2.mv.hist(bins=np.arange(0,10,.20), color = 'k', alpha=0.5, histtype='stepfilled',
label='Miner Values')
ylims=plt.gca().get_ylim()
plt.vlines(mu, ylims[0], ylims[1], 'r', linestyle='--')
plt.title('Distribution of miner values', fontsize=18)
plt.legend()
plt.tight_layout()
plt.savefig('./../images/mv_dist.png', dpi=300)
mu
merged2['p_label'] = mu* (merged2.gweiPaid_b / merged2.gasUsed_b)
merged2['p_label'].hist(bins=np.arange(0,50,2), color = 'b', alpha=0.7, histtype='stepfilled',
label='New Label')
merged2['price_gwei'].hist(bins=np.arange(0,50,.5), color = 'r', alpha=0.7,
histtype='stepfilled', label='Price')
plt.title('Constructed Label', fontsize=18)
plt.legend()
plt.tight_layout()
merged2['p_label2'] = mu*merged2.gweiPaid_b/(merged2.gasUsed_b+merged2.gasUsed_t*(1-mu))
merged2.p_label2.describe()
merged2['p_label2'].hist(bins=np.arange(0,50,2), color = 'b', alpha=0.7, histtype='stepfilled',
label='New Label')
merged2['price_gwei'].hist(bins=np.arange(0,50,.5), color = 'r', alpha=0.7, histtype='stepfilled',
label='Price')
plt.title('Constructed Label', fontsize=16)
plt.legend()
plt.tight_layout()
plt.savefig('./../images/label.png', dpi=300)
merged2.columns
# select candidate features for modeling
sel_cols = ['gasLimit_t',
'gasUsed_t',
'newContract',
'blockTime',
'difficulty',
'gasLimit_b',
'gasUsed_b',
'reward',
'size',
'type',
'totalFee',
'amount_gwei',
'gasShare',
'gweiPaid',
'gweiPaid_b',
'gweiShare',
'free_t',
'day',
'hour',
'dayofweek',
'txcnt_second',
'avg_blocktime_6',
'avg_gasUsed_b_6',
'avg_tx_count_6',
'avg_uncle_count_6',
'avg_difficulty_6',
'avg_txcnt_second_6',
'avg_gasUsed_t_6',
'avg_price_6',
'avg_blocktime_60',
'avg_gasUsed_b_60',
'avg_tx_count_60',
'avg_uncle_count_60',
'avg_difficulty_60',
'avg_txcnt_second_60',
'avg_gasUsed_t_60',
'avg_price_60',
'mv']
features = merged2[sel_cols]
features.to_csv('./../data/training.csv')
labels = merged2['p_label2']
labels.to_csv('./../data/labels.csv')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Merge data with new columns
Step2: Create a label
Step3: Compute the summary statistic mu
Step4: Compute the label, p, given mu
Step5: Look our method smoothed the prices out!
|
12,170
|
<ASSISTANT_TASK:>
Python Code:
import gensim
import pandas as pd
import smart_open
import random
# read data
dataframe = pd.read_csv('movie_plots.csv')
dataframe
def read_corpus(documents):
for i, plot in enumerate(documents):
yield gensim.models.doc2vec.TaggedDocument(gensim.utils.simple_preprocess(plot, max_len=30), [i])
train_corpus = list(read_corpus(dataframe.Plots))
train_corpus[:2]
model = gensim.models.doc2vec.Doc2Vec(size=50, min_count=2, iter=55)
model.build_vocab(train_corpus)
model.train(train_corpus, total_examples=model.corpus_count, epochs=model.iter)
model.save_word2vec_format('doc_tensor.w2v', doctag_vec=True, word_vec=False)
%run ../../gensim/scripts/word2vec2tensor.py -i doc_tensor.w2v -o movie_plot
with open('movie_plot_metadata.tsv','w') as w:
w.write('Titles\tGenres\n')
for i,j in zip(dataframe.Titles, dataframe.Genres):
w.write("%s\t%s\n" % (i,j))
import pandas as pd
import re
from gensim.parsing.preprocessing import remove_stopwords, strip_punctuation
from gensim.models import ldamodel
from gensim.corpora.dictionary import Dictionary
# read data
dataframe = pd.read_csv('movie_plots.csv')
# remove stopwords and punctuations
def preprocess(row):
return strip_punctuation(remove_stopwords(row.lower()))
dataframe['Plots'] = dataframe['Plots'].apply(preprocess)
# Convert data to required input format by LDA
texts = []
for line in dataframe.Plots:
lowered = line.lower()
words = re.findall(r'\w+', lowered, flags = re.UNICODE | re.LOCALE)
texts.append(words)
# Create a dictionary representation of the documents.
dictionary = Dictionary(texts)
# Filter out words that occur less than 2 documents, or more than 30% of the documents.
dictionary.filter_extremes(no_below=2, no_above=0.3)
# Bag-of-words representation of the documents.
corpus = [dictionary.doc2bow(text) for text in texts]
# Set training parameters.
num_topics = 10
chunksize = 2000
passes = 50
iterations = 200
eval_every = None
# Train model
model = ldamodel.LdaModel(corpus=corpus, id2word=dictionary, chunksize=chunksize, alpha='auto', eta='auto', iterations=iterations, num_topics=num_topics, passes=passes, eval_every=eval_every)
# Get document topics
all_topics = model.get_document_topics(corpus, minimum_probability=0)
all_topics[0]
# create file for tensors
with open('doc_lda_tensor.tsv','w') as w:
for doc_topics in all_topics:
for topics in doc_topics:
w.write(str(topics[1])+ "\t")
w.write("\n")
# create file for metadata
with open('doc_lda_metadata.tsv','w') as w:
w.write('Titles\tGenres\n')
for j, k in zip(dataframe.Titles, dataframe.Genres):
w.write("%s\t%s\n" % (j, k))
tensors = []
for doc_topics in all_topics:
doc_tensor = []
for topic in doc_topics:
if round(topic[1], 3) > 0:
doc_tensor.append((topic[0], float(round(topic[1], 3))))
# sort topics according to highest probabilities
doc_tensor = sorted(doc_tensor, key=lambda x: x[1], reverse=True)
# store vectors to add in metadata file
tensors.append(doc_tensor[:5])
# overwrite metadata file
i=0
with open('doc_lda_metadata.tsv','w') as w:
w.write('Titles\tGenres\n')
for j,k in zip(dataframe.Titles, dataframe.Genres):
w.write("%s\t%s\n" % (''.join((str(j), str(tensors[i]))),k))
i+=1
model.show_topic(topicid=0, topn=15)
import pyLDAvis.gensim
viz = pyLDAvis.gensim.prepare(model, corpus, dictionary)
pyLDAvis.display(viz)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Visualizing Doc2Vec
Step2: Let's take a look at the training corpus.
Step3: Training the Doc2Vec Model
Step4: Now, we'll save the document embedding vectors per doctag.
Step5: Prepare the Input files for Tensorboard
Step6: The script above generates two files, movie_plot_tensor.tsv which contain the embedding vectors and movie_plot_metadata.tsv containing doctags. But, these doctags are simply the unique index values and hence are not really useful to interpret what the document was while visualizing. So, we will overwrite movie_plot_metadata.tsv to have a custom metadata file with two columns. The first column will be for the movie titles and the second for their corresponding genres.
Step7: Now you can go to http
Step8: Train LDA Model
Step9: You can refer to this notebook also before training the LDA model. It contains tips and suggestions for pre-processing the text data, and how to train the LDA model to get good results.
Step10: The above output shows the topic distribution of first document in the corpus as a list of (topic_id, topic_probability).
Step11: Now you can go to http
Step12: Next, we upload the previous tensor file "doc_lda_tensor.tsv" and this new metadata file to http
Step13: You can even use pyLDAvis to deduce topics more efficiently. It provides a deeper inspection of the terms highly associated with each individual topic. For this, it uses a measure called relevance of a term to a topic that allows users to flexibly rank terms best suited for a meaningful topic interpretation. It's weight parameter called λ can be adjusted to display useful terms which could help in differentiating topics efficiently.
|
12,171
|
<ASSISTANT_TASK:>
Python Code:
import os
%%bash
TFX_PKG="tfx==0.22.0"
KFP_PKG="kfp==0.5.1"
pip freeze | grep $TFX_PKG || pip install -Uq $TFX_PKG
pip freeze | grep $KFP_PKG || pip install -Uq $KFP_PKG
PATH=%env PATH
%env PATH={PATH}:/home/jupyter/.local/bin
%%bash
LOCAL_BIN="/home/jupyter/.local/bin"
SKAFFOLD_URI="https://storage.googleapis.com/skaffold/releases/latest/skaffold-linux-amd64"
test -d $LOCAL_BIN || mkdir -p $LOCAL_BIN
which skaffold || (
curl -Lo skaffold $SKAFFOLD_URI &&
chmod +x skaffold &&
mv skaffold $LOCAL_BIN
)
!which skaffold
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
GOOGLE_CLOUD_PROJECT=shell_output[0]
%env GOOGLE_CLOUD_PROJECT={GOOGLE_CLOUD_PROJECT}
ENDPOINT = # Enter your ENDPOINT here.
# Docker image name for the pipeline image.
CUSTOM_TFX_IMAGE = 'gcr.io/' + GOOGLE_CLOUD_PROJECT + '/tfx-pipeline'
CUSTOM_TFX_IMAGE
PIPELINE_NAME = "guided_project_1"
PROJECT_DIR = os.path.join(os.path.expanduser("."), PIPELINE_NAME)
PROJECT_DIR
!tfx template copy \
--pipeline-name={PIPELINE_NAME} \
--destination-path={PROJECT_DIR} \
--model=taxi
%cd {PROJECT_DIR}
!python -m models.features_test
!python -m models.keras.model_test
!tail -26 models/features_test.py
GCS_BUCKET_NAME = GOOGLE_CLOUD_PROJECT + '-kubeflowpipelines-default'
GCS_BUCKET_NAME
!gsutil mb gs://{GCS_BUCKET_NAME}
!gsutil cp data/data.csv gs://{GCS_BUCKET_NAME}/tfx-template/data/data.csv
!tfx pipeline create \
--pipeline-path=kubeflow_dag_runner.py \
--endpoint={ENDPOINT} \
--build-target-image={CUSTOM_TFX_IMAGE}
!tfx run create --pipeline-name={PIPELINE_NAME} --endpoint={ENDPOINT}
# Update the pipeline
!tfx pipeline update \
--pipeline-path=kubeflow_dag_runner.py \
--endpoint={ENDPOINT}
# You can run the pipeline the same way.
!tfx run create --pipeline-name {PIPELINE_NAME} --endpoint={ENDPOINT}
print('https://' + ENDPOINT)
!tfx pipeline update \
--pipeline-path=kubeflow_dag_runner.py \
--endpoint={ENDPOINT}
print("https://" + ENDPOINT)
!tfx run create --pipeline-name {PIPELINE_NAME} --endpoint={ENDPOINT}
!tfx pipeline update \
--pipeline-path=kubeflow_dag_runner.py \
--endpoint={ENDPOINT}
!tfx run create --pipeline-name {PIPELINE_NAME} --endpoint={ENDPOINT}
!tfx pipeline update \
--pipeline-path=kubeflow_dag_runner.py \
--endpoint={ENDPOINT}
!tfx run create --pipeline-name {PIPELINE_NAME} --endpoint={ENDPOINT}
!tfx pipeline update \
--pipeline-path=kubeflow_dag_runner.py \
--endpoint={ENDPOINT}
!tfx run create --pipeline-name {PIPELINE_NAME} --endpoint={ENDPOINT}
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1. Environment setup
Step2: You may need to restart the kernel at this point.
Step3: Modify the PATH environment variable so that skaffold is available
Step4: Environment variable setup
Step5: We also need to access your KFP cluster. You can access it in your Google Cloud Console under "AI Platform > Pipeline" menu.
Step6: Set the image name as tfx-pipeline under the current GCP project
Step7: Step 2. Copy the predefined template to your project directory.
Step8: TFX includes the taxi template with the TFX python package.
Step9: Step 3. Browse your copied source files
Step10: Let's quickly go over the structure of a test file to test Tensorflow code
Step11: First of all, notice that you start by importing the code you want to test by importing the corresponding module. Here we want to test the code in features.py so we import the module features
Step12: Let's upload our sample data to GCS bucket so that we can use it in our pipeline later.
Step13: Let's create a TFX pipeline using the tfx pipeline create command.
Step14: While creating a pipeline, Dockerfile and build.yaml will be generated to build a Docker image.
Step15: Or, you can also run the pipeline in the KFP Dashboard. The new execution run will be listed
Step 5. Add components for data validation.
Step16: Check pipeline outputs
Step17: Step 6. Add components for training
Step18: When this execution run finishes successfully, you have now created and run your first TFX pipeline in AI Platform Pipelines!
Step 7. Try BigQueryExampleGen
Step19: Step 8. Try Dataflow with KFP
Step20: You can find your Dataflow jobs in Dataflow in Cloud Console.
Step 9. Try Cloud AI Platform Training and Prediction with KFP
|
12,172
|
<ASSISTANT_TASK:>
Python Code:
import tempfile
import girder_client
import numpy as np
from histomicstk.annotations_and_masks.annotation_and_mask_utils import (
delete_annotations_in_slide)
from histomicstk.saliency.cellularity_detection_superpixels import (
Cellularity_detector_superpixels)
import matplotlib.pylab as plt
from matplotlib.colors import ListedColormap
%matplotlib inline
# color map
vals = np.random.rand(256,3)
vals[0, ...] = [0.9, 0.9, 0.9]
cMap = ListedColormap(1 - vals)
APIURL = 'http://candygram.neurology.emory.edu:8080/api/v1/'
SAMPLE_SLIDE_ID = "5d586d76bd4404c6b1f286ae"
# SAMPLE_SLIDE_ID = "5d8c296cbd4404c6b1fa5572"
gc = girder_client.GirderClient(apiUrl=APIURL)
gc.authenticate(apiKey='kri19nTIGOkWH01TbzRqfohaaDWb6kPecRqGmemb')
# This is where the run logs will be saved
logging_savepath = tempfile.mkdtemp()
# color normalization values from TCGA-A2-A3XS-DX1
cnorm_thumbnail = {
'mu': np.array([9.24496373, -0.00966569, 0.01757247]),
'sigma': np.array([0.35686209, 0.02566772, 0.02500282]),
}
# from the ROI in Amgad et al, 2019
cnorm_main = {
'mu': np.array([8.74108109, -0.12440419, 0.0444982]),
'sigma': np.array([0.6135447, 0.10989545, 0.0286032]),
}
# deleting existing annotations in target slide (if any)
delete_annotations_in_slide(gc, SAMPLE_SLIDE_ID)
print(Cellularity_detector_superpixels.__init__.__doc__)
# init cellularity detector
cds = Cellularity_detector_superpixels(
gc, slide_id=SAMPLE_SLIDE_ID,
MAG=3.0, compactness=0.1, spixel_size_baseMag=256 * 256,
max_cellularity=40,
visualize_spixels=True, visualize_contiguous=True,
get_tissue_mask_kwargs={
'deconvolve_first': False,
'n_thresholding_steps': 2,
'sigma': 1.5,
'min_size': 500, },
verbose=2, monitorPrefix='test',
logging_savepath=logging_savepath)
# set color normalization for thumbnail
# cds.set_color_normalization_values(
# mu=cnorm_thumbnail['mu'],
# sigma=cnorm_thumbnail['sigma'], what='thumbnail')
# set color normalization values for main tissue
cds.set_color_normalization_values(
mu=cnorm_main['mu'], sigma=cnorm_main['sigma'], what='main')
print(cds.run.__doc__)
tissue_pieces = cds.run()
plt.imshow(tissue_pieces[0].tissue_mask, cmap=cMap)
plt.imshow(tissue_pieces[0].spixel_mask, cmap=cMap)
tissue_pieces[0].fdata.head()
tissue_pieces[0].cluster_props
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prepwork
Step2: Initialize the cellularity detector
Step3: In this example, and as the default behavior, we use a handful of informative intensity features extracted from the hematoxylin channel after color deconvolution to fit a gaussian mixture model. Empirically (on a few test slides), this seems to give better results than using the full suite of intensity and texture features available. Feel free to experiment with this and find the optimum combination of features for your application.
Step4: Set the color normalization values
Step5: Run the detector
Step6: Check the results
|
12,173
|
<ASSISTANT_TASK:>
Python Code:
%%html
%matplotlib inline
import matplotlib
#import pygsp #Uncomment if you have pygsp installed.
import numpy as np
import matplotlib.pylab as plt
import networkx as nx
import pandas as pd
plt.rcParams['figure.figsize'] = (6, 6)
%%html
## Create a graph.
N = 100 # number of nodes.
G = pygsp.graphs.Sensor(N)
## Compute the Fourier basis.
G.compute_fourier_basis()
## Create a smooth signal with noise.
## The second Eigenvector of the Laplacian matrix, often called the Fiedler vector,
# can be considered as a smooth graph signal.
x = G.U[:, 1]
y = x + np.random.normal(scale=1/np.sqrt(N), size=N)
## Select a filter.
filter = pygsp.filters.Expwin(G, 0.1)
## Filter the noise.
s = filter.analysis(y)
## Display the original signal.
G.plot_signal(x, default_qtg=False, plot_name='original_signal',savefig=True)
## Display the noisy signal.
G.plot_signal(y, default_qtg=False, plot_name='noisy_signal',savefig=True)
## Display the filtered signal.
G.plot_signal(s, default_qtg=False, plot_name='filtered_signal',savefig=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, simple filtering on a noisy graph signal will be demonstrated. This is based on an example in an article by Nathanael Perraudin et al. (2016) titled "GSPBOX
|
12,174
|
<ASSISTANT_TASK:>
Python Code:
xvals = np.linspace(0, 20, 1000)
mu1 = 5
mu2 = 15
fig, ax = plt.subplots()
ax.plot(xvals, stats.norm.pdf(xvals, loc=mu1, scale=1), label='Model 1')
ax.plot(xvals, stats.norm.pdf(xvals, loc=mu2, scale=1), label='Model 2')
ax.set_xticks([mu1, mu2])
ax.set_yticks([])
ax.set_xticklabels(['$\mu_1$', '$\mu_2$'], size=22)
ax.legend(loc='best', frameon=False)
plt.show()
xvals = np.linspace(0, 20, 1000)
mu1 = 5
mu2 = 15
fig, ax = plt.subplots()
ax.plot(xvals, stats.norm.pdf(xvals, loc=mu1, scale=1), label='Model 1')
ax.plot(xvals, stats.norm.pdf(xvals, loc=mu2, scale=1), label='Model 2')
ax.set_xticks([mu1, mu2])
ax.set_yticks([])
ax.set_xticklabels(['$x_0$', '$n\sigma$'], size=22)
ax.axvline(mu1, color='k')
ax.legend(loc='best', frameon=False)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The log-odds ratio, conditioned on the data, between these two models can be written as
|
12,175
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
np.random.seed(1337)
import datetime
from IPython.display import SVG
from keras.datasets import mnist
from keras import activations
from keras.layers import Dense, Input, concatenate, Conv1D, Conv2D, Dropout, MaxPooling1D, MaxPooling2D
from keras.layers import Dense, Flatten
from keras.models import Sequential, load_model
from keras.utils import plot_model
from keras.utils.vis_utils import model_to_dot
from matplotlib import gridspec
from matplotlib.ticker import NullFormatter, NullLocator, MultipleLocator
from scipy import stats
from sklearn.metrics import auc, roc_curve
from sklearn.model_selection import train_test_split
from vis.utils import utils
from vis.visualization import visualize_activation
from vis.visualization import visualize_saliency
import datetime
import keras
import matplotlib.pylab as plt
import pandas as pd
import seaborn as sns
import talos as ta
sns.set_palette('husl')
sns.set(style='ticks')
%matplotlib inline
plt.rcParams["figure.figsize"] = [17, 17]
num_classes = 2
model = Sequential()
model.add(Conv1D(32, (5), strides = (1), input_shape = (18, 1), activation = 'tanh'))
model.add(MaxPooling1D(pool_size = (2), strides = (2)))
model.add(Conv1D(32, (3), strides = (1), input_shape = (18, 1), activation = 'tanh'))
model.add(Flatten())
model.add(Dense(300, activation = 'tanh'))
model.add(Dropout(rate = 0.5))
model.add(Dense(300, activation = 'tanh'))
model.add(Dropout(rate = 0.5))
model.add(Dense(num_classes, activation = 'softmax', name = "preds"))
model.compile(loss="categorical_crossentropy", optimizer="nadam", metrics=['accuracy'])
checkpoint = keras.callbacks.ModelCheckpoint(
filepath = 'best_model.{epoch:02d}-{val_loss:.2f}.h5',
monitor = 'val_loss',
save_best_only = True
)
checkpoint = keras.callbacks.ModelCheckpoint(
filepath = 'model_latest.h5',
monitor = 'val_loss',
save_best_only = True
)
from keras.models import load_model
model = load_model('model_latest.h5')
model.summary()
SVG(model_to_dot(model).create(prog='dot', format='svg'))
model.layers
model.layers[8].get_config()
index = 0
plt.imshow(model.layers[index].get_weights()[0].squeeze(), cmap='gray')
plt.title(model.layers[index].get_config()['name']);
index = 4
plt.imshow(model.layers[index].get_weights()[0].squeeze(), cmap='gray')
plt.title(model.layers[index].get_config()['name']);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For the purposes of this notebook, a simple model is constructed.
Step2: Model checkpoints can be saved during training. They are usually saved in the HDF5 format.
Step3: To save only the latest model in training, something like the following callback could be used
Step4: A saved HDF5 file can be loaded as a model in a way like the following
Step5: A model can be summarized in a way like the following
Step6: An SVG of the model can be displayed in Jupyter in a way like the following
Step7: The layers of a model can be accessed
Step8: The configuration of an individual layer can be inspected
Step9: The weights of individual layers can be accessed and visualized
|
12,176
|
<ASSISTANT_TASK:>
Python Code:
# Import Libraries needed
import pandas as pd #dataframe manipulation
import numpy as np #numerical processing of vectors
import matplotlib.pyplot as plt #plotting
%matplotlib inline
#import tensorflow as tf
import sklearn
from sklearn import tree
import graphviz
import dask
print("Pandas:\t\t", pd.__version__)
print("Numpy:\t\t", np.__version__)
#print("Tensorflow:\t", tf.__version__)
print("Dask:\t\t", dask.__version__)
print("Scikit-learn:\t", sklearn.__version__)
df_init = df = pd.read_csv('./maintenance_data.csv')
df.columns
df.info()
df.describe()
df.sort_values(by='lifetime', ascending=True).head()
df.sort_values(by='lifetime', ascending=True).tail()
plt.bar(df.sort_values('team').team, df.sort_values('lifetime').lifetime)
df.groupby([df.team, df.broken]).count()
df.groupby(['team','broken']).agg({'broken': 'count'}).apply(lambda x:100 * x / float(x.sum()))
show_perc = df.groupby(['team','broken']).agg({'broken': 'count'})
show_perc.apply(lambda x:100 * x / float(x.sum()))
column = 'provider'
show_perc = df.loc[df['broken'] == 1].groupby([column]).agg({'broken': 'count'})
show_perc.apply(lambda x:round(100 * x / float(x.sum()),2)).rename(columns={"broken": "%"})
tree_data = df_init.drop(columns=['broken'])
tree_target = df_init.broken
#workaround replacement strings to integers - DO NOT DO IT LIKE THIS ;-)
try:
tree_data.replace('TeamA',1, inplace=True)
tree_data.replace('TeamB',2, inplace=True)
tree_data.replace('TeamC',3, inplace=True)
tree_data.replace('Provider1',1, inplace=True)
tree_data.replace('Provider2',2, inplace=True)
tree_data.replace('Provider3',3, inplace=True)
tree_data.replace('Provider4',4, inplace=True)
except:
pass
#convert dataframes to arrays
tree_data = tree_data.values
tree_target = tree_target.values
#column names - labels
tree_feature_names = ['lifetime', 'pressureInd', 'moistureInd', 'temperatureInd', 'team', 'provider']
#target names - class
tree_target_names = ['BROKEN!','Operational']
#Tree Classifiers
tree_clf = tree.DecisionTreeClassifier()
#tree_clf.set_params(max_depth=2)
tree_clf = tree_clf.fit(tree_data, tree_target)
tree_clf.get_params()
#output graph tree
tree_dot_data = tree.export_graphviz(tree_clf,
out_file=None,
feature_names=tree_feature_names,
class_names=tree_target_names,
filled=True,
rounded=True,
special_characters=True)
graph = graphviz.Source(tree_dot_data)
graph.render("Maintenance_classification_tree")
#show tree
graph
df_init.drop(columns=['broken']).columns
#PREDICTION WITHOUT REGRESSION - 1-->BROKEN 0-->Operational
print('instance 1 prediction: ', tree_clf.predict([[70., 100., 100., 100., 1., 3.]]))
print('instance 2 prediction: ', tree_clf.predict([[70., 100., 100., 100., 1., 1.]]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h3>Decision Tree Classification of existing Data with Scikit-learn</h3>
Step2: <h3 align='center'>To better have a look at the tree</h3>
|
12,177
|
<ASSISTANT_TASK:>
Python Code:
from collections import defaultdict, Counter
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (8, 8)
girls = ['alice', 'allie', 'bernice', 'brenda', 'clarice', 'cilly']
boys = ['chris', 'christopher', 'arald', 'arnold', 'bob']
[(b, g) for b in boys for g in girls if b[0] == g[0]]
letterGirls = {}
for girl in girls:
letterGirls.setdefault(girl[0], []).append(girl)
[(b, g) for b in boys for g in letterGirls[b[0]]]
>>> s = [('yellow', 1), ('blue', 2), ('yellow', 3), ('blue', 4), ('red', 1)]
>>> d = defaultdict(list)
>>> for k, v in s:
... d[k].append(v)
...
>>> list(d.items())
[('blue', [2, 4]), ('red', [1]), ('yellow', [1, 3])]
class Bunch(dict):
    """A dict whose entries are also readable and writable as attributes.

    Pointing ``__dict__`` at the dict itself makes attribute access and
    item access share a single namespace: ``t.x``, ``t['x']`` and
    ``'x' in t`` all agree.
    """

    def __init__(self, *args, **kwds):
        super().__init__(*args, **kwds)
        self.__dict__ = self
>>> T = Bunch
>>> t = T(left=T(left="a", right="b"), right=T(left="c"))
>>> t.left
>>> t.left.right
>>> t['left']['right']
>>> "left" in t.right
"right" in t.right
class Bunch(object):
    """Minimal attribute bag: each keyword argument becomes an attribute."""

    def __init__(self, **kwds):
        for name, value in kwds.items():
            setattr(self, name, value)
from types import SimpleNamespace
x, y = 32, 64
point = SimpleNamespace(datum=y, squared=y*y, coord=x)
point
point.datum, point.squared, point.coord
[i for i in point]
def fast_countdown(count):
    """Return [count-1, ..., 1, 0] via append plus one final reverse.

    append is amortized O(1), so the whole build is O(count). Kept
    verbatim: the surrounding %timeit cells compare this implementation
    against the insert(0, ...) variant below.
    """
    nums = []
    for i in range(count):
        nums.append(i)
    nums.reverse()
    return nums
def slow_countdown(count):
    """Same output as fast_countdown, but built with insert(0, ...).

    Each insert shifts every existing element, so the loop is quadratic
    overall -- which is the point of the timing demonstration.
    """
    nums = []
    for i in range(count):
        nums.insert(0, i)
    return nums
def printer(lst, chunk=10):
    """Print the first and last `chunk` elements of lst, joined by '...'."""
    head = " ".join(map(str, lst[:chunk]))
    tail = " ".join(map(str, lst[-chunk:]))
    print("{}...{}".format(head, tail))
%timeit nums = fast_countdown(10**5)
printer(nums)
%timeit nums = slow_countdown(10**5)
printer(nums)
from collections import deque
def enhanced_slow_countdown(count):
    """Countdown built by prepending to a deque.

    deque.appendleft is O(1), so unlike list.insert(0, ...) the whole
    build is linear. Note the return value is a deque, not a list.
    """
    nums = deque()
    for i in range(count):
        nums.appendleft(i)
    return nums
%timeit nums = enhanced_slow_countdown(10**5)
printer(nums)
from random import randrange
max_value = 10000
checks = 1000
L = [randrange(max_value) for i in range(checks)]
%timeit [randrange(max_value) in L for _ in range(checks)]
S = set(L)
%timeit [randrange(max_value) in S for _ in range(checks)]
lists = [[1, 2], [3, 4, 5], [6]]
sum(lists, [])
res = []
for lst in lists:
res.extend(lst)
res
def string_producer(length=10**7):
    """Return a random string of `length` lowercase ASCII letters.

    Bug fix: randrange's stop argument is exclusive, so the original
    randrange(ord('a'), ord('z')) could never produce 'z'; use
    ord('z') + 1 to cover the full alphabet.
    """
    return ''.join(chr(randrange(ord('a'), ord('z') + 1)) for _ in range(length))
%%timeit # %% means 'cell-wise'
s = ""
for chunk in string_producer():
s += chunk
%%timeit
chunks = []
for chunk in string_producer():
chunks.append(chunk)
s = ''.join(chunks)
%timeit s = ''.join(string_producer())
def perm_isomorphism(M, domain):
    """Translate the integer permutation M into the symbols of `domain`."""
    symbol_of = dict(enumerate(domain))
    return [symbol_of[target] for target in M]
def fix_perm(M, fix):
    """Keep M at the positions in `fix`; map every other position to itself."""
    result = []
    for position in range(len(M)):
        result.append(M[position] if position in fix else position)
    return result
def naive_max_perm(M, A=None):
    '''
    Fix a permutation such that it is one-to-one and maximal, recursively.
    consumes:
        M - a permutation as a list of integers
        A - a set of positions allowed to move
    produces:
        a set `fix` that makes M maximal, ensuring it is one-to-one
    Worst case O(n**2): each recursion strips at least one position and
    rebuilds the desired-position set from scratch.
    '''
    if A is None:
        A = set(range(len(M)))      # first call: every position may move
    if len(A) == 1:                 # bug fix: was `len(A) is 1` -- identity
        return A                    # comparison with an int literal
    B = {M[i] for i in A}           # b in B iff some position in A wants b
    C = A - B                       # positions nobody desires: discard them
    return naive_max_perm(M, A - C) if C else A
identity = range(8)
letters = "abcdefgh"
perm_isomorphism(identity, letters)
M = [2, 2, 0, 5, 3, 4, 7, 4]
perm_isomorphism(M, letters)
fix = naive_max_perm(M)
max_M = fix_perm(M, fix)
perm_isomorphism(max_M, letters)
def max_perm(M):
    """Linear-time version of the maximal one-to-one fix.

    Returns the set of positions allowed to keep their mapped value;
    positions outside the set are left fixed by the caller.
    """
    positions = set(range(len(M)))
    demand = Counter(M)                     # how many positions want each spot
    useless = deque(p for p in positions if demand[p] == 0)
    while useless:                          # peel off unwanted positions
        p = useless.pop()
        positions.remove(p)                 # p cannot take part in the result
        wanted = M[p]
        demand[wanted] -= 1                 # release the spot p was asking for
        if demand[wanted] == 0:
            useless.appendleft(wanted)      # that spot just became unwanted
    return positions
fix = max_perm(M)
max_M = fix_perm(M, fix)
perm_isomorphism(max_M, letters)
def counting_sort(A, key=None, sort_boundary=None):
    '''
    Sort the sequence A in linear time, assuming its keys are hashable
    integers (or integer-like, ordered values).

    Works best when keys are evenly (uniformly) distributed over their
    range: the default strategy walks every value from min(key) to
    max(key), so sparse keys clustered around distant accumulation
    points waste time traversing the gaps between them.

    If `sort_boundary` is a float in [0, 1] and the number of distinct
    keys is at most len(A) * sort_boundary, the distinct keys are
    ordered with the loglinear builtin sort instead of walking the
    whole range.

    The sort is stable: items with equal keys keep their input order.

    Bug fix: an empty A previously raised ValueError (min()/max() of an
    empty bucket set) whenever sort_boundary was falsy; it now returns [].
    '''
    if key is None:
        key = lambda x: x
    buckets = defaultdict(list)
    for x in A:
        buckets[key(x)].append(x)
    if not buckets:
        return []
    if sort_boundary and len(buckets) <= len(A) * sort_boundary:
        domain = sorted(buckets)
    else:
        domain = range(min(buckets), max(buckets) + 1)
    out = []
    for k in domain:
        out.extend(buckets[k])
    return out
A = [randrange(50) for i in range(2*10**3)]
assert sorted(A) == counting_sort(A)
n, bins, patches = plt.hist(A, 10, normed=0,facecolor='green', alpha=0.5)
plt.xlabel('elements')
plt.ylabel('frequencies')
plt.grid(True)
plt.show()
%timeit counting_sort(A)
%timeit counting_sort(A, sort_boundary=1)
B = [randrange(50) for i in range(10**3)] + [10**4 + randrange(50) for i in range(10**3)]
n, bins, patches = plt.hist(B, 100, normed=0,facecolor='green', alpha=0.5)
plt.xlabel('elements')
plt.ylabel('frequencies')
plt.grid(True)
plt.show()
assert sorted(B) == counting_sort(B)
%timeit counting_sort(B)
%timeit counting_sort(B, sort_boundary=1/8)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A grouping pattern, avoiding quadratic time
Step2: the bad way, quadratic time
Step3: there is a better approach avoiding quadratic time, toward defaultdict
Step4: However there is an even better solution, as pointed out in the example subsection of the previous link
Step5: The Bunch pattern
Step6: However inheriting from dict is discouraged by Alex
Step7: It is interesting to observe that this idiom has been merged within the standard library, starting from Python 3.3, as with the name of SimpleNamespace
Step8: If you need point to be iterable use the structured object namedtuple instead.
Step9: Citing Hetland, pag 11
Step10: Hidden squares
Step11: Hetland's words, pag. 35
Step12: Hetland, pag.36
Step13: concerning strings
Step14: maybe some optimization is performed because s is a string object.
Step15: a better approach using constant append to the top
Step16: maybe a little better since it doesn't loop with for explicitly.
Step17: The following is a naive implementation, recursive but in $\mathcal{O}(n^{2})$, where $n$ is the permutation length.
Step18: Hetland, pag. 78
Step19: Counting Sort
|
12,178
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
def print_sum(a, b):
    """Print the sum of the arguments a and b.

    Bug fix: the docstring's quotes were stripped (leaving a bare
    sentence that is a syntax error); restored as a real docstring.
    """
    print(a + b)
# YOUR CODE HERE
interact(print_sum,a=(-10,10,.1),b=(-8,8,2));
assert True # leave this for grading the print_sum exercise
def print_string(s, length=False):
    """Print the string s and, when `length` is truthy, its length.

    Bug fixes: the docstring's quotes were stripped (syntax error);
    the `length == True` comparison is replaced by a truthiness test,
    so any truthy flag (e.g. 1) also prints the length.
    """
    print(s)
    if length:
        print(len(s))
# YOUR CODE HERE
interact(print_string,s='Hello World', length=True);
assert True # leave this for grading the print_string exercise
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Interact basics
Step3: Use the interact function to interact with the print_sum function.
Step5: Write a function named print_string that prints a string and additionally prints the length of that string if a boolean parameter is True.
Step6: Use the interact function to interact with the print_string function.
|
12,179
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import os
import json
import re
import sys
import pandas
from datetime import datetime, timedelta
from time import sleep
from subprocess import check_output
try:
from urllib import urlopen
except:
from urllib.request import urlopen
import ssl
import yaml
context = ssl._create_unverified_context()
with open('packages.yml') as package_file:
packages = yaml.load(package_file)
CWD = os.path.abspath(os.path.curdir)
start_date = '2017-11-03'
since_date = '--since="{start}"'.format(start=start_date)
since_date
since = datetime.strptime(start_date+" 0:0:0", "%Y-%m-%d %H:%M:%S")
since
from datetime import datetime, timedelta
ISO8601 = "%Y-%m-%dT%H:%M:%SZ"
PER_PAGE = 100
element_pat = re.compile(r'<(.+?)>')
rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]')
def parse_link_header(headers):
    """Map each rel name to its URL, parsed from the HTTP Link header.

    Relies on the module-level regexes `element_pat` (URLs inside <...>)
    and `rel_pat` (rel='...' annotations) defined alongside.
    """
    link_s = headers.get('link', '')
    urls = element_pat.findall(link_s)
    rels = rel_pat.findall(link_s)
    return dict(zip(rels, urls))
def get_paged_request(url):
    """Get a full list of results, handling GitHub APIv3's paging.

    Fetches `url`, accumulates each page's JSON payload, and follows the
    'next' relation from the Link header until no next page remains.

    Bug fix: the docstring's quotes were stripped by dataset cleaning,
    leaving a bare sentence that is a syntax error; restored.
    """
    results = []
    while url:
        f = urlopen(url)
        results.extend(json.load(f))
        links = parse_link_header(f.headers)
        url = links.get('next')
    return results
def get_issues(project="pysal/pysal", state="closed", pulls=False):
    """Get a list of the issues (or pull requests) from the GitHub API.

    Bug fix: restored the stripped docstring quotes (syntax error).
    """
    which = 'pulls' if pulls else 'issues'
    url = "https://api.github.com/repos/%s/%s?state=%s&per_page=%i" % (project, which, state, PER_PAGE)
    return get_paged_request(url)
def _parse_datetime(s):
    """Parse dates in the ISO-8601 format returned by the GitHub API.

    Falsy input (None or '') maps to the epoch so that entries with a
    missing timestamp sort before any real one. Note: fromtimestamp(0)
    is naive local time -- presumably fine for this report; confirm if
    timezone correctness ever matters.

    Bug fix: restored the stripped docstring quotes (syntax error).
    """
    if s:
        return datetime.strptime(s, ISO8601)
    return datetime.fromtimestamp(0)
def issues2dict(issues):
    """Convert a list of issues to a dict, keyed by issue number.

    Bug fix: restored the stripped docstring quotes (syntax error);
    the manual loop is replaced by an equivalent dict comprehension.
    """
    return {issue['number']: issue for issue in issues}
def is_pull_request(issue):
    """Return True if the given issue is a pull request.

    Bug fix: restored the stripped docstring quotes (syntax error).
    """
    return 'pull_request_url' in issue
def issues_closed_since(period=timedelta(days=365), project="pysal/pysal", pulls=False):
    """Get all issues closed since a particular point in time.

    `period` can either be a datetime object, or a timedelta object.
    In the latter case, it is used as a time before the present.

    Bug fix: restored the stripped docstring quotes (syntax error).
    """
    which = 'pulls' if pulls else 'issues'
    if isinstance(period, timedelta):
        period = datetime.now() - period
    url = "https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i" % (project, which, period.strftime(ISO8601), PER_PAGE)
    allclosed = get_paged_request(url)
    # The API's `since` filters on update time, so re-filter on close time.
    filtered = [i for i in allclosed if _parse_datetime(i['closed_at']) > period]
    # Exclude rejected PRs: only merged pull requests count.
    if pulls:
        filtered = [pr for pr in filtered if pr['merged_at']]
    return filtered
def sorted_by_field(issues, field='closed_at', reverse=False):
    """Return the issues as a list sorted by the given field (closing date by default).

    Bug fix: restored the stripped docstring quotes (syntax error) and
    the duplicated word in the original text ("date date").
    """
    return sorted(issues, key=lambda issue: issue[field], reverse=reverse)
def report(issues, show_urls=False):
    """Summary report about a list of issues, printing number and title.

    Bug fixes: restored the stripped docstring quotes (syntax error);
    dropped the Python-2-era `.encode('utf-8')`, which under Python 3
    made print emit the bytes repr (b'title') instead of the title.
    """
    if show_urls:
        for i in issues:
            # Sphinx role: merged PRs get :ghpull:, everything else :ghissue:.
            role = 'ghpull' if 'merged_at' in i else 'ghissue'
            print('* :%s:`%d`: %s' % (role, i['number'], i['title']))
    else:
        for i in issues:
            print('* %d: %s' % (i['number'], i['title']))
all_issues = {}
all_pulls = {}
total_commits = 0
issue_details = {}
pull_details = {}
for package in packages:
subpackages = packages[package].split()
for subpackage in subpackages:
prj = 'pysal/{subpackage}'.format(subpackage=subpackage)
os.chdir(CWD)
os.chdir('tmp/{subpackage}'.format(subpackage=subpackage))
#sub_issues = issues_closed_since(project=prj, period=since)
#sleep(5)
issues = issues_closed_since(since, project=prj,pulls=False)
pulls = issues_closed_since(since, project=prj,pulls=True)
issues = sorted_by_field(issues, reverse=True)
pulls = sorted_by_field(pulls, reverse=True)
issue_details[subpackage] = issues
pull_details[subpackage] = pulls
n_issues, n_pulls = map(len, (issues, pulls))
n_total = n_issues + n_pulls
all_issues[subpackage] = n_total, n_pulls
os.chdir(CWD)
import pickle
pickle.dump( issue_details, open( "issue_details.p", "wb" ) )
pickle.dump( pull_details, open("pull_details.p", "wb"))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step9: Our last main release was 2017-11-03
Step10: The issues are pulled since the last release date of the meta package.
|
12,180
|
<ASSISTANT_TASK:>
Python Code:
def nthTerm(N):
    """Return (2*N + 3)**2 - 2*N, the N-th term of the series."""
    base = 2 * N + 3
    return base * base - 2 * N
n = 4
print(nthTerm(n ) )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
12,181
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf

# Set up the data loading:
images, labels = ...

# VGG-16 built from raw TF ops.  The original inlined all 16 layers; the
# repeated conv/bias/relu, pooling and fully-connected patterns are factored
# into helpers below.  Bug fix: the dropout outputs (fc6_drop / fc7_drop)
# were computed but never consumed (fc7 read fc6 and fc8 read fc7), so
# dropout had no effect; the dense layers now consume the dropout outputs.

def _conv_relu(inputs, scope_name, kernel_shape):
    """3x3 convolution (stride 1, SAME padding) + bias + ReLU under `scope_name`."""
    with tf.name_scope(scope_name) as scope:
        kernel = tf.Variable(tf.truncated_normal(kernel_shape, dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(inputs, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[kernel_shape[-1]], dtype=tf.float32), trainable=True, name='biases')
        return tf.nn.relu(tf.nn.bias_add(conv, biases), name=scope)

def _max_pool(inputs, name):
    """2x2 max pooling with stride 2 (halves the spatial resolution)."""
    return tf.nn.max_pool(inputs, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)

def _fully_connected(inputs, scope_name, in_dim, out_dim, relu=True):
    """Dense layer; set relu=False for the final logits layer."""
    with tf.name_scope(scope_name) as scope:
        weights = tf.Variable(tf.truncated_normal([in_dim, out_dim], dtype=tf.float32, stddev=1e-1), name='weights')
        biases = tf.Variable(tf.constant(0.0, shape=[out_dim], dtype=tf.float32), trainable=True, name='biases')
        pre_activation = tf.nn.bias_add(tf.matmul(inputs, weights), biases)
        return tf.nn.relu(pre_activation, name=scope) if relu else pre_activation

# Five conv stages (2-2-3-3-3 conv layers), each followed by max pooling.
conv1 = _conv_relu(images, 'conv1_1', [3, 3, 3, 64])
conv1 = _conv_relu(conv1, 'conv1_2', [3, 3, 64, 64])
pool1 = _max_pool(conv1, 'pool1')

conv2 = _conv_relu(pool1, 'conv2_1', [3, 3, 64, 128])
conv2 = _conv_relu(conv2, 'conv2_2', [3, 3, 128, 128])
pool2 = _max_pool(conv2, 'pool2')

conv3 = _conv_relu(pool2, 'conv3_1', [3, 3, 128, 256])
conv3 = _conv_relu(conv3, 'conv3_2', [3, 3, 256, 256])
conv3 = _conv_relu(conv3, 'conv3_3', [3, 3, 256, 256])
pool3 = _max_pool(conv3, 'pool3')

conv4 = _conv_relu(pool3, 'conv4_1', [3, 3, 256, 512])
conv4 = _conv_relu(conv4, 'conv4_2', [3, 3, 512, 512])
conv4 = _conv_relu(conv4, 'conv4_3', [3, 3, 512, 512])
pool4 = _max_pool(conv4, 'pool4')

conv5 = _conv_relu(pool4, 'conv5_1', [3, 3, 512, 512])
conv5 = _conv_relu(conv5, 'conv5_2', [3, 3, 512, 512])
conv5 = _conv_relu(conv5, 'conv5_3', [3, 3, 512, 512])
pool5 = _max_pool(conv5, 'pool5')

# Classifier head: flatten -> fc6 -> dropout -> fc7 -> dropout -> fc8 logits.
# Assumes 224x224 inputs so pool5 is 7x7x512 — TODO confirm against the loader.
flat = tf.reshape(pool5, [-1, 7 * 7 * 512])
fc6 = _fully_connected(flat, 'fc_6', 7 * 7 * 512, 4096)
fc6_drop = tf.nn.dropout(fc6, keep_prob=0.5, name='dropout')
fc7 = _fully_connected(fc6_drop, 'fc_7', 4096, 4096)
fc7_drop = tf.nn.dropout(fc7, keep_prob=0.5, name='dropout')
predictions = _fully_connected(fc7_drop, 'fc_8', 4096, 1000, relu=False)
import tensorflow as tf

slim = tf.contrib.slim

# Set up the data loading:
images, labels = ...

# The same VGG-16 topology, written with TF-Slim.  arg_scope applies the
# shared activation / initializer / regularizer settings to every conv and
# fully-connected layer created inside the block.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
                    activation_fn=tf.nn.relu,
                    weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                    weights_regularizer=slim.l2_regularizer(0.0005)):
    # Five conv stages (2-2-3-3-3 layers), each followed by 2x2 max pooling.
    x = slim.repeat(images, 2, slim.conv2d, 64, [3, 3], scope='conv1')
    x = slim.max_pool2d(x, [2, 2], scope='pool1')
    x = slim.repeat(x, 2, slim.conv2d, 128, [3, 3], scope='conv2')
    x = slim.max_pool2d(x, [2, 2], scope='pool2')
    x = slim.repeat(x, 3, slim.conv2d, 256, [3, 3], scope='conv3')
    x = slim.max_pool2d(x, [2, 2], scope='pool3')
    x = slim.repeat(x, 3, slim.conv2d, 512, [3, 3], scope='conv4')
    x = slim.max_pool2d(x, [2, 2], scope='pool4')
    x = slim.repeat(x, 3, slim.conv2d, 512, [3, 3], scope='conv5')
    x = slim.max_pool2d(x, [2, 2], scope='pool5')
    # Classifier head with dropout between the dense layers.
    x = slim.fully_connected(x, 4096, scope='fc6')
    x = slim.dropout(x, 0.5, scope='dropout6')
    x = slim.fully_connected(x, 4096, scope='fc7')
    x = slim.dropout(x, 0.5, scope='dropout7')
    x = slim.fully_connected(x, 1000, activation_fn=None, scope='fc8')

predictions = x
import tensorflow as tf

slim = tf.contrib.slim
# The same VGG-16, taken straight from TF-Slim's model zoo.
vgg = tf.contrib.slim.nets.vgg

# Set up the data loading:
images, labels = ...

# Define the model:
# Bug fix: the slim model zoo exposes the network as `vgg.vgg_16` (as the
# checkpoint-loading example later in this file also uses); `vgg.vgg16`
# does not exist and raised AttributeError.
predictions = vgg.vgg_16(images)
import tensorflow as tf
from tqdm import trange
from tensorflow.examples.tutorials.mnist import input_data
# Import data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Create the model.
# NOTE: the explicit name='x' / name='y' / name='y_' arguments matter — the
# restore cell later fetches these tensors by name ("x:0", "y:0", "y_:0").
x = tf.placeholder(tf.float32, [None, 784], name='x')
W = tf.Variable(tf.zeros([784, 10]), name='W')
b = tf.Variable(tf.zeros([10]), name='b')
y = tf.nn.bias_add(tf.matmul(x, W), b, name='y')  # logits of a softmax (logistic) regression
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10], name='y_')  # one-hot ground-truth labels
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# Variable Initializer
init_op = tf.global_variables_initializer()
# Create a Saver object for saving weights
saver = tf.train.Saver()
# Create a Session object, initialize all variables
sess = tf.Session()
sess.run(init_op)
# Train: 1000 SGD steps on mini-batches of 100 (trange shows a progress bar).
for _ in trange(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# Save model (graph + weights) so the next cell can restore it.
save_path = saver.save(sess, "./log_reg_model.ckpt")
print("Model saved in file: %s" % save_path)
# Test trained model: fraction of argmax predictions matching the labels.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Test accuracy: {0}'.format(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})))
sess.close()
import tensorflow as tf
from tqdm import trange
from tensorflow.examples.tutorials.mnist import input_data
# Import data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Create a Session object, initialize all variables
sess = tf.Session()
# Restore weights: import_meta_graph rebuilds the graph saved by the
# training cell, then restore() loads the latest checkpointed weights —
# no retraining needed.
saver = tf.train.import_meta_graph('./log_reg_model.ckpt.meta')
saver.restore(sess, tf.train.latest_checkpoint('./'))
print("Model restored.")
graph = tf.get_default_graph()
# Look up the tensors by the names given at construction time ("x", "y", "y_").
x = graph.get_tensor_by_name("x:0")
y = graph.get_tensor_by_name("y:0")
y_ = graph.get_tensor_by_name("y_:0")
# Test trained model: same accuracy computation as the training cell.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Test accuracy: {0}'.format(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})))
sess.close()
from datasets import dataset_utils
import tensorflow as tf

# Pre-trained VGG-16 weights published by the TF-Slim model zoo.
url = "http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz"
checkpoints_dir = './checkpoints'
# Create the target directory if needed, then download and unpack the
# checkpoint into it (dataset_utils comes from the TF-Slim examples repo).
if not tf.gfile.Exists(checkpoints_dir):
    tf.gfile.MakeDirs(checkpoints_dir)
dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)
import os
import tensorflow as tf
from nets import vgg
slim = tf.contrib.slim
# Load images
images = ...
# Pre-process
processed_images = ...
# Create the model, use the default arg scope to configure the batch norm parameters.
# is_training=False disables dropout for inference.
with slim.arg_scope(vgg.vgg_arg_scope()):
    logits, _ = vgg.vgg_16(processed_images, num_classes=1000, is_training=False)
probabilities = tf.nn.softmax(logits)
# Load checkpoint values: init_fn, when called with a session, assigns the
# downloaded pre-trained weights to the model's variables (scope 'vgg_16').
init_fn = slim.assign_from_checkpoint_fn(
    os.path.join(checkpoints_dir, 'vgg_16.ckpt'),
    slim.get_model_variables('vgg_16'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Understanding every line of this model isn't important. The main point to notice is how much space this takes up. Several of the above lines (conv2d, bias_add, relu, maxpool) can obviously be combined to cut down on the size a bit, and you could also try to compress the code with some clever for looping, but all at the cost of sacrificing readability. With this much code, there is high potential for bugs or typos (to be honest, there are probably a few up there^), and modifying or refactoring the code becomes a huge pain.
Step2: Much cleaner. For the TF-Slim version, it's much more obvious what the network is doing, writing it is faster, and typos and bugs are much less likely.
Step3: Pre-Trained Weights
Step4: Note, the differences from what we worked with yesterday
Step5: Importantly, notice that we didn't have to retrain the model. Instead, the graph and all variable values were loaded directly from our checkpoint files. In this example, this probably takes just as long, but for more complex models, the utility of saving/restoring is immense.
|
12,182
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import os
import pandas as pd
# Series to be transformed.
# (Fix: the original markdown captions had leaked into the code cell as bare
# prose statements, which is a SyntaxError; they are now comments.)
s = pd.Series(
    name="Compras",
    index=["Leite", "Ovos", "Carne", "Arroz", "Feijão"],
    data=[2, 12, 1, 5, 2]
)
s

# Transformation function
def fn(x):
    """Quadratic transform applied element-wise: x**2 + x - 100."""
    return x**2 + x - 100

# Element-wise transformation
s.map(fn)
# DataFrame to be transformed.
# (Fix: the original markdown captions had leaked into the code cell as bare
# prose statements, which is a SyntaxError; they are now comments.)
df = pd.DataFrame(
    index=pd.Index(data=["Leite", "Ovos", "Carne", "Arroz", "Feijão"], name="Itens"),
    columns=["Quantidade", "Preço Unitário"],
    data=np.array([
        [4, 3, 1, 5, 2],
        [3.00, 6.50, 25.90, 5.00, 3.80]
    ]).T,
)
df

# Transformation function
def fn(x):
    """Element-wise transform: x cubed, divided by 100."""
    return x ** 3 / 100

# Element-wise transformation over every cell of the DataFrame.
df.applymap(fn)

# Transformation function (reduces a whole Series to one value).
def fn(x):
    """Return the product of the values of a Series."""
    return x.product()

# Column-wise aggregation (the default, axis=0).
df.apply(fn)

# Row-wise aggregation.
df.apply(fn, axis=1)
# Fact table: one row per purchase (user, store, product) by id.
df1 = pd.DataFrame(
    columns=["user_id", "store_id", "product_id"],
    data=[
        [1, 1, 1],
        [1, 2, 6],
        [3, 2, 7],
        [2, 2, 3],
        [3, 2, 3],
        [4, 3, 2],
        [2, 3, 1],
        [1, 1, 3],
        [1, 3, 3],
        [5, 1, 4],
        [5, 1, 1],
        [3, 2, 1],
        [1, 2, 1],
        [2, 2, 2],
        [3, 2, 2],
        [4, 2, 3],
        [4, 1, 5],
        [5, 1, 6],
        [3, 1, 6],
        [2, 3, 7],
    ]
)
# Dimension table: store id -> store name.
df2 = pd.DataFrame(
    columns=["store_id", "store_name"],
    data=[
        [1, "Pão de Açúcar"],
        [2, "Dia"],
        [3, "Extra"],
    ]
)
# Dimension table: product id -> product name.
df3 = pd.DataFrame(
    columns=["product_id", "product_name"],
    data=[
        [1, "Leite"],
        [2, "Ovos"],
        [3, "Arroz"],
        [4, "Feijão"],
        [5, "Carne"],
        [6, "Frango"],
        [7, "Peixe"]
    ]
)
# Join both dimension tables onto the fact table, keeping only the
# human-readable columns.
df = pd.merge(pd.merge(df1, df2, on="store_id"), df3, on="product_id")[["user_id", "store_name", "product_name"]]
df
# Lines ending in '?' are IPython help lookups (they open the docstring);
# they are not valid plain Python.
df.pivot?
df.pivot_table?
# Reshape long -> wide; pivot_table additionally aggregates duplicates.
df.pivot(columns="store_name", values="product_name")
df.pivot_table(columns="store_name", values="product_name", aggfunc="count")
pd.crosstab?
# Frequency table: purchases per (store, product) pair.
pd.crosstab(df.store_name, df.product_name)
pd.DataFrame.groupby?
pd.core.groupby.DataFrameGroupBy.aggregate?
# Group the purchases by product, then summarize / aggregate per group.
g = df.groupby("product_name")
g
g.describe()
g.aggregate(["min", "mean", "median", "max", "sum"])
g.max()
file_name = os.path.join("data", "desafio", "dataset.csv")
# Bug fix: the original passed `filename` (undefined) to read_csv while the
# variable above is `file_name`, raising NameError before anything was read.
dataset = pd.read_csv(file_name, sep=",", index_col=0, encoding="utf-8")
dataset.head()

# Enter your code here  (challenge placeholder, item A)
# Enter your code here  (challenge placeholder, item B)
# Enter your code here  (challenge placeholder, item C)
# Enter your code here  (challenge placeholder, item D)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Propagação de Funções
Step5: Elemento a elemento
Step7: DataFrame
Step10: Elemento a elemento
Step14: Linhas e Colunas
Step15: Transformações
Step16: Manipulações
Step17: crosstab
Step18: Agrupamento e Agregação
Step19: Desafio
Step21: Itens
Step23: [ Item B ]
Step25: [ Item C ]
Step27: [ Item D ]
|
12,183
|
<ASSISTANT_TASK:>
Python Code:
from stix2 import Indicator

# A STIX 2 Indicator matching a file by its MD5 hash.
indicator = Indicator(name="File hash for malware variant",
                      pattern_type="stix",
                      pattern="[file:hashes.md5 = 'd41d8cd98f00b204e9800998ecf8427e']")
# Three serialization variants: pretty-printed, compact (fastest),
# and compact-with-indent (human-readable without the pretty overhead).
print(indicator.serialize(pretty=True))
print(indicator.serialize())
print(indicator.serialize(indent=4))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: New in 3.0.0
Step2: If you need performance but also need human-readable output, you can pass the indent keyword argument to serialize()
|
12,184
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas_datareader as pdr
import seaborn
import statsmodels.api as sm
from statsmodels.regression.rolling import RollingOLS

seaborn.set_style("darkgrid")
pd.plotting.register_matplotlib_converters()
%matplotlib inline
# Download monthly Fama-French factors and 10-industry portfolio returns
# (network access required).
factors = pdr.get_data_famafrench("F-F_Research_Data_Factors", start="1-1-1926")[0]
factors.head()
industries = pdr.get_data_famafrench("10_Industry_Portfolios", start="1-1-1926")[0]
industries.head()
# Rolling CAPM: excess HiTec returns regressed on the excess market return
# over a 60-month moving window.
endog = industries.HiTec - factors.RF.values
exog = sm.add_constant(factors["Mkt-RF"])
rols = RollingOLS(endog, exog, window=60)
rres = rols.fit()
params = rres.params.copy()
params.index = np.arange(1, params.shape[0] + 1)
params.head()
# Rows before the window fills (obs < 60) are NaN; row 60 is the first estimate.
params.iloc[57:62]
params.tail()
fig = rres.plot_recursive_coefficient(variables=["Mkt-RF"], figsize=(14, 6))
# Three-factor model: market, size (SMB) and value (HML) factors.
exog_vars = ["Mkt-RF", "SMB", "HML"]
exog = sm.add_constant(factors[exog_vars])
rols = RollingOLS(endog, exog, window=60)
rres = rols.fit()
fig = rres.plot_recursive_coefficient(variables=exog_vars, figsize=(14, 18))
# Same model via the formula interface (column renamed: formulas cannot
# contain '-').
joined = pd.concat([factors, industries], axis=1)
joined["Mkt_RF"] = joined["Mkt-RF"]
mod = RollingOLS.from_formula("HiTec ~ Mkt_RF + SMB + HML", data=joined, window=60)
rres = mod.fit()
rres.params.tail()
# params_only=True skips the covariance computation, which is faster.
%timeit rols.fit()
%timeit rols.fit(params_only=True)
# Expanding sample: start estimating at min_nobs=12 observations, then grow
# the window until it reaches 60.
res = RollingOLS(endog, exog, window=60, min_nobs=12, expanding=True).fit()
res.params.iloc[10:15]
res.nobs[10:15]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: pandas-datareader is used to download data from
Step2: The first model estimated is a rolling version of the CAPM that regresses
Step3: We next plot the market loading along with a 95% point-wise confidence interval.
Step4: Next, the model is expanded to include all three factors, the excess market, the size factor
Step5: Formulas
Step6: RollingWLS
Step7: Expanding Sample
|
12,185
|
<ASSISTANT_TASK:>
Python Code:
from pygoose import *
import gc
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import *
from keras import backend as K
from keras.models import Model, Sequential
from keras.layers import *
from keras.callbacks import EarlyStopping, ModelCheckpoint
# Locate the pygoose project layout and give these features a stable id.
project = kg.Project.discover()
feature_list_id = 'oofp_nn_lstm_with_activations'
# Fix the NumPy seed so subsequent NN runs are reproducible.
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
# Pre-computed fastText embedding matrix (rows = vocabulary indices).
embedding_matrix = kg.io.load(project.aux_dir + 'fasttext_vocab_embedding_matrix.pickle')
# Padded word-index sequences for question 1 / question 2 of every pair.
X_train_q1 = kg.io.load(project.preprocessed_data_dir + 'sequences_q1_fasttext_train.pickle')
X_train_q2 = kg.io.load(project.preprocessed_data_dir + 'sequences_q2_fasttext_train.pickle')
X_test_q1 = kg.io.load(project.preprocessed_data_dir + 'sequences_q1_fasttext_test.pickle')
X_test_q2 = kg.io.load(project.preprocessed_data_dir + 'sequences_q2_fasttext_test.pickle')
y_train = kg.io.load(project.features_dir + 'y_train.pickle')
# Embedding / vocabulary / sequence-length constants derived from the data.
EMBEDDING_DIM = embedding_matrix.shape[-1]
VOCAB_LENGTH = embedding_matrix.shape[0]
MAX_SEQUENCE_LENGTH = X_train_q1.shape[-1]
print(EMBEDDING_DIM, VOCAB_LENGTH, MAX_SEQUENCE_LENGTH)
def zero_loss(y_true, y_pred):
    # Dummy Keras loss that is always zero: the auxiliary "feature_output"
    # head exists only to expose the concatenated LSTM activations as an
    # extra model output, so it must not contribute to training.
    return K.zeros((1,))
def create_model_question_branch():
    """Build the input tensor and fixed-size encoding for one question.

    Returns a (input_tensor, output_tensor) pair: word indices go through
    frozen fastText embeddings, a per-timestep dense projection, and
    max-over-time pooling down to an EMBEDDING_DIM vector.
    """
    question_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    # Frozen pre-trained embeddings (not updated during training).
    embedded = Embedding(
        VOCAB_LENGTH,
        EMBEDDING_DIM,
        weights=[embedding_matrix],
        input_length=MAX_SEQUENCE_LENGTH,
        trainable=False,
    )(question_input)
    # Same dense ReLU projection applied at every timestep.
    projected = TimeDistributed(Dense(
        EMBEDDING_DIM,
        activation='relu',
    ))(embedded)
    # Max-over-time pooling collapses the sequence axis.
    pooled = Lambda(
        lambda t: K.max(t, axis=1),
        output_shape=(EMBEDDING_DIM, )
    )(projected)
    return question_input, pooled
def create_model(params):
    """Build and compile the Siamese LSTM duplicate-question model.

    params: dict with 'num_lstm', 'lstm_dropout_rate', 'num_dense',
    'dense_dropout_rate'.  The model has two outputs: the duplicate
    probability ('target_output') and the raw concatenated LSTM
    activations ('feature_output', trained with a zero loss so it only
    serves as a feature extractor).
    """
    # Shared layers: both questions go through the SAME embedding and LSTM
    # instances, making the two branches weight-tied (Siamese).
    embedding_layer = Embedding(
        VOCAB_LENGTH,
        EMBEDDING_DIM,
        weights=[embedding_matrix],
        input_length=MAX_SEQUENCE_LENGTH,
        trainable=False,
    )
    lstm_layer = LSTM(
        params['num_lstm'],
        dropout=params['lstm_dropout_rate'],
        recurrent_dropout=params['lstm_dropout_rate'],
    )
    # Branch for question 1.
    input_q1 = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences_1 = embedding_layer(input_q1)
    x1 = lstm_layer(embedded_sequences_1)
    # Branch for question 2 (same layers, shared weights).
    input_q2 = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences_2 = embedding_layer(input_q2)
    y1 = lstm_layer(embedded_sequences_2)
    # Concatenated activations double as the exported feature vector.
    features = Concatenate(name='feature_output')([x1, y1])
    dropout_feat = Dropout(params['dense_dropout_rate'])(features)
    bn_feat = BatchNormalization()(dropout_feat)
    # Classification head.
    dense_1 = Dense(params['num_dense'], activation='relu')(bn_feat)
    dropout_1 = Dropout(params['dense_dropout_rate'])(dense_1)
    bn_1 = BatchNormalization()(dropout_1)
    output = Dense(1, activation='sigmoid', name='target_output')(bn_1)
    model = Model(
        inputs=[input_q1, input_q2],
        outputs=[output, features],
    )
    # Only the target head contributes to the gradient (feature head has
    # weight 0.0 and the dummy zero_loss).
    model.compile(
        loss={'target_output': 'binary_crossentropy', 'feature_output': zero_loss},
        loss_weights={'target_output': 1.0, 'feature_output': 0.0},
        optimizer='nadam',
        metrics=None,
    )
    return model
def predict(model, X_q1, X_q2):
    """Mirror the pairs, compute two separate predictions, and average them.

    Averaging the (q1, q2) and (q2, q1) predictions makes the output
    symmetric in the question order.  (Fix: the docstring had lost its
    triple quotes during extraction, which was a SyntaxError.)
    """
    y1 = model.predict([X_q1, X_q2], batch_size=1024, verbose=1).reshape(-1)
    y2 = model.predict([X_q2, X_q1], batch_size=1024, verbose=1).reshape(-1)
    return (y1 + y2) / 2
# Stratified 5-fold split, seeded for reproducibility.
NUM_FOLDS = 5
kfold = StratifiedKFold(
    n_splits=NUM_FOLDS,
    shuffle=True,
    random_state=RANDOM_SEED
)
BATCH_SIZE = 2048
MAX_EPOCHS = 200  # upper bound; early stopping ends training sooner
# Hyperparameters picked by Bayesian optimization (per the notebook text).
model_params = {
    'dense_dropout_rate': 0.075,
    'lstm_dropout_rate': 0.332,
    'num_dense': 130,
    'num_lstm': 300,
}
# Concatenation of the two LSTM branches -> twice the LSTM width.
feature_output_size = model_params['num_lstm'] * 2
# Placeholders for out-of-fold predictions (train) and per-fold test
# predictions, plus the corresponding activation features.
y_train_oofp = np.zeros_like(y_train, dtype='float32')
y_train_oofp_features = np.zeros((len(y_train), feature_output_size), dtype='float32')
y_test_oofp = np.zeros((len(X_test_q1), NUM_FOLDS), dtype='float32')
y_test_oofp_features = np.zeros((len(X_test_q1), feature_output_size), dtype='float32')
# Where the best weights of the current fold's model are checkpointed.
model_checkpoint_path = project.temp_dir + 'fold-checkpoint-' + feature_list_id + '.h5'
%%time
# Iterate through folds.
for fold_num, (ix_train, ix_val) in enumerate(kfold.split(X_train_q1, y_train)):
    # Augment the training set by mirroring the pairs (q1/q2 and q2/q1),
    # doubling the data and enforcing order symmetry.
    X_fold_train_q1 = np.vstack([X_train_q1[ix_train], X_train_q2[ix_train]])
    X_fold_train_q2 = np.vstack([X_train_q2[ix_train], X_train_q1[ix_train]])
    X_fold_val_q1 = np.vstack([X_train_q1[ix_val], X_train_q2[ix_val]])
    X_fold_val_q2 = np.vstack([X_train_q2[ix_val], X_train_q1[ix_val]])
    # Ground truth should also be "mirrored".
    y_fold_train = np.concatenate([y_train[ix_train], y_train[ix_train]])
    y_fold_val = np.concatenate([y_train[ix_val], y_train[ix_val]])
    print()
    print(f'Fitting fold {fold_num + 1} of {kfold.n_splits}')
    print()
    # Compile a new model (fresh weights for every fold).
    model = create_model(model_params)
    # Train.
    model.fit(
        # Create dummy ground truth values for the activation outputs
        # (the feature head's loss is zero_loss, so these are never used).
        [X_fold_train_q1, X_fold_train_q2],
        [y_fold_train, np.zeros((len(y_fold_train), feature_output_size))],
        validation_data=(
            [X_fold_val_q1, X_fold_val_q2],
            [y_fold_val, np.zeros((len(y_fold_val), feature_output_size))],
        ),
        batch_size=BATCH_SIZE,
        epochs=MAX_EPOCHS,
        verbose=1,
        callbacks=[
            # Stop training when the validation loss stops improving.
            EarlyStopping(
                monitor='val_loss',
                min_delta=0.001,
                patience=3,
                verbose=1,
                mode='auto',
            ),
            # Save the weights of the best epoch.
            ModelCheckpoint(
                model_checkpoint_path,
                monitor='val_loss',
                save_best_only=True,
                verbose=2,
            ),
        ],
    )
    # Restore the best epoch.
    model.load_weights(model_checkpoint_path)
    # Compute out-of-fold predictions (validation rows) and this fold's
    # test-set predictions (averaged over folds later).
    y_train_oofp[ix_val] = predict(model, X_train_q1[ix_val], X_train_q2[ix_val])
    y_test_oofp[:, fold_num] = predict(model, X_test_q1, X_test_q2)
    # Clear GPU memory.
    K.clear_session()
    del X_fold_train_q1, X_fold_train_q2
    del X_fold_val_q1, X_fold_val_q2
    del model
    gc.collect()
# Cross-validation score of the out-of-fold predictions.
cv_score = log_loss(y_train, y_train_oofp)
print('CV score:', cv_score)
# Export one feature column: OOF predictions for train, the per-fold mean
# for test.
feature_names = [feature_list_id]
features_train = y_train_oofp.reshape((-1, 1))
features_test = np.mean(y_test_oofp, axis=1).reshape((-1, 1))
project.save_features(features_train, features_test, feature_names, feature_list_id)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Config
Step2: Identifier for storing these features on disk and referring to them later.
Step3: Make subsequent NN runs reproducible.
Step4: Read data
Step5: Padded sequences of word indices for every question.
Step6: Word embedding properties.
Step8: Define models
Step9: Partition the data
Step10: Define hyperparameters
Step11: Best values picked by Bayesian optimization.
Step12: Create placeholders for out-of-fold predictions.
Step13: The path where the best weights of the current model will be saved.
Step14: Fit the folds and compute out-of-fold predictions
Step15: Save features
|
12,186
|
<ASSISTANT_TASK:>
Python Code:
import math
import random

# Monte Carlo estimate of pi: throw random darts into the unit square and
# count how many land inside the quarter circle of radius 1.
darths_thrown = 10000  # NOTE(review): presumably "darts_thrown"; name kept because a later cell reuses it
throws = [[random.random(), random.random()] for _ in range(darths_thrown)]

in_circle = sum(1 for x, y in throws if math.sqrt(x ** 2 + y ** 2) <= 1)
out_circle = len(throws) - in_circle

# Hit ratio approximates (quarter-circle area) / (square area) = pi / 4.
pi_estimate = in_circle/len(throws) * 4
print(f'estimate: {pi_estimate}')
print(f'actual: {math.pi}')
print(f'difference is {abs(pi_estimate-math.pi)}')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline

# Same experiment with pandas: random points in the 2x2 square [0,2)x[0,2)
# (darths_thrown comes from the previous cell).
df = pd.DataFrame(np.random.rand(darths_thrown,2)*2, columns=["x", "y"])
df.head()
def is_in_circle(row):
    """Return True when the dart at (row['x'], row['y']) lies within the
    circle of radius 1 centred at (1, 1)."""
    dx = row['x'] - 1
    dy = row['y'] - 1
    return math.sqrt(dx ** 2 + dy ** 2) <= 1
# Classify every dart row-wise using is_in_circle from the previous cell.
df['In_Circle'] = df.apply(is_in_circle, axis=1)
df.head()
# Visualize hits (blue) vs misses (red).
plt.scatter(df['x'], df['y'], c=[within and 'blue' or 'red' for within in df['In_Circle']], alpha=0.4,s= 25)
fig = plt.gcf()
fig.set_size_inches(9,9);
# (circle area) / (square area) = pi/4, so the hit fraction times 4
# approximates pi.
pi_estimate = df['In_Circle'].sum() / len(df) * 4
pi_estimate
pi_estimate - math.pi
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: simple way
Step2: now to jazz this up visually
Step3: the ratio of the circle's area to the square's area gives us pi/4
|
12,187
|
<ASSISTANT_TASK:>
Python Code:
# Map: apply a function to every element.
sequence = [1, 2, 3, 4, 5]

def square(x):
    """Return the square of x."""
    return x * x

result = [square(item) for item in sequence]
print(result)

# Filter: keep only the elements matching a predicate.
sequence = range(-10, 10)
greater_than_zero = [item for item in sequence if item > 0]
print(greater_than_zero)

# Reduce: fold the sequence into a single value (here, the product).
from functools import reduce

product = reduce(lambda acc, item: acc * item, [1, 2, 3, 4])
print(product)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Filter
Step2: Reduce
|
12,188
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd

# Boston housing data from the UCI repository (no header row in the file).
data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data'
# this url has no header info, so column names must be specified
colnames = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS',
            'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
# load dataset into pandas (whitespace-separated values)
df = pd.read_csv(data_url, header=None, sep='\s+', names=colnames)
df.head()
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# Our seven columns of interest
cols = ['MEDV', 'RM', 'CRIM', 'NOX', 'AGE', 'RAD', 'PTRATIO']
sns.set(font_scale=1.5)
# Pairwise scatter plots of the selected variables.
sns.pairplot(df[cols], size = 2.5)
import numpy as np
# Pearson correlation matrix of the same columns, shown as a heatmap.
cm = np.corrcoef(df[cols].values.T)
sns.heatmap(cm, annot=True, square=True, fmt='.2f', annot_kws={'size' : 12},
            yticklabels=cols, xticklabels=cols)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's explore some of these variables. It seems like there should definitely be a correlation between several of these variables. For example, I bet as home value increases (MEDV) so will the number of rooms (RM), but the crime rate (CRIM) will decrease. Other relationships might not be so intuitive.
Step2: A lot of information in this plot. However, the trends we supposed earlier appear to be valid. Other interesting trends are that nitric oxide concentration appears to be higher in areas with a larger share of older homes. Looks like all the crime happens where there are older homes and when the teacher/student ration is 1 to 20. Many of these relationships are pretty noisy. Let's check how these variables correlate with a heatmap.
|
12,189
|
<ASSISTANT_TASK:>
Python Code:
# Observed counts of each die face over n rolls; p is the fair-die
# probability of any one face (1/6).
y = np.asarray([20, 21, 17, 19, 17, 28])
k = len(y)
p = 1/k
n = y.sum()
n, p
# Bar plot of the observed face counts (the six looks over-represented).
sns.barplot(x=np.arange(1, k+1), y=y);
n, y
with pm.Model() as dice_model:
    # Uniform Dirichlet prior over the six face probabilities.
    a = np.ones(k)
    theta = pm.Dirichlet("theta", a=a)

    # Bug fix: the diagnostics cells below read dice_trace["six_bias"], but
    # this Deterministic had been commented out, so sampling produced no
    # such variable and those cells raised KeyError.  theta[k-1] is the
    # posterior probability of rolling a six; p = 1/k is the fair-die
    # reference value it is compared against.
    six_bias = pm.Deterministic("six_bias", theta[k-1] - p)

    # Multinomial likelihood over the observed face counts.
    results = pm.Multinomial("results", n=n, p=theta, observed=y)
dice_model
# Plate-notation diagram of the model (PyMC3 >= 3.5).
pm.model_to_graphviz(dice_model)
# Sanity-check the initial log-probabilities of the free variables.
dice_model.check_test_point()
# Draw 1000 samples from the joint posterior (default NUTS sampler).
with dice_model:
    dice_trace = pm.sample(draws=1000)
with dice_model:
    pm.traceplot(dice_trace, combined=True, lines={"theta": p})
# Posterior of each face probability vs the fair-die reference p = 1/6.
axes = pm.plot_posterior(dice_trace, varnames=["theta"], ref_val=np.round(p, 3))
for i, ax in enumerate(axes):
    ax.set_title(f"{i+1}")
# NOTE(review): these lines require a Deterministic named "six_bias" in
# dice_model; without it the trace lookup raises KeyError.
ax = pm.plot_posterior(dice_trace, varnames=["six_bias"], ref_val=[0])
ax.set_title(f"P(Theta[Six] - {p:.2%})");
# Posterior mass above 0 = probability the die is biased towards six.
six_bias_perc = len(dice_trace["six_bias"][dice_trace["six_bias"]>0])/len(dice_trace["six_bias"])
print(f'P(Six is biased) = {six_bias_perc:.2%}')
# 1988 pre-debate poll counts: responses for Bush, Dukakis, and other.
y = np.asarray([727, 583, 137])
n = y.sum()
k = len(y)
n, k
with pm.Model() as polling_model:
    # initializes the Dirichlet distribution with a uniform prior:
    a = np.ones(k)
    theta = pm.Dirichlet("theta", a=a)

    # Difference in support: positive values favour Bush (index 0) over
    # Dukakis (index 1).
    bush_dukakis_diff = pm.Deterministic("bush_dukakis_diff", theta[0] - theta[1])

    likelihood = pm.Multinomial("likelihood", n=n, p=theta, observed=y)
pm.model_to_graphviz(polling_model)
with polling_model:
    polling_trace = pm.sample(draws=1000)
with polling_model:
    pm.traceplot(polling_trace, combined=True)
# Posterior of the Bush-Dukakis support difference, with a Beta fit overlay.
_, ax = plt.subplots(1,1, figsize=(10, 6))
sns.distplot(polling_trace["bush_dukakis_diff"], bins=20, ax=ax, kde=False, fit=stats.beta)
ax.axvline(0, c='g', linestyle='dotted')
ax.set_title("% Difference Bush vs Dukakis")
ax.set_xlabel("% Difference");
# Posterior mass above 0 = probability more respondents preferred Bush.
bush_dukakis_diff_perc = len(polling_trace["bush_dukakis_diff"][polling_trace["bush_dukakis_diff"]>0])/len(polling_trace["bush_dukakis_diff"])
print(f'P(More Responses for Bush) = {bush_dukakis_diff_perc:.0%}')
# Poll counts before ("pre") and after ("post") the debate, per candidate.
data = pd.DataFrame([
    {"candidate": "bush", "pre": 294, "post": 288},
    {"candidate": "dukakis", "pre": 307, "post": 332},
    {"candidate": "other", "pre": 38, "post": 10}
], columns=["candidate", "pre", "post"])
data
# y: one row of counts per debate poll (2 x 3); n: total responses per poll;
# m: Bush+Dukakis responses per poll (excludes "other").
y = data[["pre", "post"]].T.values
y
n = y.sum(axis=1)
n
m = y[:, :2].sum(axis=1)
m
n_debates, n_candidates = y.shape
n_debates, n_candidates
with pm.Model() as polling_model_debates:
    # initializes the Dirichlet distribution with a uniform prior:
    shape = (n_debates, n_candidates)
    a = np.ones(shape)

    # This creates a separate Dirichlet distribution for each debate
    # where sum of probabilities across candidates = 100% for each debate
    theta = pm.Dirichlet("theta", a=a, shape=shape)

    # get the "Bush" theta for each debate, at index=0
    bush_pref = pm.Deterministic("bush_pref", theta[:, 0] * n / m)

    # to calculate probability that support for Bush shifted from debate 1 [0] to 2 [1]
    bush_shift = pm.Deterministic("bush_shift", bush_pref[1]-bush_pref[0])

    # because of the shapes of the inputs, this essentially creates 2 multinomials,
    # one for each debate
    responses = pm.Multinomial("responses", n=n, p=theta, observed=y)
# Print every unobserved variable with its test-value shape.
for v in polling_model_debates.unobserved_RVs:
    print(v, v.tag.test_value.shape)
pm.model_to_graphviz(polling_model_debates)
with polling_model_debates:
    polling_trace_debates = pm.sample(draws=3000, tune=1500)
with polling_model_debates:
    pm.traceplot(polling_trace_debates, combined=True)
# Posterior means of theta and bush_pref, indexed by poll.
s = ["pre", "post"]
candidates = data["candidate"].values
pd.DataFrame(polling_trace_debates["theta"].mean(axis=0), index=s, columns=candidates)
pd.DataFrame(polling_trace_debates["bush_pref"].mean(axis=0), index=s, columns=["bush_pref"])
# Top: pre- vs post-debate posterior of Bush preference.
# Bottom: posterior of the pre->post shift, with a zero reference line.
_, ax = plt.subplots(2,1, figsize=(10, 10))
sns.distplot(polling_trace_debates["bush_pref"][:,0], hist=False, ax=ax[0], label="Pre-Debate")
sns.distplot(polling_trace_debates["bush_pref"][:,1], hist=False, ax=ax[0], label="Post-Debate")
ax[0].set_title("% Responses for Bush vs Dukakis")
ax[0].set_xlabel("% Responses");
sns.distplot(polling_trace_debates["bush_shift"], hist=True, ax=ax[1], label="P(Bush Shift)")
ax[1].axvline(0, c='g', linestyle='dotted')
ax[1].set_title("% Shift Pre/Prior Debate")
ax[1].set_xlabel("% Shift");
# Posterior mass above 0 = probability support shifted towards Bush.
perc_shift = (len(polling_trace_debates["bush_shift"][polling_trace_debates["bush_shift"] > 0])
              /len(polling_trace_debates["bush_shift"])
             )
print(f'P(Shift Towards Bush) = {perc_shift:.1%}')
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Just looking at a simple bar plot, we suspect that we might not be dealing with a fair die!
Step2: Let's set up a simple model in PyMC3 that not only calculates the posterior probability for $theta$ (i.e. the probability for each side of the die), but also estimates the bias for throwing a $6$.
Step3: Starting with version 3.5, PyMC3 includes a handy function to plot models in plate notation
Step4: Let's draw 1,000 samples from the joint posterior using the default NUTS sampler
Step5: From the traceplot, we can already see that one of the $theta$ posteriors isn't in line with the rest
Step6: We'll plot the posterior distributions for each $theta$ and compare it our reference value $p$ to see if the 95% HPD (Highest Posterior Density) interval includes $p = 1/6$.
Step7: We can clearly see that the HPD for the posterior probability for rolling a $6$ barely includes what we'd expect from a fair die.
Step8: Lastly, we can calculate the probability that the die is biased on $6$ by calculating the density to the right of our reference line at $0$
Step9: Better get some new dice...!
Step10: We, again, set up a simple Dirichlet-Multinomial model and include a Deterministic variable that calculates the metric of interest - the difference in probability of respondents for Bush vs. Dukakis.
Step11: Looking at the % difference between respondents for Bush vs Dukakis, we can see that most of the density is greater than 0%, signifying a strong advantage for Bush in this poll.
Step12: Percentage of samples with bush_dukakis_diff > 0
Step13: Polling #2
Step14: Convert to 2x3 array
Step15: Number of respondents in each survey
Step16: Number of respondents for the 2 major candidates in each survey
Step17: For this model, we'll need to set up the priors slightly differently. Instead of 1 set of thetas, we need 2, one for each survey (pre/post debate).
Step18: Thus, we need to initialize a Dirichlet distribution prior with shape (2,3) and then refer to the relevant parameters by index where needed.
Step19: For models with multi-dimensional shapes, it's always good to check the shapes of the various parameters before sampling
Step20: The plate notation visual can also help with that
Step21: Let's sample with a slightly higher number of draws and tuning steps
Step22: We'll take a look at the means of the posteriors for theta, indicating the % of support for each candidate pre & post debate
Step23: Just from the means, we can see that the number of Bush supporters has likely decreased post debate from 48.8% to 46.3% (as a % of supporters of the 2 major candidates)
Step24: Let's compare the results visually, by plotting the posterior distributions of the pre/post debate values for % responses for Bush and the posterior for pre/post difference in Bush supporters
Step25: From the second plot, we can already see that a large portion of the posterior density is below 0, but let's be precise and actually calculate the probability that support shifted towards Bush after the debate
|
12,190
|
<ASSISTANT_TASK:>
Python Code:
firebase = pyrebase.initialize_app(config)
auth = firebase.auth()
uid = ""
password = ""
user = auth.sign_in_with_email_and_password(uid, password)
db = firebase.database() # reference to the database service
def firebaseRefresh():
    """Refresh the module-level Firebase auth session.

    Exchanges the stored refresh token for a new ID token and rebinds the
    global ``user`` dict so subsequent database calls remain authorized.
    Firebase ID tokens expire (typically after 1 hour), so this must be
    called periodically during long-running sessions.
    """
    global user
    # auth.refresh returns a fresh credentials dict (new idToken) keyed off
    # the long-lived refreshToken.
    user = auth.refresh(user['refreshToken'])
import unidecode
import numpy as np
import matplotlib.pyplot as plt
def plot_polarity_subjectivity(listed_name_on_database):
    """Scatter-plot |polarity| vs. subjectivity for one news source.

    Fetches every scraped article for ``listed_name_on_database`` from the
    Firebase realtime database under ``articles/<source>``, plots them on
    the current matplotlib axes (marker size shrinks with article count so
    dense sources stay readable), and returns the raw values.

    Parameters
    ----------
    listed_name_on_database : str
        Source key as stored in the database, e.g. ``"mothershipsg"``.

    Returns
    -------
    numpy.ndarray of shape (n_articles, 2)
        Column 0 is the absolute polarity, column 1 the subjectivity.
    """
    pol = []
    sub = []
    articles = db.child("articles/" + listed_name_on_database).get().val()
    # Iterate the mapping's values directly instead of rebuilding
    # list(articles.items()) on every pass, which was quadratic in the
    # number of articles.
    for data in articles.values():
        pol.append(abs(float(data["polarity"])))
        sub.append(float(data["subjectivity"]))
    plt.scatter(pol, sub, [80 / np.sqrt(len(pol))] * len(sub),
                alpha=0.7, label=listed_name_on_database)
    return np.column_stack((pol, sub))
plt.clf()
plt.figure(figsize=(12, 10))
plt.title("Scatter Plot (Articles)")
websites = ["wwwchannelnewsasiacom","wwwstraitstimescom","wwwtnpsg","wwwtodayonlinecom",
"sgnewsyahoocom","sgfinanceyahoocom","stompstraitstimescom","mothershipsg",
"thehearttruthscom","wwwtremerituscom","yawningbreadwordpresscom",
"wwwtheonlinecitizencom","wwwallsingaporestuffcom","alvinologycom","berthahensonwordpresscom"]
centroid ={}
for website in websites:
data = plot_polarity_subjectivity(website)
time.sleep(0.2)
centroid[website] = np.mean(data, axis=0)
plt.legend(loc=4)
plt.xlabel("Polarity")
plt.ylabel("Subjectivity")
plt.show()
plt.clf()
plt.figure(figsize=(12, 10))
plt.title("Centroids (Sources)")
mothershipsg = centroid["wwwchannelnewsasiacom"]
plt.scatter(mothershipsg[0],mothershipsg[1],label="wwwchannelnewsasiacom")
#plt.annotate("wwwchannelnewsasiacom",(mothershipsg[0],mothershipsg[1]))
mothershipsg = centroid["wwwstraitstimescom"]
plt.scatter(mothershipsg[0],mothershipsg[1],label="wwwstraitstimescom")
#plt.annotate("wwwstraitstimescom",(mothershipsg[0],mothershipsg[1]))
mothershipsg = centroid["wwwtnpsg"]
plt.scatter(mothershipsg[0],mothershipsg[1],label="wwwtnpsg")
#plt.annotate("wwwtnpsg",(mothershipsg[0],mothershipsg[1]))
mothershipsg = centroid["wwwtodayonlinecom"]
plt.scatter(mothershipsg[0],mothershipsg[1],label="wwwtodayonlinecom")
#plt.annotate("wwwtodayonlinecom",(mothershipsg[0],mothershipsg[1]))
mothershipsg = centroid["mothershipsg"]
plt.scatter(mothershipsg[0],mothershipsg[1],label="mothership")
#plt.annotate("mothership",(mothershipsg[0],mothershipsg[1]))
mothershipsg = centroid["sgnewsyahoocom"]
plt.scatter(mothershipsg[0],mothershipsg[1],label="sgnewsyahoocom")
#plt.annotate("sgnewsyahoocom",(mothershipsg[0],mothershipsg[1]))
mothershipsg = centroid["sgfinanceyahoocom"]
plt.scatter(mothershipsg[0],mothershipsg[1],label="sgfinanceyahoocom")
#plt.annotate("sgfinanceyahoocom",(mothershipsg[0],mothershipsg[1]))
mothershipsg = centroid["stompstraitstimescom"]
plt.scatter(mothershipsg[0],mothershipsg[1],label="stompstraitstimescom")
#plt.annotate("stompstraitstimescom",(mothershipsg[0],mothershipsg[1]))
mothershipsg = centroid["alvinologycom"]
plt.scatter(mothershipsg[0],mothershipsg[1],label="alvinologycom")
#plt.annotate("alvinologycom",(mothershipsg[0],mothershipsg[1]))
mothershipsg = centroid["wwwallsingaporestuffcom"]
plt.scatter(mothershipsg[0],mothershipsg[1],label="wwwallsingaporestuffcom")
#plt.annotate("wwwallsingaporestuffcom",(mothershipsg[0],mothershipsg[1]))
mothershipsg = centroid["wwwtheonlinecitizencom"]
plt.scatter(mothershipsg[0],mothershipsg[1],label="wwwtheonlinecitizencom")
#plt.annotate("wwwtheonlinecitizencom",(mothershipsg[0],mothershipsg[1]))
mothershipsg = centroid["wwwtremerituscom"]
plt.scatter(mothershipsg[0],mothershipsg[1],label="wwwtremerituscom")
#plt.annotate("wwwtremerituscom",(mothershipsg[0],mothershipsg[1]))
mothershipsg = centroid["thehearttruthscom"]
plt.scatter(mothershipsg[0],mothershipsg[1],label="thehearttruthscom")
#plt.annotate("thehearttruthscom",(mothershipsg[0],mothershipsg[1]))
mothershipsg = centroid["berthahensonwordpresscom"]
plt.scatter(mothershipsg[0],mothershipsg[1],label="berthahensonwordpresscom")
#plt.annotate("berthahensonwordpresscom",(mothershipsg[0],mothershipsg[1]))
mothershipsg = centroid["yawningbreadwordpresscom"]
plt.scatter(mothershipsg[0],mothershipsg[1],label="yawningbreadwordpresscom")
#plt.annotate("yawningbreadwordpresscom",(mothershipsg[0],mothershipsg[1]))
plt.xlabel("Polarity")
plt.ylabel("Subjectivity")
plt.legend(loc=4)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Analyse already evaluated components
|
12,191
|
<ASSISTANT_TASK:>
Python Code:
%%capture
# Installing the required libraries:
!pip install matplotlib pandas scikit-learn tensorflow pyarrow tqdm
!pip install google-cloud-bigquery google-cloud-bigquery-storage
!pip install flake8 pycodestyle pycodestyle_magic geopandas
# Python Builtin Libraries
# Third Party Libraries
import numpy as np
import geopandas
from google.cloud import bigquery
import matplotlib.pyplot as plt
# Configurations
%matplotlib inline
try:
from google.colab import auth
print("Authenticating in Colab")
auth.authenticate_user()
print("Authenticated")
except: # noqa
print("This notebook is not running on Colab.")
print("Please make sure to follow the authentication steps.")
# ENTER THE GCP PROJECT HERE
gcp_project = "YOUR-GCP-PROJECT"
print(f"gcp_project is set to {gcp_project}")
# Select Coutry:
country_name = 'Australia'
country_code = 'AU'
TOP_LEFT = (-11, 110)
BOTTOM_RIGHT = (-40, 157)
query = f
SELECT kingdom, decimallatitude, decimallongitude
FROM
`bigquery-public-data.gbif.occurrences` TABLESAMPLE SYSTEM (1 PERCENT)
WHERE
decimallatitude IS NOT NULL
AND decimallongitude IS NOT NULL
AND decimallatitude BETWEEN {BOTTOM_RIGHT[0]} AND {TOP_LEFT[0]}
AND decimallongitude BETWEEN {TOP_LEFT[1]} AND {BOTTOM_RIGHT[1]}
AND countrycode = '{country_code}';
bqclient = bigquery.Client(project=gcp_project)
dataframe = bqclient.query(query).result().to_dataframe()
print(dataframe.shape)
dataframe.head()
# Using geopandas to extract the map of Australia:
world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))
country = world.loc[world['name'] == country_name]
# Using a subset of the entire dataframe to create the map:
subdataframe = dataframe.sample(n=10000)
labels, indices = np.unique(subdataframe['kingdom'].values, return_inverse=True)
ax = country['geometry'].plot(color='white', edgecolor='black', figsize=(18, 12))
scatter = ax.scatter(x=subdataframe['decimallongitude'],
y=subdataframe['decimallatitude'],
s=indices / 2.0 + 1.0,
c=indices,
cmap='Accent')
ax.legend(scatter.legend_elements()[0], labels, title='Kingdom', loc=1)
plt.show();
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Authentication
Step2: Configurations
Step3: Also, let's select the country. In this notebook, we have selected Australia. For a more accurate mapping, we also select the region that contains Australia. This helps us exclude the entires with the country code AU, but with latitudes and longitudes that are not in this region.
Step5: Data Preparation
Step6: Check the Dataframe
Step7: View the Map
|
12,192
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install -U tensorflow-addons
import tensorflow as tf
import tensorflow_addons as tfa
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
# Reshape into "channels last" setup.
tf.keras.layers.Reshape((28,28,1), input_shape=(28,28)),
tf.keras.layers.Conv2D(filters=10, kernel_size=(3,3),data_format="channels_last"),
# Groupnorm Layer
tfa.layers.GroupNormalization(groups=5, axis=3),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_test, y_test)
model = tf.keras.models.Sequential([
# Reshape into "channels last" setup.
tf.keras.layers.Reshape((28,28,1), input_shape=(28,28)),
tf.keras.layers.Conv2D(filters=10, kernel_size=(3,3),data_format="channels_last"),
# LayerNorm Layer
tfa.layers.InstanceNormalization(axis=3,
center=True,
scale=True,
beta_initializer="random_uniform",
gamma_initializer="random_uniform"),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_test, y_test)
model = tf.keras.models.Sequential([
# Reshape into "channels last" setup.
tf.keras.layers.Reshape((28,28,1), input_shape=(28,28)),
tf.keras.layers.Conv2D(filters=10, kernel_size=(3,3),data_format="channels_last"),
# LayerNorm Layer
tf.keras.layers.LayerNormalization(axis=3 , center=True , scale=True),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_test, y_test)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 正規化
Step2: データセットを準備する
Step3: グループ正規化のチュートリアル
Step4: インスタンス正規化のチュートリアル
Step5: レイヤー正規化のチュートリアル
|
12,193
|
<ASSISTANT_TASK:>
Python Code:
import ebisu
defaultModel = (4., 4., 24.) # alpha, beta, and half-life in hours
from datetime import datetime, timedelta
date0 = datetime(2017, 4, 19, 22, 0, 0)
database = [dict(factID=1, model=defaultModel, lastTest=date0),
dict(factID=2, model=defaultModel, lastTest=date0 + timedelta(hours=11))]
oneHour = timedelta(hours=1)
now = date0 + timedelta(hours=11.1)
print("On {},".format(now))
for row in database:
recall = ebisu.predictRecall(row['model'],
(now - row['lastTest']) / oneHour,
exact=True)
print("Fact #{} probability of recall: {:0.1f}%".format(row['factID'], recall * 100))
now = date0 + timedelta(hours=26.5)
print("On {},".format(now))
for row in database:
recall = ebisu.predictRecall(row['model'],
(now - row['lastTest']) / oneHour,
exact=True)
print("Fact #{} probability of recall: {:0.1f}%".format(row['factID'], recall * 100))
row = database[0] # review FIRST question
result = 1 # success!
total = 1 # number of times this flashcard was shown (fixed)
newModel = ebisu.updateRecall(row['model'],
result,
total,
(now - row['lastTest']) / oneHour)
print('New model for fact #1:', newModel)
row['model'] = newModel
row['lastTest'] = now
row = database[1] # review SECOND question
result = 0
newModel = ebisu.updateRecall(row['model'],
result,
total,
(now - row['lastTest']) / oneHour)
print('New model for fact #2:', newModel)
row['model'] = newModel
row['lastTest'] = now
for row in database:
meanHalflife = ebisu.modelToPercentileDecay(row['model'])
print("Fact #{} has half-life of ≈{:0.1f} hours".format(row['factID'], meanHalflife))
database[0]['model'] = ebisu.rescaleHalflife(database[0]['model'], 2.0)
for row in database:
meanHalflife = ebisu.modelToPercentileDecay(row['model'])
print("Fact #{} has half-life of ≈{:0.1f} hours".format(row['factID'], meanHalflife))
# As above: a bit slow to get exact probabilities
%timeit ebisu.predictRecall(database[0]['model'], 100., exact=True)
# A bit faster alternative: get log-probabilities (this is the defalt)
%timeit ebisu.predictRecall(database[0]['model'], 100., exact=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Ebisu—this is what we’re here to learn about!
Step2: After learning the second fact, at 0900, what does Ebisu expect each fact’s probability of recall to be, for each of the facts?
Step3: Both facts are expected to still be firmly in memory—especially the second one since it was just learned! So the quiz app doesn’t ask the student to review anything yet—though if she wanted to, the quiz app would pick the fact most in danger of being forgotten.
Step4: Suppose the quiz app has been configured to quiz the student if the expected recall probability drops below 50%—which it did for fact 1! The app shows the flashcard once, analyzes the user's response, and sets the result of the quiz to 1 if passed and 0 if failed. It calls Ebisu to update the model, giving it this result as well as the total number of times it showed this flashcard (one time—Ebisu can support more advanced cases where an app reviews the same flashcard multiple times in a single review session, but let's keep it simple for now).
Step5: Observe how ebisu.updateRecall takes
Step6: The new parameters for this fact differ from the previous one because (1) the student failed this quiz while she passed the other, (2) different amounts of time had elapsed since the respective facts were last seen.
Step7: Note how the half-life (the time between quizzes for expected recall probability to drop to 50%) for the first question increased from 24 to 29 hours after the student got it right, while it decreased to 20 hours for the second when she got it wrong. Ebisu has incorporated the fact that the second fact had been learned not that long ago and should have been strong, and uses the surprising quiz result to strongly adjust its belief about its recall probability.
Step8: If the user was worried that this flashcard was shown too infrequently, and wanted to see it three times as often, you might pass in 1/3 as the second argument.
|
12,194
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import arduino_helpers.hardware.teensy as teensy
from teensy_minimal_rpc.adc_sampler import AdcSampler, analog_reads
from teensy_minimal_rpc import SerialProxy
import teensy_minimal_rpc.ADC as ADC
# Disconnect from existing proxy (if available)
try:
del proxy
except NameError:
pass
proxy = SerialProxy()
proxy.pin_mode(teensy.LED_BUILTIN, 1)
proxy.digital_write(teensy.LED_BUILTIN, 1)
proxy.free_all()
(sampling_rate_hz, adc_settings,
df_volts_i, df_adc_results) = analog_reads(proxy, 'A0', 20, resolution=12)
fig, axes = plt.subplots(1, 2, figsize=(10, 3))
df_volts_i.plot(ylim=0, ax=axes[0], style='x-')
axes[0].set_ylabel('volts')
df_adc_results.plot(style='x-', ax=axes[1],
ylim=(-5, 1.1 * (1 << adc_settings['Bit-width'])))
axes[1].set_ylabel('raw_adc')
for ax in axes:
ax.set_xlabel('sample_i')
(sampling_rate_hz, adc_settings,
df_volts_i, df_adc_results) = analog_reads(proxy, 'A0', 20, resolution=12)
fig, axes = plt.subplots(1, 2, figsize=(10, 3))
df_volts_i.plot(ylim=0, ax=axes[0], style='x-')
axes[0].set_ylabel('volts')
df_adc_results.plot(style='x-', ax=axes[1],
ylim=(-5, 1.1 * (1 << adc_settings['Bit-width'])))
axes[1].set_ylabel('raw_adc')
for ax in axes:
ax.set_xlabel('sample_i')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example
Step2: Example
|
12,195
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install tensorflow==2.7.0
!pip install tensorflow-quantum
# Update package resources to account for version changes.
import importlib, pkg_resources
importlib.reload(pkg_resources)
import tensorflow as tf
import tensorflow_quantum as tfq
import cirq
import sympy
import numpy as np
# visualization tools
%matplotlib inline
import matplotlib.pyplot as plt
from cirq.contrib.svg import SVGCircuit
np.random.seed(1234)
def generate_random_qnn(qubits, symbol, depth):
    """Generate random QNN's with the same structure from McClean et al.

    Builds a depth-``depth`` random circuit on ``qubits``: a fixed
    Ry(pi/4) state-prep layer, then for each layer a random single-qubit
    rotation (Rx/Ry/Rz chosen with probability 1/3 each) on every qubit
    followed by a CZ ladder over neighbouring qubits. Only the very first
    rotation (qubit 0, layer 0) carries the sympy ``symbol``; every other
    rotation angle is drawn uniformly from [0, 2*pi).
    """
    circuit = cirq.Circuit()
    for qubit in qubits:
        circuit += cirq.ry(np.pi / 4.0)(qubit)
    for d in range(depth):
        # Add a series of single qubit rotations.
        for i, qubit in enumerate(qubits):
            random_n = np.random.uniform()
            # Parameterize only the first gate of the first layer; all
            # other angles are fixed random draws.
            random_rot = np.random.uniform(
            ) * 2.0 * np.pi if i != 0 or d != 0 else symbol
            if random_n > 2. / 3.:
                # Add a Z.
                circuit += cirq.rz(random_rot)(qubit)
            elif random_n > 1. / 3.:
                # Add a Y.
                circuit += cirq.ry(random_rot)(qubit)
            else:
                # Add a X.
                circuit += cirq.rx(random_rot)(qubit)
        # Add CZ ladder (entangles neighbouring qubits each layer).
        for src, dest in zip(qubits, qubits[1:]):
            circuit += cirq.CZ(src, dest)
    return circuit
generate_random_qnn(cirq.GridQubit.rect(1, 3), sympy.Symbol('theta'), 2)
def process_batch(circuits, symbol, op):
    """Compute the spread of gradients of <op> w.r.t. `symbol` over a batch.

    Each circuit is evaluated at an independent uniform-random value of
    ``symbol``; the gradient of the expectation of ``op`` is taken with a
    TensorFlow GradientTape and the standard deviation of those gradients
    across the batch is returned (the "variance" proxy used in the barren
    plateau study).

    Note that this method sets up a new compute graph every time it is
    called so it isn't as performant as possible.
    """
    # Setup a simple layer to batch compute the expectation gradients.
    expectation = tfq.layers.Expectation()

    # Prep the inputs as tensors. Size the random symbol values from the
    # batch itself instead of relying on the module-level `n_circuits`
    # global, which silently broke if the batch size differed.
    circuit_tensor = tfq.convert_to_tensor(circuits)
    values_tensor = tf.convert_to_tensor(
        np.random.uniform(0, 2 * np.pi, (len(circuits), 1)).astype(np.float32))

    # Use TensorFlow GradientTape to track gradients.
    with tf.GradientTape() as g:
        g.watch(values_tensor)
        forward = expectation(circuit_tensor,
                              operators=op,
                              symbol_names=[symbol],
                              symbol_values=values_tensor)

    # Return spread of gradients across all circuits.
    grads = g.gradient(forward, values_tensor)
    grad_var = tf.math.reduce_std(grads, axis=0)
    return grad_var.numpy()[0]
n_qubits = [2 * i for i in range(2, 7)
] # Ranges studied in paper are between 2 and 24.
depth = 50 # Ranges studied in paper are between 50 and 500.
n_circuits = 200
theta_var = []
for n in n_qubits:
# Generate the random circuits and observable for the given n.
qubits = cirq.GridQubit.rect(1, n)
symbol = sympy.Symbol('theta')
circuits = [
generate_random_qnn(qubits, symbol, depth) for _ in range(n_circuits)
]
op = cirq.Z(qubits[0]) * cirq.Z(qubits[1])
theta_var.append(process_batch(circuits, symbol, op))
plt.semilogy(n_qubits, theta_var)
plt.title('Gradient Variance in QNNs')
plt.xlabel('n_qubits')
plt.xticks(n_qubits)
plt.ylabel('$\\partial \\theta$ variance')
plt.show()
def generate_identity_qnn(qubits, symbol, block_depth, total_depth):
    """Generate random QNN's with the same structure from Grant et al.

    Builds ``total_depth`` blocks of the form U * U^dagger so the whole
    circuit composes to (approximately) the identity at initialization,
    which is the identity-block heuristic for avoiding barren plateaus.
    Only the first block's U carries the sympy ``symbol``; its dagger is
    resolved at a random fixed angle, and all later blocks use fixed
    random parameters throughout.
    """
    circuit = cirq.Circuit()

    # Generate initial block with symbol.
    prep_and_U = generate_random_qnn(qubits, symbol, block_depth)
    circuit += prep_and_U

    # Generate dagger of initial block without symbol.
    # prep_and_U[1:] strips the Ry(pi/4) state-prep moment before inverting.
    U_dagger = (prep_and_U[1:])**-1
    circuit += cirq.resolve_parameters(
        U_dagger, param_resolver={symbol: np.random.uniform() * 2 * np.pi})

    for d in range(total_depth - 1):
        # Get a random QNN.
        prep_and_U_circuit = generate_random_qnn(
            qubits,
            np.random.uniform() * 2 * np.pi, block_depth)

        # Remove the state-prep component
        U_circuit = prep_and_U_circuit[1:]

        # Add U
        circuit += U_circuit

        # Add U^dagger
        circuit += U_circuit**-1
    return circuit
generate_identity_qnn(cirq.GridQubit.rect(1, 3), sympy.Symbol('theta'), 2, 2)
block_depth = 10
total_depth = 5
heuristic_theta_var = []
for n in n_qubits:
# Generate the identity block circuits and observable for the given n.
qubits = cirq.GridQubit.rect(1, n)
symbol = sympy.Symbol('theta')
circuits = [
generate_identity_qnn(qubits, symbol, block_depth, total_depth)
for _ in range(n_circuits)
]
op = cirq.Z(qubits[0]) * cirq.Z(qubits[1])
heuristic_theta_var.append(process_batch(circuits, symbol, op))
plt.semilogy(n_qubits, theta_var)
plt.semilogy(n_qubits, heuristic_theta_var)
plt.title('Heuristic vs. Random')
plt.xlabel('n_qubits')
plt.xticks(n_qubits)
plt.ylabel('$\\partial \\theta$ variance')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Barren plateaus
Step2: Install TensorFlow Quantum
Step3: Now import TensorFlow and the module dependencies
Step5: 1. Summary
Step7: The authors investigate the gradient of a single parameter $\theta_{1,1}$. Let's follow along by placing a sympy.Symbol in the circuit where $\theta_{1,1}$ would be. Since the authors do not analyze the statistics for any other symbols in the circuit, let's replace them with random values now instead of later.
Step8: 3.1 Set up and run
Step10: This plot shows that for quantum machine learning problems, you can't simply guess a random QNN ansatz and hope for the best. Some structure must be present in the model circuit in order for gradients to vary to the point where learning can happen.
Step11: 4.2 Comparison
|
12,196
|
<ASSISTANT_TASK:>
Python Code:
# Load Biospytial modules and etc.
%matplotlib inline
import sys
sys.path.append('/apps')
sys.path.append('..')
#sys.path.append('../../spystats')
import django
django.setup()
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
## Use the ggplot style
plt.style.use('ggplot')
from external_plugins.spystats.spystats import tools as sptools
import scipy
#c_delta = lambda d : np.hstack(((4 + d),-1,np.zeros(128 - 3),-1))
#c_delta = lambda d : np.hstack(((0),-1,np.zeros(128 - 3),-1))
#C = scipy.linalg.circulant(c_delta(0.1))
def createToroidalCircularBase(d=0.1, N=128):
    """Create the circulant base matrix of a GMRF precision on an N x N torus.

    Follows the block-circulant construction of Rue & Held (2005): the base
    carries 4 + d at entry (0, 0) and -1 at the four toroidal
    nearest-neighbour offsets (0, 1), (0, N-1), (1, 0) and (N-1, 0); every
    other entry is zero. (The docstring quoting was lost in the original,
    leaving a bare expression; restored here.)

    Parameters
    ----------
    d : float
        Small positive jitter added to the diagonal so the precision
        operator is strictly positive definite (invertible).
    N : int
        Side length of the square toroidal grid; must be >= 3.

    Returns
    -------
    numpy.ndarray of shape (N, N), dtype float64.
    """
    c = np.zeros((N, N))
    c[0, 0] = 4 + d
    # Four nearest neighbours on the torus, wrapped in both directions.
    c[0, 1] = c[0, N - 1] = -1.0
    c[1, 0] = c[N - 1, 0] = -1.0
    return c
%%time
## Create circular base
d = 0.00001
N = 100
c = createToroidalCircularBase(d=d,N=N)
## Simulate random noise (Normal distributed)
from scipy.fftpack import ifft2, fft2
zr = scipy.stats.norm.rvs(size=(c.size,2),loc=0,scale=1,random_state=1234)
zr.dtype=np.complex_
#plt.hist(zr.real)
#Lm = scipy.sqrt(C.shape[0]*C.shape[0]) * fft2(C)
Lm = fft2(c)
v = 1.0/ len(c) * fft2((Lm ** -0.5)* zr.reshape(Lm.shape))
x = v.real
plt.imshow(x,interpolation='None')
## Calculate inverse of c
C_inv = ifft2 ((fft2(c) ** -1))
plt.plot(C_inv[:,0])
%%time
vm = sptools.ExponentialVariogram(sill=0.3,range_a=0.4)
xx,yy,z = sptools.simulatedGaussianFieldAsPcolorMesh(vm,grid_sizex=100,grid_sizey=100,random_seed=1234)
plt.imshow(z)
346 / 0.151
c
plt.figure(figsize=(10, 5))
plt.subplot(1,2,1)
plt.imshow(z)
plt.subplot(1,2,2)
plt.imshow(x,interpolation='None')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Algorithm to simulate GMRF with block-circulant Matrix.
Step3: For benchmarking we will perfom a GF simulation.
Step4: comparison
|
12,197
|
<ASSISTANT_TASK:>
Python Code:
from nltk.corpus import gutenberg
gutenberg.fileids()
text = ''
for txt in gutenberg.fileids():
if 'chesterton' in txt:
text += gutenberg.raw(txt).lower()
chars = sorted(list(set(text)))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
'corpus length: {} total chars: {}'.format(len(text), len(chars))
print(text[:100])
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i: i+maxlen])
next_chars.append(text[i + maxlen])
print("sequences: ", len(sentences))
print(sentences[0])
print(sentences[1])
print(next_chars[0])
import numpy as np
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.optimizers import RMSprop
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars)))
model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
model.summary()
epochs = 2
batch_size = 128
model.fit(X, y, batch_size=batch_size, epochs=epochs)
import random
def sample(preds, temperature=1.0):
    """Draw one class index from ``preds`` reshaped by ``temperature``.

    Lower temperatures sharpen the distribution toward the most probable
    class; temperature 1.0 samples from ``preds`` unchanged (up to
    renormalisation). Returns the sampled index as an integer.
    """
    logits = np.log(np.asarray(preds).astype('float64')) / temperature
    weights = np.exp(logits)
    distribution = weights / np.sum(weights)
    # One categorical draw; the argmax of the one-hot multinomial result
    # is the sampled index.
    draw = np.random.multinomial(1, distribution, 1)
    return np.argmax(draw)
import sys
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0]:
print()
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index: start_index + maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for i in range(400):
x = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x[0, t, char_indices[char]] = 1.
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create the Training set
Step2: One-hot encode
Step3: Create the Model
Step4: Train the Model
Step5: Generate new sequence
|
12,198
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import seaborn as sns
import warnings
import numpy as np
import statsmodels.formula.api as smf
import datetime as dt
from pandas_datareader import wb
wb.search('gdp.*capita.*const')[['id','name']]
wb.search('Urban Population')[['id','name']].tail()
indicators = ['NY.GDP.PCAP.KD', 'SP.URB.TOTL.IN.ZS']
dat = wb.download(indicator=indicators, country = 'all', start=1980, end=1980)
dat.columns
dat.columns = [['gdppc', 'urbpct']]
dat['lngpc'] = np.log(dat.gdppc)
g = sns.jointplot("lngpc", "urbpct", data=dat, kind="reg",
color ="b", size=7)
mod = smf.ols("urbpct ~ lngpc", dat).fit()
print(mod.summary())
mod.resid.sort_values(ascending=False).head(15)
countries = ['CHL', 'USA', 'ARG']
start, end = dt.datetime(1950, 1, 1), dt.datetime(2016, 1, 1)
dat = wb.download(
indicator=indicators,
country = countries,
start=start,
end=end).dropna()
dat.columns
dat.columns = [['gdppc', 'urb']]
dat.head()
dat.xs('Chile',level='country').head(3)
dat.xs('2007', level='year').head()
dat.loc['Chile']['gdppc'].head()
datyr = dat.unstack(level='country')
datyr.head()
datyr.xs('1962')['gdppc']
datyr['urb'].plot(kind='line');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data on urban bias
Step2: We will use NY.GDP.PCAP.KD for GDP per capita (constant 2010 US$).
Step3: Let's use the ones we like but use a python dictionary to rename these to shorter variable names when we load the data into a python dataframe
Step4: Since we are interested in exploring the extent of 'urban bias' in some countries, let's load data from 1980 which was toward the end of the era of import-substituting industrialization when urban-biased policies were claimed to be most pronounced.
Step5: Let's rename the columns to something shorter and then plot and regress log gdp per capita against urban extent we get a pretty tight fit
Step6: That is a pretty tight fit
Step7: Now let's just look at a list of countries sorted by the size of their residuals in this regression line. Countries with the largest residuals had urbanization in excess of what the model predicts from their 1980 level of income per capita.
Step8: This is of course only suggestive but (leaving aside the island states like Singapore and Hong-Kong) the list is dominated by southern cone countries such as Chile, Argentina and Peru which in addition to having legacies of heavy political centralization also pursued ISI policies in the 60s and 70s that many would associate with urban biased policies.
Step9: Lets use shorter column names
Step10: Notice this has a two-level multi-index. The outer level is named 'country' and the inner level is 'year'
Step11: (Note we could have also used dat.loc['Chile'].head())
Step12: Note that what was returned was a dataframe with the data just for our selected country. We can in turn further specify what column(s) from this we want
Step13: Unstack data
Step14: We can now easily index a 2015 cross-section of GDP per capita like so
Step15: We'd get same result from datyr.loc['2015']['gdppc']
|
12,199
|
<ASSISTANT_TASK:>
Python Code:
def dedupe_pings(rdd):
    """Drop pings with no clientId, then keep a single ping per documentId.

    Pings whose "meta/clientId" is None are discarded.  Remaining pings are
    keyed by "meta/documentId" and duplicates are collapsed by reduceByKey,
    retaining the first ping seen for each documentId.
    """
    keyed = rdd.filter(lambda ping: ping["meta/clientId"] is not None) \
               .map(lambda ping: (ping["meta/documentId"], ping))
    deduped = keyed.reduceByKey(lambda first, second: first)
    return deduped.map(lambda doc_and_ping: doc_and_ping[1])
def transform(ping):
    """Flatten one raw "core" ping dict into an ordered row list.

    The element order must match the ``schema`` used when writing the
    parquet output: [clientId, submissionDate, creationDate, profileDate,
    geoCountry, locale, os, osVersion, buildId, appVersion, device, arch,
    defaultSearch, distributionId, experiments (JSON string),
    defaultNewTabExperience, defaultMailClient, searches, durations,
    sessions].

    Malformed profile day numbers are mapped to a None profileDate rather
    than aborting the job.
    """
    # Should not be None since we filter those out upstream (dedupe_pings).
    clientId = ping["meta/clientId"]

    # Added via the ingestion process so should not be None.
    submissionDate = dt.datetime.strptime(ping["meta/submissionDate"], "%Y%m%d")
    geoCountry = ping["meta/geoCountry"]

    profileDate = None
    profileDaynum = ping["profileDate"]
    if profileDaynum is not None:
        try:
            # profileDate is days since the Unix epoch.  Bad data can push
            # the day number out of datetime's range (or make it
            # non-numeric), so map those failures to None.  Catch the
            # specific conversion errors instead of a bare except so real
            # bugs (and KeyboardInterrupt etc.) are not silently swallowed.
            profileDate = dt.datetime(1970, 1, 1) + dt.timedelta(int(profileDaynum))
        except (ValueError, TypeError, OverflowError):
            profileDate = None

    # Creation date should already be in ISO format.
    creationDate = ping["creationDate"]
    if creationDate is not None:
        # This is only accurate because we know the creation date is always
        # in 'Z' (zulu) time.
        creationDate = dt.datetime.strptime(creationDate, "%Y-%m-%dT%H:%M:%S.%fZ")

    appVersion = ping["meta/appVersion"]
    buildId = ping["meta/appBuildId"]
    locale = ping["locale"]
    # Renamed from `os` so the local does not shadow the os module.
    osName = ping["os"]
    osVersion = ping["osversion"]
    device = ping["device"]
    arch = ping["arch"]
    defaultSearch = ping["defaultSearch"]
    distributionId = ping["distributionId"]

    experiments = ping["experiments"]
    if experiments is None:
        experiments = []

    # bug 1315028
    defaultNewTabExperience = ping["defaultNewTabExperience"]
    defaultMailClient = ping["defaultMailClient"]

    # bug 1307419
    searches = ping["searches"]
    durations = ping["durations"]
    sessions = ping["sessions"]

    return [clientId, submissionDate, creationDate, profileDate, geoCountry,
            locale, osName, osVersion, buildId, appVersion, device, arch,
            defaultSearch, distributionId, json.dumps(experiments),
            defaultNewTabExperience, defaultMailClient, searches,
            durations, sessions]
# Batch driver: for each day in the window and each release channel, pull
# "core" Fennec pings, dedupe and flatten them, and write one parquet
# dataset per channel/day partition to S3.
#
# NOTE: the dump this was recovered from had its indentation stripped;
# structure is restored here.  print is used in its single-argument
# call form so the script parses under both Python 2 and Python 3
# (output is identical in Python 2).
channels = ["nightly", "aurora", "beta", "release"]

# 'date' (YYYYMMDD) in the environment selects a single backfill day;
# otherwise process yesterday only.
batch_date = os.environ.get('date')
if batch_date:
    start = end = dt.datetime.strptime(batch_date, '%Y%m%d')
else:
    start = dt.datetime.now() - dt.timedelta(1)
    end = dt.datetime.now() - dt.timedelta(1)

day = start
while day <= end:
    for channel in channels:
        print("\nchannel: " + channel + ", date: " + day.strftime("%Y%m%d"))

        kwargs = dict(
            doc_type="core",
            submission_date=(day.strftime("%Y%m%d"), day.strftime("%Y%m%d")),
            channel=channel,
            app="Fennec",
            fraction=1
        )

        # Grab all available source_version pings
        pings = get_pings(sc, source_version="*", **kwargs)

        subset = get_pings_properties(pings, ["meta/clientId",
                                              "meta/documentId",
                                              "meta/submissionDate",
                                              "meta/appVersion",
                                              "meta/appBuildId",
                                              "meta/geoCountry",
                                              "locale",
                                              "os",
                                              "osversion",
                                              "device",
                                              "arch",
                                              "profileDate",
                                              "creationDate",
                                              "defaultSearch",
                                              "distributionId",
                                              "experiments",
                                              "defaultNewTabExperience",
                                              "defaultMailClient",
                                              "searches",
                                              "durations",
                                              "sessions"])

        subset = dedupe_pings(subset)
        print("\nDe-duped pings:" + str(subset.count()))
        print(subset.first())

        transformed = subset.map(transform)
        print("\nTransformed pings:" + str(transformed.count()))
        print(transformed.first())

        # One partition per channel/day under the versioned prefix.
        s3_output = "s3n://net-mozaws-prod-us-west-2-pipeline-analysis/mobile/mobile_clients"
        s3_output += "/v1/channel=" + channel + "/submission=" + day.strftime("%Y%m%d")

        # Column order here must match the row order produced by transform().
        schema = StructType([
            StructField("clientid", StringType(), False),
            StructField("submissiondate", TimestampType(), False),
            StructField("creationdate", TimestampType(), True),
            StructField("profiledate", TimestampType(), True),
            StructField("geocountry", StringType(), True),
            StructField("locale", StringType(), True),
            StructField("os", StringType(), True),
            StructField("osversion", StringType(), True),
            StructField("buildid", StringType(), True),
            StructField("appversion", StringType(), True),
            StructField("device", StringType(), True),
            StructField("arch", StringType(), True),
            StructField("defaultsearch", StringType(), True),
            StructField("distributionid", StringType(), True),
            StructField("experiments", StringType(), True),
            StructField("defaultNewTabExperience", StringType(), True),
            StructField("defaultMailClient", StringType(), True),
            StructField("searches", StringType(), True),
            StructField("durations", StringType(), True),
            StructField("sessions", StringType(), True)
        ])

        # Make parquet partition file size large, but not too large for s3 to handle
        coalesce = 1
        if channel == "release":
            coalesce = 4
        grouped = sqlContext.createDataFrame(transformed, schema)
        grouped.coalesce(coalesce).write.mode('overwrite').parquet(s3_output)

    day += dt.timedelta(1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Transform and sanitize the pings into arrays.
Step2: Create a set of pings from "core" to build a set of core client data. Output the data to CSV or Parquet.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.