text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_tld(url):
    """Return the top-level domain of the given URL.

    Args:
        url (str): The URL to get the TLD from.

    Returns:
        str: The TLD, or an empty string when the netloc contains no dots.
    """
    # Cache parsed URLs so repeated lookups don't re-parse the same string.
    if url not in URLHelper.__cache:
        URLHelper.__cache[url] = urlparse(url)

    host_parts = URLHelper.__cache[url].netloc.split(".")
    return host_parts[-1] if len(host_parts) > 1 else ""
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_ordered_params(url):
    """Return the query parameters of the given URL in alphabetical order.

    Args:
        url (str): The URL to get the query parameters from.

    Returns:
        collections.OrderedDict: The query parameters, sorted by key.
    """
    # Reuse the shared parse cache to avoid re-parsing known URLs.
    if url not in URLHelper.__cache:
        URLHelper.__cache[url] = urlparse(url)

    raw_params = URLHelper.query_string_to_dict(URLHelper.__cache[url].query)
    return OrderedDict(sorted(raw_params.items()))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def query_dict_to_string(query):
    """Serialize a key/value mapping into a query string.

    Args:
        query (obj): The key value object with query params.

    Returns:
        str: The query string (e.g. ``"a=1&b=2"``).

    Note:
        This method does the same as :func:`urllib.parse.urlencode` except
        that it doesn't actually encode the values.
    """
    return "&".join(key + "=" + value for key, value in query.items())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def query_string_to_dict(query):
    """Parse a query string into a key/value dict.

    Args:
        query (str): The query string (e.g. ``"a=1&b=2"``).

    Returns:
        obj: The key value object with query params.

    Note:
        This method does the same as :func:`urllib.parse.parse_qsl` except
        that it doesn't actually decode the values. Like ``parse_qsl``, an
        empty query string yields an empty dict (the previous version
        produced a spurious ``{"": ""}`` entry).
    """
    query_params = {}

    for pair in query.split("&"):
        # Skip empty segments so "" and "a=1&&b=2" don't create a "" key.
        if not pair:
            continue
        # Split on the first "=" only; a missing "=" means an empty value.
        key, _, value = pair.partition("=")
        query_params[key] = value

    return query_params
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_version():
    """Get the version number of this package.

    Returns:
        str: The version number (major.minor.patch).

    Note:
        When this package is installed, the version number will be available
        through the package resource details. Otherwise this method will
        look for a ``.semver`` file.

    Note:
        In rare cases corrupt installs can cause the version number to be
        unknown. In this case the version number will be set to the string
        "Unknown".
    """
    if PackageHelper.__version:
        return PackageHelper.__version

    PackageHelper.__version = "Unknown"

    # If this is a GIT clone without install, use the ``.semver`` file.
    folder = os.path.dirname(os.path.realpath(__file__))
    try:
        # ``with`` guarantees the handle is closed even if read() fails
        # (the previous version leaked the handle on errors).
        with open(os.path.join(folder, "..", "..", ".semver"), "r") as semver:
            PackageHelper.__version = semver.read().rstrip()
        return PackageHelper.__version
    except OSError:
        pass

    # If the package was installed, get the version number via Python's
    # distribution details.
    try:
        distribution = pkg_resources.get_distribution(PackageHelper.get_alias())
        if distribution.version:
            PackageHelper.__version = distribution.version
            return PackageHelper.__version
    except Exception:
        # pkg_resources raises DistributionNotFound (and others) for
        # corrupt installs; fall through to the "Unknown" default.
        pass

    return PackageHelper.__version
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
    """Executes the HTTP call.

    Note:
        If this and the parent handler raised an error, the queue item
        status will be set to errored instead of finished. This is to
        prevent e.g. 404 recursion.
    """
    try:
        self.__options.callbacks.request_in_thread_before_start(self.__queue_item)
    except Exception as e:
        # A faulty user callback must never kill the worker thread.
        print(e)

    new_requests = []
    failed = False

    try:
        handler = Handler(self.__options, self.__queue_item)
        new_requests = handler.get_new_requests()

        try:
            self.__queue_item.response.raise_for_status()
        except Exception:
            if self.__queue_item.request.parent_raised_error:
                # Parent also errored: give up on this branch (anti-recursion).
                failed = True
            else:
                # First error on this branch: flag it on all child requests.
                for new_request in new_requests:
                    new_request.parent_raised_error = True
    except Exception as e:
        failed = True
        error_message = "Setting status of '{}' to '{}' because of an HTTP error.".format(
            self.__queue_item.request.url,
            QueueItem.STATUS_ERRORED
        )
        DebugHelper.output(self.__options, error_message)
        DebugHelper.output(self.__options, e)

        try:
            self.__options.callbacks.request_on_error(self.__queue_item, str(e))
        except Exception as e:
            print(e)

    # Record where each scraped request came from.
    for new_request in new_requests:
        new_request.parent_url = self.__queue_item.request.url

    try:
        self.__options.callbacks.request_in_thread_after_finish(self.__queue_item)
    except Exception as e:
        print(e)

    # Hand the result back; the lock serializes callbacks across threads.
    with self.__callback_lock:
        self.__callback(self.__queue_item, new_requests, failed)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start_with(self, request):
    """Start the crawler using the given request.

    Args:
        request (:class:`nyawc.http.Request`): The startpoint for the crawler.
    """
    # Apply the crawler options (cookies, headers, ...) before queueing.
    HTTPRequestHelper.patch_with_options(request, self.__options)
    self.queue.add_request(request)
    self.__crawler_start()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __spawn_new_requests(self):
    """Spawn new requests until the max threads option value is reached.

    Note:
        If no new requests were spawned and there are no requests in
        progress the crawler will stop crawling.
    """
    self.__should_spawn_new_requests = False

    active = len(self.queue.get_all(QueueItem.STATUS_IN_PROGRESS))

    # Keep spawning until the thread budget is used up or the queue is dry.
    while active < self.__options.performance.max_threads and self.__spawn_new_request():
        active += 1

    if active == 0:
        # Nothing running and nothing spawnable: the crawl is done.
        self.__crawler_stop()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __spawn_new_request(self):
    """Spawn the first queued request if there is one available.

    Returns:
        bool: True if a new request was spawned, false otherwise.
    """
    candidate = self.queue.get_first(QueueItem.STATUS_QUEUED)

    # Cancel candidates whose route hit the threshold until one qualifies.
    while candidate is not None and self.routing.is_treshold_reached(candidate.request):
        self.queue.move(candidate, QueueItem.STATUS_CANCELLED)
        candidate = self.queue.get_first(QueueItem.STATUS_QUEUED)

    if candidate is None:
        return False

    self.__request_start(candidate)
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __crawler_start(self):
    """Spawn the first X queued request, where X is the max threads option.

    Note:
        The main thread will sleep until the crawler is finished. This
        enables quitting the application using sigints (see
        http://stackoverflow.com/a/11816038/2491049).

    Note:
        `__crawler_stop()` and `__spawn_new_requests()` are called here on
        the main thread to prevent thread recursion and deadlocks.
    """
    try:
        self.__options.callbacks.crawler_before_start()
    except Exception as e:
        # A faulty user callback must not prevent the crawl from starting.
        print(e)
        print(traceback.format_exc())

    self.__spawn_new_requests()

    # Poll for work requested by worker threads until the crawl stops.
    while not self.__stopped:
        if self.__should_stop:
            self.__crawler_stop()

        if self.__should_spawn_new_requests:
            self.__spawn_new_requests()

        time.sleep(0.1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __crawler_stop(self):
    """Mark the crawler as stopped.

    Note:
        If :attr:`__stopped` is True, the main thread will be stopped.
        Every piece of code that gets executed after :attr:`__stopped` is
        True could cause Thread exceptions and or race conditions.
    """
    # Guard against re-entrant stop requests.
    if self.__stopping:
        return

    self.__stopping = True

    # Let running threads drain before cancelling whatever is left.
    self.__wait_for_current_threads()
    self.queue.move_bulk([
        QueueItem.STATUS_QUEUED,
        QueueItem.STATUS_IN_PROGRESS
    ], QueueItem.STATUS_CANCELLED)
    self.__crawler_finish()

    # Set last: once True the main loop exits (see class note above).
    self.__stopped = True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __crawler_finish(self):
    """Called when the crawler is finished because there are no queued
    requests left or it was stopped."""
    try:
        self.__options.callbacks.crawler_after_finish(self.queue)
    except Exception as e:
        # A faulty user callback must not crash the crawler shutdown.
        print(e)
        print(traceback.format_exc())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __request_start(self, queue_item):
    """Execute the request in given queue item.

    Args:
        queue_item (:class:`nyawc.QueueItem`): The request/response pair to scrape.
    """
    try:
        action = self.__options.callbacks.request_before_start(self.queue, queue_item)
    except Exception as e:
        # A failing callback counts as "no decision": continue crawling.
        action = None
        print(e)
        print(traceback.format_exc())

    if action == CrawlerActions.DO_STOP_CRAWLING:
        self.__should_stop = True

    if action == CrawlerActions.DO_SKIP_TO_NEXT:
        self.queue.move(queue_item, QueueItem.STATUS_FINISHED)
        self.__should_spawn_new_requests = True

    if action == CrawlerActions.DO_CONTINUE_CRAWLING or action is None:
        self.queue.move(queue_item, QueueItem.STATUS_IN_PROGRESS)
        thread = CrawlerThread(self.__request_finish, self.__lock, self.__options, queue_item)
        self.__threads[queue_item.get_hash()] = thread
        # Daemon threads won't block interpreter shutdown.
        thread.daemon = True
        thread.start()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __request_finish(self, queue_item, new_requests, request_failed=False):
    """Called when the crawler finished the given queue item.

    Args:
        queue_item (:class:`nyawc.QueueItem`): The request/response pair that finished.
        new_requests list(:class:`nyawc.http.Request`): All the requests that were found during this request.
        request_failed (bool): True if the request failed (if needs to be moved to errored).
    """
    # During shutdown results are discarded to avoid races with cleanup.
    if self.__stopping:
        return

    del self.__threads[queue_item.get_hash()]

    if request_failed:
        new_queue_items = []
        self.queue.move(queue_item, QueueItem.STATUS_ERRORED)
    else:
        self.routing.increase_route_count(queue_item.request)
        new_queue_items = self.__add_scraped_requests_to_queue(queue_item, new_requests)
        self.queue.move(queue_item, QueueItem.STATUS_FINISHED)

    try:
        action = self.__options.callbacks.request_after_finish(self.queue, queue_item, new_queue_items)
    except Exception as e:
        # A broken callback counts as "no decision"; crawling continues.
        action = None
        print(e)
        print(traceback.format_exc())

    # Free response/DOM memory as early as possible.
    queue_item.decompose()

    if action == CrawlerActions.DO_STOP_CRAWLING:
        self.__should_stop = True

    if action == CrawlerActions.DO_CONTINUE_CRAWLING or action is None:
        # Ask the main thread to spawn more work (avoids thread recursion).
        self.__should_spawn_new_requests = True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __add_scraped_requests_to_queue(self, queue_item, scraped_requests):
    """Convert the scraped requests to queue items, add them to the queue
    and return them.

    Args:
        queue_item (:class:`nyawc.QueueItem`): The request/response pair that finished.
        scraped_requests list(:class:`nyawc.http.Request`): All the requests found during this request.

    Returns:
        list(:class:`nyawc.QueueItem`): The new queue items.
    """
    max_depth = self.__options.scope.max_depth
    new_queue_items = []

    for request in scraped_requests:
        HTTPRequestHelper.patch_with_options(request, self.__options, queue_item)

        # Out-of-scope and already-known requests are dropped silently.
        if not HTTPRequestHelper.complies_with_scope(queue_item, request, self.__options.scope):
            continue
        if self.queue.has_request(request):
            continue

        request.depth = queue_item.request.depth + 1
        if max_depth is not None and request.depth > max_depth:
            continue

        new_queue_items.append(self.queue.add_request(request))

    return new_queue_items
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fit(self, train_set, test_set):
    """Fit the model to the given data.

    :param train_set: training data
    :param test_set: test data
    """
    with tf.Graph().as_default(), tf.Session() as self.tf_session:
        self.build_model()
        tf.global_variables_initializer().run()

        # Keep the learning rate constant for the first third of the
        # epochs, then decay it exponentially.
        third = self.num_epochs // 3
        for i in range(self.num_epochs):
            lr_decay = self.lr_decay ** max(i - third, 0.0)
            self.tf_session.run(
                tf.assign(self.lr_var, tf.multiply(self.learning_rate, lr_decay)))

            train_perplexity = self._run_train_step(train_set, 'train')
            print("Epoch: %d Train Perplexity: %.3f"
                  % (i + 1, train_perplexity))

        # 'test' mode runs the same loop without the training op.
        test_perplexity = self._run_train_step(test_set, 'test')
        print("Test Perplexity: %.3f" % test_perplexity)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _run_train_step(self, data, mode='train'):
    """Run a single training epoch over ``data``.

    :param data: input data
    :param mode: 'train' or 'test'. In 'test' mode no parameters are updated.
    :return: perplexity over the epoch
    """
    epoch_size = ((len(data) // self.batch_size) - 1) // self.num_steps
    costs = 0.0
    iters = 0
    state = self._init_state.eval()

    op = self._train_op if mode == 'train' else tf.no_op()

    # Report progress roughly ten times per epoch. The max() guards the
    # ZeroDivisionError the old code hit whenever epoch_size < 10.
    log_interval = max(epoch_size // 10, 1)

    for step, (x, y) in enumerate(
            utilities.seq_data_iterator(
                data, self.batch_size, self.num_steps)):
        cost, state, _ = self.tf_session.run(
            [self.cost, self.final_state, op],
            {self.input_data: x,
             self.input_labels: y,
             self._init_state: state})
        costs += cost
        iters += self.num_steps

        if step % log_interval == 10:
            # Progress fraction and running perplexity. The old print
            # labelled the fraction itself as "perplexity", which was wrong.
            print("%.3f perplexity: %.3f"
                  % (step * 1.0 / epoch_size, np.exp(costs / iters)))

    return np.exp(costs / iters)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_model(self):
    """Build the model's computational graph."""
    with tf.variable_scope(
            "model", reuse=None, initializer=self.initializer):
        # Build order matters: each step consumes tensors created by the
        # previous one (placeholders -> cells -> state/embeddings ->
        # architecture -> optimizer).
        self._create_placeholders()
        self._create_rnn_cells()
        self._create_initstate_and_embeddings()
        self._create_rnn_architecture()
        self._create_optimizer_node()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_placeholders(self):
    """Create the computational graph's placeholders.

    Both placeholders hold integer ids of shape (batch_size, num_steps):
    the inputs and their (shifted) target labels.
    """
    self.input_data = tf.placeholder(
        tf.int32, [self.batch_size, self.num_steps])
    self.input_labels = tf.placeholder(
        tf.int32, [self.batch_size, self.num_steps])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_rnn_cells(self):
    """Create the stacked LSTM cells.

    Each layer gets its own cell instance. The previous
    ``[lstm_cell] * num_layers`` form reused one cell object for every
    layer, which shares state/weights between layers and raises on newer
    TensorFlow releases.
    """
    def _make_cell():
        # One independent LSTM cell, wrapped with output dropout.
        cell = tf.nn.rnn_cell.LSTMCell(
            self.num_hidden, forget_bias=0.0)
        return tf.nn.rnn_cell.DropoutWrapper(
            cell, output_keep_prob=self.dropout)

    self.cell = tf.nn.rnn_cell.MultiRNNCell(
        [_make_cell() for _ in range(self.num_layers)])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_initstate_and_embeddings(self):
    """Create the initial state for the cell and the data embeddings."""
    self._init_state = self.cell.zero_state(self.batch_size, tf.float32)

    # Embedding matrix: one num_hidden-dim vector per vocabulary entry.
    embedding = tf.get_variable(
        "embedding", [self.vocab_size, self.num_hidden])
    inputs = tf.nn.embedding_lookup(embedding, self.input_data)
    # Dropout on the embedded inputs (keep probability = self.dropout).
    self.inputs = tf.nn.dropout(inputs, self.dropout)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_rnn_architecture(self):
    """Create the training architecture and the last layer of the LSTM."""
    # Split (batch, num_steps, hidden) into a list of num_steps tensors of
    # shape (batch, hidden), as required by the static RNN API.
    self.inputs = [tf.squeeze(i, [1]) for i in tf.split(
        axis=1, num_or_size_splits=self.num_steps, value=self.inputs)]
    outputs, state = tf.nn.rnn(
        self.cell, self.inputs, initial_state=self._init_state)

    # Flatten the per-step outputs to (batch * num_steps, hidden) and
    # project onto the vocabulary with a softmax layer.
    output = tf.reshape(tf.concat(axis=1, values=outputs), [-1, self.num_hidden])
    softmax_w = tf.get_variable(
        "softmax_w", [self.num_hidden, self.vocab_size])
    softmax_b = tf.get_variable("softmax_b", [self.vocab_size])
    logits = tf.add(tf.matmul(output, softmax_w), softmax_b)

    # Per-example cross-entropy, every timestep weighted equally; cost is
    # the batch-averaged sum.
    loss = tf.nn.seq2seq.sequence_loss_by_example(
        [logits],
        [tf.reshape(self.input_labels, [-1])],
        [tf.ones([self.batch_size * self.num_steps])])
    self.cost = tf.div(tf.reduce_sum(loss), self.batch_size)
    self.final_state = state
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_optimizer_node(self):
    """Create the optimizer node of the graph."""
    # The learning rate lives in a variable so it can be decayed during
    # training (see fit()).
    self.lr_var = tf.Variable(0.0, trainable=False)
    tvars = tf.trainable_variables()
    # Clip gradients by global norm to avoid exploding gradients.
    grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars),
                                      self.max_grad_norm)
    optimizer = tf.train.GradientDescentOptimizer(self.lr_var)
    self._train_op = optimizer.apply_gradients(zip(grads, tvars))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_encoding_layers(self):
    """Create the encoding layers for supervised finetuning.

    :return: output of the final encoding layer.
    """
    next_train = self.input_data
    self.layer_nodes = []

    for l, layer in enumerate(self.layers):
        with tf.name_scope("encode-{}".format(l)):
            y_act = tf.add(
                tf.matmul(next_train, self.encoding_w_[l]),
                self.encoding_b_[l]
            )

            if self.finetune_enc_act_func[l] is not None:
                layer_y = self.finetune_enc_act_func[l](y_act)
            else:
                # No activation configured: pass the linear output through.
                # (The old ``layer_y = None`` made tf.nn.dropout raise at
                # graph-build time.)
                layer_y = y_act

            # the input to the next layer is the output of this layer
            next_train = tf.nn.dropout(layer_y, self.keep_prob)
            self.layer_nodes.append(next_train)

    self.encode = next_train
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_decoding_layers(self):
    """Create the decoding layers for reconstruction finetuning.

    :return: output of the final decoding layer.
    """
    next_decode = self.encode

    for l, layer in reversed(list(enumerate(self.layers))):
        with tf.name_scope("decode-{}".format(l)):
            # Create decoding variables
            if self.tied_weights:
                # Tied weights: decoder reuses the transposed encoder weights.
                dec_w = tf.transpose(self.encoding_w_[l])
            else:
                dec_w = tf.Variable(tf.transpose(
                    self.encoding_w_[l].initialized_value()))

            dec_b = tf.Variable(tf.constant(
                0.1, shape=[dec_w.get_shape().dims[1].value]))
            self.decoding_w.append(dec_w)
            self.decoding_b.append(dec_b)

            y_act = tf.add(
                tf.matmul(next_decode, dec_w),
                dec_b
            )

            if self.finetune_dec_act_func[l] is not None:
                layer_y = self.finetune_dec_act_func[l](y_act)
            else:
                # No activation configured: keep the linear output.
                # (The old ``layer_y = None`` made tf.nn.dropout raise at
                # graph-build time.)
                layer_y = y_act

            # the input to the next layer is the output of this layer
            next_decode = tf.nn.dropout(layer_y, self.keep_prob)
            self.layer_nodes.append(next_decode)

    self.reconstruction = next_decode
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_mnist_dataset(mode='supervised', one_hot=True):
    """Load the MNIST handwritten digits dataset.

    :param mode: 'supervised' or 'unsupervised' mode
    :param one_hot: whether to get one hot encoded labels
    :return: train, validation, test data:
        for (X, y) if 'supervised', for (X) if 'unsupervised'
    :raises ValueError: if ``mode`` is not a recognised value.
    """
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=one_hot)

    # Training set
    trX = mnist.train.images
    trY = mnist.train.labels

    # Validation set
    vlX = mnist.validation.images
    vlY = mnist.validation.labels

    # Test set
    teX = mnist.test.images
    teY = mnist.test.labels

    if mode == 'supervised':
        return trX, trY, vlX, vlY, teX, teY
    elif mode == 'unsupervised':
        return trX, vlX, teX
    else:
        # Fail loudly instead of silently returning None on a typo'd mode.
        raise ValueError(
            "mode must be 'supervised' or 'unsupervised', got %r" % (mode,))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_cifar10_dataset(cifar_dir, mode='supervised'):
    """Load the cifar10 dataset.

    :param cifar_dir: path to the dataset directory
        (cPickle format from: https://www.cs.toronto.edu/~kriz/cifar.html)
    :param mode: 'supervised' or 'unsupervised' mode
    :return: train, test data:
        for (X, y) if 'supervised', for (X) if 'unsupervised'
    """
    # Training set
    trX = None
    trY = np.array([])

    # Test set
    teX = np.array([])
    teY = np.array([])

    for fn in os.listdir(cifar_dir):

        # Skip the metadata files shipped with the archive.
        if not fn.startswith('batches') and not fn.startswith('readme'):
            # NOTE(review): pickle.load is unsafe on untrusted input -- only
            # point cifar_dir at the official CIFAR-10 archive.
            fo = open(os.path.join(cifar_dir, fn), 'rb')
            data_batch = pickle.load(fo)
            fo.close()

            if fn.startswith('data'):
                # 'data_batch_*' files accumulate into the training set.
                if trX is None:
                    trX = data_batch['data']
                    trY = data_batch['labels']
                else:
                    trX = np.concatenate((trX, data_batch['data']), axis=0)
                    trY = np.concatenate((trY, data_batch['labels']), axis=0)

            if fn.startswith('test'):
                teX = data_batch['data']
                teY = data_batch['labels']

    # Normalize pixel values to [0, 1].
    trX = trX.astype(np.float32) / 255.
    teX = teX.astype(np.float32) / 255.

    if mode == 'supervised':
        return trX, trY, teX, teY
    elif mode == 'unsupervised':
        return trX, teX
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def linear(prev_layer, out_dim, name="linear"):
    """Create a linear fully-connected layer.

    Parameters
    ----------
    prev_layer : tf.Tensor
        Last layer's output tensor.
    out_dim : int
        Number of output units.
    name : str, optional (default = "linear")
        Name scope for the layer's ops.

    Returns
    -------
    tuple : (output tensor, weights variable, biases variable)
    """
    with tf.name_scope(name):
        in_dim = prev_layer.get_shape()[1].value
        weights = tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=0.1))
        biases = tf.Variable(tf.constant(0.1, shape=[out_dim]))
        output = tf.add(tf.matmul(prev_layer, weights), biases)
        return (output, weights, biases)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def regularization(variables, regtype, regcoef, name="regularization"):
    """Compute the regularization tensor.

    Parameters
    ----------
    variables : list of tf.Variable
        List of model variables.
    regtype : str
        Type of regularization. Can be ["none", "l1", "l2"]
    regcoef : float
        Regularization coefficient.
    name : str, optional (default = "regularization")
        Name for the regularization op.

    Returns
    -------
    tf.Tensor : regularization tensor, or None when regtype is "none".
    """
    with tf.name_scope(name):
        # Guard clause: no regularization configured.
        if regtype == 'none':
            return None

        total = tf.constant(0.0)
        for var in variables:
            if regtype == 'l2':
                total = tf.add(total, tf.nn.l2_loss(var))
            elif regtype == 'l1':
                total = tf.add(total, tf.reduce_sum(tf.abs(var)))
        return tf.multiply(regcoef, total)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def accuracy(mod_y, ref_y, summary=True, name="accuracy"):
    """Accuracy computation op.

    Parameters
    ----------
    mod_y : tf.Tensor
        Model output tensor.
    ref_y : tf.Tensor
        Reference input tensor.
    summary : bool, optional (default = True)
        Whether to save tf summary for the op.

    Returns
    -------
    tf.Tensor : accuracy op. tensor
    """
    with tf.name_scope(name):
        predictions = tf.argmax(mod_y, 1)
        hits = tf.equal(predictions, tf.argmax(ref_y, 1))
        accuracy = tf.reduce_mean(tf.cast(hits, tf.float32))
        if summary:
            tf.summary.scalar('accuracy', accuracy)
        return accuracy
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pretrain_procedure(self, layer_objs, layer_graphs, set_params_func, train_set, validation_set=None):
    """Perform unsupervised pretraining of the model.

    :param layer_objs: list of model objects (autoencoders or rbms)
    :param layer_graphs: list of model tf.Graph objects
    :param set_params_func: function used to set the parameters after pretraining
    :param train_set: training set
    :param validation_set: validation set
    :return: return data encoded by the last layer
    """
    next_train, next_valid = train_set, validation_set

    # Each layer trains on the (transformed) output of the previous one.
    for index, (layer_obj, layer_graph) in enumerate(zip(layer_objs, layer_graphs)):
        print('Training layer {}...'.format(index + 1))
        next_train, next_valid = self._pretrain_layer_and_gen_feed(
            layer_obj, set_params_func, next_train, next_valid, layer_graph)

    return next_train, next_valid
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _pretrain_layer_and_gen_feed(self, layer_obj, set_params_func, train_set, validation_set, graph):
    """Pretrain a single autoencoder and encode the data for the next layer.

    :param layer_obj: layer model
    :param set_params_func: function used to set the parameters after pretraining
    :param train_set: training set
    :param validation_set: validation set
    :param graph: tf object for the rbm
    :return: encoded train data, encoded validation data
    """
    # Unsupervised objective: the layer reconstructs its own input.
    layer_obj.fit(train_set, train_set,
                  validation_set, validation_set, graph=graph)

    with graph.as_default():
        set_params_func(layer_obj, graph)

        next_train = layer_obj.transform(train_set, graph=graph)
        next_valid = (layer_obj.transform(validation_set, graph=graph)
                      if validation_set is not None else None)

    return next_train, next_valid
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_layers_output(self, dataset):
    """Get output from each layer of the network.

    :param dataset: input data
    :return: list of np array, element i is the output of layer i
    :raises Exception: if the model exposes no layer nodes.
    """
    # Fail before spinning up a session and restoring the model if there
    # is nothing to evaluate (the old code did the expensive work first).
    if not self.layer_nodes:
        raise Exception("This method is not implemented for this model")

    layers_out = []

    with self.tf_graph.as_default():
        with tf.Session() as self.tf_session:
            self.tf_saver.restore(self.tf_session, self.model_path)
            for node in self.layer_nodes:
                # keep_prob 1 disables dropout at inference time.
                layers_out.append(node.eval({self.input_data: dataset,
                                             self.keep_prob: 1}))

    return layers_out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_parameters(self, params, graph=None):
    """Get the parameters of the model.

    :param params: dictionary of keys (str names) and values (tensors).
    :param graph: tf.Graph to evaluate in (defaults to the model's graph).
    :return: evaluated tensors in params
    """
    g = self.tf_graph if graph is None else graph

    with g.as_default():
        with tf.Session() as self.tf_session:
            self.tf_saver.restore(self.tf_session, self.model_path)

            evaluated = {}
            for name in params:
                value = params[name]
                if isinstance(value, list):
                    # Lists of tensors get numbered keys: "name-1", "name-2", ...
                    for i, tensor in enumerate(value):
                        evaluated[name + '-' + str(i + 1)] = tensor.eval()
                else:
                    evaluated[name] = value.eval()
            return evaluated
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fit(self, train_X, train_Y, val_X=None, val_Y=None, graph=None):
    """Fit the model to the data.

    Parameters
    ----------
    train_X : array_like, shape (n_samples, n_features)
        Training data.
    train_Y : array_like, shape (n_samples, n_classes)
        Training labels (one-hot encoded).
    val_X : array_like, shape (N, n_features), optional (default = None)
        Validation data.
    val_Y : array_like, shape (N, n_classes), optional (default = None)
        Validation labels.
    graph : tf.Graph, optional (default = None)
        Tensorflow Graph object.
    """
    # One-hot labels are required: the class count is the second dimension.
    if len(train_Y.shape) != 1:
        num_classes = train_Y.shape[1]
    else:
        raise Exception("Please convert the labels with one-hot encoding.")

    g = graph if graph is not None else self.tf_graph

    with g.as_default():
        # Build model
        self.build_model(train_X.shape[1], num_classes)

        with tf.Session() as self.tf_session:
            # Initialize tf stuff
            summary_objs = tf_utils.init_tf_ops(self.tf_session)
            self.tf_merged_summaries = summary_objs[0]
            self.tf_summary_writer = summary_objs[1]
            self.tf_saver = summary_objs[2]
            # Train model
            self._train_model(train_X, train_Y, val_X, val_Y)
            # Save model
            self.tf_saver.save(self.tf_session, self.model_path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def predict(self, test_X):
    """Predict the labels for the test set.

    Parameters
    ----------
    test_X : array_like, shape (n_samples, n_features)
        Test data.

    Returns
    -------
    array_like, shape (n_samples,) : predicted labels.
    """
    with self.tf_graph.as_default():
        with tf.Session() as self.tf_session:
            # Restore the trained weights before evaluating.
            self.tf_saver.restore(self.tf_session, self.model_path)
            # keep_prob 1 disables dropout at inference time.
            return self.mod_y.eval({self.input_data: test_X,
                                    self.keep_prob: 1})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def score(self, test_X, test_Y):
    """Compute the mean accuracy over the test set.

    Parameters
    ----------
    test_X : array_like, shape (n_samples, n_features)
        Test data.
    test_Y : array_like, shape (n_samples, n_features)
        Test labels.

    Returns
    -------
    float : mean accuracy over the test set
    """
    with self.tf_graph.as_default():
        with tf.Session() as self.tf_session:
            # Restore the trained weights before evaluating.
            self.tf_saver.restore(self.tf_session, self.model_path)
            # keep_prob 1 disables dropout at inference time.
            return self.accuracy.eval({self.input_data: test_X,
                                       self.input_labels: test_Y,
                                       self.keep_prob: 1})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pretrain(self, train_set, validation_set=None):
    """Perform Unsupervised pretraining of the autoencoder."""
    self.do_pretrain = True

    def set_params_func(autoenc, autoencgraph):
        # Collect the trained encoder weights/biases for later finetuning.
        params = autoenc.get_parameters(graph=autoencgraph)
        self.encoding_w_.append(params['enc_w'])
        self.encoding_b_.append(params['enc_b'])

    return SupervisedModel.pretrain_procedure(
        self, self.autoencoders, self.autoencoder_graphs,
        set_params_func=set_params_func, train_set=train_set,
        validation_set=validation_set)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_tf_ops(sess):
    """Initialize TensorFlow operations.

    This function initialize the following tensorflow ops:
        * init variables ops
        * summary ops
        * create model saver

    Parameters
    ----------
    sess : object
        Tensorflow `Session` object

    Returns
    -------
    tuple : (summary_merged, summary_writer, saver)
        * tf merged summaries object
        * tf summary writer object
        * tf saver object
    """
    summary_merged = tf.summary.merge_all()
    init_op = tf.global_variables_initializer()
    saver = tf.train.Saver()

    sess.run(init_op)

    # Retrieve run identifier: one past the highest existing "runN" dir.
    logs_dir = Config().logs_dir
    existing_ids = [int(e[3:]) for e in os.listdir(logs_dir) if e[:3] == 'run']
    run_id = max([0] + existing_ids) + 1
    run_dir = os.path.join(logs_dir, 'run' + str(run_id))
    print('Tensorboard logs dir for this run is %s' % (run_dir))

    summary_writer = tf.summary.FileWriter(run_dir, sess.graph)

    return (summary_merged, summary_writer, saver)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_summaries( sess, merged_summaries, summary_writer, epoch, feed, tens):
"""Run the summaries and error computation on the validation set. Parameters sess : tf.Session Tensorflow session object. merged_summaries : tf obj Tensorflow merged summaries obj. summary_writer : tf.summary.FileWriter Tensorflow summary writer obj. epoch : int Current training epoch. feed : dict Validation feed dict. tens : tf.Tensor Tensor to display and evaluate during training. Can be self.accuracy for SupervisedModel or self.cost for UnsupervisedModel. Returns ------- err : float, mean error over the validation set. """ |
try:
    # Evaluate the merged summaries and the monitored tensor in one pass,
    # then log the summaries for this epoch.
    result = sess.run([merged_summaries, tens], feed_dict=feed)
    summary_str = result[0]
    out = result[1]
    summary_writer.add_summary(summary_str, epoch)
except tf.errors.InvalidArgumentError:
    # Presumably raised when no summaries were registered for this graph
    # (merge_all returned None) -- fall back to evaluating only the
    # tensor. TODO(review): confirm this is the intended failure mode.
    out = sess.run(tens, feed_dict=feed)
return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pretrain(self, train_set, validation_set=None):
"""Perform Unsupervised pretraining of the DBN.""" |
# Mark that unsupervised layer-wise pretraining is used; the collected
# RBM parameters below seed the supervised network during fine-tuning.
self.do_pretrain = True

def set_params_func(rbmmachine, rbmgraph):
    # Collect the trained weights / hidden biases of one RBM for the
    # corresponding layer of the supervised net.
    params = rbmmachine.get_parameters(graph=rbmgraph)
    self.encoding_w_.append(params['W'])
    self.encoding_b_.append(params['bh_'])

# Delegate the actual layer-by-layer training loop to the shared
# SupervisedModel implementation.
return SupervisedModel.pretrain_procedure(
    self, self.rbms, self.rbm_graphs, set_params_func=set_params_func,
    train_set=train_set, validation_set=validation_set)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_encode_layer(self):
"""Create the encoding layer of the network. Returns ------- self """ |
with tf.name_scope("encoder"):
activation = tf.add(
tf.matmul(self.input_data, self.W_),
self.bh_
)
if self.enc_act_func:
self.encode = self.enc_act_func(activation)
else:
self.encode = activation
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_decode_layer(self):
"""Create the decoding layer of the network. Returns ------- self """ |
with tf.name_scope("decoder"):
activation = tf.add(
tf.matmul(self.encode, tf.transpose(self.W_)),
self.bv_
)
if self.dec_act_func:
self.reconstruction = self.dec_act_func(activation)
else:
self.reconstruction = activation
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sample_prob(probs, rand):
"""Get samples from a tensor of probabilities. :param probs: tensor of probabilities :param rand: tensor (of the same shape as probs) of random values :return: binary sample of probabilities """ |
return tf.nn.relu(tf.sign(probs - rand)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def corrupt_input(data, sess, corrtype, corrfrac):
"""Corrupt a fraction of data according to the chosen noise method. :return: corrupted data """ |
corruption_ratio = np.round(corrfrac * data.shape[1]).astype(np.int)
if corrtype == 'none':
return np.copy(data)
if corrfrac > 0.0:
if corrtype == 'masking':
return masking_noise(data, sess, corrfrac)
elif corrtype == 'salt_and_pepper':
return salt_and_pepper_noise(data, corruption_ratio)
else:
return np.copy(data) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def xavier_init(fan_in, fan_out, const=1):
"""Xavier initialization of network weights. https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow :param fan_in: fan in of the network (n_features) :param fan_out: fan out of the network (n_components) :param const: multiplicative constant """ |
# Glorot/Xavier uniform bounds: +/- const * sqrt(6 / (fan_in + fan_out)).
low = -const * np.sqrt(6.0 / (fan_in + fan_out))
high = const * np.sqrt(6.0 / (fan_in + fan_out))
return tf.random_uniform((fan_in, fan_out), minval=low, maxval=high)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gen_batches(data, batch_size):
"""Divide input data into batches. :param data: input data :param batch_size: size of each batch :return: data divided into batches """ |
data = np.array(data)
for i in range(0, data.shape[0], batch_size):
yield data[i:i + batch_size] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_one_hot(dataY):
"""Convert the vector of labels dataY into one-hot encoding. :param dataY: vector of labels :return: one-hot encoded labels """ |
nc = 1 + np.max(dataY)
onehot = [np.zeros(nc, dtype=np.int8) for _ in dataY]
for i, j in enumerate(dataY):
onehot[i][j] = 1
return onehot |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def conv2bin(data):
"""Convert a matrix of probabilities into binary values. If the matrix has values <= 0 or >= 1, the values are normalized to be in [0, 1]. :type data: numpy array :param data: input matrix :return: converted binary matrix """ |
if data.min() < 0 or data.max() > 1:
data = normalize(data)
out_data = data.copy()
for i, sample in enumerate(out_data):
for j, val in enumerate(sample):
if np.random.random() <= val:
out_data[i][j] = 1
else:
out_data[i][j] = 0
return out_data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def masking_noise(data, sess, v):
"""Apply masking noise to data in X. In other words a fraction v of elements of X (chosen at random) is forced to zero. :param data: array_like, Input data :param sess: TensorFlow session :param v: fraction of elements to distort, float :return: transformed data """ |
data_noise = data.copy()
rand = tf.random_uniform(data.shape)
data_noise[sess.run(tf.nn.relu(tf.sign(v - rand))).astype(np.bool)] = 0
return data_noise |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def salt_and_pepper_noise(X, v):
"""Apply salt and pepper noise to data in X. In other words a fraction v of elements of X (chosen at random) is set to its maximum or minimum value according to a fair coin flip. If minimum or maximum are not given, the min (max) value in X is taken. :param X: array_like, Input data :param v: int, fraction of elements to distort :return: transformed data """ |
X_noise = X.copy()
n_features = X.shape[1]
mn = X.min()
mx = X.max()
for i, sample in enumerate(X):
mask = np.random.randint(0, n_features, v)
for m in mask:
if np.random.random() < 0.5:
X_noise[i][m] = mn
else:
X_noise[i][m] = mx
return X_noise |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def expand_args(**args_to_expand):
"""Expand the given lists into the length of the layers. This is used as a convenience so that the user does not need to specify the complete list of parameters for model initialization. IE the user can just specify one parameter and this function will expand it """ |
layers = args_to_expand['layers']
try:
items = args_to_expand.iteritems()
except AttributeError:
items = args_to_expand.items()
for key, val in items:
if isinstance(val, list) and len(val) != len(layers):
args_to_expand[key] = [val[0] for _ in layers]
return args_to_expand |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flag_to_list(flagval, flagtype):
"""Convert a string of comma-separated tf flags to a list of values.""" |
if flagtype == 'int':
return [int(_) for _ in flagval.split(',') if _]
elif flagtype == 'float':
return [float(_) for _ in flagval.split(',') if _]
elif flagtype == 'str':
return [_ for _ in flagval.split(',') if _]
else:
raise Exception("incorrect type") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def str2actfunc(act_func):
"""Convert activation function name to tf function.""" |
# Map an activation-function name to the corresponding tf op.
if act_func == 'sigmoid':
    return tf.nn.sigmoid
elif act_func == 'tanh':
    return tf.nn.tanh
elif act_func == 'relu':
    return tf.nn.relu
# Any other name falls through and implicitly returns None.
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def random_seed_np_tf(seed):
"""Seed numpy and tensorflow random number generators. :param seed: seed parameter """ |
if seed >= 0:
np.random.seed(seed)
tf.set_random_seed(seed)
return True
else:
return False |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gen_image(img, width, height, outfile, img_type='grey'):
"""Save an image with the given parameters.""" |
# Flat pixel vector must match either a grayscale or an RGB image.
# NOTE(review): assert is stripped under `python -O`; raising ValueError
# would be the robust form of this input check.
assert len(img) == width * height or len(img) == width * height * 3
if img_type == 'grey':
    # Reshape the flat vector to a 2-D (width, height) grayscale image.
    # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2;
    # imageio.imwrite is the usual replacement.
    misc.imsave(outfile, img.reshape(width, height))
elif img_type == 'color':
    # Channels-first (3, width, height) layout.
    misc.imsave(outfile, img.reshape(3, width, height))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_model(self, n_features, n_classes):
"""Create the computational graph of the model. :param n_features: Number of features. :param n_classes: number of classes. :return: self """ |
# Wire up placeholders, layers, loss, optimizer and accuracy in order;
# each step reads attributes produced by the previous one.
self._create_placeholders(n_features, n_classes)
self._create_layers(n_classes)
self.cost = self.loss.compile(self.mod_y, self.input_labels)
self.train_step = self.trainer.compile(self.cost)
self.accuracy = Evaluation.accuracy(self.mod_y, self.input_labels)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def max_pool(x, dim):
"""Max pooling operation.""" |
# Square max-pooling: window and stride are both `dim`, so the spatial
# dimensions shrink by a factor of dim (SAME padding; batch and channel
# dimensions untouched).
return tf.nn.max_pool(
    x, ksize=[1, dim, dim, 1], strides=[1, dim, dim, 1],
    padding='SAME')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reconstruct(self, data, graph=None):
"""Reconstruct data according to the model. Parameters data : array_like, shape (n_samples, n_features) Data to transform. graph : tf.Graph, optional (default = None) Tensorflow Graph object Returns ------- array_like, transformed data """ |
# Use the caller-supplied graph when given, else the model's own graph.
g = graph if graph is not None else self.tf_graph
with g.as_default():
    with tf.Session() as self.tf_session:
        # Restore the trained weights, then run a forward pass with
        # dropout disabled (keep_prob = 1).
        self.tf_saver.restore(self.tf_session, self.model_path)
        feed = {self.input_data: data, self.keep_prob: 1}
        return self.reconstruction.eval(feed)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def score(self, data, data_ref, graph=None):
"""Compute the reconstruction loss over the test set. Parameters data : array_like Data to reconstruct. data_ref : array_like Reference data. Returns ------- float: Mean error. """ |
# Use the caller-supplied graph when given, else the model's own graph.
g = graph if graph is not None else self.tf_graph
with g.as_default():
    with tf.Session() as self.tf_session:
        # Restore trained weights and evaluate the loss with dropout
        # disabled (keep_prob = 1).
        self.tf_saver.restore(self.tf_session, self.model_path)
        feed = {
            self.input_data: data,
            self.input_labels: data_ref,
            self.keep_prob: 1
        }
        return self.cost.eval(feed)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compile(self, cost, name_scope="train"):
"""Compile the optimizer with the given training parameters. Parameters cost : Tensor A Tensor containing the value to minimize. name_scope : str , optional (default="train") Optional name scope for the optimizer graph ops. """ |
# Group the optimizer ops under a named scope for readable TensorBoard
# graphs; the returned op performs one optimization step on `cost`.
with tf.name_scope(name_scope):
    return self.opt_.minimize(cost)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compile(self, mod_y, ref_y, regterm=None):
"""Compute the loss function tensor. Parameters mode_y : tf.Tensor model output tensor ref_y : tf.Tensor reference input tensor regterm : tf.Tensor, optional (default = None) Regularization term tensor Returns ------- Loss function tensor. """ |
with tf.name_scope(self.name):
if self.lfunc == 'cross_entropy':
clip_inf = tf.clip_by_value(mod_y, 1e-10, float('inf'))
clip_sup = tf.clip_by_value(1 - mod_y, 1e-10, float('inf'))
cost = - tf.reduce_mean(tf.add(
tf.multiply(ref_y, tf.log(clip_inf)),
tf.multiply(tf.subtract(1.0, ref_y), tf.log(clip_sup))))
elif self.lfunc == 'softmax_cross_entropy':
cost = tf.losses.softmax_cross_entropy(ref_y, mod_y)
elif self.lfunc == 'mse':
cost = tf.sqrt(tf.reduce_mean(
tf.square(tf.subtract(ref_y, mod_y))))
else:
cost = None
if cost is not None:
cost = cost + regterm if regterm is not None else cost
tf.summary.scalar(self.lfunc, cost)
else:
cost = None
return cost |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_model(self, n_features, encoding_w=None, encoding_b=None):
"""Create the computational graph for the reconstruction task. :param n_features: Number of features :param encoding_w: list of weights for the encoding layers. :param encoding_b: list of biases for the encoding layers. :return: self """ |
# Input and target have the same width: the net reconstructs its input.
self._create_placeholders(n_features, n_features)
# Reuse pretrained encoder parameters when both lists are provided,
# otherwise initialize fresh variables.
if encoding_w and encoding_b:
    self.encoding_w_ = encoding_w
    self.encoding_b_ = encoding_b
else:
    self._create_variables(n_features)
self._create_encoding_layers()
self._create_decoding_layers()
# Regularize every encoder weight and bias.
variables = []
variables.extend(self.encoding_w_)
variables.extend(self.encoding_b_)
regterm = Layers.regularization(variables, self.regtype, self.regcoef)
self.cost = self.loss.compile(
    self.reconstruction, self.input_labels, regterm=regterm)
self.train_step = self.trainer.compile(self.cost)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_feed_dict(self, data):
"""Create the dictionary of data to feed to tf session during training. :param data: training/validation set batch :return: dictionary(self.input_data: data, self.hrand: random_uniform, self.vrand: random_uniform) """ |
# hrand/vrand carry the per-unit uniform noise used to binarize hidden /
# visible probabilities during Gibbs sampling; one row per sample.
return {
    self.input_data: data,
    self.hrand: np.random.rand(data.shape[0], self.num_hidden),
    self.vrand: np.random.rand(data.shape[0], data.shape[1])
}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_model(self, n_features, regtype='none'):
"""Build the Restricted Boltzmann Machine model in TensorFlow. :param n_features: number of features :param regtype: regularization type :return: self """ |
self._create_placeholders(n_features)
self._create_variables(n_features)
# Encode/decode ops exposed for transform / reconstruct.
self.encode = self.sample_hidden_from_visible(self.input_data)[0]
self.reconstruction = self.sample_visible_from_hidden(
    self.encode, n_features)
# First Gibbs step, starting from the data distribution.
hprob0, hstate0, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
    self.input_data, n_features)
positive = self.compute_positive_association(self.input_data,
                                             hprob0, hstate0)
# Remaining CD-k steps; each starts from the previous visible probs.
nn_input = vprob
for step in range(self.gibbs_sampling_steps - 1):
    # NOTE(review): hprob/hstate from intermediate steps are unused;
    # only vprob and the final hprob1 feed the updates below.
    hprob, hstate, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
        nn_input, n_features)
    nn_input = vprob
negative = tf.matmul(tf.transpose(vprob), hprob1)
# Contrastive-divergence parameter updates, scaled by the learning rate
# (and by batch size for the weights).
self.w_upd8 = self.W.assign_add(
    self.learning_rate * (positive - negative) / self.batch_size)
self.bh_upd8 = self.bh_.assign_add(tf.multiply(self.learning_rate, tf.reduce_mean(
    tf.subtract(hprob0, hprob1), 0)))
self.bv_upd8 = self.bv_.assign_add(tf.multiply(self.learning_rate, tf.reduce_mean(
    tf.subtract(self.input_data, vprob), 0)))
variables = [self.W, self.bh_, self.bv_]
regterm = Layers.regularization(variables, self.regtype, self.regcoef)
# Reconstruction loss between the visible probs and the input data.
self.cost = self.loss.compile(vprob, self.input_data, regterm=regterm)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gibbs_sampling_step(self, visible, n_features):
"""Perform one step of gibbs sampling. :param visible: activations of the visible units :param n_features: number of features :return: tuple(hidden probs, hidden states, visible probs, new hidden probs, new hidden states) """ |
# visible -> hidden (positive phase), hidden -> visible (reconstruction),
# then visible -> hidden again for the "negative" hidden statistics.
hprobs, hstates = self.sample_hidden_from_visible(visible)
vprobs = self.sample_visible_from_hidden(hprobs, n_features)
hprobs1, hstates1 = self.sample_hidden_from_visible(vprobs)
return hprobs, hstates, vprobs, hprobs1, hstates1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sample_hidden_from_visible(self, visible):
"""Sample the hidden units from the visible units. This is the Positive phase of the Contrastive Divergence algorithm. :param visible: activations of the visible units :return: tuple(hidden probabilities, hidden binary states) """ |
# P(h = 1 | v) = sigmoid(v W + bh); hrand supplies the uniform noise
# used to binarize the probabilities into states.
hprobs = tf.nn.sigmoid(tf.add(tf.matmul(visible, self.W), self.bh_))
hstates = utilities.sample_prob(hprobs, self.hrand)
return hprobs, hstates
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sample_visible_from_hidden(self, hidden, n_features):
"""Sample the visible units from the hidden units. This is the Negative phase of the Contrastive Divergence algorithm. :param hidden: activations of the hidden units :param n_features: number of features :return: visible probabilities """ |
visible_activation = tf.add(
tf.matmul(hidden, tf.transpose(self.W)),
self.bv_
)
if self.visible_unit_type == 'bin':
vprobs = tf.nn.sigmoid(visible_activation)
elif self.visible_unit_type == 'gauss':
vprobs = tf.truncated_normal(
(1, n_features), mean=visible_activation, stddev=self.stddev)
else:
vprobs = None
return vprobs |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compute_positive_association(self, visible, hidden_probs, hidden_states):
"""Compute positive associations between visible and hidden units. :param visible: visible units :param hidden_probs: hidden units probabilities :param hidden_states: hidden units states :return: positive association = dot(visible.T, hidden) """ |
if self.visible_unit_type == 'bin':
positive = tf.matmul(tf.transpose(visible), hidden_states)
elif self.visible_unit_type == 'gauss':
positive = tf.matmul(tf.transpose(visible), hidden_probs)
else:
positive = None
return positive |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_model(self, shape, gibbs_sampling_steps, model_path):
"""Load a trained model from disk. The shape of the model (num_visible, num_hidden) and the number of gibbs sampling steps must be known in order to restore the model. :param shape: tuple(num_visible, num_hidden) :param gibbs_sampling_steps: :param model_path: :return: self """ |
# Rebuild the graph with the stored architecture, then restore the
# trained weights from the checkpoint.
n_features, self.num_hidden = shape[0], shape[1]
self.gibbs_sampling_steps = gibbs_sampling_steps
self.build_model(n_features)
init_op = tf.global_variables_initializer()
self.tf_saver = tf.train.Saver()
with tf.Session() as self.tf_session:
    self.tf_session.run(init_op)
    self.tf_saver.restore(self.tf_session, model_path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_parameters(self, graph=None):
"""Return the model parameters in the form of numpy arrays. :param graph: tf graph object :return: model parameters """ |
# Evaluate the variables in a fresh session restored from disk so the
# returned numpy arrays reflect the trained model.
g = graph if graph is not None else self.tf_graph
with g.as_default():
    with tf.Session() as self.tf_session:
        self.tf_saver.restore(self.tf_session, self.model_path)
        return {
            'W': self.W.eval(),
            'bh_': self.bh_.eval(),
            'bv_': self.bv_.eval()
        }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def edit_distance(seq1, seq2, action_function=lowest_cost_action, test=operator.eq):
"""Computes the edit distance between the two given sequences. This uses the relatively fast method that only constructs two columns of the 2d array for edits. This function actually uses four columns because we track the number of matches too. """ |
# Two-row Levenshtein-style DP: v0/v1 hold the previous/current error
# column, m0/m1 the corresponding match counts.
m = len(seq1)
n = len(seq2)
# Special, easy cases:
if seq1 == seq2:
    return 0, n
if m == 0:
    return n, 0
if n == 0:
    return m, 0
v0 = [0] * (n + 1)  # The two 'error' columns
v1 = [0] * (n + 1)
m0 = [0] * (n + 1)  # The two 'match' columns
m1 = [0] * (n + 1)
# Base row: transforming the empty prefix into seq2[:i] costs i inserts.
for i in range(1, n + 1):
    v0[i] = i
for i in range(1, m + 1):
    # Transforming seq1[:i] into the empty prefix costs i deletes.
    v1[0] = i
    for j in range(1, n + 1):
        cost = 0 if test(seq1[i - 1], seq2[j - 1]) else 1
        # The costs
        ins_cost = v1[j - 1] + 1
        del_cost = v0[j] + 1
        sub_cost = v0[j - 1] + cost
        # Match counts
        ins_match = m1[j - 1]
        del_match = m0[j]
        sub_match = m0[j - 1] + int(not cost)
        # The pluggable policy picks which action to take on ties.
        action = action_function(ins_cost, del_cost, sub_cost, ins_match,
                                 del_match, sub_match, cost)
        if action in [EQUAL, REPLACE]:
            v1[j] = sub_cost
            m1[j] = sub_match
        elif action == INSERT:
            v1[j] = ins_cost
            m1[j] = ins_match
        elif action == DELETE:
            v1[j] = del_cost
            m1[j] = del_match
        else:
            raise Exception('Invalid dynamic programming option returned!')
    # Copy the columns over. (This reuses the loop variable `i`; harmless
    # in Python because the outer loop reassigns it each iteration.)
    for i in range(0, n + 1):
        v0[i] = v1[i]
        m0[i] = m1[i]
return v1[n], m1[n]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_opcodes_from_bp_table(bp):
"""Given a 2d list structure, collect the opcodes from the best path.""" |
x = len(bp) - 1
y = len(bp[0]) - 1
opcodes = []
while x != 0 or y != 0:
this_bp = bp[x][y]
opcodes.append(this_bp)
if this_bp[0] == EQUAL or this_bp[0] == REPLACE:
x = x - 1
y = y - 1
elif this_bp[0] == INSERT:
y = y - 1
elif this_bp[0] == DELETE:
x = x - 1
opcodes.reverse()
return opcodes |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
"""Read two files line-by-line and print edit distances between each pair of lines. Will terminate at the end of the shorter of the two files.""" |
if len(sys.argv) != 3:
    print('Usage: {} <file1> <file2>'.format(sys.argv[0]))
    exit(-1)
file1 = sys.argv[1]
file2 = sys.argv[2]
# zip stops at the shorter iterator, which gives the documented behavior
# of terminating at the end of the shorter file.
with open(file1) as f1, open(file2) as f2:
    for line1, line2 in zip(f1, f2):
        print("Line 1: {}".format(line1.strip()))
        print("Line 2: {}".format(line2.strip()))
        # Token-level (whitespace-split) edit distance per line pair.
        dist, _, _ = edit_distance_backpointer(line1.split(), line2.split())
        print('Distance: {}'.format(dist))
        print('=' * 80)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ratio(self):
"""Ratio of matches to the average sequence length.""" |
return 2.0 * self.matches() / (len(self.seq1) + len(self.seq2)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compute_distance_fast(self):
"""Calls edit_distance, and asserts that if we already have values for matches and distance, that they match.""" |
d, m = edit_distance(self.seq1, self.seq2,
                     action_function=self.action_function,
                     test=self.test)
# Sanity-check against previously cached values.
# NOTE(review): these truthiness tests also skip the assertion when the
# cached distance/match count is 0, not only when it is None.
if self.dist:
    assert d == self.dist
if self._matches:
    assert m == self._matches
self.dist = d
self._matches = m
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def newbie(cls, *args, **kwargs):
""" Create a newbie class, with all the skills needed """ |
parser = cls(*args, **kwargs)
subparser = parser.add_subparsers(dest='command')
# Shared parent parsers supply the common output options.
parents = [parser.pparser, parser.output_parser]
# 'search' sub-command: query the API for new items.
sparser = subparser.add_parser('search', help='Perform new search of items', parents=parents)
# Adds search arguments to a parser
parser.search_group = sparser.add_argument_group('search options')
parser.search_group.add_argument('-c', '--collection', help='Name of collection', default=None)
h = 'One or more scene IDs from provided collection (ignores other parameters)'
parser.search_group.add_argument('--ids', help=h, nargs='*', default=None)
parser.search_group.add_argument('--bbox', help='Bounding box (min lon, min lat, max lon, max lat)', nargs=4)
parser.search_group.add_argument('--intersects', help='GeoJSON Feature (file or string)')
parser.search_group.add_argument('--datetime', help='Single date/time or begin and end date/time (e.g., 2017-01-01/2017-02-15)')
parser.search_group.add_argument('-p', '--property', nargs='*', help='Properties of form KEY=VALUE (<, >, <=, >=, = supported)')
parser.search_group.add_argument('--sort', help='Sort by fields', nargs='*')
h = 'Only output how many Items found'
parser.search_group.add_argument('--found', help=h, action='store_true', default=False)
parser.search_group.add_argument('--url', help='URL of the API', default=config.API_URL)
# 'load' sub-command: read items from a previously saved GeoJSON file;
# it additionally accepts the download options.
parents.append(parser.download_parser)
lparser = subparser.add_parser('load', help='Load items from previous search', parents=parents)
lparser.add_argument('items', help='GeoJSON file of Items')
return parser
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main(items=None, printmd=None, printcal=False, found=False, save=None, download=None, requestor_pays=False, **kwargs):
""" Main function for performing a search """ |
if items is None:
    # No pre-loaded items: perform a new search against the API.
    search = Search.search(**kwargs)
    if found:
        # Only report the hit count, without fetching any items.
        num = search.found()
        print('%s items found' % num)
        return num
    items = search.items()
else:
    # Otherwise, load a previously saved search result from file.
    items = Items.load(items)
print('%s items found' % len(items))
# print metadata
if printmd is not None:
    print(items.summary(printmd))
# print calendar
if printcal:
    print(items.calendar())
# save all metadata in JSON file
if save is not None:
    items.save(filename=save)
# download files given `download` keys
if download is not None:
    if 'ALL' in download:
        # 'ALL' expands to the union of asset keys across all items.
        download = set([k for i in items for k in i.assets])
    for key in download:
        items.download(key=key, path=config.DATADIR, filename=config.FILENAME, requestor_pays=requestor_pays)
return items
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def found(self):
""" Small query to determine total number of hits """ |
if 'ids' in self.kwargs:
    # ID-based search: the hit count is how many of the requested IDs
    # actually resolve within the collection.
    cid = self.kwargs['query']['collection']['eq']
    return len(self.items_by_id(self.kwargs['ids'], cid))
# limit=0 requests only the result metadata, not the features.
# NOTE(review): self.kwargs is applied on top, so user-supplied
# 'page'/'limit' values would override these -- confirm intended.
kwargs = {
    'page': 1,
    'limit': 0
}
kwargs.update(self.kwargs)
results = self.query(**kwargs)
return results['meta']['found']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collection(cls, cid):
""" Get a Collection record """ |
# Fetch a single collection record from the API by its ID.
url = urljoin(config.API_URL, 'collections/%s' % cid)
return Collection(cls.query(url=url))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def items_by_id(cls, ids, collection):
""" Return Items from collection with matching ids """ |
col = cls.collection(collection)
items = []
base_url = urljoin(config.API_URL, 'collections/%s/items' % collection)
for id in ids:
try:
items.append(Item(cls.query(urljoin(base_url, id))))
except SatSearchError as err:
pass
return Items(items, collections=[col]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def items(self, limit=10000):
""" Return all of the Items and Collections for this search """ |
# Page size used when fetching from the API.
_limit = 500
if 'ids' in self.kwargs:
    # ID-based search bypasses paging entirely.
    col = self.kwargs.get('query', {}).get('collection', {}).get('eq', None)
    if col is None:
        raise SatSearchError('Collection required when searching by id')
    return self.items_by_id(self.kwargs['ids'], col)
items = []
found = self.found()
if found > limit:
    logger.warning('There are more items found (%s) than the limit (%s) provided.' % (found, limit))
maxitems = min(found, limit)
kwargs = {
    'page': 1,
    'limit': min(_limit, maxitems)
}
kwargs.update(self.kwargs)
# Page through the results until the cap is reached.
while len(items) < maxitems:
    items += [Item(i) for i in self.query(**kwargs)['features']]
    kwargs['page'] += 1
# retrieve collections
collections = []
for c in set([item.properties['collection'] for item in items if 'collection' in item.properties]):
    collections.append(self.collection(c))
    #del collections[c]['links']
# merge collections into items
#_items = []
#for item in items:
#    import pdb; pdb.set_trace()
#    if 'collection' in item['properties']:
#        item = dict_merge(item, collections[item['properties']['collection']])
#    _items.append(Item(item))
return Items(items, collections=collections, search=self.kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def copy_byte_range(infile, outfile, start=None, stop=None, bufsize=16*1024):
'''Like shutil.copyfileobj, but only copy a range of the streams.
Both start and stop are inclusive.
'''
if start is not None: infile.seek(start)
while 1:
to_read = min(bufsize, stop + 1 - infile.tell() if stop else bufsize)
buf = infile.read(to_read)
if not buf:
break
outfile.write(buf) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def parse_byte_range(byte_range):
'''Returns the two numbers in 'bytes=123-456' or throws ValueError.
The last number or both numbers may be None.
'''
if byte_range.strip() == '':
return None, None
m = BYTE_RANGE_RE.match(byte_range)
if not m:
raise ValueError('Invalid byte range %s' % byte_range)
first, last = [x and int(x) for x in m.groups()]
if last and last < first:
raise ValueError('Invalid byte range %s' % byte_range)
return first, last |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def cache_page(*args, **kwargs):
    '''
    Same as django's ``cache_page`` decorator, but wraps the view into
    additional decorators before and after that. Makes it possible to serve multiple
    flavours without getting into trouble with django's caching that doesn't
    know about flavours.
    '''
    # Build the stock django decorator once, then sandwich it between the
    # flavour-vary decorators so cache entries differ per flavour.
    decorator = _django_cache_page(*args, **kwargs)
    def flavoured_decorator(func):
        return vary_on_flavour_fetch(decorator(vary_on_flavour_update(func)))
    return flavoured_decorator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def CheckLineLength(filename, linenumber, clean_lines, errors):
    """Check for lines longer than the recommended length."""
    raw = clean_lines.raw_lines[linenumber]
    limit = _lint_state.linelength
    if len(raw) > limit:
        return errors(filename, linenumber, 'linelength',
                      'Lines should be <= %d characters long' % (limit))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def CheckUpperLowerCase(filename, linenumber, clean_lines, errors):
    """Check that commands are either lower case or upper case, but not both."""
    line = clean_lines.lines[linenumber]
    if not ContainsCommand(line):
        return
    command = GetCommand(line)
    # mixed case within a single command is always wrong
    if IsCommandMixedCase(command):
        return errors(
            filename,
            linenumber,
            'readability/wonkycase',
            'Do not use mixed case commands')
    if clean_lines.have_seen_uppercase is None:
        # remember the first style seen in this file as the reference
        clean_lines.have_seen_uppercase = IsCommandUpperCase(command)
    elif IsCommandUpperCase(command) != clean_lines.have_seen_uppercase:
        # later commands must match the style established earlier
        return errors(
            filename,
            linenumber,
            'readability/mixedcase',
            'Do not mix upper and lower case commands')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def CheckCommandSpaces(filename, linenumber, clean_lines, errors):
    """ No extra spaces between command and parenthesis """
    line = clean_lines.lines[linenumber]
    match = ContainsCommand(line)
    # group(2) is presumably the whitespace between the command name and
    # its '(' -- TODO confirm against ContainsCommand's pattern
    if match and len(match.group(2)):
        errors(filename, linenumber, 'whitespace/extra',
            "Extra spaces between '%s' and its ()"%(match.group(1)))
    if match:
        # spaces directly after the opening parenthesis
        spaces_after_open = len(_RE_COMMAND_START_SPACES.match(line).group(1))
        initial_spaces = GetInitialSpaces(line)
        initial_linenumber = linenumber
        end = None
        # scan forward (possibly across lines) for the closing parenthesis;
        # NOTE: this mutates the local `linenumber` and `line`
        while True:
            line = clean_lines.lines[linenumber]
            end = _RE_COMMAND_END_SPACES.search(line)
            if end:
                break
            linenumber += 1
            if linenumber >= len(clean_lines.lines):
                break
        # fell off the end of the file without finding the closing ')'
        if linenumber == len(clean_lines.lines) and not end:
            errors(filename, initial_linenumber, 'syntax',
                'Unable to find the end of this command')
        if end:
            spaces_before_end = len(end.group(1))
            # re-read indentation from the line that holds the ')'
            initial_spaces = GetInitialSpaces(line)
            # for multi-line commands, measure the closing spaces relative
            # to that line's indentation
            if initial_linenumber != linenumber and spaces_before_end >= initial_spaces:
                spaces_before_end -= initial_spaces
            # spaces after '(' must mirror spaces before ')'
            if spaces_after_open != spaces_before_end:
                errors(filename, initial_linenumber, 'whitespace/mismatch',
                    'Mismatching spaces inside () after command')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def CheckRepeatLogic(filename, linenumber, clean_lines, errors):
    """Check for logic inside else, endif etc."""
    line = clean_lines.lines[linenumber]
    lowered = line.lower()
    for cmd in _logic_commands:
        if not re.search(r'\b%s\b'%cmd, lowered):
            continue
        # this line uses one of the block-closing commands; flag any
        # repeated expression inside its parentheses
        m = _RE_LOGIC_CHECK.search(line)
        if m:
            errors(filename, linenumber, 'readability/logic',
                   'Expression repeated inside %s; '
                   'better to use only %s()'%(cmd, m.group(1)))
        # only the first matching command is considered
        break
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_hurst_hist():
    """Plots a histogram of values obtained for the hurst exponent of
    uniformly distributed white noise.

    This function requires the package ``matplotlib``.
    """
    # local import to avoid dependency for non-debug use
    import matplotlib.pyplot as plt
    # 100 independent estimates on white noise of length 10000
    hs = [nolds.hurst_rs(np.random.random(size=10000), corrected=True)
          for _ in range(100)]
    plt.hist(hs, bins=20)
    # bug fix: label typo "esimated" -> "estimated"
    plt.xlabel("estimated value of hurst exponent")
    plt.ylabel("number of experiments")
    plt.show()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hurst_compare_nvals(data, nvals=None):
    """ Creates a plot that compares the results of different choices for
    nvals for the function hurst_rs.

    Args:
      data (array-like of float):
        the input data from which the hurst exponent should be estimated
    Kwargs:
      nvals (array of int):
        a manually selected value for the nvals parameter that should be
        plotted in comparison to the default choices
    """
    # local import so matplotlib is only required when plotting
    import matplotlib.pyplot as plt
    data = np.asarray(data)
    # candidate window sizes: every n from 2 up to the full series length
    n_all = np.arange(2,len(data)+1)
    dd_all = nolds.hurst_rs(data, nvals=n_all, debug_data=True, fit="poly")
    dd_def = nolds.hurst_rs(data, debug_data=True, fit="poly")
    # recover the default nvals from the log-scaled x-values in debug data
    n_def = np.round(np.exp(dd_def[1][0])).astype("int32")
    # window sizes that evenly divide the data length
    # NOTE(review): the mask is built from n_all[:-1] but indexes n_all;
    # the shared prefix makes the selected values identical -- confirm
    n_div = n_all[np.where(len(data) % n_all[:-1] == 0)]
    dd_div = nolds.hurst_rs(data, nvals=n_div, debug_data=True, fit="poly")
    def corr(nvals):
        # Anis-Lloyd/Peters-style correction term: log of expected (R/S)_n
        return [np.log(nolds.expected_rs(n)) for n in nvals]
    # plot corrected log(R/S) values for each choice of nvals
    l_all = plt.plot(dd_all[1][0], dd_all[1][1] - corr(n_all), "o")
    l_def = plt.plot(dd_def[1][0], dd_def[1][1] - corr(n_def), "o")
    l_div = plt.plot(dd_div[1][0], dd_div[1][1] - corr(n_div), "o")
    l_cst = []
    t_cst = []
    if nvals is not None:
        dd_cst = nolds.hurst_rs(data, nvals=nvals, debug_data=True, fit="poly")
        l_cst = plt.plot(dd_cst[1][0], dd_cst[1][1] - corr(nvals), "o")
        l_cst = l_cst
        t_cst = ["custom"]
    plt.xlabel("log(n)")
    plt.ylabel("log((R/S)_n - E[(R/S)_n])")
    plt.legend(l_all + l_def + l_div + l_cst, ["all", "default", "divisors"] + t_cst)
    labeled_data = zip([dd_all[0], dd_def[0], dd_div[0]], ["all", "def", "div"])
    # NOTE(review): the loop variable shadows the `data` parameter; harmless
    # here because `data` is not used afterwards
    for data, label in labeled_data:
        print("%s: %.3f" % (label, data))
    if nvals is not None:
        print("custom: %.3f" % dd_cst[0])
    plt.show()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fbm(n, H=0.75):
    """Generates fractional brownian motions of desired length.

    Author: Christian Thomae

    References:
      .. [fbm_1] https://en.wikipedia.org/wiki/Fractional_Brownian_motion#Method_1_of_simulation

    Args:
      n (int): length of sequence to generate
    Kwargs:
      H (float): hurst parameter
    Returns:
      array of float: simulated fractional brownian motion
    """
    # TODO more detailed description of fbm
    assert 0 < H < 1

    def autocov(t, s):
        # covariance of fBm: R(t, s) = 0.5 * (s^2H + t^2H - |t - s|^2H)
        twoH = 2 * H
        return 0.5 * (s**twoH + t**twoH - np.abs(t - s)**twoH)

    # covariance matrix over the integer grid [0, n) x [0, n)
    cov = autocov(*np.mgrid[0:n, 0:n])
    # eigendecomposition cov = P diag(w) P^-1; its square root maps iid
    # standard normal noise to correlated fBm samples
    w, P = np.linalg.eigh(cov)
    root = np.dot(np.dot(P, np.sqrt(np.diag(w))), np.linalg.inv(P))
    noise = np.random.randn(n)
    return np.dot(root, noise)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def qrandom(n):
    """Creates an array of n true random numbers obtained from the quantum
    random number generator at qrng.anu.edu.au

    This function requires the package quantumrandom and an internet
    connection.

    Args:
      n (int): length of the random array
    Return:
      array of ints: array of truly random unsigned 16 bit int values
    """
    import quantumrandom
    # fetch full 1024-value chunks, then trim to the requested length
    n_chunks = int(np.ceil(n / 1024.0))
    chunks = [quantumrandom.get_data(data_type='uint16', array_length=1024)
              for _ in range(n_chunks)]
    return np.concatenate(chunks)[:n]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_qrandom():
    """Loads a set of 10000 random numbers generated by qrandom.

    This dataset can be used when you want to do some limited tests with
    "true" random data without an internet connection.

    Returns:
      int array: the dataset
    """
    resource = "datasets/qrandom.npy"
    with pkg_resources.resource_stream(__name__, resource) as stream:
        return np.load(stream)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_brown72():
    """Loads the dataset brown72 with a prescribed Hurst exponent of 0.72

    Source: http://www.bearcave.com/misl/misl_tech/wavelets/hurst/

    Returns:
      float array: the dataset
    """
    resource = "datasets/brown72.npy"
    with pkg_resources.resource_stream(__name__, resource) as stream:
        return np.load(stream)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tent_map(x, steps, mu=2):
    """Generates a time series of the tent map.

    For mu > 1 the map stretches the unit interval and folds back values
    above one, which gives the characteristic tent-shaped plot of x_i vs
    x_i+1 and a Lyapunov exponent of ln(mu).

    References:
      .. [tm_1] https://en.wikipedia.org/wiki/Tent_map

    Args:
      x (float): starting point
      steps (int): number of steps for which the generator should run
    Kwargs:
      mu (int): parameter mu that controls the behavior of the map
    Returns:
      generator object: the generator that creates the time series
    """
    for _ in range(steps):
        if x < 0.5:
            x = mu * x
        else:
            x = mu * (1 - x)
        yield x
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def logistic_map(x, steps, r=4):
    r"""Generates a time series of the logistic map x -> r x (1 - x).

    Depending on r the series becomes static (r < 3), oscillates (r >= 3)
    or behaves chaotically (from about r = 3.6, fully at r = 4). The
    Lyapunov exponent can be obtained from the mean of
    :math:`\ln |f'(x_k)|` with :math:`f'(x) = r - 2 r x`.

    References:
      .. [lm_1] https://en.wikipedia.org/wiki/Tent_map
      .. [lm_2] https://blog.abhranil.net/2015/05/15/lyapunov-exponent-of-the-logistic-map-mathematica-code/

    Args:
      x (float): starting point
      steps (int): number of steps for which the generator should run
    Kwargs:
      r (int): parameter r that controls the behavior of the map
    Returns:
      generator object: the generator that creates the time series
    """
    for _ in range(steps):
        x = r * x * (1 - x)
        yield x
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delay_embedding(data, emb_dim, lag=1):
    """Perform a time-delay embedding of a time series.

    Args:
      data (array-like): the data that should be embedded
      emb_dim (int): the embedding dimension
    Kwargs:
      lag (int): the lag between elements in the embedded vectors
    Returns:
      emb_dim x m array: matrix of embedded vectors
        [data[i], data[i+lag], ..., data[i+(emb_dim-1)*lag]]
        for i in 0 to m-1 (m = len(data)-(emb_dim-1)*lag)
    Raises:
      ValueError: if the data is too short for the requested embedding
    """
    data = np.asarray(data)
    min_len = (emb_dim - 1) * lag + 1
    if len(data) < min_len:
        msg = "cannot embed data of length {} with embedding dimension {} " \
            + "and lag {}, minimum required length is {}"
        raise ValueError(msg.format(len(data), emb_dim, lag, min_len))
    n_vectors = len(data) - min_len + 1
    # broadcast row starts (m, 1) against in-vector offsets (emb_dim,)
    offsets = np.arange(emb_dim) * lag
    starts = np.arange(n_vectors).reshape((n_vectors, 1))
    return data[starts + offsets]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lyap_r_len(**kwargs):
    """Helper function that calculates the minimum number of data points
    required to use lyap_r.

    Note that none of the required parameters may be set to None.

    Kwargs:
      kwargs(dict): arguments used for lyap_r (required: emb_dim, lag,
        trajectory_len and min_tsep)
    Returns:
      minimum number of data points required to call lyap_r with the
      given parameters
    """
    # shortest series that yields a single orbit vector
    orbit_len = (kwargs['emb_dim'] - 1) * kwargs['lag'] + 1
    # extra points so a full trajectory of trajectory_len vectors exists
    trajectory_extra = kwargs['trajectory_len'] - 1
    # extra points so every vector has a neighbor outside min_tsep
    neighbor_extra = 2 * kwargs['min_tsep'] + 1
    return orbit_len + trajectory_extra + neighbor_extra
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lyap_e_len(**kwargs):
    """Helper function that calculates the minimum number of data points
    required to use lyap_e.

    Note that none of the required parameters may be set to None.

    Kwargs:
      kwargs(dict): arguments used for lyap_e (required: emb_dim,
        matrix_dim, min_nb and min_tsep)
    Returns:
      minimum number of data points required to call lyap_e with the
      given parameters
    """
    # step size between successive matrix rows
    m = (kwargs['emb_dim'] - 1) // (kwargs['matrix_dim'] - 1)
    return (
        # shortest series that yields a single orbit vector
        kwargs['emb_dim']
        # each starting point is followed for m more steps
        + m
        # temporal exclusion window around each vector
        + 2 * kwargs['min_tsep']
        # at least min_nb neighbors for each orbit vector
        + kwargs['min_nb']
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.