Dataset columns: repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (1 class: python) | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) | partition (1 class: train)

Each record below is shown as repo | path | func_name | language | partition, followed by the function source and its GitHub URL.

google-research/batch-ppo | agents/tools/loop.py | Loop._find_current_phase | python | train

```python
def _find_current_phase(self, global_step):
  """Determine the current phase based on the global step.

  This ensures continuing the correct phase after restoring checkpoints.

  Args:
    global_step: The global number of steps performed across all phases.

  Returns:
    Tuple of phase object, epoch number, and phase steps within the epoch.
  """
  epoch_size = sum(phase.steps for phase in self._phases)
  epoch = int(global_step // epoch_size)
  steps_in = global_step % epoch_size
  for phase in self._phases:
    if steps_in < phase.steps:
      return phase, epoch, steps_in
    steps_in -= phase.steps
```
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L170-L187

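The lookup is plain modular arithmetic over the concatenated phase schedule. A standalone sketch, where the `Phase` tuple and step counts are hypothetical stand-ins for what `Loop.add_phase` builds:

```python
import collections

# Hypothetical stand-in for the phase records that Loop.add_phase() creates.
Phase = collections.namedtuple('Phase', 'name steps')
phases = [Phase('train', 300), Phase('eval', 100)]

def find_current_phase(global_step):
  epoch_size = sum(phase.steps for phase in phases)  # 400
  epoch = global_step // epoch_size
  steps_in = global_step % epoch_size
  for phase in phases:
    if steps_in < phase.steps:
      return phase, epoch, steps_in
    steps_in -= phase.steps

# 750 % 400 = 350; 350 - 300 = 50, so step 750 is 50 steps into 'eval' of epoch 1.
print(find_current_phase(750))  # (Phase(name='eval', steps=100), 1, 50)
```
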
google-research/batch-ppo | agents/tools/loop.py | Loop._define_step | python | train

```python
def _define_step(self, done, score, summary):
  """Combine operations of a phase.

  Keeps track of the mean score and when to report it.

  Args:
    done: Tensor indicating whether current score can be used.
    score: Tensor holding the current, possibly intermediate, score.
    summary: Tensor holding summary string to write if not an empty string.

  Returns:
    Tuple of summary tensor, mean score, new global step, and number of steps
    made. The mean score is zero for non-reporting steps.
  """
  if done.shape.ndims == 0:
    done = done[None]
  if score.shape.ndims == 0:
    score = score[None]
  score_mean = streaming_mean.StreamingMean((), tf.float32)
  with tf.control_dependencies([done, score, summary]):
    done_score = tf.gather(score, tf.where(done)[:, 0])
    submit_score = tf.cond(
        tf.reduce_any(done), lambda: score_mean.submit(done_score), tf.no_op)
  with tf.control_dependencies([submit_score]):
    mean_score = tf.cond(self._report, score_mean.clear, float)
    steps_made = tf.shape(score)[0]
    next_step = self._step.assign_add(steps_made)
  with tf.control_dependencies([mean_score, next_step]):
    return tf.identity(summary), mean_score, next_step, steps_made
```
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L189-L217

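Stripped of the TF1 control dependencies, the done-masking amounts to the following NumPy sketch (array values are made up for illustration):

```python
import numpy as np

scores = np.array([1.0, 4.0, 7.0])     # possibly intermediate episode scores
done = np.array([True, False, True])   # which scores are complete this step

# Only finished episodes contribute to the running mean.
done_scores = scores[done]             # [1.0, 7.0]
mean_score = done_scores.mean() if done.any() else 0.0
steps_made = len(scores)               # the global step advances by batch size
print(mean_score, steps_made)          # 4.0 3
```
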
google-research/batch-ppo | agents/tools/loop.py | Loop._store_checkpoint | python | train

```python
def _store_checkpoint(self, sess, saver, global_step):
  """Store a checkpoint if a log directory was provided to the constructor.

  The directory will be created if needed.

  Args:
    sess: Session containing variables to store.
    saver: Saver used for checkpointing.
    global_step: Step number of the checkpoint name.
  """
  if not self._logdir or not saver:
    return
  tf.gfile.MakeDirs(self._logdir)
  filename = os.path.join(self._logdir, 'model.ckpt')
  saver.save(sess, filename, global_step)
```
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L219-L233

google-research/batch-ppo | agents/scripts/train.py | _define_loop | python | train

```python
def _define_loop(graph, logdir, train_steps, eval_steps):
  """Create and configure a training loop with training and evaluation phases.

  Args:
    graph: Object providing graph elements via attributes.
    logdir: Log directory for storing checkpoints and summaries.
    train_steps: Number of training steps per epoch.
    eval_steps: Number of evaluation steps per epoch.

  Returns:
    Loop object.
  """
  loop = tools.Loop(
      logdir, graph.step, graph.should_log, graph.do_report,
      graph.force_reset)
  loop.add_phase(
      'train', graph.done, graph.score, graph.summary, train_steps,
      report_every=train_steps,
      log_every=train_steps // 2,
      checkpoint_every=None,
      feed={graph.is_training: True})
  loop.add_phase(
      'eval', graph.done, graph.score, graph.summary, eval_steps,
      report_every=eval_steps,
      log_every=eval_steps // 2,
      checkpoint_every=10 * eval_steps,
      feed={graph.is_training: False})
  return loop
```
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/train.py#L70-L97

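The phase cadence follows directly from the keyword arguments above. A quick check with hypothetical config values, chosen only to make the arithmetic concrete:

```python
# Hypothetical configuration values for illustration.
update_every, eval_episodes, max_length = 30, 25, 1000

train_steps = update_every * max_length   # 30000 training steps per epoch
eval_steps = eval_episodes * max_length   # 25000 evaluation steps per epoch

# Per the add_phase() calls above:
# - the score is reported once per phase (report_every == phase length),
# - summaries are logged twice per phase (log_every == phase length // 2),
# - checkpoints are written every 10 eval phases and never during training.
print(train_steps, eval_steps)  # 30000 25000
```
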
google-research/batch-ppo | agents/scripts/train.py | train | python | train

```python
def train(config, env_processes):
  """Training and evaluation entry point yielding scores.

  Resolves some configuration attributes, creates environments, graph, and
  training loop. By default, assigns all operations to the CPU.

  Args:
    config: Object providing configurations via attributes.
    env_processes: Whether to step environments in separate processes.

  Yields:
    Evaluation scores.
  """
  tf.reset_default_graph()
  if config.update_every % config.num_agents:
    tf.logging.warn('Number of agents should divide episodes per update.')
  with tf.device('/cpu:0'):
    batch_env = utility.define_batch_env(
        lambda: _create_environment(config),
        config.num_agents, env_processes)
    graph = utility.define_simulation_graph(
        batch_env, config.algorithm, config)
    loop = _define_loop(
        graph, config.logdir,
        config.update_every * config.max_length,
        config.eval_episodes * config.max_length)
    total_steps = int(
        config.steps / config.update_every *
        (config.update_every + config.eval_episodes))
  # Exclude episode related variables since the Python state of environments is
  # not checkpointed and thus new episodes start after resuming.
  saver = utility.define_saver(exclude=(r'.*_temporary.*',))
  sess_config = tf.ConfigProto(allow_soft_placement=True)
  sess_config.gpu_options.allow_growth = True
  with tf.Session(config=sess_config) as sess:
    utility.initialize_variables(sess, saver, config.logdir)
    for score in loop.run(sess, saver, total_steps):
      yield score
  batch_env.close()
```
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/train.py#L100-L138

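The total_steps expression scales the configured number of training steps to account for the interleaved evaluation phases. Checking the arithmetic with hypothetical config values:

```python
# Hypothetical values: 3M training steps, 30 episodes per update, 25 eval episodes.
steps, update_every, eval_episodes = 3_000_000, 30, 25

# Each train phase of `update_every` episodes is followed by an eval phase of
# `eval_episodes` episodes, so the loop must run proportionally longer.
total_steps = int(steps / update_every * (update_every + eval_episodes))
print(total_steps)  # 5500000
```
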
google-research/batch-ppo | agents/scripts/train.py | main | python | train

```python
def main(_):
  """Create or load configuration and launch the trainer."""
  utility.set_up_logging()
  if not FLAGS.config:
    raise KeyError('You must specify a configuration.')
  logdir = FLAGS.logdir and os.path.expanduser(os.path.join(
      FLAGS.logdir, '{}-{}'.format(FLAGS.timestamp, FLAGS.config)))
  try:
    config = utility.load_config(logdir)
  except IOError:
    config = tools.AttrDict(getattr(configs, FLAGS.config)())
    config = utility.save_config(config, logdir)
  for score in train(config, FLAGS.env_processes):
    tf.logging.info('Score {}.'.format(score))
```
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/train.py#L141-L154

google-research/batch-ppo | agents/parts/iterate_sequences.py | iterate_sequences | python | train

```python
def iterate_sequences(
    consumer_fn, output_template, sequences, length, chunk_length=None,
    batch_size=None, num_epochs=1, padding_value=0):
  """Iterate over batches of chunks of sequences for multiple epochs.

  The batch dimension of the length tensor must be set because it is used to
  infer buffer sizes.

  Args:
    consumer_fn: Function creating the operation to process the data.
    output_template: Nested tensors of same shape and dtype as outputs.
    sequences: Nested collection of tensors with batch and time dimension.
    length: Tensor containing the length for each sequence.
    chunk_length: Split sequences into chunks of this size; optional.
    batch_size: Split epochs into batches of this size; optional.
    num_epochs: How many times to repeat over the data.
    padding_value: Value used for padding the last chunk after the sequence.

  Raises:
    ValueError: Unknown batch size of the length tensor.

  Returns:
    Concatenated nested tensors returned by the consumer.
  """
  if not length.shape[0].value:
    raise ValueError('Batch size of length tensor must be set.')
  num_sequences = length.shape[0].value
  sequences = dict(sequence=sequences, length=length)
  dataset = tf.data.Dataset.from_tensor_slices(sequences)
  dataset = dataset.repeat(num_epochs)
  if chunk_length:
    dataset = dataset.map(remove_padding).flat_map(
        # pylint: disable=g-long-lambda
        lambda x: tf.data.Dataset.from_tensor_slices(
            chunk_sequence(x, chunk_length, padding_value)))
    num_chunks = tf.reduce_sum((length - 1) // chunk_length + 1)
  else:
    num_chunks = num_sequences
  if batch_size:
    dataset = dataset.shuffle(num_sequences // 2)
  dataset = dataset.batch(batch_size or num_sequences)
  dataset = dataset.prefetch(num_epochs)
  iterator = dataset.make_initializable_iterator()
  with tf.control_dependencies([iterator.initializer]):
    num_batches = num_epochs * num_chunks // (batch_size or num_sequences)
    return tf.scan(
        # pylint: disable=g-long-lambda
        lambda _1, index: consumer_fn(iterator.get_next()),
        tf.range(num_batches), output_template, parallel_iterations=1)
```
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/iterate_sequences.py#L26-L74

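The chunk and batch counts are ceiling-division arithmetic. A NumPy sketch with made-up sequence lengths:

```python
import numpy as np

lengths = np.array([5, 8, 3])        # hypothetical per-sequence lengths
chunk_length, batch_size, num_epochs = 4, 2, 2

# (length - 1) // chunk_length + 1 is ceiling division: chunks per sequence.
chunks_per_seq = (lengths - 1) // chunk_length + 1   # [2, 2, 1]
num_chunks = chunks_per_seq.sum()                    # 5
# Floor division; a ragged final batch would be dropped.
num_batches = num_epochs * num_chunks // batch_size  # 5
print(num_chunks, num_batches)
```
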
google-research/batch-ppo | agents/parts/iterate_sequences.py | chunk_sequence | python | train

```python
def chunk_sequence(sequence, chunk_length=200, padding_value=0):
  """Split a nested dict of sequence tensors into a batch of chunks.

  This function does not expect a batch of sequences, but a single sequence. A
  `length` key is added if it did not exist already.

  Args:
    sequence: Nested dict of tensors with time dimension.
    chunk_length: Size of chunks the sequence will be split into.
    padding_value: Value used for padding the last chunk after the sequence.

  Returns:
    Nested dict of sequence tensors with chunk dimension.
  """
  if 'length' in sequence:
    length = sequence.pop('length')
  else:
    length = tf.shape(tools.nested.flatten(sequence)[0])[0]
  num_chunks = (length - 1) // chunk_length + 1
  padding_length = chunk_length * num_chunks - length
  padded = tools.nested.map(
      # pylint: disable=g-long-lambda
      lambda tensor: tf.concat([
          tensor, 0 * tensor[:padding_length] + padding_value], 0),
      sequence)
  chunks = tools.nested.map(
      # pylint: disable=g-long-lambda
      lambda tensor: tf.reshape(
          tensor, [num_chunks, chunk_length] + tensor.shape[1:].as_list()),
      padded)
  chunks['length'] = tf.concat([
      chunk_length * tf.ones((num_chunks - 1,), dtype=tf.int32),
      [chunk_length - padding_length]], 0)
  return chunks
```
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/iterate_sequences.py#L77-L110

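In NumPy terms, the function pads a single sequence up to a multiple of chunk_length and reshapes it into chunks; a sketch with made-up values:

```python
import numpy as np

sequence = np.arange(1, 8)          # a single sequence of length 7
chunk_length, padding_value = 3, 0

num_chunks = (len(sequence) - 1) // chunk_length + 1        # 3
padding_length = chunk_length * num_chunks - len(sequence)  # 2
padded = np.concatenate([sequence, np.full(padding_length, padding_value)])
chunks = padded.reshape(num_chunks, chunk_length)
lengths = np.array([chunk_length] * (num_chunks - 1) + [chunk_length - padding_length])
print(chunks)   # [[1 2 3] [4 5 6] [7 0 0]]
print(lengths)  # [3 3 1]
```
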
google-research/batch-ppo | agents/parts/iterate_sequences.py | remove_padding | python | train

```python
def remove_padding(sequence):
  """Selects the used frames of a sequence, up to its length.

  This function does not expect a batch of sequences, but a single sequence.
  The sequence must be a dict with `length` key, which will be removed from
  the result.

  Args:
    sequence: Nested dict of tensors with time dimension.

  Returns:
    Nested dict of tensors with padding elements and `length` key removed.
  """
  length = sequence.pop('length')
  sequence = tools.nested.map(lambda tensor: tensor[:length], sequence)
  return sequence
```
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/iterate_sequences.py#L113-L128

google-research/batch-ppo | agents/parts/normalize.py | StreamingNormalize.transform | python | train

```python
def transform(self, value):
  """Normalize a single or batch tensor.

  Applies the activated transformations in the constructor using current
  estimates of mean and variance.

  Args:
    value: Batch or single value tensor.

  Returns:
    Normalized batch or single value tensor.
  """
  with tf.name_scope(self._name + '/transform'):
    no_batch_dim = value.shape.ndims == self._mean.shape.ndims
    if no_batch_dim:
      # Add a batch dimension if necessary.
      value = value[None, ...]
    if self._center:
      value -= self._mean[None, ...]
    if self._scale:
      # We cannot scale before seeing at least two samples.
      value /= tf.cond(
          self._count > 1, lambda: self._std() + 1e-8,
          lambda: tf.ones_like(self._var_sum))[None]
    if self._clip:
      value = tf.clip_by_value(value, -self._clip, self._clip)
    # Remove batch dimension if necessary.
    if no_batch_dim:
      value = value[0]
    return tf.check_numerics(value, 'value')
```
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L50-L79

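Outside the graph, the transform is just centering, scaling, and clipping. A NumPy sketch with made-up running statistics:

```python
import numpy as np

# Hypothetical running statistics and a batch of observations.
mean, std, clip = np.array([0.5, -1.0]), np.array([2.0, 0.5]), 5.0
batch = np.array([[1.5, -2.0], [20.0, 0.0]])

value = (batch - mean) / (std + 1e-8)   # center and scale
value = np.clip(value, -clip, clip)     # clip outliers
print(value)  # [[ 0.5 -2. ] [ 5.   2. ]]
```
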
google-research/batch-ppo | agents/parts/normalize.py | StreamingNormalize.update | python | train

```python
def update(self, value):
  """Update the mean and variance estimates.

  Args:
    value: Batch or single value tensor.

  Returns:
    Summary tensor.
  """
  with tf.name_scope(self._name + '/update'):
    if value.shape.ndims == self._mean.shape.ndims:
      # Add a batch dimension if necessary.
      value = value[None, ...]
    count = tf.shape(value)[0]
    with tf.control_dependencies([self._count.assign_add(count)]):
      step = tf.cast(self._count, tf.float32)
      mean_delta = tf.reduce_sum(value - self._mean[None, ...], 0)
      new_mean = self._mean + mean_delta / step
      new_mean = tf.cond(self._count > 1, lambda: new_mean, lambda: value[0])
      var_delta = (
          value - self._mean[None, ...]) * (value - new_mean[None, ...])
      new_var_sum = self._var_sum + tf.reduce_sum(var_delta, 0)
    with tf.control_dependencies([new_mean, new_var_sum]):
      update = self._mean.assign(new_mean), self._var_sum.assign(new_var_sum)
    with tf.control_dependencies(update):
      if value.shape.ndims == 1:
        value = tf.reduce_mean(value)
      return self._summary('value', tf.reduce_mean(value))
```
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L81-L108

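This is the batched form of Welford's streaming algorithm: the mean moves by the summed deltas divided by the running count, and the variance accumulator adds (x - old_mean) * (x - new_mean). A minimal NumPy sketch (class name and data are made up):

```python
import numpy as np

class StreamingStats:
  """Minimal batched Welford accumulator mirroring the update above."""

  def __init__(self):
    self.count, self.mean, self.var_sum = 0, 0.0, 0.0

  def update(self, batch):
    batch = np.asarray(batch, dtype=float)
    self.count += len(batch)
    new_mean = self.mean + (batch - self.mean).sum() / self.count
    self.var_sum += ((batch - self.mean) * (batch - new_mean)).sum()
    self.mean = new_mean

stats = StreamingStats()
for batch in ([1.0, 2.0], [3.0], [4.0, 5.0]):
  stats.update(batch)
print(stats.mean, stats.var_sum / (stats.count - 1))  # 3.0 2.5
```
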
google-research/batch-ppo | agents/parts/normalize.py | StreamingNormalize.reset | python | train

```python
def reset(self):
  """Reset the estimates of mean and variance.

  Resets the full state of this class.

  Returns:
    Operation.
  """
  with tf.name_scope(self._name + '/reset'):
    return tf.group(
        self._count.assign(0),
        self._mean.assign(tf.zeros_like(self._mean)),
        self._var_sum.assign(tf.zeros_like(self._var_sum)))
```
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L110-L122

google-research/batch-ppo | agents/parts/normalize.py | StreamingNormalize.summary | python | train

```python
def summary(self):
  """Summary string of mean and standard deviation.

  Returns:
    Summary tensor.
  """
  with tf.name_scope(self._name + '/summary'):
    mean_summary = tf.cond(
        self._count > 0, lambda: self._summary('mean', self._mean), str)
    std_summary = tf.cond(
        self._count > 1, lambda: self._summary('stddev', self._std()), str)
    return tf.summary.merge([mean_summary, std_summary])
```
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L124-L135

google-research/batch-ppo | agents/parts/normalize.py | StreamingNormalize._std | python | train

```python
def _std(self):
  """Computes the current estimate of the standard deviation.

  Note that the standard deviation is not defined until at least two samples
  have been seen.

  Returns:
    Tensor of the current standard deviation.
  """
  variance = tf.cond(
      self._count > 1,
      lambda: self._var_sum / tf.cast(self._count - 1, tf.float32),
      lambda: tf.ones_like(self._var_sum) * float('nan'))
  # The epsilon corrects for small negative variance values caused by
  # the algorithm. It was empirically chosen to work with all environments
  # tested.
  return tf.sqrt(variance + 1e-4)
```
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L137-L153

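Dividing the accumulated squared deviations by count - 1 is Bessel's correction, giving an unbiased sample variance; the epsilon keeps the square root finite. A quick NumPy check with made-up samples:

```python
import numpy as np

samples = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
var_sum = ((samples - samples.mean()) ** 2).sum()   # 10.0, what the accumulator holds
std = np.sqrt(var_sum / (len(samples) - 1) + 1e-4)  # Bessel correction plus epsilon
print(std)  # ~1.5812, vs. np.std(samples, ddof=1) ~1.5811
```
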
google-research/batch-ppo | agents/parts/normalize.py | StreamingNormalize._summary | python | train

```python
def _summary(self, name, tensor):
  """Create a scalar or histogram summary matching the rank of the tensor.

  Args:
    name: Name for the summary.
    tensor: Tensor to summarize.

  Returns:
    Summary tensor.
  """
  if tensor.shape.ndims == 0:
    return tf.summary.scalar(name, tensor)
  else:
    return tf.summary.histogram(name, tensor)
```
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L155-L168

google-research/batch-ppo | agents/parts/memory.py | EpisodeMemory.length | python | train

```python
def length(self, rows=None):
  """Tensor holding the current length of episodes.

  Args:
    rows: Episodes to select length from, defaults to all.

  Returns:
    Batch tensor of sequence lengths.
  """
  rows = tf.range(self._capacity) if rows is None else rows
  return tf.gather(self._length, rows)
```
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/memory.py#L52-L62

google-research/batch-ppo | agents/parts/memory.py | EpisodeMemory.append | python | train

```python
def append(self, transitions, rows=None):
  """Append a batch of transitions to rows of the memory.

  Args:
    transitions: Tuple of transition quantities with batch dimension.
    rows: Episodes to append to, defaults to all.

  Returns:
    Operation.
  """
  rows = tf.range(self._capacity) if rows is None else rows
  assert rows.shape.ndims == 1
  assert_capacity = tf.assert_less(
      rows, self._capacity,
      message='capacity exceeded')
  with tf.control_dependencies([assert_capacity]):
    assert_max_length = tf.assert_less(
        tf.gather(self._length, rows), self._max_length,
        message='max length exceeded')
  with tf.control_dependencies([assert_max_length]):
    timestep = tf.gather(self._length, rows)
    indices = tf.stack([rows, timestep], 1)
    append_ops = tools.nested.map(
        lambda var, val: tf.scatter_nd_update(var, indices, val),
        self._buffers, transitions, flatten=True)
  with tf.control_dependencies(append_ops):
    episode_mask = tf.reduce_sum(tf.one_hot(
        rows, self._capacity, dtype=tf.int32), 0)
    return self._length.assign_add(episode_mask)
```
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/memory.py#L64-L92

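In NumPy terms, append() writes each transition at the next free timestep of its episode row and bumps that row's length, which is what the one-hot mask does in the graph. A minimal sketch with a single scalar buffer (sizes are made up):

```python
import numpy as np

capacity, max_length = 4, 3
buffer = np.zeros((capacity, max_length))   # one scalar quantity per timestep
length = np.zeros(capacity, dtype=int)      # current length of each episode

def append(values, rows):
  # Write each value at the next free timestep of its episode row.
  buffer[rows, length[rows]] = values
  length[rows] += 1   # the one-hot episode mask in TF does this increment

append(np.array([1.0, 2.0]), rows=np.array([0, 2]))
append(np.array([3.0]), rows=np.array([0]))
print(length)     # [2 0 1 0]
print(buffer[0])  # [1. 3. 0.]
```
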
google-research/batch-ppo | agents/parts/memory.py | EpisodeMemory.replace | python | train

```python
def replace(self, episodes, length, rows=None):
  """Replace full episodes.

  Args:
    episodes: Tuple of transition quantities with batch and time dimensions.
    length: Batch of sequence lengths.
    rows: Episodes to replace, defaults to all.

  Returns:
    Operation.
  """
  rows = tf.range(self._capacity) if rows is None else rows
  assert rows.shape.ndims == 1
  assert_capacity = tf.assert_less(
      rows, self._capacity, message='capacity exceeded')
  with tf.control_dependencies([assert_capacity]):
    assert_max_length = tf.assert_less_equal(
        length, self._max_length, message='max length exceeded')
  with tf.control_dependencies([assert_max_length]):
    replace_ops = tools.nested.map(
        lambda var, val: tf.scatter_update(var, rows, val),
        self._buffers, episodes, flatten=True)
  with tf.control_dependencies(replace_ops):
    return tf.scatter_update(self._length, rows, length)
```
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/memory.py#L94-L117
|
train
|
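Continuing the hypothetical memory from the append sketch above, replace() overwrites whole episodes at once; note that length must not exceed the memory's max_length.

episodes = (tf.ones([4, 100, 8]), tf.ones([4, 100, 2]), tf.ones([4, 100]))
length = tf.fill([4], 10)  # each replacement episode is 10 steps long
replace_op = memory.replace(episodes, length)  # rows defaults to all episodes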
google-research/batch-ppo
|
agents/parts/memory.py
|
EpisodeMemory.data
|
def data(self, rows=None):
"""Access a batch of episodes from the memory.
Padding elements after the length of each episode are unspecified and might
contain old data.
Args:
rows: Episodes to select, defaults to all.
Returns:
Tuple containing a tuple of transition quantities with batch and time
dimensions, and a batch of sequence lengths.
"""
rows = tf.range(self._capacity) if rows is None else rows
assert rows.shape.ndims == 1
episode = tools.nested.map(lambda var: tf.gather(var, rows), self._buffers)
length = tf.gather(self._length, rows)
return episode, length
|
python
|
def data(self, rows=None):
"""Access a batch of episodes from the memory.
Padding elements after the length of each episode are unspecified and might
contain old data.
Args:
rows: Episodes to select, defaults to all.
Returns:
Tuple containing a tuple of transition quantities with batch and time
dimensions, and a batch of sequence lengths.
"""
rows = tf.range(self._capacity) if rows is None else rows
assert rows.shape.ndims == 1
episode = tools.nested.map(lambda var: tf.gather(var, rows), self._buffers)
length = tf.gather(self._length, rows)
return episode, length
|
[
"def",
"data",
"(",
"self",
",",
"rows",
"=",
"None",
")",
":",
"rows",
"=",
"tf",
".",
"range",
"(",
"self",
".",
"_capacity",
")",
"if",
"rows",
"is",
"None",
"else",
"rows",
"assert",
"rows",
".",
"shape",
".",
"ndims",
"==",
"1",
"episode",
"=",
"tools",
".",
"nested",
".",
"map",
"(",
"lambda",
"var",
":",
"tf",
".",
"gather",
"(",
"var",
",",
"rows",
")",
",",
"self",
".",
"_buffers",
")",
"length",
"=",
"tf",
".",
"gather",
"(",
"self",
".",
"_length",
",",
"rows",
")",
"return",
"episode",
",",
"length"
] |
Access a batch of episodes from the memory.
Padding elements after the length of each episode are unspecified and might
contain old data.
Args:
rows: Episodes to select, defaults to all.
Returns:
Tuple containing a tuple of transition quantities with batch and time
dimensions, and a batch of sequence lengths.
|
[
"Access",
"a",
"batch",
"of",
"episodes",
"from",
"the",
"memory",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/memory.py#L119-L136
|
train
|
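Reading the same hypothetical memory back with data(): padding frames beyond each length are unspecified, so a sequence mask is the usual companion.

(observ, action, reward), length = memory.data()  # defaults to all episodes
# Frames beyond `length` may hold stale values; mask them out before use.
valid = tf.sequence_mask(length, maxlen=100)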
google-research/batch-ppo
|
agents/parts/memory.py
|
EpisodeMemory.clear
|
def clear(self, rows=None):
"""Reset episodes in the memory.
Internally, this only sets their lengths to zero. The memory entries will
be overridden by future calls to append() or replace().
Args:
rows: Episodes to clear, defaults to all.
Returns:
Operation.
"""
rows = tf.range(self._capacity) if rows is None else rows
assert rows.shape.ndims == 1
return tf.scatter_update(self._length, rows, tf.zeros_like(rows))
|
python
|
def clear(self, rows=None):
"""Reset episodes in the memory.
Internally, this only sets their lengths to zero. The memory entries will
be overridden by future calls to append() or replace().
Args:
rows: Episodes to clear, defaults to all.
Returns:
Operation.
"""
rows = tf.range(self._capacity) if rows is None else rows
assert rows.shape.ndims == 1
return tf.scatter_update(self._length, rows, tf.zeros_like(rows))
|
[
"def",
"clear",
"(",
"self",
",",
"rows",
"=",
"None",
")",
":",
"rows",
"=",
"tf",
".",
"range",
"(",
"self",
".",
"_capacity",
")",
"if",
"rows",
"is",
"None",
"else",
"rows",
"assert",
"rows",
".",
"shape",
".",
"ndims",
"==",
"1",
"return",
"tf",
".",
"scatter_update",
"(",
"self",
".",
"_length",
",",
"rows",
",",
"tf",
".",
"zeros_like",
"(",
"rows",
")",
")"
] |
Reset episodes in the memory.
Internally, this only sets their lengths to zero. The memory entries will
be overridden by future calls to append() or replace().
Args:
rows: Episodes to clear, defaults to all.
Returns:
Operation.
|
[
"Reset",
"episodes",
"in",
"the",
"memory",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/memory.py#L138-L152
|
train
|
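Clearing selected rows of the same hypothetical memory; only the lengths are zeroed, so old buffer contents linger until overwritten by later append() or replace() calls.

clear_op = memory.clear(tf.constant([0, 2]))  # reset episodes 0 and 2 only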
google-research/batch-ppo
|
agents/tools/in_graph_env.py
|
InGraphEnv._parse_shape
|
def _parse_shape(self, space):
"""Get a tensor shape from a OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
Shape tuple.
"""
if isinstance(space, gym.spaces.Discrete):
return ()
if isinstance(space, gym.spaces.Box):
return space.shape
raise NotImplementedError()
|
python
|
def _parse_shape(self, space):
"""Get a tensor shape from a OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
Shape tuple.
"""
if isinstance(space, gym.spaces.Discrete):
return ()
if isinstance(space, gym.spaces.Box):
return space.shape
raise NotImplementedError()
|
[
"def",
"_parse_shape",
"(",
"self",
",",
"space",
")",
":",
"if",
"isinstance",
"(",
"space",
",",
"gym",
".",
"spaces",
".",
"Discrete",
")",
":",
"return",
"(",
")",
"if",
"isinstance",
"(",
"space",
",",
"gym",
".",
"spaces",
".",
"Box",
")",
":",
"return",
"space",
".",
"shape",
"raise",
"NotImplementedError",
"(",
")"
] |
Get a tensor shape from an OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
Shape tuple.
|
[
"Get",
"a",
"tensor",
"shape",
"from",
"a",
"OpenAI",
"Gym",
"space",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/in_graph_env.py#L134-L150
|
train
|
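A standalone mirror of the mapping above, for illustration only (not the repo's code path):

import gym

def parse_shape(space):
  # Discrete actions are scalar indices; Box spaces keep their raw shape.
  if isinstance(space, gym.spaces.Discrete):
    return ()
  if isinstance(space, gym.spaces.Box):
    return space.shape
  raise NotImplementedError()

print(parse_shape(gym.spaces.Discrete(4)))                          # ()
print(parse_shape(gym.spaces.Box(low=-1.0, high=1.0, shape=(3,))))  # (3,)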
google-research/batch-ppo
|
agents/tools/in_graph_env.py
|
InGraphEnv._parse_dtype
|
def _parse_dtype(self, space):
"""Get a tensor dtype from a OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
TensorFlow data type.
"""
if isinstance(space, gym.spaces.Discrete):
return tf.int32
if isinstance(space, gym.spaces.Box):
return tf.float32
raise NotImplementedError()
|
python
|
def _parse_dtype(self, space):
"""Get a tensor dtype from a OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
TensorFlow data type.
"""
if isinstance(space, gym.spaces.Discrete):
return tf.int32
if isinstance(space, gym.spaces.Box):
return tf.float32
raise NotImplementedError()
|
[
"def",
"_parse_dtype",
"(",
"self",
",",
"space",
")",
":",
"if",
"isinstance",
"(",
"space",
",",
"gym",
".",
"spaces",
".",
"Discrete",
")",
":",
"return",
"tf",
".",
"int32",
"if",
"isinstance",
"(",
"space",
",",
"gym",
".",
"spaces",
".",
"Box",
")",
":",
"return",
"tf",
".",
"float32",
"raise",
"NotImplementedError",
"(",
")"
] |
Get a tensor dtype from an OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
TensorFlow data type.
|
[
"Get",
"a",
"tensor",
"dtype",
"from",
"a",
"OpenAI",
"Gym",
"space",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/in_graph_env.py#L152-L168
|
train
|
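The dtype mapping has the same structure; again a standalone mirror for illustration only:

import gym
import tensorflow as tf

def parse_dtype(space):
  # Discrete spaces become integer index tensors, Box spaces become floats.
  if isinstance(space, gym.spaces.Discrete):
    return tf.int32
  if isinstance(space, gym.spaces.Box):
    return tf.float32
  raise NotImplementedError()

assert parse_dtype(gym.spaces.Discrete(4)) == tf.int32
assert parse_dtype(gym.spaces.Box(low=0.0, high=1.0, shape=(2,))) == tf.float32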
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO.begin_episode
|
def begin_episode(self, agent_indices):
"""Reset the recurrent states and stored episode.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
"""
with tf.name_scope('begin_episode/'):
if self._last_state is None:
reset_state = tf.no_op()
else:
reset_state = utility.reinit_nested_vars(
self._last_state, agent_indices)
reset_buffer = self._current_episodes.clear(agent_indices)
with tf.control_dependencies([reset_state, reset_buffer]):
return tf.constant('')
|
python
|
def begin_episode(self, agent_indices):
"""Reset the recurrent states and stored episode.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
"""
with tf.name_scope('begin_episode/'):
if self._last_state is None:
reset_state = tf.no_op()
else:
reset_state = utility.reinit_nested_vars(
self._last_state, agent_indices)
reset_buffer = self._current_episodes.clear(agent_indices)
with tf.control_dependencies([reset_state, reset_buffer]):
return tf.constant('')
|
[
"def",
"begin_episode",
"(",
"self",
",",
"agent_indices",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'begin_episode/'",
")",
":",
"if",
"self",
".",
"_last_state",
"is",
"None",
":",
"reset_state",
"=",
"tf",
".",
"no_op",
"(",
")",
"else",
":",
"reset_state",
"=",
"utility",
".",
"reinit_nested_vars",
"(",
"self",
".",
"_last_state",
",",
"agent_indices",
")",
"reset_buffer",
"=",
"self",
".",
"_current_episodes",
".",
"clear",
"(",
"agent_indices",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"reset_state",
",",
"reset_buffer",
"]",
")",
":",
"return",
"tf",
".",
"constant",
"(",
"''",
")"
] |
Reset the recurrent states and stored episode.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
|
[
"Reset",
"the",
"recurrent",
"states",
"and",
"stored",
"episode",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L81-L98
|
train
|
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO.perform
|
def perform(self, agent_indices, observ):
"""Compute batch of actions and a summary for a batch of observation.
Args:
agent_indices: Tensor containing current batch indices.
observ: Tensor of a batch of observations for all agents.
Returns:
Tuple of action batch tensor and summary tensor.
"""
with tf.name_scope('perform/'):
observ = self._observ_filter.transform(observ)
if self._last_state is None:
state = None
else:
state = tools.nested.map(
lambda x: tf.gather(x, agent_indices), self._last_state)
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
output = self._network(
observ[:, None], tf.ones(observ.shape[0]), state)
action = tf.cond(
self._is_training, output.policy.sample, output.policy.mode)
logprob = output.policy.log_prob(action)[:, 0]
# pylint: disable=g-long-lambda
summary = tf.cond(self._should_log, lambda: tf.summary.merge([
tf.summary.histogram('mode', output.policy.mode()[:, 0]),
tf.summary.histogram('action', action[:, 0]),
tf.summary.histogram('logprob', logprob)]), str)
# Remember current policy to append to memory in the experience callback.
if self._last_state is None:
assign_state = tf.no_op()
else:
assign_state = utility.assign_nested_vars(
self._last_state, output.state, agent_indices)
remember_last_action = tf.scatter_update(
self._last_action, agent_indices, action[:, 0])
policy_params = tools.nested.filter(
lambda x: isinstance(x, tf.Tensor), output.policy.parameters)
assert policy_params, 'Policy has no parameters to store.'
remember_last_policy = tools.nested.map(
lambda var, val: tf.scatter_update(var, agent_indices, val[:, 0]),
self._last_policy, policy_params, flatten=True)
with tf.control_dependencies((
assign_state, remember_last_action) + remember_last_policy):
return action[:, 0], tf.identity(summary)
|
python
|
def perform(self, agent_indices, observ):
"""Compute batch of actions and a summary for a batch of observation.
Args:
agent_indices: Tensor containing current batch indices.
observ: Tensor of a batch of observations for all agents.
Returns:
Tuple of action batch tensor and summary tensor.
"""
with tf.name_scope('perform/'):
observ = self._observ_filter.transform(observ)
if self._last_state is None:
state = None
else:
state = tools.nested.map(
lambda x: tf.gather(x, agent_indices), self._last_state)
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
output = self._network(
observ[:, None], tf.ones(observ.shape[0]), state)
action = tf.cond(
self._is_training, output.policy.sample, output.policy.mode)
logprob = output.policy.log_prob(action)[:, 0]
# pylint: disable=g-long-lambda
summary = tf.cond(self._should_log, lambda: tf.summary.merge([
tf.summary.histogram('mode', output.policy.mode()[:, 0]),
tf.summary.histogram('action', action[:, 0]),
tf.summary.histogram('logprob', logprob)]), str)
# Remember current policy to append to memory in the experience callback.
if self._last_state is None:
assign_state = tf.no_op()
else:
assign_state = utility.assign_nested_vars(
self._last_state, output.state, agent_indices)
remember_last_action = tf.scatter_update(
self._last_action, agent_indices, action[:, 0])
policy_params = tools.nested.filter(
lambda x: isinstance(x, tf.Tensor), output.policy.parameters)
assert policy_params, 'Policy has no parameters to store.'
remember_last_policy = tools.nested.map(
lambda var, val: tf.scatter_update(var, agent_indices, val[:, 0]),
self._last_policy, policy_params, flatten=True)
with tf.control_dependencies((
assign_state, remember_last_action) + remember_last_policy):
return action[:, 0], tf.identity(summary)
|
[
"def",
"perform",
"(",
"self",
",",
"agent_indices",
",",
"observ",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'perform/'",
")",
":",
"observ",
"=",
"self",
".",
"_observ_filter",
".",
"transform",
"(",
"observ",
")",
"if",
"self",
".",
"_last_state",
"is",
"None",
":",
"state",
"=",
"None",
"else",
":",
"state",
"=",
"tools",
".",
"nested",
".",
"map",
"(",
"lambda",
"x",
":",
"tf",
".",
"gather",
"(",
"x",
",",
"agent_indices",
")",
",",
"self",
".",
"_last_state",
")",
"with",
"tf",
".",
"device",
"(",
"'/gpu:0'",
"if",
"self",
".",
"_use_gpu",
"else",
"'/cpu:0'",
")",
":",
"output",
"=",
"self",
".",
"_network",
"(",
"observ",
"[",
":",
",",
"None",
"]",
",",
"tf",
".",
"ones",
"(",
"observ",
".",
"shape",
"[",
"0",
"]",
")",
",",
"state",
")",
"action",
"=",
"tf",
".",
"cond",
"(",
"self",
".",
"_is_training",
",",
"output",
".",
"policy",
".",
"sample",
",",
"output",
".",
"policy",
".",
"mode",
")",
"logprob",
"=",
"output",
".",
"policy",
".",
"log_prob",
"(",
"action",
")",
"[",
":",
",",
"0",
"]",
"# pylint: disable=g-long-lambda",
"summary",
"=",
"tf",
".",
"cond",
"(",
"self",
".",
"_should_log",
",",
"lambda",
":",
"tf",
".",
"summary",
".",
"merge",
"(",
"[",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'mode'",
",",
"output",
".",
"policy",
".",
"mode",
"(",
")",
"[",
":",
",",
"0",
"]",
")",
",",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'action'",
",",
"action",
"[",
":",
",",
"0",
"]",
")",
",",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'logprob'",
",",
"logprob",
")",
"]",
")",
",",
"str",
")",
"# Remember current policy to append to memory in the experience callback.",
"if",
"self",
".",
"_last_state",
"is",
"None",
":",
"assign_state",
"=",
"tf",
".",
"no_op",
"(",
")",
"else",
":",
"assign_state",
"=",
"utility",
".",
"assign_nested_vars",
"(",
"self",
".",
"_last_state",
",",
"output",
".",
"state",
",",
"agent_indices",
")",
"remember_last_action",
"=",
"tf",
".",
"scatter_update",
"(",
"self",
".",
"_last_action",
",",
"agent_indices",
",",
"action",
"[",
":",
",",
"0",
"]",
")",
"policy_params",
"=",
"tools",
".",
"nested",
".",
"filter",
"(",
"lambda",
"x",
":",
"isinstance",
"(",
"x",
",",
"tf",
".",
"Tensor",
")",
",",
"output",
".",
"policy",
".",
"parameters",
")",
"assert",
"policy_params",
",",
"'Policy has no parameters to store.'",
"remember_last_policy",
"=",
"tools",
".",
"nested",
".",
"map",
"(",
"lambda",
"var",
",",
"val",
":",
"tf",
".",
"scatter_update",
"(",
"var",
",",
"agent_indices",
",",
"val",
"[",
":",
",",
"0",
"]",
")",
",",
"self",
".",
"_last_policy",
",",
"policy_params",
",",
"flatten",
"=",
"True",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"(",
"assign_state",
",",
"remember_last_action",
")",
"+",
"remember_last_policy",
")",
":",
"return",
"action",
"[",
":",
",",
"0",
"]",
",",
"tf",
".",
"identity",
"(",
"summary",
")"
] |
Compute batch of actions and a summary for a batch of observations.
Args:
agent_indices: Tensor containing current batch indices.
observ: Tensor of a batch of observations for all agents.
Returns:
Tuple of action batch tensor and summary tensor.
|
[
"Compute",
"batch",
"of",
"actions",
"and",
"a",
"summary",
"for",
"a",
"batch",
"of",
"observation",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L100-L144
|
train
|
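The train/eval switch in perform() passes the distribution's sample and mode methods as tf.cond branches; a minimal standalone sketch of that idiom with a toy Normal distribution (TF1 API):

import tensorflow as tf

dist = tf.distributions.Normal(loc=0.0, scale=1.0)
is_training = tf.placeholder_with_default(True, [])
# Stochastic action while training, deterministic mode during evaluation.
action = tf.cond(is_training, dist.sample, dist.mode)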
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO.experience
|
def experience(
self, agent_indices, observ, action, reward, unused_done, unused_nextob):
"""Process the transition tuple of the current step.
When training, add the current transition tuple to the memory and update
the streaming statistics for observations and rewards. A summary string is
returned if requested at this step.
Args:
agent_indices: Tensor containing current batch indices.
observ: Batch tensor of observations.
action: Batch tensor of actions.
reward: Batch tensor of rewards.
unused_done: Batch tensor of done flags.
unused_nextob: Batch tensor of successor observations.
Returns:
Summary tensor.
"""
with tf.name_scope('experience/'):
return tf.cond(
self._is_training,
# pylint: disable=g-long-lambda
lambda: self._define_experience(
agent_indices, observ, action, reward), str)
|
python
|
def experience(
self, agent_indices, observ, action, reward, unused_done, unused_nextob):
"""Process the transition tuple of the current step.
When training, add the current transition tuple to the memory and update
the streaming statistics for observations and rewards. A summary string is
returned if requested at this step.
Args:
agent_indices: Tensor containing current batch indices.
observ: Batch tensor of observations.
action: Batch tensor of actions.
reward: Batch tensor of rewards.
unused_done: Batch tensor of done flags.
unused_nextob: Batch tensor of successor observations.
Returns:
Summary tensor.
"""
with tf.name_scope('experience/'):
return tf.cond(
self._is_training,
# pylint: disable=g-long-lambda
lambda: self._define_experience(
agent_indices, observ, action, reward), str)
|
[
"def",
"experience",
"(",
"self",
",",
"agent_indices",
",",
"observ",
",",
"action",
",",
"reward",
",",
"unused_done",
",",
"unused_nextob",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'experience/'",
")",
":",
"return",
"tf",
".",
"cond",
"(",
"self",
".",
"_is_training",
",",
"# pylint: disable=g-long-lambda",
"lambda",
":",
"self",
".",
"_define_experience",
"(",
"agent_indices",
",",
"observ",
",",
"action",
",",
"reward",
")",
",",
"str",
")"
] |
Process the transition tuple of the current step.
When training, add the current transition tuple to the memory and update
the streaming statistics for observations and rewards. A summary string is
returned if requested at this step.
Args:
agent_indices: Tensor containing current batch indices.
observ: Batch tensor of observations.
action: Batch tensor of actions.
reward: Batch tensor of rewards.
unused_done: Batch tensor of done flags.
unused_nextob: Batch tensor of successor observations.
Returns:
Summary tensor.
|
[
"Process",
"the",
"transition",
"tuple",
"of",
"the",
"current",
"step",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L146-L170
|
train
|
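The `str` fallback inside tf.cond above is a small trick: called with no arguments, `str` returns '', which tf.cond converts into an empty summary string whenever the agent is not training. A standalone sketch:

import tensorflow as tf

summary = tf.cond(tf.constant(False), lambda: tf.constant('real summary'), str)
with tf.Session() as sess:
  print(sess.run(summary))  # b''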
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO.end_episode
|
def end_episode(self, agent_indices):
"""Add episodes to the memory and perform update steps if memory is full.
During training, add the collected episodes of the batch indices that
finished their episode to the memory. If the memory is full, train on it,
and then clear the memory. A summary string is returned if requested at
this step.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
"""
with tf.name_scope('end_episode/'):
return tf.cond(
self._is_training,
lambda: self._define_end_episode(agent_indices), str)
|
python
|
def end_episode(self, agent_indices):
"""Add episodes to the memory and perform update steps if memory is full.
During training, add the collected episodes of the batch indices that
finished their episode to the memory. If the memory is full, train on it,
and then clear the memory. A summary string is returned if requested at
this step.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
"""
with tf.name_scope('end_episode/'):
return tf.cond(
self._is_training,
lambda: self._define_end_episode(agent_indices), str)
|
[
"def",
"end_episode",
"(",
"self",
",",
"agent_indices",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'end_episode/'",
")",
":",
"return",
"tf",
".",
"cond",
"(",
"self",
".",
"_is_training",
",",
"lambda",
":",
"self",
".",
"_define_end_episode",
"(",
"agent_indices",
")",
",",
"str",
")"
] |
Add episodes to the memory and perform update steps if memory is full.
During training, add the collected episodes of the batch indices that
finished their episode to the memory. If the memory is full, train on it,
and then clear the memory. A summary string is returned if requested at
this step.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
|
[
"Add",
"episodes",
"to",
"the",
"memory",
"and",
"perform",
"update",
"steps",
"if",
"memory",
"is",
"full",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L199-L216
|
train
|
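For orientation, the callbacks above compose per simulation step roughly as follows (hypothetical driving loop; the real one lives in the simulate tools, and all names here are illustrative):

# algo.begin_episode(indices_just_reset)
# action, _ = algo.perform(indices, observ)
# observ_next, reward, done = env.step(action)
# algo.experience(indices, observ, action, reward, done, observ_next)
# algo.end_episode(indices_done)  # may trigger training once memory is full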
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO._initialize_policy
|
def _initialize_policy(self):
"""Initialize the policy.
Run the policy network on dummy data to initialize its parameters for later
reuse and to analyze the policy distribution. Initializes the attributes
`self._network` and `self._policy_type`.
Raises:
ValueError: Invalid policy distribution.
Returns:
Parameters of the policy distribution and policy state.
"""
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
network = functools.partial(
self._config.network, self._config, self._batch_env.action_space)
self._network = tf.make_template('network', network)
output = self._network(
tf.zeros_like(self._batch_env.observ)[:, None],
tf.ones(len(self._batch_env)))
if output.policy.event_shape != self._batch_env.action.shape[1:]:
message = 'Policy event shape {} does not match action shape {}.'
message = message.format(
output.policy.event_shape, self._batch_env.action.shape[1:])
raise ValueError(message)
self._policy_type = type(output.policy)
is_tensor = lambda x: isinstance(x, tf.Tensor)
policy_params = tools.nested.filter(is_tensor, output.policy.parameters)
set_batch_dim = lambda x: utility.set_dimension(x, 0, len(self._batch_env))
tools.nested.map(set_batch_dim, policy_params)
if output.state is not None:
tools.nested.map(set_batch_dim, output.state)
return policy_params, output.state
|
python
|
def _initialize_policy(self):
"""Initialize the policy.
Run the policy network on dummy data to initialize its parameters for later
reuse and to analyze the policy distribution. Initializes the attributes
`self._network` and `self._policy_type`.
Raises:
ValueError: Invalid policy distribution.
Returns:
Parameters of the policy distribution and policy state.
"""
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
network = functools.partial(
self._config.network, self._config, self._batch_env.action_space)
self._network = tf.make_template('network', network)
output = self._network(
tf.zeros_like(self._batch_env.observ)[:, None],
tf.ones(len(self._batch_env)))
if output.policy.event_shape != self._batch_env.action.shape[1:]:
message = 'Policy event shape {} does not match action shape {}.'
message = message.format(
output.policy.event_shape, self._batch_env.action.shape[1:])
raise ValueError(message)
self._policy_type = type(output.policy)
is_tensor = lambda x: isinstance(x, tf.Tensor)
policy_params = tools.nested.filter(is_tensor, output.policy.parameters)
set_batch_dim = lambda x: utility.set_dimension(x, 0, len(self._batch_env))
tools.nested.map(set_batch_dim, policy_params)
if output.state is not None:
tools.nested.map(set_batch_dim, output.state)
return policy_params, output.state
|
[
"def",
"_initialize_policy",
"(",
"self",
")",
":",
"with",
"tf",
".",
"device",
"(",
"'/gpu:0'",
"if",
"self",
".",
"_use_gpu",
"else",
"'/cpu:0'",
")",
":",
"network",
"=",
"functools",
".",
"partial",
"(",
"self",
".",
"_config",
".",
"network",
",",
"self",
".",
"_config",
",",
"self",
".",
"_batch_env",
".",
"action_space",
")",
"self",
".",
"_network",
"=",
"tf",
".",
"make_template",
"(",
"'network'",
",",
"network",
")",
"output",
"=",
"self",
".",
"_network",
"(",
"tf",
".",
"zeros_like",
"(",
"self",
".",
"_batch_env",
".",
"observ",
")",
"[",
":",
",",
"None",
"]",
",",
"tf",
".",
"ones",
"(",
"len",
"(",
"self",
".",
"_batch_env",
")",
")",
")",
"if",
"output",
".",
"policy",
".",
"event_shape",
"!=",
"self",
".",
"_batch_env",
".",
"action",
".",
"shape",
"[",
"1",
":",
"]",
":",
"message",
"=",
"'Policy event shape {} does not match action shape {}.'",
"message",
"=",
"message",
".",
"format",
"(",
"output",
".",
"policy",
".",
"event_shape",
",",
"self",
".",
"_batch_env",
".",
"action",
".",
"shape",
"[",
"1",
":",
"]",
")",
"raise",
"ValueError",
"(",
"message",
")",
"self",
".",
"_policy_type",
"=",
"type",
"(",
"output",
".",
"policy",
")",
"is_tensor",
"=",
"lambda",
"x",
":",
"isinstance",
"(",
"x",
",",
"tf",
".",
"Tensor",
")",
"policy_params",
"=",
"tools",
".",
"nested",
".",
"filter",
"(",
"is_tensor",
",",
"output",
".",
"policy",
".",
"parameters",
")",
"set_batch_dim",
"=",
"lambda",
"x",
":",
"utility",
".",
"set_dimension",
"(",
"x",
",",
"0",
",",
"len",
"(",
"self",
".",
"_batch_env",
")",
")",
"tools",
".",
"nested",
".",
"map",
"(",
"set_batch_dim",
",",
"policy_params",
")",
"if",
"output",
".",
"state",
"is",
"not",
"None",
":",
"tools",
".",
"nested",
".",
"map",
"(",
"set_batch_dim",
",",
"output",
".",
"state",
")",
"return",
"policy_params",
",",
"output",
".",
"state"
] |
Initialize the policy.
Run the policy network on dummy data to initialize its parameters for later
reuse and to analyze the policy distribution. Initializes the attributes
`self._network` and `self._policy_type`.
Raises:
ValueError: Invalid policy distribution.
Returns:
Parameters of the policy distribution and policy state.
|
[
"Initialize",
"the",
"policy",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L218-L250
|
train
|
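tf.make_template is what lets _initialize_policy run the network once on dummy data and have every later call reuse the same variables; a minimal standalone sketch:

import tensorflow as tf

def net(x):
  return tf.layers.dense(x, 2)

network = tf.make_template('network', net)
first = network(tf.zeros([1, 3]))   # creates the dense layer's variables
second = network(tf.ones([5, 3]))   # reuses those same variables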
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO._initialize_memory
|
def _initialize_memory(self, policy_params):
"""Initialize temporary and permanent memory.
Args:
policy_params: Nested tuple of policy parameters with all dimensions set.
Initializes the attributes `self._current_episodes`,
`self._finished_episodes`, and `self._num_finished_episodes`. The episodes
memory serves to collect multiple episodes in parallel. Finished episodes
are copied into the next free slot of the second memory. The memory index
points to the next free slot.
"""
# We store observation, action, policy parameters, and reward.
template = (
self._batch_env.observ[0],
self._batch_env.action[0],
tools.nested.map(lambda x: x[0, 0], policy_params),
self._batch_env.reward[0])
with tf.variable_scope('ppo_temporary'):
self._current_episodes = parts.EpisodeMemory(
template, len(self._batch_env), self._config.max_length, 'episodes')
self._finished_episodes = parts.EpisodeMemory(
template, self._config.update_every, self._config.max_length, 'memory')
self._num_finished_episodes = tf.Variable(0, False)
|
python
|
def _initialize_memory(self, policy_params):
"""Initialize temporary and permanent memory.
Args:
policy_params: Nested tuple of policy parameters with all dimensions set.
Initializes the attributes `self._current_episodes`,
`self._finished_episodes`, and `self._num_finished_episodes`. The episodes
memory serves to collect multiple episodes in parallel. Finished episodes
are copied into the next free slot of the second memory. The memory index
points to the next free slot.
"""
# We store observation, action, policy parameters, and reward.
template = (
self._batch_env.observ[0],
self._batch_env.action[0],
tools.nested.map(lambda x: x[0, 0], policy_params),
self._batch_env.reward[0])
with tf.variable_scope('ppo_temporary'):
self._current_episodes = parts.EpisodeMemory(
template, len(self._batch_env), self._config.max_length, 'episodes')
self._finished_episodes = parts.EpisodeMemory(
template, self._config.update_every, self._config.max_length, 'memory')
self._num_finished_episodes = tf.Variable(0, False)
|
[
"def",
"_initialize_memory",
"(",
"self",
",",
"policy_params",
")",
":",
"# We store observation, action, policy parameters, and reward.",
"template",
"=",
"(",
"self",
".",
"_batch_env",
".",
"observ",
"[",
"0",
"]",
",",
"self",
".",
"_batch_env",
".",
"action",
"[",
"0",
"]",
",",
"tools",
".",
"nested",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
"[",
"0",
",",
"0",
"]",
",",
"policy_params",
")",
",",
"self",
".",
"_batch_env",
".",
"reward",
"[",
"0",
"]",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"'ppo_temporary'",
")",
":",
"self",
".",
"_current_episodes",
"=",
"parts",
".",
"EpisodeMemory",
"(",
"template",
",",
"len",
"(",
"self",
".",
"_batch_env",
")",
",",
"self",
".",
"_config",
".",
"max_length",
",",
"'episodes'",
")",
"self",
".",
"_finished_episodes",
"=",
"parts",
".",
"EpisodeMemory",
"(",
"template",
",",
"self",
".",
"_config",
".",
"update_every",
",",
"self",
".",
"_config",
".",
"max_length",
",",
"'memory'",
")",
"self",
".",
"_num_finished_episodes",
"=",
"tf",
".",
"Variable",
"(",
"0",
",",
"False",
")"
] |
Initialize temporary and permanent memory.
Args:
policy_params: Nested tuple of policy parameters with all dimensions set.
Initializes the attributes `self._current_episodes`,
`self._finished_episodes`, and `self._num_finished_episodes`. The episodes
memory serves to collect multiple episodes in parallel. Finished episodes
are copied into the next free slot of the second memory. The memory index
points to the next free slot.
|
[
"Initialize",
"temporary",
"and",
"permanent",
"memory",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L252-L275
|
train
|
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO._training
|
def _training(self):
"""Perform multiple training iterations of both policy and value baseline.
Training on the episodes collected in the memory. Reset the memory
afterwards. Always returns a summary string.
Returns:
Summary tensor.
"""
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
with tf.name_scope('training'):
assert_full = tf.assert_equal(
self._num_finished_episodes, self._config.update_every)
with tf.control_dependencies([assert_full]):
data = self._finished_episodes.data()
(observ, action, old_policy_params, reward), length = data
# We set padding frames of the parameters to ones to prevent Gaussians
# with zero variance. This would result in an infinite KL divergence,
# which, even if masked out, would result in NaN gradients.
old_policy_params = tools.nested.map(
lambda param: self._mask(param, length, 1), old_policy_params)
with tf.control_dependencies([tf.assert_greater(length, 0)]):
length = tf.identity(length)
observ = self._observ_filter.transform(observ)
reward = self._reward_filter.transform(reward)
update_summary = self._perform_update_steps(
observ, action, old_policy_params, reward, length)
with tf.control_dependencies([update_summary]):
penalty_summary = self._adjust_penalty(
observ, old_policy_params, length)
with tf.control_dependencies([penalty_summary]):
clear_memory = tf.group(
self._finished_episodes.clear(),
self._num_finished_episodes.assign(0))
with tf.control_dependencies([clear_memory]):
weight_summary = utility.variable_summaries(
tf.trainable_variables(), self._config.weight_summaries)
return tf.summary.merge([
update_summary, penalty_summary, weight_summary])
|
python
|
def _training(self):
"""Perform multiple training iterations of both policy and value baseline.
Training on the episodes collected in the memory. Reset the memory
afterwards. Always returns a summary string.
Returns:
Summary tensor.
"""
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
with tf.name_scope('training'):
assert_full = tf.assert_equal(
self._num_finished_episodes, self._config.update_every)
with tf.control_dependencies([assert_full]):
data = self._finished_episodes.data()
(observ, action, old_policy_params, reward), length = data
# We set padding frames of the parameters to ones to prevent Gaussians
# with zero variance. This would result in an infinite KL divergence,
# which, even if masked out, would result in NaN gradients.
old_policy_params = tools.nested.map(
lambda param: self._mask(param, length, 1), old_policy_params)
with tf.control_dependencies([tf.assert_greater(length, 0)]):
length = tf.identity(length)
observ = self._observ_filter.transform(observ)
reward = self._reward_filter.transform(reward)
update_summary = self._perform_update_steps(
observ, action, old_policy_params, reward, length)
with tf.control_dependencies([update_summary]):
penalty_summary = self._adjust_penalty(
observ, old_policy_params, length)
with tf.control_dependencies([penalty_summary]):
clear_memory = tf.group(
self._finished_episodes.clear(),
self._num_finished_episodes.assign(0))
with tf.control_dependencies([clear_memory]):
weight_summary = utility.variable_summaries(
tf.trainable_variables(), self._config.weight_summaries)
return tf.summary.merge([
update_summary, penalty_summary, weight_summary])
|
[
"def",
"_training",
"(",
"self",
")",
":",
"with",
"tf",
".",
"device",
"(",
"'/gpu:0'",
"if",
"self",
".",
"_use_gpu",
"else",
"'/cpu:0'",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'training'",
")",
":",
"assert_full",
"=",
"tf",
".",
"assert_equal",
"(",
"self",
".",
"_num_finished_episodes",
",",
"self",
".",
"_config",
".",
"update_every",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"assert_full",
"]",
")",
":",
"data",
"=",
"self",
".",
"_finished_episodes",
".",
"data",
"(",
")",
"(",
"observ",
",",
"action",
",",
"old_policy_params",
",",
"reward",
")",
",",
"length",
"=",
"data",
"# We set padding frames of the parameters to ones to prevent Gaussians",
"# with zero variance. This would result in an infinite KL divergence,",
"# which, even if masked out, would result in NaN gradients.",
"old_policy_params",
"=",
"tools",
".",
"nested",
".",
"map",
"(",
"lambda",
"param",
":",
"self",
".",
"_mask",
"(",
"param",
",",
"length",
",",
"1",
")",
",",
"old_policy_params",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"tf",
".",
"assert_greater",
"(",
"length",
",",
"0",
")",
"]",
")",
":",
"length",
"=",
"tf",
".",
"identity",
"(",
"length",
")",
"observ",
"=",
"self",
".",
"_observ_filter",
".",
"transform",
"(",
"observ",
")",
"reward",
"=",
"self",
".",
"_reward_filter",
".",
"transform",
"(",
"reward",
")",
"update_summary",
"=",
"self",
".",
"_perform_update_steps",
"(",
"observ",
",",
"action",
",",
"old_policy_params",
",",
"reward",
",",
"length",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"update_summary",
"]",
")",
":",
"penalty_summary",
"=",
"self",
".",
"_adjust_penalty",
"(",
"observ",
",",
"old_policy_params",
",",
"length",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"penalty_summary",
"]",
")",
":",
"clear_memory",
"=",
"tf",
".",
"group",
"(",
"self",
".",
"_finished_episodes",
".",
"clear",
"(",
")",
",",
"self",
".",
"_num_finished_episodes",
".",
"assign",
"(",
"0",
")",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"clear_memory",
"]",
")",
":",
"weight_summary",
"=",
"utility",
".",
"variable_summaries",
"(",
"tf",
".",
"trainable_variables",
"(",
")",
",",
"self",
".",
"_config",
".",
"weight_summaries",
")",
"return",
"tf",
".",
"summary",
".",
"merge",
"(",
"[",
"update_summary",
",",
"penalty_summary",
",",
"weight_summary",
"]",
")"
] |
Perform multiple training iterations of both policy and value baseline.
Training on the episodes collected in the memory. Reset the memory
afterwards. Always returns a summary string.
Returns:
Summary tensor.
|
[
"Perform",
"multiple",
"training",
"iterations",
"of",
"both",
"policy",
"and",
"value",
"baseline",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L294-L332
|
train
|
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO._perform_update_steps
|
def _perform_update_steps(
self, observ, action, old_policy_params, reward, length):
"""Perform multiple update steps of value function and policy.
The advantage is computed once at the beginning and shared across
    iterations. We need to decide on the summary of one iteration, and thus
choose the one after half of the iterations.
Args:
observ: Sequences of observations.
action: Sequences of actions.
old_policy_params: Parameters of the behavioral policy.
reward: Sequences of rewards.
length: Batch of sequence lengths.
Returns:
Summary tensor.
"""
return_ = utility.discounted_return(
reward, length, self._config.discount)
value = self._network(observ, length).value
if self._config.gae_lambda:
advantage = utility.lambda_advantage(
reward, value, length, self._config.discount,
self._config.gae_lambda)
else:
advantage = return_ - value
mean, variance = tf.nn.moments(advantage, axes=[0, 1], keep_dims=True)
advantage = (advantage - mean) / (tf.sqrt(variance) + 1e-8)
advantage = tf.Print(
advantage, [tf.reduce_mean(return_), tf.reduce_mean(value)],
'return and value: ')
advantage = tf.Print(
advantage, [tf.reduce_mean(advantage)],
'normalized advantage: ')
episodes = (observ, action, old_policy_params, reward, advantage)
value_loss, policy_loss, summary = parts.iterate_sequences(
self._update_step, [0., 0., ''], episodes, length,
self._config.chunk_length,
self._config.batch_size,
self._config.update_epochs,
padding_value=1)
print_losses = tf.group(
tf.Print(0, [tf.reduce_mean(value_loss)], 'value loss: '),
tf.Print(0, [tf.reduce_mean(policy_loss)], 'policy loss: '))
with tf.control_dependencies([value_loss, policy_loss, print_losses]):
return summary[self._config.update_epochs // 2]
|
python
|
def _perform_update_steps(
self, observ, action, old_policy_params, reward, length):
"""Perform multiple update steps of value function and policy.
The advantage is computed once at the beginning and shared across
    iterations. We need to decide on the summary of one iteration, and thus
choose the one after half of the iterations.
Args:
observ: Sequences of observations.
action: Sequences of actions.
old_policy_params: Parameters of the behavioral policy.
reward: Sequences of rewards.
length: Batch of sequence lengths.
Returns:
Summary tensor.
"""
return_ = utility.discounted_return(
reward, length, self._config.discount)
value = self._network(observ, length).value
if self._config.gae_lambda:
advantage = utility.lambda_advantage(
reward, value, length, self._config.discount,
self._config.gae_lambda)
else:
advantage = return_ - value
mean, variance = tf.nn.moments(advantage, axes=[0, 1], keep_dims=True)
advantage = (advantage - mean) / (tf.sqrt(variance) + 1e-8)
advantage = tf.Print(
advantage, [tf.reduce_mean(return_), tf.reduce_mean(value)],
'return and value: ')
advantage = tf.Print(
advantage, [tf.reduce_mean(advantage)],
'normalized advantage: ')
episodes = (observ, action, old_policy_params, reward, advantage)
value_loss, policy_loss, summary = parts.iterate_sequences(
self._update_step, [0., 0., ''], episodes, length,
self._config.chunk_length,
self._config.batch_size,
self._config.update_epochs,
padding_value=1)
print_losses = tf.group(
tf.Print(0, [tf.reduce_mean(value_loss)], 'value loss: '),
tf.Print(0, [tf.reduce_mean(policy_loss)], 'policy loss: '))
with tf.control_dependencies([value_loss, policy_loss, print_losses]):
return summary[self._config.update_epochs // 2]
|
[
"def",
"_perform_update_steps",
"(",
"self",
",",
"observ",
",",
"action",
",",
"old_policy_params",
",",
"reward",
",",
"length",
")",
":",
"return_",
"=",
"utility",
".",
"discounted_return",
"(",
"reward",
",",
"length",
",",
"self",
".",
"_config",
".",
"discount",
")",
"value",
"=",
"self",
".",
"_network",
"(",
"observ",
",",
"length",
")",
".",
"value",
"if",
"self",
".",
"_config",
".",
"gae_lambda",
":",
"advantage",
"=",
"utility",
".",
"lambda_advantage",
"(",
"reward",
",",
"value",
",",
"length",
",",
"self",
".",
"_config",
".",
"discount",
",",
"self",
".",
"_config",
".",
"gae_lambda",
")",
"else",
":",
"advantage",
"=",
"return_",
"-",
"value",
"mean",
",",
"variance",
"=",
"tf",
".",
"nn",
".",
"moments",
"(",
"advantage",
",",
"axes",
"=",
"[",
"0",
",",
"1",
"]",
",",
"keep_dims",
"=",
"True",
")",
"advantage",
"=",
"(",
"advantage",
"-",
"mean",
")",
"/",
"(",
"tf",
".",
"sqrt",
"(",
"variance",
")",
"+",
"1e-8",
")",
"advantage",
"=",
"tf",
".",
"Print",
"(",
"advantage",
",",
"[",
"tf",
".",
"reduce_mean",
"(",
"return_",
")",
",",
"tf",
".",
"reduce_mean",
"(",
"value",
")",
"]",
",",
"'return and value: '",
")",
"advantage",
"=",
"tf",
".",
"Print",
"(",
"advantage",
",",
"[",
"tf",
".",
"reduce_mean",
"(",
"advantage",
")",
"]",
",",
"'normalized advantage: '",
")",
"episodes",
"=",
"(",
"observ",
",",
"action",
",",
"old_policy_params",
",",
"reward",
",",
"advantage",
")",
"value_loss",
",",
"policy_loss",
",",
"summary",
"=",
"parts",
".",
"iterate_sequences",
"(",
"self",
".",
"_update_step",
",",
"[",
"0.",
",",
"0.",
",",
"''",
"]",
",",
"episodes",
",",
"length",
",",
"self",
".",
"_config",
".",
"chunk_length",
",",
"self",
".",
"_config",
".",
"batch_size",
",",
"self",
".",
"_config",
".",
"update_epochs",
",",
"padding_value",
"=",
"1",
")",
"print_losses",
"=",
"tf",
".",
"group",
"(",
"tf",
".",
"Print",
"(",
"0",
",",
"[",
"tf",
".",
"reduce_mean",
"(",
"value_loss",
")",
"]",
",",
"'value loss: '",
")",
",",
"tf",
".",
"Print",
"(",
"0",
",",
"[",
"tf",
".",
"reduce_mean",
"(",
"policy_loss",
")",
"]",
",",
"'policy loss: '",
")",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"value_loss",
",",
"policy_loss",
",",
"print_losses",
"]",
")",
":",
"return",
"summary",
"[",
"self",
".",
"_config",
".",
"update_epochs",
"//",
"2",
"]"
] |
Perform multiple update steps of value function and policy.
The advantage is computed once at the beginning and shared across
iterations. We need to decide on the summary of one iteration, and thus
choose the one after half of the iterations.
Args:
observ: Sequences of observations.
action: Sequences of actions.
old_policy_params: Parameters of the behavioral policy.
reward: Sequences of rewards.
length: Batch of sequence lengths.
Returns:
Summary tensor.
|
[
"Perform",
"multiple",
"update",
"steps",
"of",
"value",
"function",
"and",
"policy",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L334-L380
|
train
|
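The advantage normalization above in isolation: shift to zero mean and scale to unit variance, with a small epsilon guarding against constant advantages (toy shapes):

import tensorflow as tf

advantage = tf.random_normal([8, 100])  # batch of 8 episodes, 100 steps each
mean, variance = tf.nn.moments(advantage, axes=[0, 1], keep_dims=True)
normalized = (advantage - mean) / (tf.sqrt(variance) + 1e-8)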
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO._update_step
|
def _update_step(self, sequence):
"""Compute the current combined loss and perform a gradient update step.
The sequences must be a dict containing the keys `length` and `sequence`,
where the latter is a tuple containing observations, actions, parameters of
the behavioral policy, rewards, and advantages.
Args:
sequence: Sequences of episodes or chunks of episodes.
Returns:
Tuple of value loss, policy loss, and summary tensor.
"""
observ, action, old_policy_params, reward, advantage = sequence['sequence']
length = sequence['length']
old_policy = self._policy_type(**old_policy_params)
value_loss, value_summary = self._value_loss(observ, reward, length)
network = self._network(observ, length)
policy_loss, policy_summary = self._policy_loss(
old_policy, network.policy, action, advantage, length)
network_loss = network.get('loss', 0.0)
loss = policy_loss + value_loss + tf.reduce_mean(network_loss)
gradients, variables = (
zip(*self._optimizer.compute_gradients(loss)))
optimize = self._optimizer.apply_gradients(
zip(gradients, variables))
summary = tf.summary.merge([
value_summary, policy_summary,
tf.summary.histogram('network_loss', network_loss),
tf.summary.scalar('avg_network_loss', tf.reduce_mean(network_loss)),
tf.summary.scalar('gradient_norm', tf.global_norm(gradients)),
utility.gradient_summaries(zip(gradients, variables))])
with tf.control_dependencies([optimize]):
return [tf.identity(x) for x in (value_loss, policy_loss, summary)]
|
python
|
def _update_step(self, sequence):
"""Compute the current combined loss and perform a gradient update step.
The sequences must be a dict containing the keys `length` and `sequence`,
where the latter is a tuple containing observations, actions, parameters of
the behavioral policy, rewards, and advantages.
Args:
sequence: Sequences of episodes or chunks of episodes.
Returns:
Tuple of value loss, policy loss, and summary tensor.
"""
observ, action, old_policy_params, reward, advantage = sequence['sequence']
length = sequence['length']
old_policy = self._policy_type(**old_policy_params)
value_loss, value_summary = self._value_loss(observ, reward, length)
network = self._network(observ, length)
policy_loss, policy_summary = self._policy_loss(
old_policy, network.policy, action, advantage, length)
network_loss = network.get('loss', 0.0)
loss = policy_loss + value_loss + tf.reduce_mean(network_loss)
gradients, variables = (
zip(*self._optimizer.compute_gradients(loss)))
optimize = self._optimizer.apply_gradients(
zip(gradients, variables))
summary = tf.summary.merge([
value_summary, policy_summary,
tf.summary.histogram('network_loss', network_loss),
tf.summary.scalar('avg_network_loss', tf.reduce_mean(network_loss)),
tf.summary.scalar('gradient_norm', tf.global_norm(gradients)),
utility.gradient_summaries(zip(gradients, variables))])
with tf.control_dependencies([optimize]):
return [tf.identity(x) for x in (value_loss, policy_loss, summary)]
|
[
"def",
"_update_step",
"(",
"self",
",",
"sequence",
")",
":",
"observ",
",",
"action",
",",
"old_policy_params",
",",
"reward",
",",
"advantage",
"=",
"sequence",
"[",
"'sequence'",
"]",
"length",
"=",
"sequence",
"[",
"'length'",
"]",
"old_policy",
"=",
"self",
".",
"_policy_type",
"(",
"*",
"*",
"old_policy_params",
")",
"value_loss",
",",
"value_summary",
"=",
"self",
".",
"_value_loss",
"(",
"observ",
",",
"reward",
",",
"length",
")",
"network",
"=",
"self",
".",
"_network",
"(",
"observ",
",",
"length",
")",
"policy_loss",
",",
"policy_summary",
"=",
"self",
".",
"_policy_loss",
"(",
"old_policy",
",",
"network",
".",
"policy",
",",
"action",
",",
"advantage",
",",
"length",
")",
"network_loss",
"=",
"network",
".",
"get",
"(",
"'loss'",
",",
"0.0",
")",
"loss",
"=",
"policy_loss",
"+",
"value_loss",
"+",
"tf",
".",
"reduce_mean",
"(",
"network_loss",
")",
"gradients",
",",
"variables",
"=",
"(",
"zip",
"(",
"*",
"self",
".",
"_optimizer",
".",
"compute_gradients",
"(",
"loss",
")",
")",
")",
"optimize",
"=",
"self",
".",
"_optimizer",
".",
"apply_gradients",
"(",
"zip",
"(",
"gradients",
",",
"variables",
")",
")",
"summary",
"=",
"tf",
".",
"summary",
".",
"merge",
"(",
"[",
"value_summary",
",",
"policy_summary",
",",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'network_loss'",
",",
"network_loss",
")",
",",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'avg_network_loss'",
",",
"tf",
".",
"reduce_mean",
"(",
"network_loss",
")",
")",
",",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'gradient_norm'",
",",
"tf",
".",
"global_norm",
"(",
"gradients",
")",
")",
",",
"utility",
".",
"gradient_summaries",
"(",
"zip",
"(",
"gradients",
",",
"variables",
")",
")",
"]",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"optimize",
"]",
")",
":",
"return",
"[",
"tf",
".",
"identity",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"value_loss",
",",
"policy_loss",
",",
"summary",
")",
"]"
] |
Compute the current combined loss and perform a gradient update step.
The sequences must be a dict containing the keys `length` and `sequence`,
where the latter is a tuple containing observations, actions, parameters of
the behavioral policy, rewards, and advantages.
Args:
sequence: Sequences of episodes or chunks of episodes.
Returns:
Tuple of value loss, policy loss, and summary tensor.
|
[
"Compute",
"the",
"current",
"combined",
"loss",
"and",
"perform",
"a",
"gradient",
"update",
"step",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L382-L415
|
train
|
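The zip(*...) dance around compute_gradients splits the (gradient, variable) pairs so the gradients can be inspected, e.g. by tf.global_norm, before being re-paired for apply_gradients; a toy standalone sketch:

import tensorflow as tf

x = tf.get_variable('x', [], initializer=tf.ones_initializer())
loss = tf.square(x)
optimizer = tf.train.AdamOptimizer(1e-2)
gradients, variables = zip(*optimizer.compute_gradients(loss))
grad_norm = tf.global_norm(gradients)  # inspect before applying
optimize = optimizer.apply_gradients(zip(gradients, variables))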
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO._value_loss
|
def _value_loss(self, observ, reward, length):
"""Compute the loss function for the value baseline.
The value loss is the difference between empirical and approximated returns
    over the collected episodes. Returns the loss tensor and a summary string.
Args:
observ: Sequences of observations.
      reward: Sequences of rewards.
length: Batch of sequence lengths.
Returns:
Tuple of loss tensor and summary tensor.
"""
with tf.name_scope('value_loss'):
value = self._network(observ, length).value
return_ = utility.discounted_return(
reward, length, self._config.discount)
advantage = return_ - value
value_loss = 0.5 * self._mask(advantage ** 2, length)
summary = tf.summary.merge([
tf.summary.histogram('value_loss', value_loss),
tf.summary.scalar('avg_value_loss', tf.reduce_mean(value_loss))])
value_loss = tf.reduce_mean(value_loss)
return tf.check_numerics(value_loss, 'value_loss'), summary
|
python
|
def _value_loss(self, observ, reward, length):
"""Compute the loss function for the value baseline.
The value loss is the difference between empirical and approximated returns
    over the collected episodes. Returns the loss tensor and a summary string.
Args:
observ: Sequences of observations.
      reward: Sequences of rewards.
length: Batch of sequence lengths.
Returns:
Tuple of loss tensor and summary tensor.
"""
with tf.name_scope('value_loss'):
value = self._network(observ, length).value
return_ = utility.discounted_return(
reward, length, self._config.discount)
advantage = return_ - value
value_loss = 0.5 * self._mask(advantage ** 2, length)
summary = tf.summary.merge([
tf.summary.histogram('value_loss', value_loss),
tf.summary.scalar('avg_value_loss', tf.reduce_mean(value_loss))])
value_loss = tf.reduce_mean(value_loss)
return tf.check_numerics(value_loss, 'value_loss'), summary
|
[
"def",
"_value_loss",
"(",
"self",
",",
"observ",
",",
"reward",
",",
"length",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'value_loss'",
")",
":",
"value",
"=",
"self",
".",
"_network",
"(",
"observ",
",",
"length",
")",
".",
"value",
"return_",
"=",
"utility",
".",
"discounted_return",
"(",
"reward",
",",
"length",
",",
"self",
".",
"_config",
".",
"discount",
")",
"advantage",
"=",
"return_",
"-",
"value",
"value_loss",
"=",
"0.5",
"*",
"self",
".",
"_mask",
"(",
"advantage",
"**",
"2",
",",
"length",
")",
"summary",
"=",
"tf",
".",
"summary",
".",
"merge",
"(",
"[",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'value_loss'",
",",
"value_loss",
")",
",",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'avg_value_loss'",
",",
"tf",
".",
"reduce_mean",
"(",
"value_loss",
")",
")",
"]",
")",
"value_loss",
"=",
"tf",
".",
"reduce_mean",
"(",
"value_loss",
")",
"return",
"tf",
".",
"check_numerics",
"(",
"value_loss",
",",
"'value_loss'",
")",
",",
"summary"
] |
Compute the loss function for the value baseline.
The value loss is the difference between empirical and approximated returns
over the collected episodes. Returns the loss tensor and a summary string.
Args:
observ: Sequences of observations.
reward: Sequences of rewards.
length: Batch of sequence lengths.
Returns:
Tuple of loss tensor and summary tensor.
|
[
"Compute",
"the",
"loss",
"function",
"for",
"the",
"value",
"baseline",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L417-L441
|
train
|
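In compact form the masked value objective is 0.5 * mean(mask * (return - value) ** 2); a toy numeric sketch, where the explicit mask stands in for self._mask:

import tensorflow as tf

return_ = tf.constant([[1.0, 0.5, 0.0]])
value = tf.constant([[0.8, 0.4, 0.0]])
mask = tf.constant([[1.0, 1.0, 0.0]])  # the third frame is padding
value_loss = 0.5 * tf.reduce_mean(mask * tf.square(return_ - value))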
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO._policy_loss
|
def _policy_loss(
self, old_policy, policy, action, advantage, length):
"""Compute the policy loss composed of multiple components.
1. The policy gradient loss is importance sampled from the data-collecting
policy at the beginning of training.
2. The second term is a KL penalty between the policy at the beginning of
training and the current policy.
3. Additionally, if this KL already changed more than twice the target
amount, we activate a strong penalty discouraging further divergence.
Args:
old_policy: Action distribution of the behavioral policy.
policy: Sequences of distribution params of the current policy.
action: Sequences of actions.
advantage: Sequences of advantages.
length: Batch of sequence lengths.
Returns:
Tuple of loss tensor and summary tensor.
"""
with tf.name_scope('policy_loss'):
kl = tf.contrib.distributions.kl_divergence(old_policy, policy)
# Infinite values in the KL, even for padding frames that we mask out,
# cause NaN gradients since TensorFlow computes gradients with respect to
# the whole input tensor.
kl = tf.check_numerics(kl, 'kl')
kl = tf.reduce_mean(self._mask(kl, length), 1)
policy_gradient = tf.exp(
policy.log_prob(action) - old_policy.log_prob(action))
surrogate_loss = -tf.reduce_mean(self._mask(
policy_gradient * tf.stop_gradient(advantage), length), 1)
surrogate_loss = tf.check_numerics(surrogate_loss, 'surrogate_loss')
kl_penalty = self._penalty * kl
cutoff_threshold = self._config.kl_target * self._config.kl_cutoff_factor
cutoff_count = tf.reduce_sum(
tf.cast(kl > cutoff_threshold, tf.int32))
with tf.control_dependencies([tf.cond(
cutoff_count > 0,
lambda: tf.Print(0, [cutoff_count], 'kl cutoff! '), int)]):
kl_cutoff = (
self._config.kl_cutoff_coef *
tf.cast(kl > cutoff_threshold, tf.float32) *
(kl - cutoff_threshold) ** 2)
policy_loss = surrogate_loss + kl_penalty + kl_cutoff
entropy = tf.reduce_mean(policy.entropy(), axis=1)
if self._config.entropy_regularization:
policy_loss -= self._config.entropy_regularization * entropy
summary = tf.summary.merge([
tf.summary.histogram('entropy', entropy),
tf.summary.histogram('kl', kl),
tf.summary.histogram('surrogate_loss', surrogate_loss),
tf.summary.histogram('kl_penalty', kl_penalty),
tf.summary.histogram('kl_cutoff', kl_cutoff),
tf.summary.histogram('kl_penalty_combined', kl_penalty + kl_cutoff),
tf.summary.histogram('policy_loss', policy_loss),
tf.summary.scalar('avg_surr_loss', tf.reduce_mean(surrogate_loss)),
tf.summary.scalar('avg_kl_penalty', tf.reduce_mean(kl_penalty)),
tf.summary.scalar('avg_policy_loss', tf.reduce_mean(policy_loss))])
policy_loss = tf.reduce_mean(policy_loss, 0)
return tf.check_numerics(policy_loss, 'policy_loss'), summary
|
python
|
def _policy_loss(
self, old_policy, policy, action, advantage, length):
"""Compute the policy loss composed of multiple components.
1. The policy gradient loss is importance sampled from the data-collecting
policy at the beginning of training.
2. The second term is a KL penalty between the policy at the beginning of
training and the current policy.
3. Additionally, if this KL already changed more than twice the target
amount, we activate a strong penalty discouraging further divergence.
Args:
old_policy: Action distribution of the behavioral policy.
policy: Sequences of distribution params of the current policy.
action: Sequences of actions.
advantage: Sequences of advantages.
length: Batch of sequence lengths.
Returns:
Tuple of loss tensor and summary tensor.
"""
with tf.name_scope('policy_loss'):
kl = tf.contrib.distributions.kl_divergence(old_policy, policy)
# Infinite values in the KL, even for padding frames that we mask out,
# cause NaN gradients since TensorFlow computes gradients with respect to
# the whole input tensor.
kl = tf.check_numerics(kl, 'kl')
kl = tf.reduce_mean(self._mask(kl, length), 1)
policy_gradient = tf.exp(
policy.log_prob(action) - old_policy.log_prob(action))
surrogate_loss = -tf.reduce_mean(self._mask(
policy_gradient * tf.stop_gradient(advantage), length), 1)
surrogate_loss = tf.check_numerics(surrogate_loss, 'surrogate_loss')
kl_penalty = self._penalty * kl
cutoff_threshold = self._config.kl_target * self._config.kl_cutoff_factor
cutoff_count = tf.reduce_sum(
tf.cast(kl > cutoff_threshold, tf.int32))
with tf.control_dependencies([tf.cond(
cutoff_count > 0,
lambda: tf.Print(0, [cutoff_count], 'kl cutoff! '), int)]):
kl_cutoff = (
self._config.kl_cutoff_coef *
tf.cast(kl > cutoff_threshold, tf.float32) *
(kl - cutoff_threshold) ** 2)
policy_loss = surrogate_loss + kl_penalty + kl_cutoff
entropy = tf.reduce_mean(policy.entropy(), axis=1)
if self._config.entropy_regularization:
policy_loss -= self._config.entropy_regularization * entropy
summary = tf.summary.merge([
tf.summary.histogram('entropy', entropy),
tf.summary.histogram('kl', kl),
tf.summary.histogram('surrogate_loss', surrogate_loss),
tf.summary.histogram('kl_penalty', kl_penalty),
tf.summary.histogram('kl_cutoff', kl_cutoff),
tf.summary.histogram('kl_penalty_combined', kl_penalty + kl_cutoff),
tf.summary.histogram('policy_loss', policy_loss),
tf.summary.scalar('avg_surr_loss', tf.reduce_mean(surrogate_loss)),
tf.summary.scalar('avg_kl_penalty', tf.reduce_mean(kl_penalty)),
tf.summary.scalar('avg_policy_loss', tf.reduce_mean(policy_loss))])
policy_loss = tf.reduce_mean(policy_loss, 0)
return tf.check_numerics(policy_loss, 'policy_loss'), summary
|
[
"def",
"_policy_loss",
"(",
"self",
",",
"old_policy",
",",
"policy",
",",
"action",
",",
"advantage",
",",
"length",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'policy_loss'",
")",
":",
"kl",
"=",
"tf",
".",
"contrib",
".",
"distributions",
".",
"kl_divergence",
"(",
"old_policy",
",",
"policy",
")",
"# Infinite values in the KL, even for padding frames that we mask out,",
"# cause NaN gradients since TensorFlow computes gradients with respect to",
"# the whole input tensor.",
"kl",
"=",
"tf",
".",
"check_numerics",
"(",
"kl",
",",
"'kl'",
")",
"kl",
"=",
"tf",
".",
"reduce_mean",
"(",
"self",
".",
"_mask",
"(",
"kl",
",",
"length",
")",
",",
"1",
")",
"policy_gradient",
"=",
"tf",
".",
"exp",
"(",
"policy",
".",
"log_prob",
"(",
"action",
")",
"-",
"old_policy",
".",
"log_prob",
"(",
"action",
")",
")",
"surrogate_loss",
"=",
"-",
"tf",
".",
"reduce_mean",
"(",
"self",
".",
"_mask",
"(",
"policy_gradient",
"*",
"tf",
".",
"stop_gradient",
"(",
"advantage",
")",
",",
"length",
")",
",",
"1",
")",
"surrogate_loss",
"=",
"tf",
".",
"check_numerics",
"(",
"surrogate_loss",
",",
"'surrogate_loss'",
")",
"kl_penalty",
"=",
"self",
".",
"_penalty",
"*",
"kl",
"cutoff_threshold",
"=",
"self",
".",
"_config",
".",
"kl_target",
"*",
"self",
".",
"_config",
".",
"kl_cutoff_factor",
"cutoff_count",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"cast",
"(",
"kl",
">",
"cutoff_threshold",
",",
"tf",
".",
"int32",
")",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"tf",
".",
"cond",
"(",
"cutoff_count",
">",
"0",
",",
"lambda",
":",
"tf",
".",
"Print",
"(",
"0",
",",
"[",
"cutoff_count",
"]",
",",
"'kl cutoff! '",
")",
",",
"int",
")",
"]",
")",
":",
"kl_cutoff",
"=",
"(",
"self",
".",
"_config",
".",
"kl_cutoff_coef",
"*",
"tf",
".",
"cast",
"(",
"kl",
">",
"cutoff_threshold",
",",
"tf",
".",
"float32",
")",
"*",
"(",
"kl",
"-",
"cutoff_threshold",
")",
"**",
"2",
")",
"policy_loss",
"=",
"surrogate_loss",
"+",
"kl_penalty",
"+",
"kl_cutoff",
"entropy",
"=",
"tf",
".",
"reduce_mean",
"(",
"policy",
".",
"entropy",
"(",
")",
",",
"axis",
"=",
"1",
")",
"if",
"self",
".",
"_config",
".",
"entropy_regularization",
":",
"policy_loss",
"-=",
"self",
".",
"_config",
".",
"entropy_regularization",
"*",
"entropy",
"summary",
"=",
"tf",
".",
"summary",
".",
"merge",
"(",
"[",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'entropy'",
",",
"entropy",
")",
",",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'kl'",
",",
"kl",
")",
",",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'surrogate_loss'",
",",
"surrogate_loss",
")",
",",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'kl_penalty'",
",",
"kl_penalty",
")",
",",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'kl_cutoff'",
",",
"kl_cutoff",
")",
",",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'kl_penalty_combined'",
",",
"kl_penalty",
"+",
"kl_cutoff",
")",
",",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'policy_loss'",
",",
"policy_loss",
")",
",",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'avg_surr_loss'",
",",
"tf",
".",
"reduce_mean",
"(",
"surrogate_loss",
")",
")",
",",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'avg_kl_penalty'",
",",
"tf",
".",
"reduce_mean",
"(",
"kl_penalty",
")",
")",
",",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'avg_policy_loss'",
",",
"tf",
".",
"reduce_mean",
"(",
"policy_loss",
")",
")",
"]",
")",
"policy_loss",
"=",
"tf",
".",
"reduce_mean",
"(",
"policy_loss",
",",
"0",
")",
"return",
"tf",
".",
"check_numerics",
"(",
"policy_loss",
",",
"'policy_loss'",
")",
",",
"summary"
] |
Compute the policy loss composed of multiple components.
1. The policy gradient loss is importance sampled from the data-collecting
policy at the beginning of training.
2. The second term is a KL penalty between the policy at the beginning of
training and the current policy.
3. Additionally, if this KL already changed more than twice the target
amount, we activate a strong penalty discouraging further divergence.
Args:
old_policy: Action distribution of the behavioral policy.
policy: Sequences of distribution params of the current policy.
action: Sequences of actions.
advantage: Sequences of advantages.
length: Batch of sequence lengths.
Returns:
Tuple of loss tensor and summary tensor.
|
[
"Compute",
"the",
"policy",
"loss",
"composed",
"of",
"multiple",
"components",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L443-L503
|
train
|
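The loss above combines three terms: an importance-sampled policy gradient, a soft KL penalty scaled by an adaptive coefficient, and a hard quadratic cutoff once the KL exceeds a multiple of the target. A minimal NumPy sketch of the same arithmetic for one sequence (all names and default values here are illustrative, not taken from the repo):

import numpy as np

def policy_loss(logp, old_logp, advantage, kl, penalty,
                kl_target=0.01, cutoff_factor=2.0, cutoff_coef=1000.0):
    # Importance-sampled policy gradient: reweight advantages by the
    # probability ratio between the current and behavioral policies.
    ratio = np.exp(logp - old_logp)
    surrogate = -np.mean(ratio * advantage)
    # Soft penalty proportional to the measured KL divergence.
    kl_penalty = penalty * kl
    # Hard quadratic penalty activated only beyond the cutoff threshold.
    threshold = kl_target * cutoff_factor
    kl_cutoff = cutoff_coef * max(kl - threshold, 0.0) ** 2
    return surrogate + kl_penalty + kl_cutoff

logp = np.log(np.full(5, 0.5))
old_logp = np.log(np.full(5, 0.4))
print(policy_loss(logp, old_logp, np.ones(5), kl=0.02, penalty=1.0))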
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO._adjust_penalty
|
def _adjust_penalty(self, observ, old_policy_params, length):
"""Adjust the KL policy between the behavioral and current policy.
Compute how much the policy actually changed during the multiple
update steps. Adjust the penalty strength for the next training phase if we
overshot or undershot the target divergence too much.
Args:
observ: Sequences of observations.
old_policy_params: Parameters of the behavioral policy.
length: Batch of sequence lengths.
Returns:
Summary tensor.
"""
old_policy = self._policy_type(**old_policy_params)
with tf.name_scope('adjust_penalty'):
network = self._network(observ, length)
print_penalty = tf.Print(0, [self._penalty], 'current penalty: ')
with tf.control_dependencies([print_penalty]):
kl_change = tf.reduce_mean(self._mask(
tf.contrib.distributions.kl_divergence(old_policy, network.policy),
length))
kl_change = tf.Print(kl_change, [kl_change], 'kl change: ')
maybe_increase = tf.cond(
kl_change > 1.3 * self._config.kl_target,
# pylint: disable=g-long-lambda
lambda: tf.Print(self._penalty.assign(
self._penalty * 1.5), [0], 'increase penalty '),
float)
maybe_decrease = tf.cond(
kl_change < 0.7 * self._config.kl_target,
# pylint: disable=g-long-lambda
lambda: tf.Print(self._penalty.assign(
self._penalty / 1.5), [0], 'decrease penalty '),
float)
with tf.control_dependencies([maybe_increase, maybe_decrease]):
return tf.summary.merge([
tf.summary.scalar('kl_change', kl_change),
tf.summary.scalar('penalty', self._penalty)])
|
python
|
def _adjust_penalty(self, observ, old_policy_params, length):
"""Adjust the KL policy between the behavioral and current policy.
Compute how much the policy actually changed during the multiple
update steps. Adjust the penalty strength for the next training phase if we
overshot or undershot the target divergence too much.
Args:
observ: Sequences of observations.
old_policy_params: Parameters of the behavioral policy.
length: Batch of sequence lengths.
Returns:
Summary tensor.
"""
old_policy = self._policy_type(**old_policy_params)
with tf.name_scope('adjust_penalty'):
network = self._network(observ, length)
print_penalty = tf.Print(0, [self._penalty], 'current penalty: ')
with tf.control_dependencies([print_penalty]):
kl_change = tf.reduce_mean(self._mask(
tf.contrib.distributions.kl_divergence(old_policy, network.policy),
length))
kl_change = tf.Print(kl_change, [kl_change], 'kl change: ')
maybe_increase = tf.cond(
kl_change > 1.3 * self._config.kl_target,
# pylint: disable=g-long-lambda
lambda: tf.Print(self._penalty.assign(
self._penalty * 1.5), [0], 'increase penalty '),
float)
maybe_decrease = tf.cond(
kl_change < 0.7 * self._config.kl_target,
# pylint: disable=g-long-lambda
lambda: tf.Print(self._penalty.assign(
self._penalty / 1.5), [0], 'decrease penalty '),
float)
with tf.control_dependencies([maybe_increase, maybe_decrease]):
return tf.summary.merge([
tf.summary.scalar('kl_change', kl_change),
tf.summary.scalar('penalty', self._penalty)])
|
[
"def",
"_adjust_penalty",
"(",
"self",
",",
"observ",
",",
"old_policy_params",
",",
"length",
")",
":",
"old_policy",
"=",
"self",
".",
"_policy_type",
"(",
"*",
"*",
"old_policy_params",
")",
"with",
"tf",
".",
"name_scope",
"(",
"'adjust_penalty'",
")",
":",
"network",
"=",
"self",
".",
"_network",
"(",
"observ",
",",
"length",
")",
"print_penalty",
"=",
"tf",
".",
"Print",
"(",
"0",
",",
"[",
"self",
".",
"_penalty",
"]",
",",
"'current penalty: '",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"print_penalty",
"]",
")",
":",
"kl_change",
"=",
"tf",
".",
"reduce_mean",
"(",
"self",
".",
"_mask",
"(",
"tf",
".",
"contrib",
".",
"distributions",
".",
"kl_divergence",
"(",
"old_policy",
",",
"network",
".",
"policy",
")",
",",
"length",
")",
")",
"kl_change",
"=",
"tf",
".",
"Print",
"(",
"kl_change",
",",
"[",
"kl_change",
"]",
",",
"'kl change: '",
")",
"maybe_increase",
"=",
"tf",
".",
"cond",
"(",
"kl_change",
">",
"1.3",
"*",
"self",
".",
"_config",
".",
"kl_target",
",",
"# pylint: disable=g-long-lambda",
"lambda",
":",
"tf",
".",
"Print",
"(",
"self",
".",
"_penalty",
".",
"assign",
"(",
"self",
".",
"_penalty",
"*",
"1.5",
")",
",",
"[",
"0",
"]",
",",
"'increase penalty '",
")",
",",
"float",
")",
"maybe_decrease",
"=",
"tf",
".",
"cond",
"(",
"kl_change",
"<",
"0.7",
"*",
"self",
".",
"_config",
".",
"kl_target",
",",
"# pylint: disable=g-long-lambda",
"lambda",
":",
"tf",
".",
"Print",
"(",
"self",
".",
"_penalty",
".",
"assign",
"(",
"self",
".",
"_penalty",
"/",
"1.5",
")",
",",
"[",
"0",
"]",
",",
"'decrease penalty '",
")",
",",
"float",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"maybe_increase",
",",
"maybe_decrease",
"]",
")",
":",
"return",
"tf",
".",
"summary",
".",
"merge",
"(",
"[",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'kl_change'",
",",
"kl_change",
")",
",",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'penalty'",
",",
"self",
".",
"_penalty",
")",
"]",
")"
] |
Adjust the KL policy between the behavioral and current policy.
Compute how much the policy actually changed during the multiple
update steps. Adjust the penalty strength for the next training phase if we
overshot or undershot the target divergence too much.
Args:
observ: Sequences of observations.
old_policy_params: Parameters of the behavioral policy.
length: Batch of sequence lengths.
Returns:
Summary tensor.
|
[
"Adjust",
"the",
"KL",
"policy",
"between",
"the",
"behavioral",
"and",
"current",
"policy",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L505-L544
|
train
|
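Stripped of the TensorFlow control flow, the adjustment above is the standard adaptive-KL rule: multiply the penalty by 1.5 when the measured divergence overshoots 1.3x the target, divide by 1.5 when it undershoots 0.7x. A plain-Python sketch of that rule (constants copied from the code above, function name invented):

def adjust_penalty(penalty, kl_change, kl_target):
    if kl_change > 1.3 * kl_target:
        return penalty * 1.5  # policy moved too far; tighten the constraint
    if kl_change < 0.7 * kl_target:
        return penalty / 1.5  # policy barely moved; loosen the constraint
    return penalty            # within the acceptable band; leave unchanged

print(adjust_penalty(1.0, kl_change=0.02, kl_target=0.01))  # 1.5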
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO._mask
|
def _mask(self, tensor, length, padding_value=0):
"""Set padding elements of a batch of sequences to a constant.
Useful for setting padding elements to zero before summing along the time
dimension, or for preventing infinite results in padding elements.
Args:
tensor: Tensor of sequences.
length: Batch of sequence lengths.
padding_value: Value to write into padding elements.
Returns:
Masked sequences.
"""
with tf.name_scope('mask'):
range_ = tf.range(tensor.shape[1].value)
mask = range_[None, :] < length[:, None]
if tensor.shape.ndims > 2:
for _ in range(tensor.shape.ndims - 2):
mask = mask[..., None]
mask = tf.tile(mask, [1, 1] + tensor.shape[2:].as_list())
masked = tf.where(mask, tensor, padding_value * tf.ones_like(tensor))
return tf.check_numerics(masked, 'masked')
|
python
|
def _mask(self, tensor, length, padding_value=0):
"""Set padding elements of a batch of sequences to a constant.
Useful for setting padding elements to zero before summing along the time
dimension, or for preventing infinite results in padding elements.
Args:
tensor: Tensor of sequences.
length: Batch of sequence lengths.
padding_value: Value to write into padding elements.
Returns:
Masked sequences.
"""
with tf.name_scope('mask'):
range_ = tf.range(tensor.shape[1].value)
mask = range_[None, :] < length[:, None]
if tensor.shape.ndims > 2:
for _ in range(tensor.shape.ndims - 2):
mask = mask[..., None]
mask = tf.tile(mask, [1, 1] + tensor.shape[2:].as_list())
masked = tf.where(mask, tensor, padding_value * tf.ones_like(tensor))
return tf.check_numerics(masked, 'masked')
|
[
"def",
"_mask",
"(",
"self",
",",
"tensor",
",",
"length",
",",
"padding_value",
"=",
"0",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'mask'",
")",
":",
"range_",
"=",
"tf",
".",
"range",
"(",
"tensor",
".",
"shape",
"[",
"1",
"]",
".",
"value",
")",
"mask",
"=",
"range_",
"[",
"None",
",",
":",
"]",
"<",
"length",
"[",
":",
",",
"None",
"]",
"if",
"tensor",
".",
"shape",
".",
"ndims",
">",
"2",
":",
"for",
"_",
"in",
"range",
"(",
"tensor",
".",
"shape",
".",
"ndims",
"-",
"2",
")",
":",
"mask",
"=",
"mask",
"[",
"...",
",",
"None",
"]",
"mask",
"=",
"tf",
".",
"tile",
"(",
"mask",
",",
"[",
"1",
",",
"1",
"]",
"+",
"tensor",
".",
"shape",
"[",
"2",
":",
"]",
".",
"as_list",
"(",
")",
")",
"masked",
"=",
"tf",
".",
"where",
"(",
"mask",
",",
"tensor",
",",
"padding_value",
"*",
"tf",
".",
"ones_like",
"(",
"tensor",
")",
")",
"return",
"tf",
".",
"check_numerics",
"(",
"masked",
",",
"'masked'",
")"
] |
Set padding elements of a batch of sequences to a constant.
Useful for setting padding elements to zero before summing along the time
dimension, or for preventing infinite results in padding elements.
Args:
tensor: Tensor of sequences.
length: Batch of sequence lengths.
padding_value: Value to write into padding elements.
Returns:
Masked sequences.
|
[
"Set",
"padding",
"elements",
"of",
"a",
"batch",
"of",
"sequences",
"to",
"a",
"constant",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L546-L568
|
train
|
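The mask is built by broadcasting a [1, time] step index against a [batch, 1] length column; the same trick works outside TensorFlow. A self-contained NumPy sketch for the 2-D case (illustrative only, not the repo's implementation):

import numpy as np

def mask(tensor, length, padding_value=0.0):
    # steps[None, :] < length[:, None] broadcasts to [batch, time] booleans.
    steps = np.arange(tensor.shape[1])
    valid = steps[None, :] < np.asarray(length)[:, None]
    return np.where(valid, tensor, padding_value)

x = np.ones((2, 4))
print(mask(x, [2, 3]))  # row 0 keeps 2 steps, row 1 keeps 3; rest are 0.0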
celery/cell
|
cell/workflow/entities.py
|
Server.main
|
def main(self, *args, **kwargs):
"""Implement the actor main loop by waiting forever for messages."""
self.start(*args, **kwargs)
try:
while 1:
body, message = yield self.receive()
handler = self.get_handler(message)
handler(body, message)
finally:
self.stop(*args, **kwargs)
|
python
|
def main(self, *args, **kwargs):
"""Implement the actor main loop by waiting forever for messages."""
self.start(*args, **kwargs)
try:
while 1:
body, message = yield self.receive()
handler = self.get_handler(message)
handler(body, message)
finally:
self.stop(*args, **kwargs)
|
[
"def",
"main",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"start",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"while",
"1",
":",
"body",
",",
"message",
"=",
"yield",
"self",
".",
"receive",
"(",
")",
"handler",
"=",
"self",
".",
"get_handler",
"(",
"message",
")",
"handler",
"(",
"body",
",",
"message",
")",
"finally",
":",
"self",
".",
"stop",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Implement the actor main loop by waiting forever for messages.
|
[
"Implement",
"the",
"actor",
"main",
"loop",
"by",
"waiting",
"forever",
"for",
"messages",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/workflow/entities.py#L73-L82
|
train
|
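The `yield self.receive()` line makes the loop a coroutine: the actor suspends until the framework resumes it with a (body, message) pair. A dependency-free sketch of the same generator pattern (all names invented; not the cell implementation):

def actor_main(handlers):
    # Suspend on yield; the driver resumes us with a (body, message) pair.
    while True:
        body, message = yield
        handlers[message['method']](body, message)

loop = actor_main({'ping': lambda body, msg: print('pong', body)})
next(loop)                                 # prime the coroutine
loop.send(({'x': 1}, {'method': 'ping'}))  # prints: pong {'x': 1}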
celery/cell
|
cell/actors.py
|
Actor.send
|
def send(self, method, args={}, to=None, nowait=False, **kwargs):
"""Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
if to is None:
to = self.routing_key
r = self.call_or_cast(method, args, routing_key=to,
nowait=nowait, **kwargs)
if not nowait:
return r.get()
|
python
|
def send(self, method, args={}, to=None, nowait=False, **kwargs):
"""Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
if to is None:
to = self.routing_key
r = self.call_or_cast(method, args, routing_key=to,
nowait=nowait, **kwargs)
if not nowait:
return r.get()
|
[
"def",
"send",
"(",
"self",
",",
"method",
",",
"args",
"=",
"{",
"}",
",",
"to",
"=",
"None",
",",
"nowait",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"to",
"is",
"None",
":",
"to",
"=",
"self",
".",
"routing_key",
"r",
"=",
"self",
".",
"call_or_cast",
"(",
"method",
",",
"args",
",",
"routing_key",
"=",
"to",
",",
"nowait",
"=",
"nowait",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"nowait",
":",
"return",
"r",
".",
"get",
"(",
")"
] |
Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
|
[
"Call",
"method",
"on",
"agent",
"listening",
"to",
"routing_key",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L259-L275
|
train
|
celery/cell
|
cell/actors.py
|
Actor.throw
|
def throw(self, method, args={}, nowait=False, **kwargs):
"""Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
r = self.call_or_cast(method, args, type=ACTOR_TYPE.RR,
nowait=nowait, **kwargs)
if not nowait:
return r
|
python
|
def throw(self, method, args={}, nowait=False, **kwargs):
"""Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
r = self.call_or_cast(method, args, type=ACTOR_TYPE.RR,
nowait=nowait, **kwargs)
if not nowait:
return r
|
[
"def",
"throw",
"(",
"self",
",",
"method",
",",
"args",
"=",
"{",
"}",
",",
"nowait",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"r",
"=",
"self",
".",
"call_or_cast",
"(",
"method",
",",
"args",
",",
"type",
"=",
"ACTOR_TYPE",
".",
"RR",
",",
"nowait",
"=",
"nowait",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"nowait",
":",
"return",
"r"
] |
Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
|
[
"Call",
"method",
"on",
"one",
"of",
"the",
"agents",
"in",
"round",
"robin",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L277-L290
|
train
|
celery/cell
|
cell/actors.py
|
Actor.scatter
|
def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
"""Broadcast method to all agents.
If nowait is False, returns a generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default_timeout`.
**Examples**
``scatter`` is a generator (if nowait is False)::
>>> res = scatter()
>>> res.next() # one event consumed, or timed out.
>>> res = scatter(limit=2)
>>> for i in res: # two events consumed or timeout
>>> pass
See :meth:`call_or_cast` for a full list of supported
arguments.
"""
timeout = timeout if timeout is not None else self.default_timeout
r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER,
nowait=nowait, timeout=timeout, **kwargs)
if not nowait:
return r.gather(timeout=timeout, **kwargs)
|
python
|
def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
"""Broadcast method to all agents.
If nowait is False, returns a generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default_timeout`.
**Examples**
``scatter`` is a generator (if nowait is False)::
>>> res = scatter()
>>> res.next() # one event consumed, or timed out.
>>> res = scatter(limit=2)
>>> for i in res: # two events consumed or timeout
>>> pass
See :meth:`call_or_cast` for a full list of supported
arguments.
"""
timeout = timeout if timeout is not None else self.default_timeout
r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER,
nowait=nowait, timeout=timeout, **kwargs)
if not nowait:
return r.gather(timeout=timeout, **kwargs)
|
[
"def",
"scatter",
"(",
"self",
",",
"method",
",",
"args",
"=",
"{",
"}",
",",
"nowait",
"=",
"False",
",",
"timeout",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"timeout",
"=",
"timeout",
"if",
"timeout",
"is",
"not",
"None",
"else",
"self",
".",
"default_timeout",
"r",
"=",
"self",
".",
"call_or_cast",
"(",
"method",
",",
"args",
",",
"type",
"=",
"ACTOR_TYPE",
".",
"SCATTER",
",",
"nowait",
"=",
"nowait",
",",
"timeout",
"=",
"timeout",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"nowait",
":",
"return",
"r",
".",
"gather",
"(",
"timeout",
"=",
"timeout",
",",
"*",
"*",
"kwargs",
")"
] |
Broadcast method to all agents.
If nowait is False, returns a generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default_timeout`.
**Examples**
``scatter`` is a generator (if nowait is False)::
>>> res = scatter()
>>> res.next() # one event consumed, or timed out.
>>> res = scatter(limit=2)
>>> for i in res: # two events consumed or timeout
>>> pass
See :meth:`call_or_cast` for a full list of supported
arguments.
|
[
"Broadcast",
"method",
"to",
"all",
"agents",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L292-L320
|
train
|
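When `nowait` is false, `scatter` hands back a generator, so replies stream in as agents answer and iteration stops at the limit or when a reply times out. A sketch of such a bounded gather over a generic reply source (hypothetical helper built on a queue.Queue; not cell's `_collect_replies`):

import queue

def gather(replies, limit=None, timeout=1.0):
    # Yield up to `limit` replies, giving up once one takes too long.
    count = 0
    while limit is None or count < limit:
        try:
            yield replies.get(timeout=timeout)
        except queue.Empty:
            return
        count += 1

q = queue.Queue()
q.put({'ok': 42})
print(list(gather(q, limit=1, timeout=0.1)))  # [{'ok': 42}]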
celery/cell
|
cell/actors.py
|
Actor.call_or_cast
|
def call_or_cast(self, method, args={}, nowait=False, **kwargs):
"""Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:param args: Dictionary of arguments for the method.
:keyword nowait: If false the call will block until the result
is available and return it (default), if true the call will be
non-blocking and no result will be returned.
:keyword retry: If set to true then message sending will be retried
in the event of connection failures. Default is decided by the
:attr:`retry` attribute.
:keyword retry_policy: Override retry policies.
See :attr:`retry_policy`. This must be a dictionary, and keys will
be merged with the default retry policy.
:keyword timeout: Timeout to wait for replies in seconds as a float
(**only relevant in blocking mode**).
:keyword limit: Limit number of replies to wait for
(**only relevant in blocking mode**).
:keyword callback: If provided, this callback will be called for every
reply received (**only relevant in blocking mode**).
:keyword \*\*props: Additional message properties.
See :meth:`kombu.Producer.publish`.
"""
return (nowait and self.cast or self.call)(method, args, **kwargs)
|
python
|
def call_or_cast(self, method, args={}, nowait=False, **kwargs):
"""Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:param args: Dictionary of arguments for the method.
:keyword nowait: If false the call will block until the result
is available and return it (default), if true the call will be
non-blocking and no result will be returned.
:keyword retry: If set to true then message sending will be retried
in the event of connection failures. Default is decided by the
:attr:`retry` attribute.
:keyword retry_policy: Override retry policies.
See :attr:`retry_policy`. This must be a dictionary, and keys will
be merged with the default retry policy.
:keyword timeout: Timeout to wait for replies in seconds as a float
(**only relevant in blocking mode**).
:keyword limit: Limit number of replies to wait for
(**only relevant in blocking mode**).
:keyword callback: If provided, this callback will be called for every
reply received (**only relevant in blocking mode**).
:keyword \*\*props: Additional message properties.
See :meth:`kombu.Producer.publish`.
"""
return (nowait and self.cast or self.call)(method, args, **kwargs)
|
[
"def",
"call_or_cast",
"(",
"self",
",",
"method",
",",
"args",
"=",
"{",
"}",
",",
"nowait",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"(",
"nowait",
"and",
"self",
".",
"cast",
"or",
"self",
".",
"call",
")",
"(",
"method",
",",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:param args: Dictionary of arguments for the method.
:keyword nowait: If false the call will block until the result
is available and return it (default), if true the call will be
non-blocking and no result will be returned.
:keyword retry: If set to true then message sending will be retried
in the event of connection failures. Default is decided by the
:attr:`retry` attribute.
:keyword retry_policy: Override retry policies.
See :attr:`retry_policy`. This must be a dictionary, and keys will
be merged with the default retry policy.
:keyword timeout: Timeout to wait for replies in seconds as a float
(**only relevant in blocking mode**).
:keyword limit: Limit number of replies to wait for
(**only relevant in blocking mode**).
:keyword callback: If provided, this callback will be called for every
reply received (**only relevant in blocking mode**).
:keyword \*\*props: Additional message properties.
See :meth:`kombu.Producer.publish`.
|
[
"Apply",
"remote",
"method",
"asynchronously",
"or",
"synchronously",
"depending",
"on",
"the",
"value",
"of",
"nowait",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L322-L347
|
train
|
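The dispatch expression `(nowait and self.cast or self.call)(...)` is the pre-ternary boolean idiom: `and` yields its right operand when the left is truthy, and `or` falls back to its right operand otherwise. It is safe here because a bound method is always truthy. A small demonstration with stand-in functions:

def call(job):
    return 'called %s' % job

def cast(job):
    return 'cast %s' % job

for nowait in (True, False):
    fn = nowait and cast or call  # equivalent to: cast if nowait else call
    print(fn('job'))              # 'cast job', then 'called job'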
celery/cell
|
cell/actors.py
|
Actor.cast
|
def cast(self, method, args={}, declare=None, retry=None,
retry_policy=None, type=None, exchange=None, **props):
"""Send message to actor. Discarding replies."""
retry = self.retry if retry is None else retry
body = {'class': self.name, 'method': method, 'args': args}
_retry_policy = self.retry_policy
if retry_policy: # merge default and custom policies.
_retry_policy = dict(_retry_policy, **retry_policy)
if type and type not in self.types:
raise ValueError('Unsupported type: {0}'.format(type))
elif not type:
type = ACTOR_TYPE.DIRECT
props.setdefault('routing_key', self.routing_key)
props.setdefault('serializer', self.serializer)
exchange = exchange or self.type_to_exchange[type]()
declare = (maybe_list(declare) or []) + [exchange]
with producers[self._connection].acquire(block=True) as producer:
return producer.publish(body, exchange=exchange, declare=declare,
retry=retry, retry_policy=_retry_policy,
**props)
|
python
|
def cast(self, method, args={}, declare=None, retry=None,
retry_policy=None, type=None, exchange=None, **props):
"""Send message to actor. Discarding replies."""
retry = self.retry if retry is None else retry
body = {'class': self.name, 'method': method, 'args': args}
_retry_policy = self.retry_policy
if retry_policy: # merge default and custom policies.
_retry_policy = dict(_retry_policy, **retry_policy)
if type and type not in self.types:
raise ValueError('Unsupported type: {0}'.format(type))
elif not type:
type = ACTOR_TYPE.DIRECT
props.setdefault('routing_key', self.routing_key)
props.setdefault('serializer', self.serializer)
exchange = exchange or self.type_to_exchange[type]()
declare = (maybe_list(declare) or []) + [exchange]
with producers[self._connection].acquire(block=True) as producer:
return producer.publish(body, exchange=exchange, declare=declare,
retry=retry, retry_policy=_retry_policy,
**props)
|
[
"def",
"cast",
"(",
"self",
",",
"method",
",",
"args",
"=",
"{",
"}",
",",
"declare",
"=",
"None",
",",
"retry",
"=",
"None",
",",
"retry_policy",
"=",
"None",
",",
"type",
"=",
"None",
",",
"exchange",
"=",
"None",
",",
"*",
"*",
"props",
")",
":",
"retry",
"=",
"self",
".",
"retry",
"if",
"retry",
"is",
"None",
"else",
"retry",
"body",
"=",
"{",
"'class'",
":",
"self",
".",
"name",
",",
"'method'",
":",
"method",
",",
"'args'",
":",
"args",
"}",
"_retry_policy",
"=",
"self",
".",
"retry_policy",
"if",
"retry_policy",
":",
"# merge default and custom policies.",
"_retry_policy",
"=",
"dict",
"(",
"_retry_policy",
",",
"*",
"*",
"retry_policy",
")",
"if",
"type",
"and",
"type",
"not",
"in",
"self",
".",
"types",
":",
"raise",
"ValueError",
"(",
"'Unsupported type: {0}'",
".",
"format",
"(",
"type",
")",
")",
"elif",
"not",
"type",
":",
"type",
"=",
"ACTOR_TYPE",
".",
"DIRECT",
"props",
".",
"setdefault",
"(",
"'routing_key'",
",",
"self",
".",
"routing_key",
")",
"props",
".",
"setdefault",
"(",
"'serializer'",
",",
"self",
".",
"serializer",
")",
"exchange",
"=",
"exchange",
"or",
"self",
".",
"type_to_exchange",
"[",
"type",
"]",
"(",
")",
"declare",
"=",
"(",
"maybe_list",
"(",
"declare",
")",
"or",
"[",
"]",
")",
"+",
"[",
"exchange",
"]",
"with",
"producers",
"[",
"self",
".",
"_connection",
"]",
".",
"acquire",
"(",
"block",
"=",
"True",
")",
"as",
"producer",
":",
"return",
"producer",
".",
"publish",
"(",
"body",
",",
"exchange",
"=",
"exchange",
",",
"declare",
"=",
"declare",
",",
"retry",
"=",
"retry",
",",
"retry_policy",
"=",
"retry_policy",
",",
"*",
"*",
"props",
")"
] |
Send message to actor. Discarding replies.
|
[
"Send",
"message",
"to",
"actor",
".",
"Discarding",
"replies",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L398-L420
|
train
|
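The retry-policy merge uses `dict(base, **override)`, which copies the default mapping and then overwrites it with the caller's keys; the merged policy is the one handed to `publish`. The idiom in isolation (values invented):

default_policy = {'max_retries': 3, 'interval_start': 0}
override = {'max_retries': 10}
merged = dict(default_policy, **override)  # override keys win
print(merged)  # {'max_retries': 10, 'interval_start': 0}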
celery/cell
|
cell/actors.py
|
Actor.handle_call
|
def handle_call(self, body, message):
"""Handle call message."""
try:
r = self._DISPATCH(body, ticket=message.properties['reply_to'])
except self.Next:
# don't reply, delegate to other agents.
pass
else:
self.reply(message, r)
|
python
|
def handle_call(self, body, message):
"""Handle call message."""
try:
r = self._DISPATCH(body, ticket=message.properties['reply_to'])
except self.Next:
# don't reply, delegate to other agents.
pass
else:
self.reply(message, r)
|
[
"def",
"handle_call",
"(",
"self",
",",
"body",
",",
"message",
")",
":",
"try",
":",
"r",
"=",
"self",
".",
"_DISPATCH",
"(",
"body",
",",
"ticket",
"=",
"message",
".",
"properties",
"[",
"'reply_to'",
"]",
")",
"except",
"self",
".",
"Next",
":",
"# don't reply, delegate to other agents.",
"pass",
"else",
":",
"self",
".",
"reply",
"(",
"message",
",",
"r",
")"
] |
Handle call message.
|
[
"Handle",
"call",
"message",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L434-L442
|
train
|
celery/cell
|
cell/actors.py
|
Actor._on_message
|
def _on_message(self, body, message):
"""What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
is expected to send a reply.
"""
if message.properties.get('reply_to'):
handler = self.handle_call
else:
handler = self.handle_cast
def handle():
# Do not ack the message if an exceptional error occurs,
# but do ack the message if SystemExit or KeyboardInterrupt
# is raised, as this is probably intended.
try:
handler(body, message)
except Exception:
raise
except BaseException:
message.ack()
raise
else:
message.ack()
handle()
|
python
|
def _on_message(self, body, message):
"""What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
is expected to send a reply.
"""
if message.properties.get('reply_to'):
handler = self.handle_call
else:
handler = self.handle_cast
def handle():
# Do not ack the message if an exceptional error occurs,
# but do ack the message if SystemExit or KeyboardInterrupt
# is raised, as this is probably intended.
try:
handler(body, message)
except Exception:
raise
except BaseException:
message.ack()
raise
else:
message.ack()
handle()
|
[
"def",
"_on_message",
"(",
"self",
",",
"body",
",",
"message",
")",
":",
"if",
"message",
".",
"properties",
".",
"get",
"(",
"'reply_to'",
")",
":",
"handler",
"=",
"self",
".",
"handle_call",
"else",
":",
"handler",
"=",
"self",
".",
"handle_cast",
"def",
"handle",
"(",
")",
":",
"# Do not ack the message if an exceptional error occurs,",
"# but do ack the message if SystemExit or KeyboardInterrupt",
"# is raised, as this is probably intended.",
"try",
":",
"handler",
"(",
"body",
",",
"message",
")",
"except",
"Exception",
":",
"raise",
"except",
"BaseException",
":",
"message",
".",
"ack",
"(",
")",
"raise",
"else",
":",
"message",
".",
"ack",
"(",
")",
"handle",
"(",
")"
] |
What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
is expected to send a reply.
|
[
"What",
"to",
"do",
"when",
"a",
"message",
"is",
"received",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L460-L489
|
train
|
celery/cell
|
cell/bin/base.py
|
Command.parse_options
|
def parse_options(self, prog_name, arguments):
"""Parse the available options."""
# Don't want to load configuration to just print the version,
# so we handle --version manually here.
if '--version' in arguments:
self.exit_status(self.version, fh=sys.stdout)
parser = self.create_parser(prog_name)
options, args = parser.parse_args(arguments)
return options, args
|
python
|
def parse_options(self, prog_name, arguments):
"""Parse the available options."""
# Don't want to load configuration to just print the version,
# so we handle --version manually here.
if '--version' in arguments:
self.exit_status(self.version, fh=sys.stdout)
parser = self.create_parser(prog_name)
options, args = parser.parse_args(arguments)
return options, args
|
[
"def",
"parse_options",
"(",
"self",
",",
"prog_name",
",",
"arguments",
")",
":",
"# Don't want to load configuration to just print the version,",
"# so we handle --version manually here.",
"if",
"'--version'",
"in",
"arguments",
":",
"self",
".",
"exit_status",
"(",
"self",
".",
"version",
",",
"fh",
"=",
"sys",
".",
"stdout",
")",
"parser",
"=",
"self",
".",
"create_parser",
"(",
"prog_name",
")",
"options",
",",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"arguments",
")",
"return",
"options",
",",
"args"
] |
Parse the available options.
|
[
"Parse",
"the",
"available",
"options",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/bin/base.py#L67-L75
|
train
|
celery/cell
|
cell/results.py
|
AsyncResult.get
|
def get(self, **kwargs):
"What kind of arguments should be pass here"
kwargs.setdefault('limit', 1)
return self._first(self.gather(**kwargs))
|
python
|
def get(self, **kwargs):
"What kind of arguments should be pass here"
kwargs.setdefault('limit', 1)
return self._first(self.gather(**kwargs))
|
[
"def",
"get",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'limit'",
",",
"1",
")",
"return",
"self",
".",
"_first",
"(",
"self",
".",
"gather",
"(",
"*",
"*",
"kwargs",
")",
")"
] |
What kind of arguments should be passed here
|
[
"What",
"kind",
"of",
"arguments",
"should",
"be",
"pass",
"here"
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/results.py#L30-L33
|
train
|
celery/cell
|
cell/results.py
|
AsyncResult._gather
|
def _gather(self, *args, **kwargs):
"""Generator over the results
"""
propagate = kwargs.pop('propagate', True)
return (self.to_python(reply, propagate=propagate)
for reply in self.actor._collect_replies(*args, **kwargs))
|
python
|
def _gather(self, *args, **kwargs):
"""Generator over the results
"""
propagate = kwargs.pop('propagate', True)
return (self.to_python(reply, propagate=propagate)
for reply in self.actor._collect_replies(*args, **kwargs))
|
[
"def",
"_gather",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"propagate",
"=",
"kwargs",
".",
"pop",
"(",
"'propagate'",
",",
"True",
")",
"return",
"(",
"self",
".",
"to_python",
"(",
"reply",
",",
"propagate",
"=",
"propagate",
")",
"for",
"reply",
"in",
"self",
".",
"actor",
".",
"_collect_replies",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] |
Generator over the results
|
[
"Generator",
"over",
"the",
"results"
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/results.py#L47-L52
|
train
|
celery/cell
|
cell/results.py
|
AsyncResult.to_python
|
def to_python(self, reply, propagate=True):
"""Extracts the value out of the reply message.
:param reply: In the case of a successful call the reply message
will be::
{'ok': return_value, **default_fields}
Therefore the method returns: return_value, **default_fields
If the method raises an exception the reply message
will be::
{'nok': [repr exc, str traceback], **default_fields}
:keyword propagate: Propagate exceptions raised instead of returning
a result representation of the error.
"""
try:
return reply['ok']
except KeyError:
error = self.Error(*reply.get('nok') or ())
if propagate:
raise error
return error
|
python
|
def to_python(self, reply, propagate=True):
"""Extracts the value out of the reply message.
:param reply: In the case of a successful call the reply message
will be::
{'ok': return_value, **default_fields}
Therefore the method returns: return_value, **default_fields
If the method raises an exception the reply message
will be::
{'nok': [repr exc, str traceback], **default_fields}
:keyword propagate: Propagate exceptions raised instead of returning
a result representation of the error.
"""
try:
return reply['ok']
except KeyError:
error = self.Error(*reply.get('nok') or ())
if propagate:
raise error
return error
|
[
"def",
"to_python",
"(",
"self",
",",
"reply",
",",
"propagate",
"=",
"True",
")",
":",
"try",
":",
"return",
"reply",
"[",
"'ok'",
"]",
"except",
"KeyError",
":",
"error",
"=",
"self",
".",
"Error",
"(",
"*",
"reply",
".",
"get",
"(",
"'nok'",
")",
"or",
"(",
")",
")",
"if",
"propagate",
":",
"raise",
"error",
"return",
"error"
] |
Extracts the value out of the reply message.
:param reply: In the case of a successful call the reply message
will be::
{'ok': return_value, **default_fields}
Therefore the method returns: return_value, **default_fields
If the method raises an exception the reply message
will be::
{'nok': [repr exc, str traceback], **default_fields}
:keyword propagate: Propagate exceptions raised instead of returning
a result representation of the error.
|
[
"Extracts",
"the",
"value",
"out",
"of",
"the",
"reply",
"message",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/results.py#L54-L79
|
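The reply envelope is a tagged union: {'ok': value} on success, {'nok': [repr(exc), traceback]} on failure, and `to_python` unpacks it, optionally re-raising. A minimal encoder/decoder pair for the same convention (helper names are hypothetical):

def encode_reply(fn, *args):
    try:
        return {'ok': fn(*args)}
    except Exception as exc:
        return {'nok': [repr(exc), '']}  # traceback omitted in this sketch

def decode_reply(reply, propagate=True):
    try:
        return reply['ok']
    except KeyError:
        error = RuntimeError(*reply.get('nok') or ())
        if propagate:
            raise error
        return error

print(decode_reply(encode_reply(int, '42')))                    # 42
print(decode_reply(encode_reply(int, 'nope'), propagate=False))  # RuntimeError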
train
|
celery/cell
|
cell/agents.py
|
dAgent.spawn
|
def spawn(self, cls, kwargs={}, nowait=False):
"""Spawn a new actor on a celery worker by sending
a remote command to the worker.
:param cls: the name of the :class:`~.cell.actors.Actor` class or its
derivative.
:keyword kwargs: The keyword arguments to pass on to
actor __init__ (a :class:`dict`)
:keyword nowait: If set to False (default) the call waits for the
result of spawning the actor. If True, the spawning
is asynchronous.
:returns: :class:`~.cell.actors.ActorProxy` holding the id of
the spawned actor.
"""
actor_id = uuid()
if str(qualname(cls)) == '__builtin__.unicode':
name = cls
else:
name = qualname(cls)
res = self.call('spawn', {'cls': name, 'id': actor_id,
'kwargs': kwargs},
type=ACTOR_TYPE.RR, nowait=nowait)
return ActorProxy(name, actor_id, res, agent=self,
connection=self.connection, **kwargs)
|
python
|
def spawn(self, cls, kwargs={}, nowait=False):
"""Spawn a new actor on a celery worker by sending
a remote command to the worker.
:param cls: the name of the :class:`~.cell.actors.Actor` class or its
derivative.
:keyword kwargs: The keyword arguments to pass on to
actor __init__ (a :class:`dict`)
:keyword nowait: If set to False (default) the call waits for the
result of spawning the actor. If True, the spawning
is asynchronous.
:returns: :class:`~.cell.actors.ActorProxy` holding the id of
the spawned actor.
"""
actor_id = uuid()
if str(qualname(cls)) == '__builtin__.unicode':
name = cls
else:
name = qualname(cls)
res = self.call('spawn', {'cls': name, 'id': actor_id,
'kwargs': kwargs},
type=ACTOR_TYPE.RR, nowait=nowait)
return ActorProxy(name, actor_id, res, agent=self,
connection=self.connection, **kwargs)
|
[
"def",
"spawn",
"(",
"self",
",",
"cls",
",",
"kwargs",
"=",
"{",
"}",
",",
"nowait",
"=",
"False",
")",
":",
"actor_id",
"=",
"uuid",
"(",
")",
"if",
"str",
"(",
"qualname",
"(",
"cls",
")",
")",
"==",
"'__builtin__.unicode'",
":",
"name",
"=",
"cls",
"else",
":",
"name",
"=",
"qualname",
"(",
"cls",
")",
"res",
"=",
"self",
".",
"call",
"(",
"'spawn'",
",",
"{",
"'cls'",
":",
"name",
",",
"'id'",
":",
"actor_id",
",",
"'kwargs'",
":",
"kwargs",
"}",
",",
"type",
"=",
"ACTOR_TYPE",
".",
"RR",
",",
"nowait",
"=",
"nowait",
")",
"return",
"ActorProxy",
"(",
"name",
",",
"actor_id",
",",
"res",
",",
"agent",
"=",
"self",
",",
"connection",
"=",
"self",
".",
"connection",
",",
"*",
"*",
"kwargs",
")"
] |
Spawn a new actor on a celery worker by sending
a remote command to the worker.
:param cls: the name of the :class:`~.cell.actors.Actor` class or its
derivative.
:keyword kwargs: The keyword arguments to pass on to
actor __init__ (a :class:`dict`)
:keyword nowait: If set to False (default) the call waits for the
result of spawning the actor. If True, the spawning
is asynchronous.
:returns: :class:`~.cell.actors.ActorProxy` holding the id of
the spawned actor.
|
[
"Spawn",
"a",
"new",
"actor",
"on",
"a",
"celery",
"worker",
"by",
"sending",
"a",
"remote",
"command",
"to",
"the",
"worker",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/agents.py#L99-L128
|
train
|
celery/cell
|
cell/agents.py
|
dAgent.select
|
def select(self, cls, **kwargs):
"""Get the id of already spawned actor
:keyword actor: the name of the :class:`Actor` class
"""
name = qualname(cls)
id = first_reply(
self.scatter('select', {'cls': name}, limit=1), cls)
return ActorProxy(name, id, agent=self,
connection=self.connection, **kwargs)
|
python
|
def select(self, cls, **kwargs):
"""Get the id of already spawned actor
:keyword actor: the name of the :class:`Actor` class
"""
name = qualname(cls)
id = first_reply(
self.scatter('select', {'cls': name}, limit=1), cls)
return ActorProxy(name, id, agent=self,
connection=self.connection, **kwargs)
|
[
"def",
"select",
"(",
"self",
",",
"cls",
",",
"*",
"*",
"kwargs",
")",
":",
"name",
"=",
"qualname",
"(",
"cls",
")",
"id",
"=",
"first_reply",
"(",
"self",
".",
"scatter",
"(",
"'select'",
",",
"{",
"'cls'",
":",
"name",
"}",
",",
"limit",
"=",
"1",
")",
",",
"cls",
")",
"return",
"ActorProxy",
"(",
"name",
",",
"id",
",",
"agent",
"=",
"self",
",",
"connection",
"=",
"self",
".",
"connection",
",",
"*",
"*",
"kwargs",
")"
] |
Get the id of an already spawned actor
:keyword cls: the name of the :class:`Actor` class
|
[
"Get",
"the",
"id",
"of",
"already",
"spawned",
"actor"
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/agents.py#L130-L139
|
train
|
celery/cell
|
cell/agents.py
|
dAgent.process_message
|
def process_message(self, actor, body, message):
"""Process actor message depending depending on the the worker settings.
If greenlets are enabled in the worker, the actor message is processed
in a greenlet from the greenlet pool.
Otherwise, the message is processed by the same thread.
The method is invoked from the callback `cell.actors.Actor.on_message`
upon receiving a message.
:keyword actor: instance of :class:`Actor` or its derivative.
The actor instance to process the message.
For the full list of arguments see
:meth:`cell.actors.Actor._on_message`.
"""
if actor is not self and self.is_green():
self.pool.spawn_n(actor._on_message, body, message)
else:
if not self.is_green() and message.properties.get('reply_to'):
warn('Starting a blocking call (%s) on actor (%s) '
'when greenlets are disabled.',
itemgetter('method')(body), actor.__class__)
actor._on_message(body, message)
|
python
|
def process_message(self, actor, body, message):
"""Process actor message depending depending on the the worker settings.
If greenlets are enabled in the worker, the actor message is processed
in a greenlet from the greenlet pool.
Otherwise, the message is processed by the same thread.
The method is invoked from the callback `cell.actors.Actor.on_message`
upon receiving a message.
:keyword actor: instance of :class:`Actor` or its derivative.
The actor instance to process the message.
For the full list of arguments see
:meth:`cell.actors.Actor._on_message`.
"""
if actor is not self and self.is_green():
self.pool.spawn_n(actor._on_message, body, message)
else:
if not self.is_green() and message.properties.get('reply_to'):
warn('Starting a blocking call (%s) on actor (%s) '
'when greenlets are disabled.',
itemgetter('method')(body), actor.__class__)
actor._on_message(body, message)
|
[
"def",
"process_message",
"(",
"self",
",",
"actor",
",",
"body",
",",
"message",
")",
":",
"if",
"actor",
"is",
"not",
"self",
"and",
"self",
".",
"is_green",
"(",
")",
":",
"self",
".",
"pool",
".",
"spawn_n",
"(",
"actor",
".",
"_on_message",
",",
"body",
",",
"message",
")",
"else",
":",
"if",
"not",
"self",
".",
"is_green",
"(",
")",
"and",
"message",
".",
"properties",
".",
"get",
"(",
"'reply_to'",
")",
":",
"warn",
"(",
"'Starting a blocking call (%s) on actor (%s) '",
"'when greenlets are disabled.'",
",",
"itemgetter",
"(",
"'method'",
")",
"(",
"body",
")",
",",
"actor",
".",
"__class__",
")",
"actor",
".",
"_on_message",
"(",
"body",
",",
"message",
")"
] |
Process actor message depending on the worker settings.
If greenlets are enabled in the worker, the actor message is processed
in a greenlet from the greenlet pool.
Otherwise, the message is processed by the same thread.
The method is invoked from the callback `cell.actors.Actor.on_message`
upon receiving a message.
:keyword actor: instance of :class:`Actor` or its derivative.
The actor instance to process the message.
For the full list of arguments see
:meth:`cell.actors.Actor._on_message`.
|
[
"Process",
"actor",
"message",
"depending",
"depending",
"on",
"the",
"the",
"worker",
"settings",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/agents.py#L164-L187
|
train
|
yhat/pandasql
|
pandasql/sqldf.py
|
get_outer_frame_variables
|
def get_outer_frame_variables():
""" Get a dict of local and global variables of the first outer frame from another file. """
cur_filename = inspect.getframeinfo(inspect.currentframe()).filename
outer_frame = next(f
for f in inspect.getouterframes(inspect.currentframe())
if f.filename != cur_filename)
variables = {}
variables.update(outer_frame.frame.f_globals)
variables.update(outer_frame.frame.f_locals)
return variables
|
python
|
def get_outer_frame_variables():
""" Get a dict of local and global variables of the first outer frame from another file. """
cur_filename = inspect.getframeinfo(inspect.currentframe()).filename
outer_frame = next(f
for f in inspect.getouterframes(inspect.currentframe())
if f.filename != cur_filename)
variables = {}
variables.update(outer_frame.frame.f_globals)
variables.update(outer_frame.frame.f_locals)
return variables
|
[
"def",
"get_outer_frame_variables",
"(",
")",
":",
"cur_filename",
"=",
"inspect",
".",
"getframeinfo",
"(",
"inspect",
".",
"currentframe",
"(",
")",
")",
".",
"filename",
"outer_frame",
"=",
"next",
"(",
"f",
"for",
"f",
"in",
"inspect",
".",
"getouterframes",
"(",
"inspect",
".",
"currentframe",
"(",
")",
")",
"if",
"f",
".",
"filename",
"!=",
"cur_filename",
")",
"variables",
"=",
"{",
"}",
"variables",
".",
"update",
"(",
"outer_frame",
".",
"frame",
".",
"f_globals",
")",
"variables",
".",
"update",
"(",
"outer_frame",
".",
"frame",
".",
"f_locals",
")",
"return",
"variables"
] |
Get a dict of local and global variables of the first outer frame from another file.
|
[
"Get",
"a",
"dict",
"of",
"local",
"and",
"global",
"variables",
"of",
"the",
"first",
"outer",
"frame",
"from",
"another",
"file",
"."
] |
e799c6f53be9653e8998a25adb5e2f1643442699
|
https://github.com/yhat/pandasql/blob/e799c6f53be9653e8998a25adb5e2f1643442699/pandasql/sqldf.py#L98-L107
|
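This frame walk is what lets pandasql resolve DataFrames by name without an explicit environment: it skips frames from its own file and merges the first foreign frame's globals and locals, with locals taking precedence. A self-contained demonstration of the underlying inspect pattern (simplified to "the immediate caller" rather than "the first frame from another file"):

import inspect

def caller_variables():
    # Index 0 is this frame; index 1 is the caller's frame.
    outer = inspect.getouterframes(inspect.currentframe())[1]
    variables = dict(outer.frame.f_globals)
    variables.update(outer.frame.f_locals)  # locals shadow globals
    return variables

def demo():
    local_table = [1, 2, 3]
    return caller_variables()['local_table']

print(demo())  # [1, 2, 3]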
train
|
yhat/pandasql
|
pandasql/sqldf.py
|
extract_table_names
|
def extract_table_names(query):
""" Extract table names from an SQL query. """
# a good old fashioned regex. turns out this worked better than actually parsing the code
tables_blocks = re.findall(r'(?:FROM|JOIN)\s+(\w+(?:\s*,\s*\w+)*)', query, re.IGNORECASE)
tables = [tbl
for block in tables_blocks
for tbl in re.findall(r'\w+', block)]
return set(tables)
|
python
|
def extract_table_names(query):
""" Extract table names from an SQL query. """
# a good old fashioned regex. turns out this worked better than actually parsing the code
tables_blocks = re.findall(r'(?:FROM|JOIN)\s+(\w+(?:\s*,\s*\w+)*)', query, re.IGNORECASE)
tables = [tbl
for block in tables_blocks
for tbl in re.findall(r'\w+', block)]
return set(tables)
|
[
"def",
"extract_table_names",
"(",
"query",
")",
":",
"# a good old fashioned regex. turns out this worked better than actually parsing the code",
"tables_blocks",
"=",
"re",
".",
"findall",
"(",
"r'(?:FROM|JOIN)\\s+(\\w+(?:\\s*,\\s*\\w+)*)'",
",",
"query",
",",
"re",
".",
"IGNORECASE",
")",
"tables",
"=",
"[",
"tbl",
"for",
"block",
"in",
"tables_blocks",
"for",
"tbl",
"in",
"re",
".",
"findall",
"(",
"r'\\w+'",
",",
"block",
")",
"]",
"return",
"set",
"(",
"tables",
")"
] |
Extract table names from an SQL query.
|
[
"Extract",
"table",
"names",
"from",
"an",
"SQL",
"query",
"."
] |
e799c6f53be9653e8998a25adb5e2f1643442699
|
https://github.com/yhat/pandasql/blob/e799c6f53be9653e8998a25adb5e2f1643442699/pandasql/sqldf.py#L110-L117
|
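The first regex captures the identifier list that follows each FROM or JOIN (including comma-separated lists), and the second splits each captured block into individual names. Expected behaviour of the function above on a query mixing both styles (set ordering varies):

query = "SELECT * FROM tbl_a, tbl_b JOIN tbl_c ON tbl_c.id = tbl_a.id"
print(extract_table_names(query))  # {'tbl_a', 'tbl_b', 'tbl_c'}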
train
|
yhat/pandasql
|
pandasql/sqldf.py
|
write_table
|
def write_table(df, tablename, conn):
""" Write a dataframe to the database. """
with catch_warnings():
filterwarnings('ignore',
message='The provided table name \'%s\' is not found exactly as such in the database' % tablename)
to_sql(df, name=tablename, con=conn,
index=not any(name is None for name in df.index.names))
|
python
|
def write_table(df, tablename, conn):
""" Write a dataframe to the database. """
with catch_warnings():
filterwarnings('ignore',
message='The provided table name \'%s\' is not found exactly as such in the database' % tablename)
to_sql(df, name=tablename, con=conn,
index=not any(name is None for name in df.index.names))
|
[
"def",
"write_table",
"(",
"df",
",",
"tablename",
",",
"conn",
")",
":",
"with",
"catch_warnings",
"(",
")",
":",
"filterwarnings",
"(",
"'ignore'",
",",
"message",
"=",
"'The provided table name \\'%s\\' is not found exactly as such in the database'",
"%",
"tablename",
")",
"to_sql",
"(",
"df",
",",
"name",
"=",
"tablename",
",",
"con",
"=",
"conn",
",",
"index",
"=",
"not",
"any",
"(",
"name",
"is",
"None",
"for",
"name",
"in",
"df",
".",
"index",
".",
"names",
")",
")"
] |
Write a dataframe to the database.
|
[
"Write",
"a",
"dataframe",
"to",
"the",
"database",
"."
] |
e799c6f53be9653e8998a25adb5e2f1643442699
|
https://github.com/yhat/pandasql/blob/e799c6f53be9653e8998a25adb5e2f1643442699/pandasql/sqldf.py#L120-L126
|
train
|
bsmurphy/PyKrige
|
benchmarks/kriging_benchmarks.py
|
make_benchark
|
def make_benchark(n_train, n_test, n_dim=2):
""" Compute the benchmarks for Ordianry Kriging
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
Returns
-------
res : dict
a dictionary with the timing results
"""
X_train = np.random.rand(n_train, n_dim)
y_train = np.random.rand(n_train)
X_test = np.random.rand(n_test, n_dim)
res = {}
for variogram_model in VARIOGRAM_MODELS:
tic = time()
OK = OrdinaryKriging(X_train[:, 0], X_train[:, 1], y_train,
variogram_model=variogram_model,
verbose=False, enable_plotting=False)
res['t_train_{}'.format(variogram_model)] = time() - tic
# Prediction tests below reuse the most recently trained variogram model
for backend in BACKENDS:
for n_closest_points in N_MOVING_WINDOW:
if backend == 'vectorized' and n_closest_points is not None:
continue # this is not supported
tic = time()
OK.execute('points', X_test[:, 0], X_test[:, 1],
backend=backend,
n_closest_points=n_closest_points)
res['t_test_{}_{}'.format(backend, n_closest_points)] = time() - tic
return res
|
python
|
def make_benchark(n_train, n_test, n_dim=2):
""" Compute the benchmarks for Ordianry Kriging
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
Returns
-------
res : dict
a dictionary with the timing results
"""
X_train = np.random.rand(n_train, n_dim)
y_train = np.random.rand(n_train)
X_test = np.random.rand(n_test, n_dim)
res = {}
for variogram_model in VARIOGRAM_MODELS:
tic = time()
OK = OrdinaryKriging(X_train[:, 0], X_train[:, 1], y_train,
variogram_model=variogram_model,
verbose=False, enable_plotting=False)
res['t_train_{}'.format(variogram_model)] = time() - tic
# Prediction tests below reuse the most recently trained variogram model
for backend in BACKENDS:
for n_closest_points in N_MOVING_WINDOW:
if backend == 'vectorized' and n_closest_points is not None:
continue # this is not supported
tic = time()
OK.execute('points', X_test[:, 0], X_test[:, 1],
backend=backend,
n_closest_points=n_closest_points)
res['t_test_{}_{}'.format(backend, n_closest_points)] = time() - tic
return res
|
[
"def",
"make_benchark",
"(",
"n_train",
",",
"n_test",
",",
"n_dim",
"=",
"2",
")",
":",
"X_train",
"=",
"np",
".",
"random",
".",
"rand",
"(",
"n_train",
",",
"n_dim",
")",
"y_train",
"=",
"np",
".",
"random",
".",
"rand",
"(",
"n_train",
")",
"X_test",
"=",
"np",
".",
"random",
".",
"rand",
"(",
"n_test",
",",
"n_dim",
")",
"res",
"=",
"{",
"}",
"for",
"variogram_model",
"in",
"VARIOGRAM_MODELS",
":",
"tic",
"=",
"time",
"(",
")",
"OK",
"=",
"OrdinaryKriging",
"(",
"X_train",
"[",
":",
",",
"0",
"]",
",",
"X_train",
"[",
":",
",",
"1",
"]",
",",
"y_train",
",",
"variogram_model",
"=",
"'linear'",
",",
"verbose",
"=",
"False",
",",
"enable_plotting",
"=",
"False",
")",
"res",
"[",
"'t_train_{}'",
".",
"format",
"(",
"variogram_model",
")",
"]",
"=",
"time",
"(",
")",
"-",
"tic",
"# All the following tests are performed with the linear variogram model",
"for",
"backend",
"in",
"BACKENDS",
":",
"for",
"n_closest_points",
"in",
"N_MOVING_WINDOW",
":",
"if",
"backend",
"==",
"'vectorized'",
"and",
"n_closest_points",
"is",
"not",
"None",
":",
"continue",
"# this is not supported",
"tic",
"=",
"time",
"(",
")",
"OK",
".",
"execute",
"(",
"'points'",
",",
"X_test",
"[",
":",
",",
"0",
"]",
",",
"X_test",
"[",
":",
",",
"1",
"]",
",",
"backend",
"=",
"backend",
",",
"n_closest_points",
"=",
"n_closest_points",
")",
"res",
"[",
"'t_test_{}_{}'",
".",
"format",
"(",
"backend",
",",
"n_closest_points",
")",
"]",
"=",
"time",
"(",
")",
"-",
"tic",
"return",
"res"
] |
Compute the benchmarks for Ordinary Kriging
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
Returns
-------
res : dict
a dictionary with the timing results
|
[
"Compute",
"the",
"benchmarks",
"for",
"Ordianry",
"Kriging"
] |
a4db3003b0b5688658c12faeb95a5a8b2b14b433
|
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/benchmarks/kriging_benchmarks.py#L14-L57
|
train
|
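The benchmark above references three module-level constants (VARIOGRAM_MODELS, BACKENDS, N_MOVING_WINDOW) that are not shown in this excerpt. Below is a minimal sketch of that assumed context, with illustrative values only, so the function can actually run:

from time import time

import numpy as np
from pykrige.ok import OrdinaryKriging

# Illustrative values; only the names are taken from the function body.
# 'linear' is placed last so that the prediction tests run against the
# linear model, matching the original intent of the script.
VARIOGRAM_MODELS = ['gaussian', 'exponential', 'spherical', 'linear']
BACKENDS = ['vectorized', 'loop', 'C']
N_MOVING_WINDOW = [None, 10, 50]

res = make_benchark(n_train=200, n_test=500)
print(sorted(res))  # keys like 't_train_linear', 't_test_loop_10', ...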
bsmurphy/PyKrige
|
benchmarks/kriging_benchmarks.py
|
print_benchmark
|
def print_benchmark(n_train, n_test, n_dim, res):
""" Print the benchmarks
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
res : dict
a dictionary with the timing results
"""
print('='*80)
print(' '*10, 'N_dim={}, N_train={}, N_test={}'.format(n_dim,
n_train, n_test))
print('='*80)
print('\n', '# Training the model', '\n')
print('|'.join(['{:>11} '.format(el) for el in ['t_train (s)'] +
VARIOGRAM_MODELS]))
print('-' * (11 + 2) * (len(VARIOGRAM_MODELS) + 1))
print('|'.join(['{:>11} '.format('Training')] +
['{:>11.2} '.format(el) for el in
[res['t_train_{}'.format(mod)]
for mod in VARIOGRAM_MODELS]]))
print('\n', '# Predicting kriging points', '\n')
print('|'.join(['{:>11} '.format(el) for el in ['t_test (s)'] + BACKENDS]))
print('-' * (11 + 2) * (len(BACKENDS) + 1))
for n_closest_points in N_MOVING_WINDOW:
timing_results = [res.get(
't_test_{}_{}'.format(mod, n_closest_points), '')
for mod in BACKENDS]
print('|'.join(['{:>11} '.format('N_nn=' + str(n_closest_points))] +
['{:>11.2} '.format(el) for el in timing_results]))
|
python
|
def print_benchmark(n_train, n_test, n_dim, res):
""" Print the benchmarks
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
res : dict
a dictionary with the timing results
"""
print('='*80)
print(' '*10, 'N_dim={}, N_train={}, N_test={}'.format(n_dim,
n_train, n_test))
print('='*80)
print('\n', '# Training the model', '\n')
print('|'.join(['{:>11} '.format(el) for el in ['t_train (s)'] +
VARIOGRAM_MODELS]))
print('-' * (11 + 2) * (len(VARIOGRAM_MODELS) + 1))
print('|'.join(['{:>11} '.format('Training')] +
['{:>11.2} '.format(el) for el in
[res['t_train_{}'.format(mod)]
for mod in VARIOGRAM_MODELS]]))
print('\n', '# Predicting kriging points', '\n')
print('|'.join(['{:>11} '.format(el) for el in ['t_test (s)'] + BACKENDS]))
print('-' * (11 + 2) * (len(BACKENDS) + 1))
for n_closest_points in N_MOVING_WINDOW:
timing_results = [res.get(
't_test_{}_{}'.format(mod, n_closest_points), '')
for mod in BACKENDS]
print('|'.join(['{:>11} '.format('N_nn=' + str(n_closest_points))] +
['{:>11.2} '.format(el) for el in timing_results]))
|
[
"def",
"print_benchmark",
"(",
"n_train",
",",
"n_test",
",",
"n_dim",
",",
"res",
")",
":",
"print",
"(",
"'='",
"*",
"80",
")",
"print",
"(",
"' '",
"*",
"10",
",",
"'N_dim={}, N_train={}, N_test={}'",
".",
"format",
"(",
"n_dim",
",",
"n_train",
",",
"n_test",
")",
")",
"print",
"(",
"'='",
"*",
"80",
")",
"print",
"(",
"'\\n'",
",",
"'# Training the model'",
",",
"'\\n'",
")",
"print",
"(",
"'|'",
".",
"join",
"(",
"[",
"'{:>11} '",
".",
"format",
"(",
"el",
")",
"for",
"el",
"in",
"[",
"'t_train (s)'",
"]",
"+",
"VARIOGRAM_MODELS",
"]",
")",
")",
"print",
"(",
"'-'",
"*",
"(",
"11",
"+",
"2",
")",
"*",
"(",
"len",
"(",
"VARIOGRAM_MODELS",
")",
"+",
"1",
")",
")",
"print",
"(",
"'|'",
".",
"join",
"(",
"[",
"'{:>11} '",
".",
"format",
"(",
"'Training'",
")",
"]",
"+",
"[",
"'{:>11.2} '",
".",
"format",
"(",
"el",
")",
"for",
"el",
"in",
"[",
"res",
"[",
"'t_train_{}'",
".",
"format",
"(",
"mod",
")",
"]",
"for",
"mod",
"in",
"VARIOGRAM_MODELS",
"]",
"]",
")",
")",
"print",
"(",
"'\\n'",
",",
"'# Predicting kriging points'",
",",
"'\\n'",
")",
"print",
"(",
"'|'",
".",
"join",
"(",
"[",
"'{:>11} '",
".",
"format",
"(",
"el",
")",
"for",
"el",
"in",
"[",
"'t_test (s)'",
"]",
"+",
"BACKENDS",
"]",
")",
")",
"print",
"(",
"'-'",
"*",
"(",
"11",
"+",
"2",
")",
"*",
"(",
"len",
"(",
"BACKENDS",
")",
"+",
"1",
")",
")",
"for",
"n_closest_points",
"in",
"N_MOVING_WINDOW",
":",
"timing_results",
"=",
"[",
"res",
".",
"get",
"(",
"'t_test_{}_{}'",
".",
"format",
"(",
"mod",
",",
"n_closest_points",
")",
",",
"''",
")",
"for",
"mod",
"in",
"BACKENDS",
"]",
"print",
"(",
"'|'",
".",
"join",
"(",
"[",
"'{:>11} '",
".",
"format",
"(",
"'N_nn='",
"+",
"str",
"(",
"n_closest_points",
")",
")",
"]",
"+",
"[",
"'{:>11.2} '",
".",
"format",
"(",
"el",
")",
"for",
"el",
"in",
"timing_results",
"]",
")",
")"
] |
Print the benchmarks
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
res : dict
a dictionary with the timing results
|
[
"Print",
"the",
"benchmarks"
] |
a4db3003b0b5688658c12faeb95a5a8b2b14b433
|
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/benchmarks/kriging_benchmarks.py#L60-L96
|
train
|
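Together, make_benchark and print_benchmark form a small benchmark harness. A plausible driver is sketched below (assumed; the script's actual __main__ block is not part of this excerpt):

# Hypothetical driver combining the two helpers above.
if __name__ == '__main__':
    for n_train, n_test in [(400, 1000), (400, 2000)]:
        results = make_benchark(n_train, n_test)
        print_benchmark(n_train, n_test, n_dim=2, res=results)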
bsmurphy/PyKrige
|
pykrige/uk.py
|
UniversalKriging.display_variogram_model
|
def display_variogram_model(self):
"""Displays variogram model with the actual binned data."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.lags, self.semivariance, 'r*')
ax.plot(self.lags,
self.variogram_function(self.variogram_model_parameters,
self.lags), 'k-')
plt.show()
|
python
|
def display_variogram_model(self):
"""Displays variogram model with the actual binned data."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.lags, self.semivariance, 'r*')
ax.plot(self.lags,
self.variogram_function(self.variogram_model_parameters,
self.lags), 'k-')
plt.show()
|
[
"def",
"display_variogram_model",
"(",
"self",
")",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
")",
"ax",
".",
"plot",
"(",
"self",
".",
"lags",
",",
"self",
".",
"semivariance",
",",
"'r*'",
")",
"ax",
".",
"plot",
"(",
"self",
".",
"lags",
",",
"self",
".",
"variogram_function",
"(",
"self",
".",
"variogram_model_parameters",
",",
"self",
".",
"lags",
")",
",",
"'k-'",
")",
"plt",
".",
"show",
"(",
")"
] |
Displays variogram model with the actual binned data.
|
[
"Displays",
"variogram",
"model",
"with",
"the",
"actual",
"binned",
"data",
"."
] |
a4db3003b0b5688658c12faeb95a5a8b2b14b433
|
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/uk.py#L608-L616
|
train
|
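A minimal usage sketch for display_variogram_model; the UniversalKriging constructor call follows the public PyKrige API, and the data is synthetic:

import numpy as np
from pykrige.uk import UniversalKriging

# Synthetic 2D data; the call then plots the binned experimental
# semivariance (red stars) against the fitted model (black curve).
x, y = np.random.rand(50), np.random.rand(50)
z = np.sin(6.0 * x) + np.cos(6.0 * y) + 0.1 * np.random.rand(50)

uk = UniversalKriging(x, y, z, variogram_model='spherical')
uk.display_variogram_model()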
bsmurphy/PyKrige
|
pykrige/uk.py
|
UniversalKriging.plot_epsilon_residuals
|
def plot_epsilon_residuals(self):
"""Plots the epsilon residuals for the variogram fit."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(range(self.epsilon.size), self.epsilon, c='k', marker='*')
ax.axhline(y=0.0)
plt.show()
|
python
|
def plot_epsilon_residuals(self):
"""Plots the epsilon residuals for the variogram fit."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(range(self.epsilon.size), self.epsilon, c='k', marker='*')
ax.axhline(y=0.0)
plt.show()
|
[
"def",
"plot_epsilon_residuals",
"(",
"self",
")",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
")",
"ax",
".",
"scatter",
"(",
"range",
"(",
"self",
".",
"epsilon",
".",
"size",
")",
",",
"self",
".",
"epsilon",
",",
"c",
"=",
"'k'",
",",
"marker",
"=",
"'*'",
")",
"ax",
".",
"axhline",
"(",
"y",
"=",
"0.0",
")",
"plt",
".",
"show",
"(",
")"
] |
Plots the epsilon residuals for the variogram fit.
|
[
"Plots",
"the",
"epsilon",
"residuals",
"for",
"the",
"variogram",
"fit",
"."
] |
a4db3003b0b5688658c12faeb95a5a8b2b14b433
|
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/uk.py#L647-L653
|
train
|
bsmurphy/PyKrige
|
pykrige/uk.py
|
UniversalKriging.print_statistics
|
def print_statistics(self):
"""Prints out the Q1, Q2, and cR statistics for the variogram fit.
NOTE that ideally Q1 is close to zero, Q2 is close to 1,
and cR is as small as possible.
"""
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR)
|
python
|
def print_statistics(self):
"""Prints out the Q1, Q2, and cR statistics for the variogram fit.
NOTE that ideally Q1 is close to zero, Q2 is close to 1,
and cR is as small as possible.
"""
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR)
|
[
"def",
"print_statistics",
"(",
"self",
")",
":",
"print",
"(",
"\"Q1 =\"",
",",
"self",
".",
"Q1",
")",
"print",
"(",
"\"Q2 =\"",
",",
"self",
".",
"Q2",
")",
"print",
"(",
"\"cR =\"",
",",
"self",
".",
"cR",
")"
] |
Prints out the Q1, Q2, and cR statistics for the variogram fit.
NOTE that ideally Q1 is close to zero, Q2 is close to 1,
and cR is as small as possible.
|
[
"Prints",
"out",
"the",
"Q1",
"Q2",
"and",
"cR",
"statistics",
"for",
"the",
"variogram",
"fit",
".",
"NOTE",
"that",
"ideally",
"Q1",
"is",
"close",
"to",
"zero",
"Q2",
"is",
"close",
"to",
"1",
"and",
"cR",
"is",
"as",
"small",
"as",
"possible",
"."
] |
a4db3003b0b5688658c12faeb95a5a8b2b14b433
|
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/uk.py#L661-L668
|
train
|
bsmurphy/PyKrige
|
pykrige/core.py
|
_adjust_for_anisotropy
|
def _adjust_for_anisotropy(X, center, scaling, angle):
"""Adjusts data coordinates to take into account anisotropy.
Can also be used to take into account data scaling. Angles are CCW about
specified axes. Scaling is applied in rotated coordinate system.
Parameters
----------
X : ndarray
float array [n_samples, n_dim], the input array of coordinates
center : ndarray
float array [n_dim], the coordinate of centers
scaling : ndarray
float array [n_dim - 1], the scaling of last two dimensions
angle : ndarray
float array [2*n_dim - 3], the anisotropy angle (degrees)
Returns
-------
X_adj : ndarray
float array [n_samples, n_dim], the X array adjusted for anisotropy.
"""
center = np.asarray(center)[None, :]
angle = np.asarray(angle)*np.pi/180
X -= center
Ndim = X.shape[1]
if Ndim == 1:
        raise NotImplementedError('Not implemented yet')
elif Ndim == 2:
stretch = np.array([[1, 0], [0, scaling[0]]])
rot_tot = np.array([[np.cos(-angle[0]), -np.sin(-angle[0])],
[np.sin(-angle[0]), np.cos(-angle[0])]])
elif Ndim == 3:
stretch = np.array([[1., 0., 0.], [0., scaling[0], 0.], [0., 0., scaling[1]]])
rotate_x = np.array([[1., 0., 0.],
[0., np.cos(-angle[0]), -np.sin(-angle[0])],
[0., np.sin(-angle[0]), np.cos(-angle[0])]])
rotate_y = np.array([[np.cos(-angle[1]), 0., np.sin(-angle[1])],
[0., 1., 0.],
[-np.sin(-angle[1]), 0., np.cos(-angle[1])]])
rotate_z = np.array([[np.cos(-angle[2]), -np.sin(-angle[2]), 0.],
[np.sin(-angle[2]), np.cos(-angle[2]), 0.],
[0., 0., 1.]])
rot_tot = np.dot(rotate_z, np.dot(rotate_y, rotate_x))
else:
raise ValueError("Adjust for anisotropy function doesn't "
"support ND spaces where N>3")
X_adj = np.dot(stretch, np.dot(rot_tot, X.T)).T
X_adj += center
return X_adj
|
python
|
def _adjust_for_anisotropy(X, center, scaling, angle):
"""Adjusts data coordinates to take into account anisotropy.
Can also be used to take into account data scaling. Angles are CCW about
specified axes. Scaling is applied in rotated coordinate system.
Parameters
----------
X : ndarray
float array [n_samples, n_dim], the input array of coordinates
center : ndarray
float array [n_dim], the coordinate of centers
scaling : ndarray
float array [n_dim - 1], the scaling of last two dimensions
angle : ndarray
float array [2*n_dim - 3], the anisotropy angle (degrees)
Returns
-------
X_adj : ndarray
float array [n_samples, n_dim], the X array adjusted for anisotropy.
"""
center = np.asarray(center)[None, :]
angle = np.asarray(angle)*np.pi/180
X -= center
Ndim = X.shape[1]
if Ndim == 1:
        raise NotImplementedError('Not implemented yet')
elif Ndim == 2:
stretch = np.array([[1, 0], [0, scaling[0]]])
rot_tot = np.array([[np.cos(-angle[0]), -np.sin(-angle[0])],
[np.sin(-angle[0]), np.cos(-angle[0])]])
elif Ndim == 3:
stretch = np.array([[1., 0., 0.], [0., scaling[0], 0.], [0., 0., scaling[1]]])
rotate_x = np.array([[1., 0., 0.],
[0., np.cos(-angle[0]), -np.sin(-angle[0])],
[0., np.sin(-angle[0]), np.cos(-angle[0])]])
rotate_y = np.array([[np.cos(-angle[1]), 0., np.sin(-angle[1])],
[0., 1., 0.],
[-np.sin(-angle[1]), 0., np.cos(-angle[1])]])
rotate_z = np.array([[np.cos(-angle[2]), -np.sin(-angle[2]), 0.],
[np.sin(-angle[2]), np.cos(-angle[2]), 0.],
[0., 0., 1.]])
rot_tot = np.dot(rotate_z, np.dot(rotate_y, rotate_x))
else:
raise ValueError("Adjust for anisotropy function doesn't "
"support ND spaces where N>3")
X_adj = np.dot(stretch, np.dot(rot_tot, X.T)).T
X_adj += center
return X_adj
|
[
"def",
"_adjust_for_anisotropy",
"(",
"X",
",",
"center",
",",
"scaling",
",",
"angle",
")",
":",
"center",
"=",
"np",
".",
"asarray",
"(",
"center",
")",
"[",
"None",
",",
":",
"]",
"angle",
"=",
"np",
".",
"asarray",
"(",
"angle",
")",
"*",
"np",
".",
"pi",
"/",
"180",
"X",
"-=",
"center",
"Ndim",
"=",
"X",
".",
"shape",
"[",
"1",
"]",
"if",
"Ndim",
"==",
"1",
":",
"raise",
"NotImplementedError",
"(",
"'Not implemnented yet?'",
")",
"elif",
"Ndim",
"==",
"2",
":",
"stretch",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"1",
",",
"0",
"]",
",",
"[",
"0",
",",
"scaling",
"[",
"0",
"]",
"]",
"]",
")",
"rot_tot",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"np",
".",
"cos",
"(",
"-",
"angle",
"[",
"0",
"]",
")",
",",
"-",
"np",
".",
"sin",
"(",
"-",
"angle",
"[",
"0",
"]",
")",
"]",
",",
"[",
"np",
".",
"sin",
"(",
"-",
"angle",
"[",
"0",
"]",
")",
",",
"np",
".",
"cos",
"(",
"-",
"angle",
"[",
"0",
"]",
")",
"]",
"]",
")",
"elif",
"Ndim",
"==",
"3",
":",
"stretch",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"1.",
",",
"0.",
",",
"0.",
"]",
",",
"[",
"0.",
",",
"scaling",
"[",
"0",
"]",
",",
"0.",
"]",
",",
"[",
"0.",
",",
"0.",
",",
"scaling",
"[",
"1",
"]",
"]",
"]",
")",
"rotate_x",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"1.",
",",
"0.",
",",
"0.",
"]",
",",
"[",
"0.",
",",
"np",
".",
"cos",
"(",
"-",
"angle",
"[",
"0",
"]",
")",
",",
"-",
"np",
".",
"sin",
"(",
"-",
"angle",
"[",
"0",
"]",
")",
"]",
",",
"[",
"0.",
",",
"np",
".",
"sin",
"(",
"-",
"angle",
"[",
"0",
"]",
")",
",",
"np",
".",
"cos",
"(",
"-",
"angle",
"[",
"0",
"]",
")",
"]",
"]",
")",
"rotate_y",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"np",
".",
"cos",
"(",
"-",
"angle",
"[",
"1",
"]",
")",
",",
"0.",
",",
"np",
".",
"sin",
"(",
"-",
"angle",
"[",
"1",
"]",
")",
"]",
",",
"[",
"0.",
",",
"1.",
",",
"0.",
"]",
",",
"[",
"-",
"np",
".",
"sin",
"(",
"-",
"angle",
"[",
"1",
"]",
")",
",",
"0.",
",",
"np",
".",
"cos",
"(",
"-",
"angle",
"[",
"1",
"]",
")",
"]",
"]",
")",
"rotate_z",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"np",
".",
"cos",
"(",
"-",
"angle",
"[",
"2",
"]",
")",
",",
"-",
"np",
".",
"sin",
"(",
"-",
"angle",
"[",
"2",
"]",
")",
",",
"0.",
"]",
",",
"[",
"np",
".",
"sin",
"(",
"-",
"angle",
"[",
"2",
"]",
")",
",",
"np",
".",
"cos",
"(",
"-",
"angle",
"[",
"2",
"]",
")",
",",
"0.",
"]",
",",
"[",
"0.",
",",
"0.",
",",
"1.",
"]",
"]",
")",
"rot_tot",
"=",
"np",
".",
"dot",
"(",
"rotate_z",
",",
"np",
".",
"dot",
"(",
"rotate_y",
",",
"rotate_x",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Adjust for anisotropy function doesn't \"",
"\"support ND spaces where N>3\"",
")",
"X_adj",
"=",
"np",
".",
"dot",
"(",
"stretch",
",",
"np",
".",
"dot",
"(",
"rot_tot",
",",
"X",
".",
"T",
")",
")",
".",
"T",
"X_adj",
"+=",
"center",
"return",
"X_adj"
] |
Adjusts data coordinates to take into account anisotropy.
Can also be used to take into account data scaling. Angles are CCW about
specified axes. Scaling is applied in rotated coordinate system.
Parameters
----------
X : ndarray
float array [n_samples, n_dim], the input array of coordinates
center : ndarray
float array [n_dim], the coordinate of centers
scaling : ndarray
float array [n_dim - 1], the scaling of last two dimensions
angle : ndarray
float array [2*n_dim - 3], the anisotropy angle (degrees)
Returns
-------
X_adj : ndarray
float array [n_samples, n_dim], the X array adjusted for anisotropy.
|
[
"Adjusts",
"data",
"coordinates",
"to",
"take",
"into",
"account",
"anisotropy",
".",
"Can",
"also",
"be",
"used",
"to",
"take",
"into",
"account",
"data",
"scaling",
".",
"Angles",
"are",
"CCW",
"about",
"specified",
"axes",
".",
"Scaling",
"is",
"applied",
"in",
"rotated",
"coordinate",
"system",
"."
] |
a4db3003b0b5688658c12faeb95a5a8b2b14b433
|
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/core.py#L113-L167
|
train
|
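A self-contained numerical check of the 2D branch of _adjust_for_anisotropy: with angle=[90.0] the rotation maps (1, 0) to (0, -1) and (0, 1) to (1, 0), after which the stretch scales the second coordinate by scaling[0]. Note the function mutates X in place via X -= center, hence the copy:

import numpy as np

X = np.array([[1.0, 0.0], [0.0, 1.0]])
X_adj = _adjust_for_anisotropy(X.copy(), center=[0.0, 0.0],
                               scaling=[2.0], angle=[90.0])
print(np.round(X_adj, 6))
# [[ 0. -2.]
#  [ 1.  0.]]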
bsmurphy/PyKrige
|
pykrige/core.py
|
_calculate_variogram_model
|
def _calculate_variogram_model(lags, semivariance, variogram_model,
variogram_function, weight):
"""Function that fits a variogram model when parameters are not specified.
Returns variogram model parameters that minimize the RMSE between the
specified variogram function and the actual calculated variogram points.
Parameters
----------
lags: 1d array
binned lags/distances to use for variogram model parameter estimation
semivariance: 1d array
binned/averaged experimental semivariances to use for variogram model
parameter estimation
variogram_model: str/unicode
specified variogram model to use for parameter estimation
variogram_function: callable
        the actual function that evaluates the model variogram
    weight: bool
        flag for implementing the crude weighting routine, used in order to fit
        smaller lags better; this is passed on to the residual calculation
        function, where weighting is actually applied...
Returns
-------
res: list
list of estimated variogram model parameters
NOTE that the estimation routine works in terms of the partial sill
(psill = sill - nugget) -- setting bounds such that psill > 0 ensures that
the sill will always be greater than the nugget...
"""
if variogram_model == 'linear':
x0 = [(np.amax(semivariance) - np.amin(semivariance)) /
(np.amax(lags) - np.amin(lags)), np.amin(semivariance)]
bnds = ([0., 0.], [np.inf, np.amax(semivariance)])
elif variogram_model == 'power':
x0 = [(np.amax(semivariance) - np.amin(semivariance)) /
(np.amax(lags) - np.amin(lags)), 1.1, np.amin(semivariance)]
bnds = ([0., 0.001, 0.], [np.inf, 1.999, np.amax(semivariance)])
else:
x0 = [np.amax(semivariance) - np.amin(semivariance),
0.25*np.amax(lags), np.amin(semivariance)]
bnds = ([0., 0., 0.], [10.*np.amax(semivariance), np.amax(lags),
np.amax(semivariance)])
# use 'soft' L1-norm minimization in order to buffer against
# potential outliers (weird/skewed points)
res = least_squares(_variogram_residuals, x0, bounds=bnds, loss='soft_l1',
args=(lags, semivariance, variogram_function, weight))
return res.x
|
python
|
def _calculate_variogram_model(lags, semivariance, variogram_model,
variogram_function, weight):
"""Function that fits a variogram model when parameters are not specified.
Returns variogram model parameters that minimize the RMSE between the
specified variogram function and the actual calculated variogram points.
Parameters
----------
lags: 1d array
binned lags/distances to use for variogram model parameter estimation
semivariance: 1d array
binned/averaged experimental semivariances to use for variogram model
parameter estimation
variogram_model: str/unicode
specified variogram model to use for parameter estimation
variogram_function: callable
        the actual function that evaluates the model variogram
    weight: bool
        flag for implementing the crude weighting routine, used in order to fit
        smaller lags better; this is passed on to the residual calculation
        function, where weighting is actually applied...
Returns
-------
res: list
list of estimated variogram model parameters
NOTE that the estimation routine works in terms of the partial sill
(psill = sill - nugget) -- setting bounds such that psill > 0 ensures that
the sill will always be greater than the nugget...
"""
if variogram_model == 'linear':
x0 = [(np.amax(semivariance) - np.amin(semivariance)) /
(np.amax(lags) - np.amin(lags)), np.amin(semivariance)]
bnds = ([0., 0.], [np.inf, np.amax(semivariance)])
elif variogram_model == 'power':
x0 = [(np.amax(semivariance) - np.amin(semivariance)) /
(np.amax(lags) - np.amin(lags)), 1.1, np.amin(semivariance)]
bnds = ([0., 0.001, 0.], [np.inf, 1.999, np.amax(semivariance)])
else:
x0 = [np.amax(semivariance) - np.amin(semivariance),
0.25*np.amax(lags), np.amin(semivariance)]
bnds = ([0., 0., 0.], [10.*np.amax(semivariance), np.amax(lags),
np.amax(semivariance)])
# use 'soft' L1-norm minimization in order to buffer against
# potential outliers (weird/skewed points)
res = least_squares(_variogram_residuals, x0, bounds=bnds, loss='soft_l1',
args=(lags, semivariance, variogram_function, weight))
return res.x
|
[
"def",
"_calculate_variogram_model",
"(",
"lags",
",",
"semivariance",
",",
"variogram_model",
",",
"variogram_function",
",",
"weight",
")",
":",
"if",
"variogram_model",
"==",
"'linear'",
":",
"x0",
"=",
"[",
"(",
"np",
".",
"amax",
"(",
"semivariance",
")",
"-",
"np",
".",
"amin",
"(",
"semivariance",
")",
")",
"/",
"(",
"np",
".",
"amax",
"(",
"lags",
")",
"-",
"np",
".",
"amin",
"(",
"lags",
")",
")",
",",
"np",
".",
"amin",
"(",
"semivariance",
")",
"]",
"bnds",
"=",
"(",
"[",
"0.",
",",
"0.",
"]",
",",
"[",
"np",
".",
"inf",
",",
"np",
".",
"amax",
"(",
"semivariance",
")",
"]",
")",
"elif",
"variogram_model",
"==",
"'power'",
":",
"x0",
"=",
"[",
"(",
"np",
".",
"amax",
"(",
"semivariance",
")",
"-",
"np",
".",
"amin",
"(",
"semivariance",
")",
")",
"/",
"(",
"np",
".",
"amax",
"(",
"lags",
")",
"-",
"np",
".",
"amin",
"(",
"lags",
")",
")",
",",
"1.1",
",",
"np",
".",
"amin",
"(",
"semivariance",
")",
"]",
"bnds",
"=",
"(",
"[",
"0.",
",",
"0.001",
",",
"0.",
"]",
",",
"[",
"np",
".",
"inf",
",",
"1.999",
",",
"np",
".",
"amax",
"(",
"semivariance",
")",
"]",
")",
"else",
":",
"x0",
"=",
"[",
"np",
".",
"amax",
"(",
"semivariance",
")",
"-",
"np",
".",
"amin",
"(",
"semivariance",
")",
",",
"0.25",
"*",
"np",
".",
"amax",
"(",
"lags",
")",
",",
"np",
".",
"amin",
"(",
"semivariance",
")",
"]",
"bnds",
"=",
"(",
"[",
"0.",
",",
"0.",
",",
"0.",
"]",
",",
"[",
"10.",
"*",
"np",
".",
"amax",
"(",
"semivariance",
")",
",",
"np",
".",
"amax",
"(",
"lags",
")",
",",
"np",
".",
"amax",
"(",
"semivariance",
")",
"]",
")",
"# use 'soft' L1-norm minimization in order to buffer against",
"# potential outliers (weird/skewed points)",
"res",
"=",
"least_squares",
"(",
"_variogram_residuals",
",",
"x0",
",",
"bounds",
"=",
"bnds",
",",
"loss",
"=",
"'soft_l1'",
",",
"args",
"=",
"(",
"lags",
",",
"semivariance",
",",
"variogram_function",
",",
"weight",
")",
")",
"return",
"res",
".",
"x"
] |
Function that fits a variogram model when parameters are not specified.
Returns variogram model parameters that minimize the RMSE between the
specified variogram function and the actual calculated variogram points.
Parameters
----------
lags: 1d array
binned lags/distances to use for variogram model parameter estimation
semivariance: 1d array
binned/averaged experimental semivariances to use for variogram model
parameter estimation
variogram_model: str/unicode
specified variogram model to use for parameter estimation
variogram_function: callable
        the actual function that evaluates the model variogram
    weight: bool
        flag for implementing the crude weighting routine, used in order to fit
        smaller lags better; this is passed on to the residual calculation
        function, where weighting is actually applied...
Returns
-------
res: list
list of estimated variogram model parameters
NOTE that the estimation routine works in terms of the partial sill
(psill = sill - nugget) -- setting bounds such that psill > 0 ensures that
the sill will always be greater than the nugget...
|
[
"Function",
"that",
"fits",
"a",
"variogram",
"model",
"when",
"parameters",
"are",
"not",
"specified",
".",
"Returns",
"variogram",
"model",
"parameters",
"that",
"minimize",
"the",
"RMSE",
"between",
"the",
"specified",
"variogram",
"function",
"and",
"the",
"actual",
"calculated",
"variogram",
"points",
"."
] |
a4db3003b0b5688658c12faeb95a5a8b2b14b433
|
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/core.py#L531-L582
|
train
|
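Because _variogram_residuals is private and not shown in this excerpt, the sketch below reproduces the same soft-L1 fit directly with SciPy, using a hypothetical linear variogram gamma(d) = slope * d + nugget (which matches PyKrige's linear-model convention):

import numpy as np
from scipy.optimize import least_squares

def linear_variogram(params, d):
    slope, nugget = params
    return slope * d + nugget

def residuals(params, lags, semivariance):
    return linear_variogram(params, lags) - semivariance

rng = np.random.RandomState(0)
lags = np.linspace(0.1, 10.0, 30)
semivariance = 0.5 * lags + 0.2 + 0.05 * rng.randn(30)

# Same initial guess and bounds as the 'linear' branch above.
x0 = [(semivariance.max() - semivariance.min()) /
      (lags.max() - lags.min()), semivariance.min()]
bnds = ([0., 0.], [np.inf, semivariance.max()])
fit = least_squares(residuals, x0, bounds=bnds, loss='soft_l1',
                    args=(lags, semivariance))
print(fit.x)  # approximately [0.5, 0.2]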
bsmurphy/PyKrige
|
pykrige/core.py
|
_krige
|
def _krige(X, y, coords, variogram_function,
variogram_model_parameters, coordinates_type):
"""Sets up and solves the ordinary kriging system for the given
coordinate pair. This function is only used for the statistics calculations.
Parameters
----------
X: ndarray
float array [n_samples, n_dim], the input array of coordinates
y: ndarray
float array [n_samples], the input array of measurement values
coords: ndarray
float array [1, n_dim], point at which to evaluate the kriging system
variogram_function: callable
function that will be called to evaluate variogram model
variogram_model_parameters: list
user-specified parameters for variogram model
coordinates_type: str
type of coordinates in X array, can be 'euclidean' for standard
rectangular coordinates or 'geographic' if the coordinates are lat/lon
Returns
-------
zinterp: float
kriging estimate at the specified point
sigmasq: float
mean square error of the kriging estimate
"""
zero_index = None
zero_value = False
# calculate distance between points... need a square distance matrix
# of inter-measurement-point distances and a vector of distances between
# measurement points (X) and the kriging point (coords)
if coordinates_type == 'euclidean':
d = squareform(pdist(X, metric='euclidean'))
bd = np.squeeze(cdist(X, coords[None, :], metric='euclidean'))
# geographic coordinate distances still calculated in the old way...
# assume X[:, 0] ('x') => lon, X[:, 1] ('y') => lat
# also assume problem is 2D; check done earlier in initializing variogram
elif coordinates_type == 'geographic':
x1, x2 = np.meshgrid(X[:, 0], X[:, 0], sparse=True)
y1, y2 = np.meshgrid(X[:, 1], X[:, 1], sparse=True)
d = great_circle_distance(x1, y1, x2, y2)
bd = great_circle_distance(X[:, 0], X[:, 1],
coords[0] * np.ones(X.shape[0]),
coords[1] * np.ones(X.shape[0]))
# this check is done when initializing variogram, but kept here anyways...
else:
raise ValueError("Specified coordinate type '%s' "
"is not supported." % coordinates_type)
# check if kriging point overlaps with measurement point
if np.any(np.absolute(bd) <= 1e-10):
zero_value = True
zero_index = np.where(bd <= 1e-10)[0][0]
# set up kriging matrix
n = X.shape[0]
a = np.zeros((n+1, n+1))
a[:n, :n] = - variogram_function(variogram_model_parameters, d)
np.fill_diagonal(a, 0.0)
a[n, :] = 1.0
a[:, n] = 1.0
a[n, n] = 0.0
# set up RHS
b = np.zeros((n+1, 1))
b[:n, 0] = - variogram_function(variogram_model_parameters, bd)
if zero_value:
b[zero_index, 0] = 0.0
b[n, 0] = 1.0
# solve
res = np.linalg.solve(a, b)
zinterp = np.sum(res[:n, 0] * y)
sigmasq = np.sum(res[:, 0] * -b[:, 0])
return zinterp, sigmasq
|
python
|
def _krige(X, y, coords, variogram_function,
variogram_model_parameters, coordinates_type):
"""Sets up and solves the ordinary kriging system for the given
coordinate pair. This function is only used for the statistics calculations.
Parameters
----------
X: ndarray
float array [n_samples, n_dim], the input array of coordinates
y: ndarray
float array [n_samples], the input array of measurement values
coords: ndarray
float array [1, n_dim], point at which to evaluate the kriging system
variogram_function: callable
function that will be called to evaluate variogram model
variogram_model_parameters: list
user-specified parameters for variogram model
coordinates_type: str
type of coordinates in X array, can be 'euclidean' for standard
rectangular coordinates or 'geographic' if the coordinates are lat/lon
Returns
-------
zinterp: float
kriging estimate at the specified point
sigmasq: float
mean square error of the kriging estimate
"""
zero_index = None
zero_value = False
# calculate distance between points... need a square distance matrix
# of inter-measurement-point distances and a vector of distances between
# measurement points (X) and the kriging point (coords)
if coordinates_type == 'euclidean':
d = squareform(pdist(X, metric='euclidean'))
bd = np.squeeze(cdist(X, coords[None, :], metric='euclidean'))
# geographic coordinate distances still calculated in the old way...
# assume X[:, 0] ('x') => lon, X[:, 1] ('y') => lat
# also assume problem is 2D; check done earlier in initializing variogram
elif coordinates_type == 'geographic':
x1, x2 = np.meshgrid(X[:, 0], X[:, 0], sparse=True)
y1, y2 = np.meshgrid(X[:, 1], X[:, 1], sparse=True)
d = great_circle_distance(x1, y1, x2, y2)
bd = great_circle_distance(X[:, 0], X[:, 1],
coords[0] * np.ones(X.shape[0]),
coords[1] * np.ones(X.shape[0]))
# this check is done when initializing variogram, but kept here anyways...
else:
raise ValueError("Specified coordinate type '%s' "
"is not supported." % coordinates_type)
# check if kriging point overlaps with measurement point
if np.any(np.absolute(bd) <= 1e-10):
zero_value = True
zero_index = np.where(bd <= 1e-10)[0][0]
# set up kriging matrix
n = X.shape[0]
a = np.zeros((n+1, n+1))
a[:n, :n] = - variogram_function(variogram_model_parameters, d)
np.fill_diagonal(a, 0.0)
a[n, :] = 1.0
a[:, n] = 1.0
a[n, n] = 0.0
# set up RHS
b = np.zeros((n+1, 1))
b[:n, 0] = - variogram_function(variogram_model_parameters, bd)
if zero_value:
b[zero_index, 0] = 0.0
b[n, 0] = 1.0
# solve
res = np.linalg.solve(a, b)
zinterp = np.sum(res[:n, 0] * y)
sigmasq = np.sum(res[:, 0] * -b[:, 0])
return zinterp, sigmasq
|
[
"def",
"_krige",
"(",
"X",
",",
"y",
",",
"coords",
",",
"variogram_function",
",",
"variogram_model_parameters",
",",
"coordinates_type",
")",
":",
"zero_index",
"=",
"None",
"zero_value",
"=",
"False",
"# calculate distance between points... need a square distance matrix",
"# of inter-measurement-point distances and a vector of distances between",
"# measurement points (X) and the kriging point (coords)",
"if",
"coordinates_type",
"==",
"'euclidean'",
":",
"d",
"=",
"squareform",
"(",
"pdist",
"(",
"X",
",",
"metric",
"=",
"'euclidean'",
")",
")",
"bd",
"=",
"np",
".",
"squeeze",
"(",
"cdist",
"(",
"X",
",",
"coords",
"[",
"None",
",",
":",
"]",
",",
"metric",
"=",
"'euclidean'",
")",
")",
"# geographic coordinate distances still calculated in the old way...",
"# assume X[:, 0] ('x') => lon, X[:, 1] ('y') => lat",
"# also assume problem is 2D; check done earlier in initializing variogram",
"elif",
"coordinates_type",
"==",
"'geographic'",
":",
"x1",
",",
"x2",
"=",
"np",
".",
"meshgrid",
"(",
"X",
"[",
":",
",",
"0",
"]",
",",
"X",
"[",
":",
",",
"0",
"]",
",",
"sparse",
"=",
"True",
")",
"y1",
",",
"y2",
"=",
"np",
".",
"meshgrid",
"(",
"X",
"[",
":",
",",
"1",
"]",
",",
"X",
"[",
":",
",",
"1",
"]",
",",
"sparse",
"=",
"True",
")",
"d",
"=",
"great_circle_distance",
"(",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
")",
"bd",
"=",
"great_circle_distance",
"(",
"X",
"[",
":",
",",
"0",
"]",
",",
"X",
"[",
":",
",",
"1",
"]",
",",
"coords",
"[",
"0",
"]",
"*",
"np",
".",
"ones",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")",
",",
"coords",
"[",
"1",
"]",
"*",
"np",
".",
"ones",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")",
")",
"# this check is done when initializing variogram, but kept here anyways...",
"else",
":",
"raise",
"ValueError",
"(",
"\"Specified coordinate type '%s' \"",
"\"is not supported.\"",
"%",
"coordinates_type",
")",
"# check if kriging point overlaps with measurement point",
"if",
"np",
".",
"any",
"(",
"np",
".",
"absolute",
"(",
"bd",
")",
"<=",
"1e-10",
")",
":",
"zero_value",
"=",
"True",
"zero_index",
"=",
"np",
".",
"where",
"(",
"bd",
"<=",
"1e-10",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"# set up kriging matrix",
"n",
"=",
"X",
".",
"shape",
"[",
"0",
"]",
"a",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
"+",
"1",
",",
"n",
"+",
"1",
")",
")",
"a",
"[",
":",
"n",
",",
":",
"n",
"]",
"=",
"-",
"variogram_function",
"(",
"variogram_model_parameters",
",",
"d",
")",
"np",
".",
"fill_diagonal",
"(",
"a",
",",
"0.0",
")",
"a",
"[",
"n",
",",
":",
"]",
"=",
"1.0",
"a",
"[",
":",
",",
"n",
"]",
"=",
"1.0",
"a",
"[",
"n",
",",
"n",
"]",
"=",
"0.0",
"# set up RHS",
"b",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
"+",
"1",
",",
"1",
")",
")",
"b",
"[",
":",
"n",
",",
"0",
"]",
"=",
"-",
"variogram_function",
"(",
"variogram_model_parameters",
",",
"bd",
")",
"if",
"zero_value",
":",
"b",
"[",
"zero_index",
",",
"0",
"]",
"=",
"0.0",
"b",
"[",
"n",
",",
"0",
"]",
"=",
"1.0",
"# solve",
"res",
"=",
"np",
".",
"linalg",
".",
"solve",
"(",
"a",
",",
"b",
")",
"zinterp",
"=",
"np",
".",
"sum",
"(",
"res",
"[",
":",
"n",
",",
"0",
"]",
"*",
"y",
")",
"sigmasq",
"=",
"np",
".",
"sum",
"(",
"res",
"[",
":",
",",
"0",
"]",
"*",
"-",
"b",
"[",
":",
",",
"0",
"]",
")",
"return",
"zinterp",
",",
"sigmasq"
] |
Sets up and solves the ordinary kriging system for the given
coordinate pair. This function is only used for the statistics calculations.
Parameters
----------
X: ndarray
float array [n_samples, n_dim], the input array of coordinates
y: ndarray
float array [n_samples], the input array of measurement values
coords: ndarray
float array [1, n_dim], point at which to evaluate the kriging system
variogram_function: callable
function that will be called to evaluate variogram model
variogram_model_parameters: list
user-specified parameters for variogram model
coordinates_type: str
type of coordinates in X array, can be 'euclidean' for standard
rectangular coordinates or 'geographic' if the coordinates are lat/lon
Returns
-------
zinterp: float
kriging estimate at the specified point
sigmasq: float
mean square error of the kriging estimate
|
[
"Sets",
"up",
"and",
"solves",
"the",
"ordinary",
"kriging",
"system",
"for",
"the",
"given",
"coordinate",
"pair",
".",
"This",
"function",
"is",
"only",
"used",
"for",
"the",
"statistics",
"calculations",
"."
] |
a4db3003b0b5688658c12faeb95a5a8b2b14b433
|
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/core.py#L585-L666
|
train
|
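A toy end-to-end solve for a single kriging point, using a hypothetical linear variogram; the Lagrange row and column added at index n are what force the kriging weights to sum to one:

import numpy as np

def linear_variogram(params, d):
    # hypothetical model: gamma(d) = slope * d + nugget
    return params[0] * d + params[1]

X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
y = np.array([1.0, 2.0, 3.0])
coords = np.array([0.5, 0.5])

zinterp, sigmasq = _krige(X, y, coords, linear_variogram,
                          [1.0, 0.0], 'euclidean')
print(zinterp, sigmasq)  # the weights sum to one by construction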
bsmurphy/PyKrige
|
pykrige/core.py
|
_find_statistics
|
def _find_statistics(X, y, variogram_function,
variogram_model_parameters, coordinates_type):
"""Calculates variogram fit statistics.
Returns the delta, sigma, and epsilon values for the variogram fit.
These arrays are used for statistics calculations.
Parameters
----------
X: ndarray
float array [n_samples, n_dim], the input array of coordinates
y: ndarray
float array [n_samples], the input array of measurement values
variogram_function: callable
function that will be called to evaluate variogram model
variogram_model_parameters: list
user-specified parameters for variogram model
coordinates_type: str
type of coordinates in X array, can be 'euclidean' for standard
rectangular coordinates or 'geographic' if the coordinates are lat/lon
Returns
-------
delta: ndarray
residuals between observed values and kriged estimates for those values
sigma: ndarray
mean error in kriging estimates
epsilon: ndarray
residuals normalized by their mean error
"""
delta = np.zeros(y.shape)
sigma = np.zeros(y.shape)
for i in range(y.shape[0]):
# skip the first value in the kriging problem
if i == 0:
continue
else:
k, ss = _krige(X[:i, :], y[:i], X[i, :], variogram_function,
variogram_model_parameters, coordinates_type)
# if the estimation error is zero, it's probably because
# the evaluation point X[i, :] is really close to one of the
# kriging system points in X[:i, :]...
# in the case of zero estimation error, the results are not stored
if np.absolute(ss) < eps:
continue
delta[i] = y[i] - k
sigma[i] = np.sqrt(ss)
# only use non-zero entries in these arrays... sigma is used to pull out
# non-zero entries in both cases because it is guaranteed to be positive,
# whereas delta can be either positive or negative
delta = delta[sigma > eps]
sigma = sigma[sigma > eps]
epsilon = delta/sigma
return delta, sigma, epsilon
|
python
|
def _find_statistics(X, y, variogram_function,
variogram_model_parameters, coordinates_type):
"""Calculates variogram fit statistics.
Returns the delta, sigma, and epsilon values for the variogram fit.
These arrays are used for statistics calculations.
Parameters
----------
X: ndarray
float array [n_samples, n_dim], the input array of coordinates
y: ndarray
float array [n_samples], the input array of measurement values
variogram_function: callable
function that will be called to evaluate variogram model
variogram_model_parameters: list
user-specified parameters for variogram model
coordinates_type: str
type of coordinates in X array, can be 'euclidean' for standard
rectangular coordinates or 'geographic' if the coordinates are lat/lon
Returns
-------
delta: ndarray
residuals between observed values and kriged estimates for those values
sigma: ndarray
mean error in kriging estimates
epsilon: ndarray
residuals normalized by their mean error
"""
delta = np.zeros(y.shape)
sigma = np.zeros(y.shape)
for i in range(y.shape[0]):
# skip the first value in the kriging problem
if i == 0:
continue
else:
k, ss = _krige(X[:i, :], y[:i], X[i, :], variogram_function,
variogram_model_parameters, coordinates_type)
# if the estimation error is zero, it's probably because
# the evaluation point X[i, :] is really close to one of the
# kriging system points in X[:i, :]...
# in the case of zero estimation error, the results are not stored
if np.absolute(ss) < eps:
continue
delta[i] = y[i] - k
sigma[i] = np.sqrt(ss)
# only use non-zero entries in these arrays... sigma is used to pull out
# non-zero entries in both cases because it is guaranteed to be positive,
# whereas delta can be either positive or negative
delta = delta[sigma > eps]
sigma = sigma[sigma > eps]
epsilon = delta/sigma
return delta, sigma, epsilon
|
[
"def",
"_find_statistics",
"(",
"X",
",",
"y",
",",
"variogram_function",
",",
"variogram_model_parameters",
",",
"coordinates_type",
")",
":",
"delta",
"=",
"np",
".",
"zeros",
"(",
"y",
".",
"shape",
")",
"sigma",
"=",
"np",
".",
"zeros",
"(",
"y",
".",
"shape",
")",
"for",
"i",
"in",
"range",
"(",
"y",
".",
"shape",
"[",
"0",
"]",
")",
":",
"# skip the first value in the kriging problem",
"if",
"i",
"==",
"0",
":",
"continue",
"else",
":",
"k",
",",
"ss",
"=",
"_krige",
"(",
"X",
"[",
":",
"i",
",",
":",
"]",
",",
"y",
"[",
":",
"i",
"]",
",",
"X",
"[",
"i",
",",
":",
"]",
",",
"variogram_function",
",",
"variogram_model_parameters",
",",
"coordinates_type",
")",
"# if the estimation error is zero, it's probably because",
"# the evaluation point X[i, :] is really close to one of the",
"# kriging system points in X[:i, :]...",
"# in the case of zero estimation error, the results are not stored",
"if",
"np",
".",
"absolute",
"(",
"ss",
")",
"<",
"eps",
":",
"continue",
"delta",
"[",
"i",
"]",
"=",
"y",
"[",
"i",
"]",
"-",
"k",
"sigma",
"[",
"i",
"]",
"=",
"np",
".",
"sqrt",
"(",
"ss",
")",
"# only use non-zero entries in these arrays... sigma is used to pull out",
"# non-zero entries in both cases because it is guaranteed to be positive,",
"# whereas delta can be either positive or negative",
"delta",
"=",
"delta",
"[",
"sigma",
">",
"eps",
"]",
"sigma",
"=",
"sigma",
"[",
"sigma",
">",
"eps",
"]",
"epsilon",
"=",
"delta",
"/",
"sigma",
"return",
"delta",
",",
"sigma",
",",
"epsilon"
] |
Calculates variogram fit statistics.
Returns the delta, sigma, and epsilon values for the variogram fit.
These arrays are used for statistics calculations.
Parameters
----------
X: ndarray
float array [n_samples, n_dim], the input array of coordinates
y: ndarray
float array [n_samples], the input array of measurement values
variogram_function: callable
function that will be called to evaluate variogram model
variogram_model_parameters: list
user-specified parameters for variogram model
coordinates_type: str
type of coordinates in X array, can be 'euclidean' for standard
rectangular coordinates or 'geographic' if the coordinates are lat/lon
Returns
-------
delta: ndarray
residuals between observed values and kriged estimates for those values
sigma: ndarray
mean error in kriging estimates
epsilon: ndarray
residuals normalized by their mean error
|
[
"Calculates",
"variogram",
"fit",
"statistics",
".",
"Returns",
"the",
"delta",
"sigma",
"and",
"epsilon",
"values",
"for",
"the",
"variogram",
"fit",
".",
"These",
"arrays",
"are",
"used",
"for",
"statistics",
"calculations",
"."
] |
a4db3003b0b5688658c12faeb95a5a8b2b14b433
|
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/core.py#L669-L729
|
train
|
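The arrays returned by _find_statistics feed the Q1/Q2/cR statistics printed by print_statistics above. The formulas below follow the Kitanidis cross-validation convention that PyKrige's companion helpers appear to use; treat them as an assumption, since those helpers are not shown in this excerpt:

import numpy as np

def linear_variogram(params, d):
    # hypothetical model, as in the earlier sketches
    return params[0] * d + params[1]

X = np.random.rand(40, 2)
y = np.random.rand(40)
delta, sigma, epsilon = _find_statistics(X, y, linear_variogram,
                                         [1.0, 0.0], 'euclidean')
# Assumed conventional definitions (not confirmed by this excerpt):
Q1 = abs(np.sum(epsilon) / (epsilon.shape[0] - 1))   # ideally close to 0
Q2 = np.sum(epsilon ** 2) / (epsilon.shape[0] - 1)   # ideally close to 1
cR = Q2 * np.exp(np.sum(np.log(sigma ** 2)) / sigma.shape[0])  # smaller is better
print(Q1, Q2, cR)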
bsmurphy/PyKrige
|
pykrige/rk.py
|
RegressionKriging.fit
|
def fit(self, p, x, y):
"""
fit the regression method and also Krige the residual
Parameters
----------
p: ndarray
(Ns, d) array of predictor variables (Ns samples, d dimensions)
for regression
x: ndarray
ndarray of (x, y) points. Needs to be a (Ns, 2) array
        corresponding to the lon/lat, for example, for 2d regression kriging.
array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging
y: ndarray
array of targets (Ns, )
"""
self.regression_model.fit(p, y)
ml_pred = self.regression_model.predict(p)
print('Finished learning regression model')
# residual=y-ml_pred
self.krige.fit(x=x, y=y - ml_pred)
print('Finished kriging residuals')
|
python
|
def fit(self, p, x, y):
"""
fit the regression method and also Krige the residual
Parameters
----------
p: ndarray
(Ns, d) array of predictor variables (Ns samples, d dimensions)
for regression
x: ndarray
ndarray of (x, y) points. Needs to be a (Ns, 2) array
        corresponding to the lon/lat, for example, for 2d regression kriging.
array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging
y: ndarray
array of targets (Ns, )
"""
self.regression_model.fit(p, y)
ml_pred = self.regression_model.predict(p)
print('Finished learning regression model')
# residual=y-ml_pred
self.krige.fit(x=x, y=y - ml_pred)
print('Finished kriging residuals')
|
[
"def",
"fit",
"(",
"self",
",",
"p",
",",
"x",
",",
"y",
")",
":",
"self",
".",
"regression_model",
".",
"fit",
"(",
"p",
",",
"y",
")",
"ml_pred",
"=",
"self",
".",
"regression_model",
".",
"predict",
"(",
"p",
")",
"print",
"(",
"'Finished learning regression model'",
")",
"# residual=y-ml_pred",
"self",
".",
"krige",
".",
"fit",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
"-",
"ml_pred",
")",
"print",
"(",
"'Finished kriging residuals'",
")"
] |
fit the regression method and also Krige the residual
Parameters
----------
p: ndarray
(Ns, d) array of predictor variables (Ns samples, d dimensions)
for regression
x: ndarray
ndarray of (x, y) points. Needs to be a (Ns, 2) array
        corresponding to the lon/lat, for example, for 2d regression kriging.
array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging
y: ndarray
array of targets (Ns, )
|
[
"fit",
"the",
"regression",
"method",
"and",
"also",
"Krige",
"the",
"residual"
] |
a4db3003b0b5688658c12faeb95a5a8b2b14b433
|
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/rk.py#L195-L216
|
train
|
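A minimal usage sketch for RegressionKriging; the constructor arguments are assumed from the PyKrige examples (regression_model plus standard kriging options), and the data is synthetic:

import numpy as np
from sklearn.linear_model import LinearRegression
from pykrige.rk import RegressionKriging

p = np.random.rand(100, 3)          # predictor variables
x = np.random.rand(100, 2)          # lon/lat coordinates
y = p @ np.array([1.0, -2.0, 0.5]) + 0.1 * np.random.rand(100)

rk = RegressionKriging(regression_model=LinearRegression(),
                       n_closest_points=10)
rk.fit(p, x, y)
print(rk.score(p, x, y))            # R^2 of regression + kriged residuals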
bsmurphy/PyKrige
|
pykrige/rk.py
|
RegressionKriging.score
|
def score(self, p, x, y, sample_weight=None):
"""
Overloading default regression score method
Parameters
----------
p: ndarray
(Ns, d) array of predictor variables (Ns samples, d dimensions)
for regression
x: ndarray
ndarray of (x, y) points. Needs to be a (Ns, 2) array
corresponding to the lon/lat, for example.
array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging
y: ndarray
array of targets (Ns, )
"""
return r2_score(y_pred=self.predict(p, x),
y_true=y,
sample_weight=sample_weight)
|
python
|
def score(self, p, x, y, sample_weight=None):
"""
Overloading default regression score method
Parameters
----------
p: ndarray
(Ns, d) array of predictor variables (Ns samples, d dimensions)
for regression
x: ndarray
ndarray of (x, y) points. Needs to be a (Ns, 2) array
corresponding to the lon/lat, for example.
array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging
y: ndarray
array of targets (Ns, )
"""
return r2_score(y_pred=self.predict(p, x),
y_true=y,
sample_weight=sample_weight)
|
[
"def",
"score",
"(",
"self",
",",
"p",
",",
"x",
",",
"y",
",",
"sample_weight",
"=",
"None",
")",
":",
"return",
"r2_score",
"(",
"y_pred",
"=",
"self",
".",
"predict",
"(",
"p",
",",
"x",
")",
",",
"y_true",
"=",
"y",
",",
"sample_weight",
"=",
"sample_weight",
")"
] |
Overloading default regression score method
Parameters
----------
p: ndarray
(Ns, d) array of predictor variables (Ns samples, d dimensions)
for regression
x: ndarray
ndarray of (x, y) points. Needs to be a (Ns, 2) array
corresponding to the lon/lat, for example.
array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging
y: ndarray
array of targets (Ns, )
|
[
"Overloading",
"default",
"regression",
"score",
"method"
] |
a4db3003b0b5688658c12faeb95a5a8b2b14b433
|
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/rk.py#L254-L273
|
train
|
bsmurphy/PyKrige
|
pykrige/ok3d.py
|
OrdinaryKriging3D._exec_loop_moving_window
|
def _exec_loop_moving_window(self, a_all, bd_all, mask, bd_idx):
"""Solves the kriging system by looping over all specified points.
Uses only a certain number of closest points. Not very memory intensive,
but the loop is done in pure Python.
"""
        import scipy.linalg
npt = bd_all.shape[0]
n = bd_idx.shape[1]
kvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
for i in np.nonzero(~mask)[0]:
b_selector = bd_idx[i]
bd = bd_all[i]
a_selector = np.concatenate((b_selector, np.array([a_all.shape[0] - 1])))
a = a_all[a_selector[:, None], a_selector]
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_value = False
zero_index = None
b = np.zeros((n+1, 1))
b[:n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
if zero_value:
b[zero_index[0], 0] = 0.0
b[n, 0] = 1.0
x = scipy.linalg.solve(a, b)
kvalues[i] = x[:n, 0].dot(self.VALUES[b_selector])
sigmasq[i] = - x[:, 0].dot(b[:, 0])
return kvalues, sigmasq
|
python
|
def _exec_loop_moving_window(self, a_all, bd_all, mask, bd_idx):
"""Solves the kriging system by looping over all specified points.
Uses only a certain number of closest points. Not very memory intensive,
but the loop is done in pure Python.
"""
        import scipy.linalg
npt = bd_all.shape[0]
n = bd_idx.shape[1]
kvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
for i in np.nonzero(~mask)[0]:
b_selector = bd_idx[i]
bd = bd_all[i]
a_selector = np.concatenate((b_selector, np.array([a_all.shape[0] - 1])))
a = a_all[a_selector[:, None], a_selector]
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_value = False
zero_index = None
b = np.zeros((n+1, 1))
b[:n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
if zero_value:
b[zero_index[0], 0] = 0.0
b[n, 0] = 1.0
x = scipy.linalg.solve(a, b)
kvalues[i] = x[:n, 0].dot(self.VALUES[b_selector])
sigmasq[i] = - x[:, 0].dot(b[:, 0])
return kvalues, sigmasq
|
[
"def",
"_exec_loop_moving_window",
"(",
"self",
",",
"a_all",
",",
"bd_all",
",",
"mask",
",",
"bd_idx",
")",
":",
"import",
"scipy",
".",
"linalg",
".",
"lapack",
"npt",
"=",
"bd_all",
".",
"shape",
"[",
"0",
"]",
"n",
"=",
"bd_idx",
".",
"shape",
"[",
"1",
"]",
"kvalues",
"=",
"np",
".",
"zeros",
"(",
"npt",
")",
"sigmasq",
"=",
"np",
".",
"zeros",
"(",
"npt",
")",
"for",
"i",
"in",
"np",
".",
"nonzero",
"(",
"~",
"mask",
")",
"[",
"0",
"]",
":",
"b_selector",
"=",
"bd_idx",
"[",
"i",
"]",
"bd",
"=",
"bd_all",
"[",
"i",
"]",
"a_selector",
"=",
"np",
".",
"concatenate",
"(",
"(",
"b_selector",
",",
"np",
".",
"array",
"(",
"[",
"a_all",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
"]",
")",
")",
")",
"a",
"=",
"a_all",
"[",
"a_selector",
"[",
":",
",",
"None",
"]",
",",
"a_selector",
"]",
"if",
"np",
".",
"any",
"(",
"np",
".",
"absolute",
"(",
"bd",
")",
"<=",
"self",
".",
"eps",
")",
":",
"zero_value",
"=",
"True",
"zero_index",
"=",
"np",
".",
"where",
"(",
"np",
".",
"absolute",
"(",
"bd",
")",
"<=",
"self",
".",
"eps",
")",
"else",
":",
"zero_value",
"=",
"False",
"zero_index",
"=",
"None",
"b",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
"+",
"1",
",",
"1",
")",
")",
"b",
"[",
":",
"n",
",",
"0",
"]",
"=",
"-",
"self",
".",
"variogram_function",
"(",
"self",
".",
"variogram_model_parameters",
",",
"bd",
")",
"if",
"zero_value",
":",
"b",
"[",
"zero_index",
"[",
"0",
"]",
",",
"0",
"]",
"=",
"0.0",
"b",
"[",
"n",
",",
"0",
"]",
"=",
"1.0",
"x",
"=",
"scipy",
".",
"linalg",
".",
"solve",
"(",
"a",
",",
"b",
")",
"kvalues",
"[",
"i",
"]",
"=",
"x",
"[",
":",
"n",
",",
"0",
"]",
".",
"dot",
"(",
"self",
".",
"VALUES",
"[",
"b_selector",
"]",
")",
"sigmasq",
"[",
"i",
"]",
"=",
"-",
"x",
"[",
":",
",",
"0",
"]",
".",
"dot",
"(",
"b",
"[",
":",
",",
"0",
"]",
")",
"return",
"kvalues",
",",
"sigmasq"
] |
Solves the kriging system by looping over all specified points.
Uses only a certain number of closest points. Not very memory intensive,
but the loop is done in pure Python.
|
[
"Solves",
"the",
"kriging",
"system",
"by",
"looping",
"over",
"all",
"specified",
"points",
".",
"Uses",
"only",
"a",
"certain",
"number",
"of",
"closest",
"points",
".",
"Not",
"very",
"memory",
"intensive",
"but",
"the",
"loop",
"is",
"done",
"in",
"pure",
"Python",
"."
] |
a4db3003b0b5688658c12faeb95a5a8b2b14b433
|
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/ok3d.py#L524-L560
|
train
|
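The per-point distances bd_all and neighbor indices bd_idx that this loop consumes are typically built with a KD-tree; a sketch of that selection step is below (an assumption about the caller, since the execute() path that prepares these arrays is not shown here):

import numpy as np
from scipy.spatial import cKDTree

data_xyz = np.random.rand(500, 3)      # measurement locations
query_xyz = np.random.rand(20, 3)      # kriging points
n_closest_points = 12

tree = cKDTree(data_xyz)
bd_all, bd_idx = tree.query(query_xyz, k=n_closest_points)
# bd_all[i] holds the distances from query point i to its 12 nearest
# data points, and bd_idx[i] their row indices into data_xyz -- exactly
# the per-point inputs consumed by _exec_loop_moving_window.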
limodou/uliweb
|
uliweb/lib/werkzeug/debug/util.py
|
get_frame_info
|
def get_frame_info(tb, context_lines=7, simple=False):
"""
Return a dict of information about a given traceback.
"""
# line numbers / function / variables
lineno = tb.tb_lineno
function = tb.tb_frame.f_code.co_name
variables = tb.tb_frame.f_locals
files = {}
# get filename
if simple:
fn = tb.tb_frame.f_code.co_filename
else:
fn = tb.tb_frame.f_globals.get('__file__')
if not fn:
fn = os.path.realpath(inspect.getsourcefile(tb) or
inspect.getfile(tb))
if fn[-4:] in ('.pyc', '.pyo'):
fn = fn[:-1]
    # if the filename exists, just read the file directly
# get loader
loader = None
if not os.path.exists(fn):
loader = tb.tb_frame.f_globals.get('__loader__')
while not loader and tb.tb_next:
tb = tb.tb_next
loader = tb.tb_frame.f_globals.get('__loader__')
# sourcecode
source = ''
pre_context, post_context = [], []
context_line = raw_context_line = context_lineno = None
try:
if loader:
source = loader.get_source(fn)
else:
if not fn in files:
source = open(fn).read()
files[fn] = source
else:
source = files[fn]
    except Exception:
pass
else:
try:
raw_context_line = source.splitlines()[lineno - 1].strip()
except IndexError:
pass
if not simple:
parsed_source = highlight_python(source)
lbound = max(0, lineno - context_lines - 1)
ubound = lineno + context_lines
try:
context_line = parsed_source[lineno - 1]
pre_context = parsed_source[lbound:lineno - 1]
post_context = parsed_source[lineno:ubound]
except IndexError as e:
pass
context_lineno = lbound
if isinstance(fn, unicode):
fn = fn.encode('utf-8')
return {
'tb': tb,
'filename': fn,
'basename': os.path.basename(fn),
'loader': loader,
'function': function,
'lineno': lineno,
'vars': variables,
'pre_context': pre_context,
'context_line': context_line,
'raw_context_line': raw_context_line,
'post_context': post_context,
'context_lineno': context_lineno,
'source': source
}
|
python
|
def get_frame_info(tb, context_lines=7, simple=False):
"""
Return a dict of information about a given traceback.
"""
# line numbers / function / variables
lineno = tb.tb_lineno
function = tb.tb_frame.f_code.co_name
variables = tb.tb_frame.f_locals
files = {}
# get filename
if simple:
fn = tb.tb_frame.f_code.co_filename
else:
fn = tb.tb_frame.f_globals.get('__file__')
if not fn:
fn = os.path.realpath(inspect.getsourcefile(tb) or
inspect.getfile(tb))
if fn[-4:] in ('.pyc', '.pyo'):
fn = fn[:-1]
    # if the filename exists, just read the file directly
# get loader
loader = None
if not os.path.exists(fn):
loader = tb.tb_frame.f_globals.get('__loader__')
while not loader and tb.tb_next:
tb = tb.tb_next
loader = tb.tb_frame.f_globals.get('__loader__')
# sourcecode
source = ''
pre_context, post_context = [], []
context_line = raw_context_line = context_lineno = None
try:
if loader:
source = loader.get_source(fn)
else:
if not fn in files:
source = open(fn).read()
files[fn] = source
else:
source = files[fn]
    except Exception:
pass
else:
try:
raw_context_line = source.splitlines()[lineno - 1].strip()
except IndexError:
pass
if not simple:
parsed_source = highlight_python(source)
lbound = max(0, lineno - context_lines - 1)
ubound = lineno + context_lines
try:
context_line = parsed_source[lineno - 1]
pre_context = parsed_source[lbound:lineno - 1]
post_context = parsed_source[lineno:ubound]
except IndexError as e:
pass
context_lineno = lbound
if isinstance(fn, unicode):
fn = fn.encode('utf-8')
return {
'tb': tb,
'filename': fn,
'basename': os.path.basename(fn),
'loader': loader,
'function': function,
'lineno': lineno,
'vars': variables,
'pre_context': pre_context,
'context_line': context_line,
'raw_context_line': raw_context_line,
'post_context': post_context,
'context_lineno': context_lineno,
'source': source
}
|
[
"def",
"get_frame_info",
"(",
"tb",
",",
"context_lines",
"=",
"7",
",",
"simple",
"=",
"False",
")",
":",
"# line numbers / function / variables",
"lineno",
"=",
"tb",
".",
"tb_lineno",
"function",
"=",
"tb",
".",
"tb_frame",
".",
"f_code",
".",
"co_name",
"variables",
"=",
"tb",
".",
"tb_frame",
".",
"f_locals",
"files",
"=",
"{",
"}",
"# get filename",
"if",
"simple",
":",
"fn",
"=",
"tb",
".",
"tb_frame",
".",
"f_code",
".",
"co_filename",
"else",
":",
"fn",
"=",
"tb",
".",
"tb_frame",
".",
"f_globals",
".",
"get",
"(",
"'__file__'",
")",
"if",
"not",
"fn",
":",
"fn",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"inspect",
".",
"getsourcefile",
"(",
"tb",
")",
"or",
"inspect",
".",
"getfile",
"(",
"tb",
")",
")",
"if",
"fn",
"[",
"-",
"4",
":",
"]",
"in",
"(",
"'.pyc'",
",",
"'.pyo'",
")",
":",
"fn",
"=",
"fn",
"[",
":",
"-",
"1",
"]",
"#if filename is existed, then just read the file",
"# get loader",
"loader",
"=",
"None",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"fn",
")",
":",
"loader",
"=",
"tb",
".",
"tb_frame",
".",
"f_globals",
".",
"get",
"(",
"'__loader__'",
")",
"while",
"not",
"loader",
"and",
"tb",
".",
"tb_next",
":",
"tb",
"=",
"tb",
".",
"tb_next",
"loader",
"=",
"tb",
".",
"tb_frame",
".",
"f_globals",
".",
"get",
"(",
"'__loader__'",
")",
"# sourcecode",
"source",
"=",
"''",
"pre_context",
",",
"post_context",
"=",
"[",
"]",
",",
"[",
"]",
"context_line",
"=",
"raw_context_line",
"=",
"context_lineno",
"=",
"None",
"try",
":",
"if",
"loader",
":",
"source",
"=",
"loader",
".",
"get_source",
"(",
"fn",
")",
"else",
":",
"if",
"not",
"fn",
"in",
"files",
":",
"source",
"=",
"open",
"(",
"fn",
")",
".",
"read",
"(",
")",
"files",
"[",
"fn",
"]",
"=",
"source",
"else",
":",
"source",
"=",
"files",
"[",
"fn",
"]",
"except",
":",
"pass",
"else",
":",
"try",
":",
"raw_context_line",
"=",
"source",
".",
"splitlines",
"(",
")",
"[",
"lineno",
"-",
"1",
"]",
".",
"strip",
"(",
")",
"except",
"IndexError",
":",
"pass",
"if",
"not",
"simple",
":",
"parsed_source",
"=",
"highlight_python",
"(",
"source",
")",
"lbound",
"=",
"max",
"(",
"0",
",",
"lineno",
"-",
"context_lines",
"-",
"1",
")",
"ubound",
"=",
"lineno",
"+",
"context_lines",
"try",
":",
"context_line",
"=",
"parsed_source",
"[",
"lineno",
"-",
"1",
"]",
"pre_context",
"=",
"parsed_source",
"[",
"lbound",
":",
"lineno",
"-",
"1",
"]",
"post_context",
"=",
"parsed_source",
"[",
"lineno",
":",
"ubound",
"]",
"except",
"IndexError",
"as",
"e",
":",
"pass",
"context_lineno",
"=",
"lbound",
"if",
"isinstance",
"(",
"fn",
",",
"unicode",
")",
":",
"fn",
"=",
"fn",
".",
"encode",
"(",
"'utf-8'",
")",
"return",
"{",
"'tb'",
":",
"tb",
",",
"'filename'",
":",
"fn",
",",
"'basename'",
":",
"os",
".",
"path",
".",
"basename",
"(",
"fn",
")",
",",
"'loader'",
":",
"loader",
",",
"'function'",
":",
"function",
",",
"'lineno'",
":",
"lineno",
",",
"'vars'",
":",
"variables",
",",
"'pre_context'",
":",
"pre_context",
",",
"'context_line'",
":",
"context_line",
",",
"'raw_context_line'",
":",
"raw_context_line",
",",
"'post_context'",
":",
"post_context",
",",
"'context_lineno'",
":",
"context_lineno",
",",
"'source'",
":",
"source",
"}"
] |
Return a dict of information about a given traceback.
|
[
"Return",
"a",
"dict",
"of",
"information",
"about",
"a",
"given",
"traceback",
"."
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/debug/util.py#L204-L284
|
train
|
limodou/uliweb
|
uliweb/lib/werkzeug/debug/util.py
|
PythonParser.get_html_output
|
def get_html_output(self):
""" Return line generator. """
def html_splitlines(lines):
# this cool function was taken from trac.
# http://projects.edgewall.com/trac/
open_tag_re = re.compile(r'<(\w+)(\s.*)?[^/]?>')
close_tag_re = re.compile(r'</(\w+)>')
open_tags = []
for line in lines:
for tag in open_tags:
line = tag.group(0) + line
open_tags = []
for tag in open_tag_re.finditer(line):
open_tags.append(tag)
open_tags.reverse()
for ctag in close_tag_re.finditer(line):
for otag in open_tags:
if otag.group(1) == ctag.group(1):
open_tags.remove(otag)
break
for tag in open_tags:
line += '</%s>' % tag.group(1)
yield line
if self.error:
return escape(self.raw).splitlines()
return list(html_splitlines(self.out.getvalue().splitlines()))
|
python
|
def get_html_output(self):
""" Return line generator. """
def html_splitlines(lines):
# this cool function was taken from trac.
# http://projects.edgewall.com/trac/
open_tag_re = re.compile(r'<(\w+)(\s.*)?[^/]?>')
close_tag_re = re.compile(r'</(\w+)>')
open_tags = []
for line in lines:
for tag in open_tags:
line = tag.group(0) + line
open_tags = []
for tag in open_tag_re.finditer(line):
open_tags.append(tag)
open_tags.reverse()
for ctag in close_tag_re.finditer(line):
for otag in open_tags:
if otag.group(1) == ctag.group(1):
open_tags.remove(otag)
break
for tag in open_tags:
line += '</%s>' % tag.group(1)
yield line
if self.error:
return escape(self.raw).splitlines()
return list(html_splitlines(self.out.getvalue().splitlines()))
|
[
"def",
"get_html_output",
"(",
"self",
")",
":",
"def",
"html_splitlines",
"(",
"lines",
")",
":",
"# this cool function was taken from trac.",
"# http://projects.edgewall.com/trac/",
"open_tag_re",
"=",
"re",
".",
"compile",
"(",
"r'<(\\w+)(\\s.*)?[^/]?>'",
")",
"close_tag_re",
"=",
"re",
".",
"compile",
"(",
"r'</(\\w+)>'",
")",
"open_tags",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"for",
"tag",
"in",
"open_tags",
":",
"line",
"=",
"tag",
".",
"group",
"(",
"0",
")",
"+",
"line",
"open_tags",
"=",
"[",
"]",
"for",
"tag",
"in",
"open_tag_re",
".",
"finditer",
"(",
"line",
")",
":",
"open_tags",
".",
"append",
"(",
"tag",
")",
"open_tags",
".",
"reverse",
"(",
")",
"for",
"ctag",
"in",
"close_tag_re",
".",
"finditer",
"(",
"line",
")",
":",
"for",
"otag",
"in",
"open_tags",
":",
"if",
"otag",
".",
"group",
"(",
"1",
")",
"==",
"ctag",
".",
"group",
"(",
"1",
")",
":",
"open_tags",
".",
"remove",
"(",
"otag",
")",
"break",
"for",
"tag",
"in",
"open_tags",
":",
"line",
"+=",
"'</%s>'",
"%",
"tag",
".",
"group",
"(",
"1",
")",
"yield",
"line",
"if",
"self",
".",
"error",
":",
"return",
"escape",
"(",
"self",
".",
"raw",
")",
".",
"splitlines",
"(",
")",
"return",
"list",
"(",
"html_splitlines",
"(",
"self",
".",
"out",
".",
"getvalue",
"(",
")",
".",
"splitlines",
"(",
")",
")",
")"
] |
Return line generator.
|
[
"Return",
"line",
"generator",
"."
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/debug/util.py#L148-L174
|
train
|
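A minimal standalone sketch of the tag-rebalancing idea in html_splitlines above: tags left open on a line are closed at its end and re-opened on the next, so every yielded line is well-formed HTML on its own (the regexes are copied from the snippet; the input is illustrative):

import re

def balance_lines(lines):
    open_tag_re = re.compile(r'<(\w+)(\s.*)?[^/]?>')
    close_tag_re = re.compile(r'</(\w+)>')
    open_tags = []
    for line in lines:
        for tag in open_tags:                 # re-open tags carried over
            line = tag.group(0) + line
        open_tags = list(open_tag_re.finditer(line))
        open_tags.reverse()
        for ctag in close_tag_re.finditer(line):
            for otag in open_tags:
                if otag.group(1) == ctag.group(1):
                    open_tags.remove(otag)
                    break
        for tag in open_tags:                 # close what is still open
            line += '</%s>' % tag.group(1)
        yield line

print(list(balance_lines(['<span class="kw">def', 'foo():</span>'])))
# ['<span class="kw">def</span>', '<span class="kw">foo():</span>']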
limodou/uliweb
|
uliweb/utils/generic.py
|
get_columns
|
def get_columns(model=None, fields=None, meta=None):
"""
Get model columns list
"""
if model:
M = get_model(model)
else:
M = None
if fields is not None:
f = fields
if M:
if meta and hasattr(M, meta):
            m = getattr(M, meta)
if hasattr(m, 'fields'):
f = m.fields
else:
f = M._fields_list
else:
f = M._fields_list
columns = []
for x in f:
if isinstance(x, str): # so x is field_name
field_name = x
elif isinstance(x, dict):
field_name = x['name']
else:
raise UliwebError("Field definition is not right, it should be just like str or {'name':xxx}")
if '.' in field_name:
model_name, field_name = field_name.split('.')
M = get_model(model_name)
if not M:
raise UliwebError("Model can't be empty, because field name not has `model.` prefix")
if field_name in M.c:
columns.append(M.c[field_name])
return columns
|
python
|
def get_columns(model=None, fields=None, meta=None):
"""
Get model columns list
"""
if model:
M = get_model(model)
else:
M = None
if fields is not None:
f = fields
if M:
if meta and hasattr(M, meta):
            m = getattr(M, meta)
if hasattr(m, 'fields'):
f = m.fields
else:
f = M._fields_list
else:
f = M._fields_list
columns = []
for x in f:
if isinstance(x, str): # so x is field_name
field_name = x
elif isinstance(x, dict):
field_name = x['name']
else:
raise UliwebError("Field definition is not right, it should be just like str or {'name':xxx}")
if '.' in field_name:
model_name, field_name = field_name.split('.')
M = get_model(model_name)
if not M:
raise UliwebError("Model can't be empty, because field name not has `model.` prefix")
if field_name in M.c:
columns.append(M.c[field_name])
return columns
|
[
"def",
"get_columns",
"(",
"model",
"=",
"None",
",",
"fields",
"=",
"None",
",",
"meta",
"=",
"None",
")",
":",
"if",
"model",
":",
"M",
"=",
"get_model",
"(",
"model",
")",
"else",
":",
"M",
"=",
"None",
"if",
"fields",
"is",
"not",
"None",
":",
"f",
"=",
"fields",
"if",
"M",
":",
"if",
"meta",
"and",
"hasattr",
"(",
"M",
",",
"meta",
")",
":",
"m",
"=",
"getattr",
"(",
"model",
",",
"meta",
")",
"if",
"hasattr",
"(",
"m",
",",
"'fields'",
")",
":",
"f",
"=",
"m",
".",
"fields",
"else",
":",
"f",
"=",
"M",
".",
"_fields_list",
"else",
":",
"f",
"=",
"M",
".",
"_fields_list",
"columns",
"=",
"[",
"]",
"for",
"x",
"in",
"f",
":",
"if",
"isinstance",
"(",
"x",
",",
"str",
")",
":",
"# so x is field_name\r",
"field_name",
"=",
"x",
"elif",
"isinstance",
"(",
"x",
",",
"dict",
")",
":",
"field_name",
"=",
"x",
"[",
"'name'",
"]",
"else",
":",
"raise",
"UliwebError",
"(",
"\"Field definition is not right, it should be just like str or {'name':xxx}\"",
")",
"if",
"'.'",
"in",
"field_name",
":",
"model_name",
",",
"field_name",
"=",
"field_name",
".",
"split",
"(",
"'.'",
")",
"M",
"=",
"get_model",
"(",
"model_name",
")",
"if",
"not",
"M",
":",
"raise",
"UliwebError",
"(",
"\"Model can't be empty, because field name not has `model.` prefix\"",
")",
"if",
"field_name",
"in",
"M",
".",
"c",
":",
"columns",
".",
"append",
"(",
"M",
".",
"c",
"[",
"field_name",
"]",
")",
"return",
"columns"
] |
Get model columns list
|
[
"Get",
"model",
"columns",
"list"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/generic.py#L381-L421
|
train
|
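A hypothetical usage sketch for get_columns (the 'user' and 'group' models and their fields are invented for illustration):

# all columns of the model
cols = get_columns('user')
# a subset, mixing plain names, dict definitions and `model.` prefixed names
cols = get_columns('user', fields=['username', {'name': 'email'}, 'group.name'])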
limodou/uliweb
|
uliweb/utils/generic.py
|
get_field
|
def get_field(name, model=None):
"""
get model field according to name, the name can be like `model.column`
"""
if '.' in name:
m, name = name.split('.')
model = get_model(m)
if model:
return getattr(model, name, None)
|
python
|
def get_field(name, model=None):
"""
get model field according to name, the name can be like `model.column`
"""
if '.' in name:
m, name = name.split('.')
model = get_model(m)
if model:
return getattr(model, name, None)
|
[
"def",
"get_field",
"(",
"name",
",",
"model",
"=",
"None",
")",
":",
"if",
"'.'",
"in",
"name",
":",
"m",
",",
"name",
"=",
"name",
".",
"split",
"(",
"'.'",
")",
"model",
"=",
"get_model",
"(",
"m",
")",
"if",
"model",
":",
"return",
"getattr",
"(",
"model",
",",
"name",
",",
"None",
")"
] |
get model field according to name, the name can be like `model.column`
|
[
"get",
"model",
"field",
"according",
"to",
"name",
"the",
"name",
"can",
"be",
"like",
"model",
".",
"column"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/generic.py#L497-L506
|
train
|
limodou/uliweb
|
uliweb/utils/generic.py
|
get_column
|
def get_column(name, model=None):
"""
get table column according to name, the name can be like `model.column`
"""
if '.' in name:
m, name = name.split('.')
model = get_model(m)
if model:
return model.c.get(name)
|
python
|
def get_column(name, model=None):
"""
get table column according to name, the name can be like `model.column`
"""
if '.' in name:
m, name = name.split('.')
model = get_model(m)
if model:
return model.c.get(name)
|
[
"def",
"get_column",
"(",
"name",
",",
"model",
"=",
"None",
")",
":",
"if",
"'.'",
"in",
"name",
":",
"m",
",",
"name",
"=",
"name",
".",
"split",
"(",
"'.'",
")",
"model",
"=",
"get_model",
"(",
"m",
")",
"if",
"model",
":",
"return",
"model",
".",
"c",
".",
"get",
"(",
"name",
")"
] |
get table column according to name, the name can be like `model.column`
|
[
"get",
"table",
"column",
"according",
"to",
"name",
"the",
"name",
"can",
"be",
"like",
"model",
".",
"column"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/generic.py#L508-L517
|
train
|
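get_field and get_column above differ only in what they return: the ORM property versus the underlying table column from model.c. A hypothetical usage sketch (User and the field names are invented):

prop = get_field('user.username')            # ORM property, model resolved by name
prop = get_field('username', model=User)     # same, with an explicit model class
col = get_column('user.username')            # sqlalchemy Column from model.c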
limodou/uliweb
|
uliweb/utils/generic.py
|
AddView._process_file
|
def _process_file(self, obj, fobj, field):
"""
obj is record object
fobj is data
field is FileField instance
"""
from uliweb import settings
paths = []
upload_to = self.upload_to or self._get_upload_path(field, 'upload_to', obj)
if upload_to:
self.fileserving.to_path = upload_to
upload_to_sub = self.upload_to_sub or self._get_upload_path(field, 'upload_to_sub', obj)
if upload_to_sub:
paths.append(upload_to_sub)
paths.append(fobj['filename'])
return self.fileserving.save_file(os.path.join(*paths),
fobj['file'], replace=self.file_replace,
convert=self.file_convert)
|
python
|
def _process_file(self, obj, fobj, field):
"""
obj is record object
fobj is data
field is FileField instance
"""
from uliweb import settings
paths = []
upload_to = self.upload_to or self._get_upload_path(field, 'upload_to', obj)
if upload_to:
self.fileserving.to_path = upload_to
upload_to_sub = self.upload_to_sub or self._get_upload_path(field, 'upload_to_sub', obj)
if upload_to_sub:
paths.append(upload_to_sub)
paths.append(fobj['filename'])
return self.fileserving.save_file(os.path.join(*paths),
fobj['file'], replace=self.file_replace,
convert=self.file_convert)
|
[
"def",
"_process_file",
"(",
"self",
",",
"obj",
",",
"fobj",
",",
"field",
")",
":",
"from",
"uliweb",
"import",
"settings",
"paths",
"=",
"[",
"]",
"upload_to",
"=",
"self",
".",
"upload_to",
"or",
"self",
".",
"_get_upload_path",
"(",
"field",
",",
"'upload_to'",
",",
"obj",
")",
"if",
"upload_to",
":",
"self",
".",
"fileserving",
".",
"to_path",
"=",
"upload_to",
"upload_to_sub",
"=",
"self",
".",
"upload_to_sub",
"or",
"self",
".",
"_get_upload_path",
"(",
"field",
",",
"'upload_to_sub'",
",",
"obj",
")",
"if",
"upload_to_sub",
":",
"paths",
".",
"append",
"(",
"upload_to_sub",
")",
"paths",
".",
"append",
"(",
"fobj",
"[",
"'filename'",
"]",
")",
"return",
"self",
".",
"fileserving",
".",
"save_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"*",
"paths",
")",
",",
"fobj",
"[",
"'file'",
"]",
",",
"replace",
"=",
"self",
".",
"file_replace",
",",
"convert",
"=",
"self",
".",
"file_convert",
")"
] |
obj is record object
fobj is data
field is FileField instance
|
[
"obj",
"is",
"record",
"object",
"fobj",
"is",
"data",
"field",
"is",
"FileField",
"instance"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/generic.py#L1060-L1079
|
train
|
limodou/uliweb
|
uliweb/utils/generic.py
|
SimpleListView.count
|
def count(self, query):
"""
If query is Select object, this function will try to get count of select
"""
if self.manual:
return self.total
if isinstance(query, Select):
q = query.with_only_columns([func.count()]).order_by(None).limit(None).offset(None)
return do_(q).scalar()
return query.count()
|
python
|
def count(self, query):
"""
If query is Select object, this function will try to get count of select
"""
if self.manual:
return self.total
if isinstance(query, Select):
q = query.with_only_columns([func.count()]).order_by(None).limit(None).offset(None)
return do_(q).scalar()
return query.count()
|
[
"def",
"count",
"(",
"self",
",",
"query",
")",
":",
"if",
"self",
".",
"manual",
":",
"return",
"self",
".",
"total",
"if",
"isinstance",
"(",
"query",
",",
"Select",
")",
":",
"q",
"=",
"query",
".",
"with_only_columns",
"(",
"[",
"func",
".",
"count",
"(",
")",
"]",
")",
".",
"order_by",
"(",
"None",
")",
".",
"limit",
"(",
"None",
")",
".",
"offset",
"(",
"None",
")",
"return",
"do_",
"(",
"q",
")",
".",
"scalar",
"(",
")",
"return",
"query",
".",
"count",
"(",
")"
] |
If query is Select object, this function will try to get count of select
|
[
"If",
"query",
"is",
"Select",
"object",
"this",
"function",
"will",
"try",
"to",
"get",
"count",
"of",
"select"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/generic.py#L1967-L1978
|
train
|
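The Select branch above strips ordering and paging before counting, which is the usual SQLAlchemy trick for turning an arbitrary select into a COUNT(*). A standalone sketch of the same transformation (with_only_columns takes a list here, matching the pre-1.4 SQLAlchemy API this code targets):

from sqlalchemy import func, select
from sqlalchemy.sql import column, table

users = table('users', column('id'), column('name'))
q = select([users]).order_by(users.c.name).limit(20).offset(40)

# drop ORDER BY / LIMIT / OFFSET and replace the column list with COUNT(*)
count_q = q.with_only_columns([func.count()]).order_by(None).limit(None).offset(None)
print(count_q)    # SELECT count(*) AS count_1 FROM users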
limodou/uliweb
|
uliweb/utils/generic.py
|
SimpleListView.get_data
|
def get_data(self, query, fields_convert_map, encoding='utf-8', auto_convert=True,
include_hidden=False, header=None):
"""
        If auto_convert=True, field values will be converted
"""
fields_convert_map = fields_convert_map or {}
d = self.fields_convert_map.copy()
d.update(fields_convert_map)
if isinstance(query, Select):
query = do_(query)
# def get_value(name, value, record):
# convert = d.get(name)
# if convert:
# value = convert(value, record)
# return safe_unicode(value, encoding)
for record in query:
self._cal_sum(record)
row = []
record = self._get_record(record)
if self.before_record_render:
self.before_record_render(record)
if isinstance(record, orm.Model):
model = record.__class__
else:
model = None
for i, x in enumerate(self.table_info['fields_list']):
field = get_field(x['name'], model)
if not field:
field = {'name':x['name']}
else:
field = {'name':x['name'], 'prop':field}
if not include_hidden and x.get('hidden'):
continue
if isinstance(record, orm.Model):
v = make_view_field(field, record, fields_convert_map=d,
auto_convert=auto_convert)
else:
v = make_view_field(field, record, fields_convert_map=d,
auto_convert=auto_convert, value=record[x['name']])
value = v['display']
#value = safe_unicode(v['display'], encoding)
row.append(value)
if header:
ret = dict(zip(header, row))
else:
ret = row
yield ret
total = self._get_sum()
if total:
row = []
for x in total:
v = x
if isinstance(x, str):
v = safe_unicode(x, encoding)
row.append(v)
if header:
ret = dict(zip(header, row))
else:
ret = row
yield ret
|
python
|
def get_data(self, query, fields_convert_map, encoding='utf-8', auto_convert=True,
include_hidden=False, header=None):
"""
        If auto_convert=True, field values will be converted
"""
fields_convert_map = fields_convert_map or {}
d = self.fields_convert_map.copy()
d.update(fields_convert_map)
if isinstance(query, Select):
query = do_(query)
# def get_value(name, value, record):
# convert = d.get(name)
# if convert:
# value = convert(value, record)
# return safe_unicode(value, encoding)
for record in query:
self._cal_sum(record)
row = []
record = self._get_record(record)
if self.before_record_render:
self.before_record_render(record)
if isinstance(record, orm.Model):
model = record.__class__
else:
model = None
for i, x in enumerate(self.table_info['fields_list']):
field = get_field(x['name'], model)
if not field:
field = {'name':x['name']}
else:
field = {'name':x['name'], 'prop':field}
if not include_hidden and x.get('hidden'):
continue
if isinstance(record, orm.Model):
v = make_view_field(field, record, fields_convert_map=d,
auto_convert=auto_convert)
else:
v = make_view_field(field, record, fields_convert_map=d,
auto_convert=auto_convert, value=record[x['name']])
value = v['display']
#value = safe_unicode(v['display'], encoding)
row.append(value)
if header:
ret = dict(zip(header, row))
else:
ret = row
yield ret
total = self._get_sum()
if total:
row = []
for x in total:
v = x
if isinstance(x, str):
v = safe_unicode(x, encoding)
row.append(v)
if header:
ret = dict(zip(header, row))
else:
ret = row
yield ret
|
[
"def",
"get_data",
"(",
"self",
",",
"query",
",",
"fields_convert_map",
",",
"encoding",
"=",
"'utf-8'",
",",
"auto_convert",
"=",
"True",
",",
"include_hidden",
"=",
"False",
",",
"header",
"=",
"None",
")",
":",
"fields_convert_map",
"=",
"fields_convert_map",
"or",
"{",
"}",
"d",
"=",
"self",
".",
"fields_convert_map",
".",
"copy",
"(",
")",
"d",
".",
"update",
"(",
"fields_convert_map",
")",
"if",
"isinstance",
"(",
"query",
",",
"Select",
")",
":",
"query",
"=",
"do_",
"(",
"query",
")",
"# def get_value(name, value, record):\r",
"# convert = d.get(name)\r",
"# if convert:\r",
"# value = convert(value, record)\r",
"# return safe_unicode(value, encoding)\r",
"for",
"record",
"in",
"query",
":",
"self",
".",
"_cal_sum",
"(",
"record",
")",
"row",
"=",
"[",
"]",
"record",
"=",
"self",
".",
"_get_record",
"(",
"record",
")",
"if",
"self",
".",
"before_record_render",
":",
"self",
".",
"before_record_render",
"(",
"record",
")",
"if",
"isinstance",
"(",
"record",
",",
"orm",
".",
"Model",
")",
":",
"model",
"=",
"record",
".",
"__class__",
"else",
":",
"model",
"=",
"None",
"for",
"i",
",",
"x",
"in",
"enumerate",
"(",
"self",
".",
"table_info",
"[",
"'fields_list'",
"]",
")",
":",
"field",
"=",
"get_field",
"(",
"x",
"[",
"'name'",
"]",
",",
"model",
")",
"if",
"not",
"field",
":",
"field",
"=",
"{",
"'name'",
":",
"x",
"[",
"'name'",
"]",
"}",
"else",
":",
"field",
"=",
"{",
"'name'",
":",
"x",
"[",
"'name'",
"]",
",",
"'prop'",
":",
"field",
"}",
"if",
"not",
"include_hidden",
"and",
"x",
".",
"get",
"(",
"'hidden'",
")",
":",
"continue",
"if",
"isinstance",
"(",
"record",
",",
"orm",
".",
"Model",
")",
":",
"v",
"=",
"make_view_field",
"(",
"field",
",",
"record",
",",
"fields_convert_map",
"=",
"d",
",",
"auto_convert",
"=",
"auto_convert",
")",
"else",
":",
"v",
"=",
"make_view_field",
"(",
"field",
",",
"record",
",",
"fields_convert_map",
"=",
"d",
",",
"auto_convert",
"=",
"auto_convert",
",",
"value",
"=",
"record",
"[",
"x",
"[",
"'name'",
"]",
"]",
")",
"value",
"=",
"v",
"[",
"'display'",
"]",
"#value = safe_unicode(v['display'], encoding)\r",
"row",
".",
"append",
"(",
"value",
")",
"if",
"header",
":",
"ret",
"=",
"dict",
"(",
"zip",
"(",
"header",
",",
"row",
")",
")",
"else",
":",
"ret",
"=",
"row",
"yield",
"ret",
"total",
"=",
"self",
".",
"_get_sum",
"(",
")",
"if",
"total",
":",
"row",
"=",
"[",
"]",
"for",
"x",
"in",
"total",
":",
"v",
"=",
"x",
"if",
"isinstance",
"(",
"x",
",",
"str",
")",
":",
"v",
"=",
"safe_unicode",
"(",
"x",
",",
"encoding",
")",
"row",
".",
"append",
"(",
"v",
")",
"if",
"header",
":",
"ret",
"=",
"dict",
"(",
"zip",
"(",
"header",
",",
"row",
")",
")",
"else",
":",
"ret",
"=",
"row",
"yield",
"ret"
] |
If auto_convert=True, field values will be converted
|
[
"If",
"auto_convert",
"=",
"True",
"field",
"values",
"will",
"be",
"converted"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/generic.py#L2070-L2135
|
train
|
limodou/uliweb
|
uliweb/utils/generic.py
|
SimpleListView.objects
|
def objects(self, json_result=False):
"""
        Return a generator of all processed data. It works just like render,
        but instead of returning table or json format data it yields the
        data itself. The data will be processed by fields_convert_map if passed.
"""
self.rows_num = 0
query = self.query()
if not isinstance(query, (orm.Result, list, dict)):
query = do_(query)
for record in query:
self.rows_num += 1
r = self.object(record, json_result)
self._cal_sum(record)
yield r
total = self._render_sum(True)
if total:
yield total
|
python
|
def objects(self, json_result=False):
"""
        Return a generator of all processed data. It works just like render,
        but instead of returning table or json format data it yields the
        data itself. The data will be processed by fields_convert_map if passed.
"""
self.rows_num = 0
query = self.query()
if not isinstance(query, (orm.Result, list, dict)):
query = do_(query)
for record in query:
self.rows_num += 1
r = self.object(record, json_result)
self._cal_sum(record)
yield r
total = self._render_sum(True)
if total:
yield total
|
[
"def",
"objects",
"(",
"self",
",",
"json_result",
"=",
"False",
")",
":",
"self",
".",
"rows_num",
"=",
"0",
"query",
"=",
"self",
".",
"query",
"(",
")",
"if",
"not",
"isinstance",
"(",
"query",
",",
"(",
"orm",
".",
"Result",
",",
"list",
",",
"dict",
")",
")",
":",
"query",
"=",
"do_",
"(",
"query",
")",
"for",
"record",
"in",
"query",
":",
"self",
".",
"rows_num",
"+=",
"1",
"r",
"=",
"self",
".",
"object",
"(",
"record",
",",
"json_result",
")",
"self",
".",
"_cal_sum",
"(",
"record",
")",
"yield",
"r",
"total",
"=",
"self",
".",
"_render_sum",
"(",
"True",
")",
"if",
"total",
":",
"yield",
"total"
] |
Return a generator of all processed data. It works just like render,
but instead of returning table or json format data it yields the
data itself. The data will be processed by fields_convert_map if passed.
|
[
"Return",
"a",
"generator",
"of",
"all",
"processed",
"data",
".",
"It",
"works",
"just",
"like",
"render",
"but",
"instead",
"of",
"returning",
"table",
"or",
"json",
"format",
"data",
"it",
"yields",
"the",
"data",
"itself",
".",
"The",
"data",
"will",
"be",
"processed",
"by",
"fields_convert_map",
"if",
"passed",
"."
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/generic.py#L2275-L2292
|
train
|
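objects() yields every data row and then, if sums were accumulated, one trailing summary row. A minimal standalone sketch of that generator shape:

def rows_with_summary(records):
    total = 0
    for r in records:
        total += r['amount']
        yield r
    yield {'amount': total}    # trailing summary row, like _render_sum above

for row in rows_with_summary([{'amount': 1}, {'amount': 2}]):
    print(row)
# {'amount': 1}  /  {'amount': 2}  /  {'amount': 3}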
limodou/uliweb
|
uliweb/utils/generic.py
|
ListView.query_all
|
def query_all(self):
"""
Query all records without limit and offset.
"""
return self.query_model(self.model, self.condition, order_by=self.order_by,
group_by=self.group_by, having=self.having)
|
python
|
def query_all(self):
"""
Query all records without limit and offset.
"""
return self.query_model(self.model, self.condition, order_by=self.order_by,
group_by=self.group_by, having=self.having)
|
[
"def",
"query_all",
"(",
"self",
")",
":",
"return",
"self",
".",
"query_model",
"(",
"self",
".",
"model",
",",
"self",
".",
"condition",
",",
"order_by",
"=",
"self",
".",
"order_by",
",",
"group_by",
"=",
"self",
".",
"group_by",
",",
"having",
"=",
"self",
".",
"having",
")"
] |
Query all records without limit and offset.
|
[
"Query",
"all",
"records",
"without",
"limit",
"and",
"offset",
"."
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/generic.py#L2636-L2641
|
train
|
limodou/uliweb
|
uliweb/lib/werkzeug/contrib/sessions.py
|
ModificationTrackingDict.copy
|
def copy(self):
"""Create a flat copy of the dict."""
missing = object()
result = object.__new__(self.__class__)
for name in self.__slots__:
val = getattr(self, name, missing)
if val is not missing:
setattr(result, name, val)
return result
|
python
|
def copy(self):
"""Create a flat copy of the dict."""
missing = object()
result = object.__new__(self.__class__)
for name in self.__slots__:
val = getattr(self, name, missing)
if val is not missing:
setattr(result, name, val)
return result
|
[
"def",
"copy",
"(",
"self",
")",
":",
"missing",
"=",
"object",
"(",
")",
"result",
"=",
"object",
".",
"__new__",
"(",
"self",
".",
"__class__",
")",
"for",
"name",
"in",
"self",
".",
"__slots__",
":",
"val",
"=",
"getattr",
"(",
"self",
",",
"name",
",",
"missing",
")",
"if",
"val",
"is",
"not",
"missing",
":",
"setattr",
"(",
"result",
",",
"name",
",",
"val",
")",
"return",
"result"
] |
Create a flat copy of the dict.
|
[
"Create",
"a",
"flat",
"copy",
"of",
"the",
"dict",
"."
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/contrib/sessions.py#L100-L108
|
train
|
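The copy above works for any class that keeps all of its state in __slots__; the sentinel distinguishes slots that were never set from slots holding None. A runnable sketch of the same pattern:

class Point(object):
    __slots__ = ('x', 'y', 'label')
    def __init__(self, x, y):
        self.x, self.y = x, y       # 'label' intentionally left unset

def flat_copy(obj):
    missing = object()                          # unique sentinel
    result = object.__new__(obj.__class__)      # bypass __init__
    for name in obj.__slots__:
        val = getattr(obj, name, missing)
        if val is not missing:
            setattr(result, name, val)
    return result

p = flat_copy(Point(1, 2))
print(p.x, p.y)               # 1 2
print(hasattr(p, 'label'))    # False -- unset slots stay unset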
limodou/uliweb
|
uliweb/lib/werkzeug/contrib/sessions.py
|
FilesystemSessionStore.list
|
def list(self):
"""Lists all sessions in the store.
.. versionadded:: 0.6
"""
before, after = self.filename_template.split('%s', 1)
filename_re = re.compile(r'%s(.{5,})%s$' % (re.escape(before),
re.escape(after)))
result = []
for filename in os.listdir(self.path):
#: this is a session that is still being saved.
if filename.endswith(_fs_transaction_suffix):
continue
match = filename_re.match(filename)
if match is not None:
result.append(match.group(1))
return result
|
python
|
def list(self):
"""Lists all sessions in the store.
.. versionadded:: 0.6
"""
before, after = self.filename_template.split('%s', 1)
filename_re = re.compile(r'%s(.{5,})%s$' % (re.escape(before),
re.escape(after)))
result = []
for filename in os.listdir(self.path):
#: this is a session that is still being saved.
if filename.endswith(_fs_transaction_suffix):
continue
match = filename_re.match(filename)
if match is not None:
result.append(match.group(1))
return result
|
[
"def",
"list",
"(",
"self",
")",
":",
"before",
",",
"after",
"=",
"self",
".",
"filename_template",
".",
"split",
"(",
"'%s'",
",",
"1",
")",
"filename_re",
"=",
"re",
".",
"compile",
"(",
"r'%s(.{5,})%s$'",
"%",
"(",
"re",
".",
"escape",
"(",
"before",
")",
",",
"re",
".",
"escape",
"(",
"after",
")",
")",
")",
"result",
"=",
"[",
"]",
"for",
"filename",
"in",
"os",
".",
"listdir",
"(",
"self",
".",
"path",
")",
":",
"#: this is a session that is still being saved.",
"if",
"filename",
".",
"endswith",
"(",
"_fs_transaction_suffix",
")",
":",
"continue",
"match",
"=",
"filename_re",
".",
"match",
"(",
"filename",
")",
"if",
"match",
"is",
"not",
"None",
":",
"result",
".",
"append",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"return",
"result"
] |
Lists all sessions in the store.
.. versionadded:: 0.6
|
[
"Lists",
"all",
"sessions",
"in",
"the",
"store",
"."
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/contrib/sessions.py#L279-L295
|
train
|
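A standalone sketch of how the filename regex is derived from the '%s' template (the template value is werkzeug's default; the session id is illustrative):

import re

filename_template = 'werkzeug_%s.sess'
before, after = filename_template.split('%s', 1)
# session ids must be at least 5 characters long
filename_re = re.compile(r'%s(.{5,})%s$' % (re.escape(before), re.escape(after)))

m = filename_re.match('werkzeug_1a2b3c4d.sess')
print(m.group(1))    # 1a2b3c4d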
limodou/uliweb
|
uliweb/utils/date.py
|
to_timezone
|
def to_timezone(dt, tzinfo=None):
"""
Convert a datetime to timezone
"""
if not dt:
return dt
tz = pick_timezone(tzinfo, __timezone__)
if not tz:
return dt
dttz = getattr(dt, 'tzinfo', None)
if not dttz:
return dt.replace(tzinfo=tz)
else:
return dt.astimezone(tz)
|
python
|
def to_timezone(dt, tzinfo=None):
"""
Convert a datetime to timezone
"""
if not dt:
return dt
tz = pick_timezone(tzinfo, __timezone__)
if not tz:
return dt
dttz = getattr(dt, 'tzinfo', None)
if not dttz:
return dt.replace(tzinfo=tz)
else:
return dt.astimezone(tz)
|
[
"def",
"to_timezone",
"(",
"dt",
",",
"tzinfo",
"=",
"None",
")",
":",
"if",
"not",
"dt",
":",
"return",
"dt",
"tz",
"=",
"pick_timezone",
"(",
"tzinfo",
",",
"__timezone__",
")",
"if",
"not",
"tz",
":",
"return",
"dt",
"dttz",
"=",
"getattr",
"(",
"dt",
",",
"'tzinfo'",
",",
"None",
")",
"if",
"not",
"dttz",
":",
"return",
"dt",
".",
"replace",
"(",
"tzinfo",
"=",
"tz",
")",
"else",
":",
"return",
"dt",
".",
"astimezone",
"(",
"tz",
")"
] |
Convert a datetime to timezone
|
[
"Convert",
"a",
"datetime",
"to",
"timezone"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/date.py#L146-L159
|
train
|
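The two branches above are the standard naive/aware distinction: replace() merely attaches a zone without touching the clock, while astimezone() converts the instant. A runnable illustration with fixed-offset zones:

from datetime import datetime, timedelta, timezone

utc = timezone.utc
cst = timezone(timedelta(hours=8))    # fixed +08:00 offset, e.g. Asia/Shanghai

naive = datetime(2024, 1, 1, 12, 0)
print(naive.replace(tzinfo=cst))      # 2024-01-01 12:00:00+08:00 (clock unchanged)

aware = datetime(2024, 1, 1, 12, 0, tzinfo=utc)
print(aware.astimezone(cst))          # 2024-01-01 20:00:00+08:00 (clock shifted)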
limodou/uliweb
|
uliweb/utils/date.py
|
to_date
|
def to_date(dt, tzinfo=None, format=None):
"""
Convert a datetime to date with tzinfo
"""
d = to_datetime(dt, tzinfo, format)
if not d:
return d
return date(d.year, d.month, d.day)
|
python
|
def to_date(dt, tzinfo=None, format=None):
"""
Convert a datetime to date with tzinfo
"""
d = to_datetime(dt, tzinfo, format)
if not d:
return d
return date(d.year, d.month, d.day)
|
[
"def",
"to_date",
"(",
"dt",
",",
"tzinfo",
"=",
"None",
",",
"format",
"=",
"None",
")",
":",
"d",
"=",
"to_datetime",
"(",
"dt",
",",
"tzinfo",
",",
"format",
")",
"if",
"not",
"d",
":",
"return",
"d",
"return",
"date",
"(",
"d",
".",
"year",
",",
"d",
".",
"month",
",",
"d",
".",
"day",
")"
] |
Convert a datetime to date with tzinfo
|
[
"Convert",
"a",
"datetime",
"to",
"date",
"with",
"tzinfo"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/date.py#L161-L168
|
train
|
limodou/uliweb
|
uliweb/utils/date.py
|
to_time
|
def to_time(dt, tzinfo=None, format=None):
"""
Convert a datetime to time with tzinfo
"""
d = to_datetime(dt, tzinfo, format)
if not d:
return d
return time_(d.hour, d.minute, d.second, d.microsecond, tzinfo=d.tzinfo)
|
python
|
def to_time(dt, tzinfo=None, format=None):
"""
Convert a datetime to time with tzinfo
"""
d = to_datetime(dt, tzinfo, format)
if not d:
return d
return time_(d.hour, d.minute, d.second, d.microsecond, tzinfo=d.tzinfo)
|
[
"def",
"to_time",
"(",
"dt",
",",
"tzinfo",
"=",
"None",
",",
"format",
"=",
"None",
")",
":",
"d",
"=",
"to_datetime",
"(",
"dt",
",",
"tzinfo",
",",
"format",
")",
"if",
"not",
"d",
":",
"return",
"d",
"return",
"time_",
"(",
"d",
".",
"hour",
",",
"d",
".",
"minute",
",",
"d",
".",
"second",
",",
"d",
".",
"microsecond",
",",
"tzinfo",
"=",
"d",
".",
"tzinfo",
")"
] |
Convert a datetime to time with tzinfo
|
[
"Convert",
"a",
"datetime",
"to",
"time",
"with",
"tzinfo"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/date.py#L170-L177
|
train
|
limodou/uliweb
|
uliweb/utils/date.py
|
to_datetime
|
def to_datetime(dt, tzinfo=None, format=None):
"""
Convert a date or time to datetime with tzinfo
"""
if not dt:
return dt
tz = pick_timezone(tzinfo, __timezone__)
if isinstance(dt, (str, unicode)):
if not format:
formats = DEFAULT_DATETIME_INPUT_FORMATS
else:
formats = list(format)
d = None
for fmt in formats:
try:
d = datetime.strptime(dt, fmt)
except ValueError:
continue
if not d:
return None
d = d.replace(tzinfo=tz)
else:
d = datetime(getattr(dt, 'year', 1970), getattr(dt, 'month', 1),
getattr(dt, 'day', 1), getattr(dt, 'hour', 0), getattr(dt, 'minute', 0),
getattr(dt, 'second', 0), getattr(dt, 'microsecond', 0))
if not getattr(dt, 'tzinfo', None):
d = d.replace(tzinfo=tz)
else:
d = d.replace(tzinfo=dt.tzinfo)
return to_timezone(d, tzinfo)
|
python
|
def to_datetime(dt, tzinfo=None, format=None):
"""
Convert a date or time to datetime with tzinfo
"""
if not dt:
return dt
tz = pick_timezone(tzinfo, __timezone__)
if isinstance(dt, (str, unicode)):
if not format:
formats = DEFAULT_DATETIME_INPUT_FORMATS
else:
formats = list(format)
d = None
for fmt in formats:
try:
d = datetime.strptime(dt, fmt)
except ValueError:
continue
if not d:
return None
d = d.replace(tzinfo=tz)
else:
d = datetime(getattr(dt, 'year', 1970), getattr(dt, 'month', 1),
getattr(dt, 'day', 1), getattr(dt, 'hour', 0), getattr(dt, 'minute', 0),
getattr(dt, 'second', 0), getattr(dt, 'microsecond', 0))
if not getattr(dt, 'tzinfo', None):
d = d.replace(tzinfo=tz)
else:
d = d.replace(tzinfo=dt.tzinfo)
return to_timezone(d, tzinfo)
|
[
"def",
"to_datetime",
"(",
"dt",
",",
"tzinfo",
"=",
"None",
",",
"format",
"=",
"None",
")",
":",
"if",
"not",
"dt",
":",
"return",
"dt",
"tz",
"=",
"pick_timezone",
"(",
"tzinfo",
",",
"__timezone__",
")",
"if",
"isinstance",
"(",
"dt",
",",
"(",
"str",
",",
"unicode",
")",
")",
":",
"if",
"not",
"format",
":",
"formats",
"=",
"DEFAULT_DATETIME_INPUT_FORMATS",
"else",
":",
"formats",
"=",
"list",
"(",
"format",
")",
"d",
"=",
"None",
"for",
"fmt",
"in",
"formats",
":",
"try",
":",
"d",
"=",
"datetime",
".",
"strptime",
"(",
"dt",
",",
"fmt",
")",
"except",
"ValueError",
":",
"continue",
"if",
"not",
"d",
":",
"return",
"None",
"d",
"=",
"d",
".",
"replace",
"(",
"tzinfo",
"=",
"tz",
")",
"else",
":",
"d",
"=",
"datetime",
"(",
"getattr",
"(",
"dt",
",",
"'year'",
",",
"1970",
")",
",",
"getattr",
"(",
"dt",
",",
"'month'",
",",
"1",
")",
",",
"getattr",
"(",
"dt",
",",
"'day'",
",",
"1",
")",
",",
"getattr",
"(",
"dt",
",",
"'hour'",
",",
"0",
")",
",",
"getattr",
"(",
"dt",
",",
"'minute'",
",",
"0",
")",
",",
"getattr",
"(",
"dt",
",",
"'second'",
",",
"0",
")",
",",
"getattr",
"(",
"dt",
",",
"'microsecond'",
",",
"0",
")",
")",
"if",
"not",
"getattr",
"(",
"dt",
",",
"'tzinfo'",
",",
"None",
")",
":",
"d",
"=",
"d",
".",
"replace",
"(",
"tzinfo",
"=",
"tz",
")",
"else",
":",
"d",
"=",
"d",
".",
"replace",
"(",
"tzinfo",
"=",
"dt",
".",
"tzinfo",
")",
"return",
"to_timezone",
"(",
"d",
",",
"tzinfo",
")"
] |
Convert a date or time to datetime with tzinfo
|
[
"Convert",
"a",
"date",
"or",
"time",
"to",
"datetime",
"with",
"tzinfo"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/date.py#L179-L210
|
train
|
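The string branch above tries each input format in turn and swallows strptime failures. A standalone sketch of that fallback loop (the format list is an illustrative subset of DEFAULT_DATETIME_INPUT_FORMATS; the sketch breaks on the first match, whereas the original keeps looping):

from datetime import datetime

FORMATS = ['%Y-%m-%d %H:%M:%S', '%Y-%m-%d', '%H:%M:%S']

def parse_any(s, formats=FORMATS):
    d = None
    for fmt in formats:
        try:
            d = datetime.strptime(s, fmt)
            break                    # first matching format wins
        except ValueError:
            continue
    return d

print(parse_any('2024-05-01'))       # 2024-05-01 00:00:00
print(parse_any('not a date'))       # None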
limodou/uliweb
|
uliweb/utils/date.py
|
parse_time
|
def parse_time(t):
"""
    Parse string time format to milliseconds
"""
if isinstance(t, (str, unicode)):
b = re_time.match(t)
if b:
v, unit = int(b.group(1)), b.group(2)
if unit == 's':
return v*1000
elif unit == 'm':
return v*60*1000
elif unit == 'h':
return v*60*60*1000
else:
return v
else:
raise TimeFormatError(t)
elif isinstance(t, (int, long)):
return t
else:
raise TimeFormatError(t)
|
python
|
def parse_time(t):
"""
    Parse string time format to milliseconds
"""
if isinstance(t, (str, unicode)):
b = re_time.match(t)
if b:
v, unit = int(b.group(1)), b.group(2)
if unit == 's':
return v*1000
elif unit == 'm':
return v*60*1000
elif unit == 'h':
return v*60*60*1000
else:
return v
else:
raise TimeFormatError(t)
elif isinstance(t, (int, long)):
return t
else:
raise TimeFormatError(t)
|
[
"def",
"parse_time",
"(",
"t",
")",
":",
"if",
"isinstance",
"(",
"t",
",",
"(",
"str",
",",
"unicode",
")",
")",
":",
"b",
"=",
"re_time",
".",
"match",
"(",
"t",
")",
"if",
"b",
":",
"v",
",",
"unit",
"=",
"int",
"(",
"b",
".",
"group",
"(",
"1",
")",
")",
",",
"b",
".",
"group",
"(",
"2",
")",
"if",
"unit",
"==",
"'s'",
":",
"return",
"v",
"*",
"1000",
"elif",
"unit",
"==",
"'m'",
":",
"return",
"v",
"*",
"60",
"*",
"1000",
"elif",
"unit",
"==",
"'h'",
":",
"return",
"v",
"*",
"60",
"*",
"60",
"*",
"1000",
"else",
":",
"return",
"v",
"else",
":",
"raise",
"TimeFormatError",
"(",
"t",
")",
"elif",
"isinstance",
"(",
"t",
",",
"(",
"int",
",",
"long",
")",
")",
":",
"return",
"t",
"else",
":",
"raise",
"TimeFormatError",
"(",
"t",
")"
] |
Parse string time format to milliseconds
|
[
"Parse",
"string",
"time",
"format",
"to",
"milliseconds"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/date.py#L233-L254
|
train
|
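A standalone sketch of the unit conversion (the regex is an assumption consistent with how re_time is used above: an integer followed by an optional s/m/h suffix; plain numbers are already milliseconds):

import re

re_time = re.compile(r'^(\d+)([smh])?$')    # assumed shape of the pattern

def parse_time(t):
    if isinstance(t, int):
        return t
    m = re_time.match(t)
    if not m:
        raise ValueError('bad time format: %r' % t)
    v, unit = int(m.group(1)), m.group(2)
    factor = {'s': 1000, 'm': 60 * 1000, 'h': 60 * 60 * 1000}.get(unit, 1)
    return v * factor

print(parse_time('10s'))    # 10000
print(parse_time('2m'))     # 120000
print(parse_time(500))      # 500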
limodou/uliweb
|
uliweb/contrib/session/middle_session.py
|
SessionMiddle.process_exception
|
def process_exception(self, request, e):
"""
        Still process session data when a RedirectException is raised
"""
if isinstance(e, RedirectException):
response = e.get_response()
self.process_response(request, response)
|
python
|
def process_exception(self, request, e):
"""
        Still process session data when a RedirectException is raised
"""
if isinstance(e, RedirectException):
response = e.get_response()
self.process_response(request, response)
|
[
"def",
"process_exception",
"(",
"self",
",",
"request",
",",
"e",
")",
":",
"if",
"isinstance",
"(",
"e",
",",
"RedirectException",
")",
":",
"response",
"=",
"e",
".",
"get_response",
"(",
")",
"self",
".",
"process_response",
"(",
"request",
",",
"response",
")"
] |
Still process session data when a RedirectException is raised
|
[
"Still",
"process",
"session",
"data",
"when",
"a",
"RedirectException",
"is",
"raised"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/session/middle_session.py#L68-L74
|
train
|
limodou/uliweb
|
uliweb/core/SimpleFrame.py
|
jsonp
|
def jsonp(data, **json_kwargs):
"""
    jsonp names the callback request parameter, default 'callback'
"""
from uliweb import request
if 'jsonp' in json_kwargs:
cb = json_kwargs.pop('jsonp')
else:
cb = 'callback'
    begin = request.GET.get(cb)
if not begin:
raise BadRequest("Can't found %s parameter in request's query_string" % cb)
if not r_callback.match(begin):
raise BadRequest("The callback name is not right, it can be alphabetic, number and underscore only")
if callable(data):
@wraps(data)
def f(*arg, **kwargs):
ret = data(*arg, **kwargs)
return Response(begin + '(' + json_dumps(ret) + ');', **json_kwargs)
return f
else:
return Response(begin + '(' + json_dumps(data) + ');', **json_kwargs)
|
python
|
def jsonp(data, **json_kwargs):
"""
    jsonp names the callback request parameter, default 'callback'
"""
from uliweb import request
if 'jsonp' in json_kwargs:
cb = json_kwargs.pop('jsonp')
else:
cb = 'callback'
    begin = request.GET.get(cb)
if not begin:
raise BadRequest("Can't found %s parameter in request's query_string" % cb)
if not r_callback.match(begin):
raise BadRequest("The callback name is not right, it can be alphabetic, number and underscore only")
if callable(data):
@wraps(data)
def f(*arg, **kwargs):
ret = data(*arg, **kwargs)
return Response(begin + '(' + json_dumps(ret) + ');', **json_kwargs)
return f
else:
return Response(begin + '(' + json_dumps(data) + ');', **json_kwargs)
|
[
"def",
"jsonp",
"(",
"data",
",",
"*",
"*",
"json_kwargs",
")",
":",
"from",
"uliweb",
"import",
"request",
"if",
"'jsonp'",
"in",
"json_kwargs",
":",
"cb",
"=",
"json_kwargs",
".",
"pop",
"(",
"'jsonp'",
")",
"else",
":",
"cb",
"=",
"'callback'",
"begin",
"=",
"str",
"(",
"request",
".",
"GET",
".",
"get",
"(",
"cb",
")",
")",
"if",
"not",
"begin",
":",
"raise",
"BadRequest",
"(",
"\"Can't found %s parameter in request's query_string\"",
"%",
"cb",
")",
"if",
"not",
"r_callback",
".",
"match",
"(",
"begin",
")",
":",
"raise",
"BadRequest",
"(",
"\"The callback name is not right, it can be alphabetic, number and underscore only\"",
")",
"if",
"callable",
"(",
"data",
")",
":",
"@",
"wraps",
"(",
"data",
")",
"def",
"f",
"(",
"*",
"arg",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"data",
"(",
"*",
"arg",
",",
"*",
"*",
"kwargs",
")",
"return",
"Response",
"(",
"begin",
"+",
"'('",
"+",
"json_dumps",
"(",
"ret",
")",
"+",
"');'",
",",
"*",
"*",
"json_kwargs",
")",
"return",
"f",
"else",
":",
"return",
"Response",
"(",
"begin",
"+",
"'('",
"+",
"json_dumps",
"(",
"data",
")",
"+",
"');'",
",",
"*",
"*",
"json_kwargs",
")"
] |
jsonp names the callback request parameter, default 'callback'
|
[
"jsonp",
"is",
"callback",
"key",
"name"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/core/SimpleFrame.py#L195-L219
|
train
|
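The response body is just the JSON payload wrapped in the validated callback name. A minimal standalone sketch (r_callback is assumed to be a strict identifier pattern, matching the error message above):

import json
import re

r_callback = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*$')    # assumed identifier check

def jsonp_body(data, callback):
    if not r_callback.match(callback):
        raise ValueError('invalid callback name')
    return callback + '(' + json.dumps(data) + ');'

print(jsonp_body({'ok': True}, 'handleResult'))
# handleResult({"ok": true});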
limodou/uliweb
|
uliweb/core/SimpleFrame.py
|
get_url_adapter
|
def get_url_adapter(_domain_name):
"""
Fetch a domain url_adapter object, and bind it to according domain
"""
from werkzeug._compat import wsgi_decoding_dance
domain = application.domains.get(_domain_name, {})
server_name = None
if domain.get('domain', ''):
server_name = domain['domain']
try:
env = {}
environ = request.environ
env['url_scheme'] = environ['wsgi.url_scheme']
env['default_method'] = environ['REQUEST_METHOD']
def _get_wsgi_string(name):
val = environ.get(name)
if val is not None:
return wsgi_decoding_dance(val, "utf-8")
env['script_name'] = _get_wsgi_string('SCRIPT_NAME')
env['path_info'] = _get_wsgi_string('PATH_INFO')
env['query_args'] = _get_wsgi_string('QUERY_STRING')
except:
env = {}
adapter = url_map.bind(server_name, **env)
else:
try:
env = request.environ
except:
#this env if for testing only
env = {
'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;'
'q=0.9,*/*;q=0.8',
'HTTP_ACCEPT_CHARSET': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'HTTP_ACCEPT_ENCODING': 'gzip,deflate,sdch',
'HTTP_ACCEPT_LANGUAGE': 'uk,en-US;q=0.8,en;q=0.6',
'HTTP_CACHE_CONTROL': 'max-age=0',
'HTTP_CONNECTION': 'keep-alive',
# 'HTTP_HOST': 'localhost:8080',
'HTTP_USER_AGENT': 'Mozilla/5.0 (X11; Linux i686)',
# 'PATH_INFO': '/',
# 'QUERY_STRING': '',
'REMOTE_ADDR': '127.0.0.1',
'REQUEST_METHOD': 'GET',
'REQUEST_URI': '/',
'SCRIPT_NAME': '',
'SERVER_NAME': 'localhost',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.errors': None,
'wsgi.file_wrapper': None,
# 'wsgi.input': BytesIO(ntob('', 'utf-8')),
'wsgi.multiprocess': False,
'wsgi.multithread': False,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
}
adapter = url_map.bind_to_environ(env)
return adapter
|
python
|
def get_url_adapter(_domain_name):
"""
Fetch a domain url_adapter object, and bind it to according domain
"""
from werkzeug._compat import wsgi_decoding_dance
domain = application.domains.get(_domain_name, {})
server_name = None
if domain.get('domain', ''):
server_name = domain['domain']
try:
env = {}
environ = request.environ
env['url_scheme'] = environ['wsgi.url_scheme']
env['default_method'] = environ['REQUEST_METHOD']
def _get_wsgi_string(name):
val = environ.get(name)
if val is not None:
return wsgi_decoding_dance(val, "utf-8")
env['script_name'] = _get_wsgi_string('SCRIPT_NAME')
env['path_info'] = _get_wsgi_string('PATH_INFO')
env['query_args'] = _get_wsgi_string('QUERY_STRING')
except:
env = {}
adapter = url_map.bind(server_name, **env)
else:
try:
env = request.environ
except:
#this env if for testing only
env = {
'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;'
'q=0.9,*/*;q=0.8',
'HTTP_ACCEPT_CHARSET': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'HTTP_ACCEPT_ENCODING': 'gzip,deflate,sdch',
'HTTP_ACCEPT_LANGUAGE': 'uk,en-US;q=0.8,en;q=0.6',
'HTTP_CACHE_CONTROL': 'max-age=0',
'HTTP_CONNECTION': 'keep-alive',
# 'HTTP_HOST': 'localhost:8080',
'HTTP_USER_AGENT': 'Mozilla/5.0 (X11; Linux i686)',
# 'PATH_INFO': '/',
# 'QUERY_STRING': '',
'REMOTE_ADDR': '127.0.0.1',
'REQUEST_METHOD': 'GET',
'REQUEST_URI': '/',
'SCRIPT_NAME': '',
'SERVER_NAME': 'localhost',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.errors': None,
'wsgi.file_wrapper': None,
# 'wsgi.input': BytesIO(ntob('', 'utf-8')),
'wsgi.multiprocess': False,
'wsgi.multithread': False,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
}
adapter = url_map.bind_to_environ(env)
return adapter
|
[
"def",
"get_url_adapter",
"(",
"_domain_name",
")",
":",
"from",
"werkzeug",
".",
"_compat",
"import",
"wsgi_decoding_dance",
"domain",
"=",
"application",
".",
"domains",
".",
"get",
"(",
"_domain_name",
",",
"{",
"}",
")",
"server_name",
"=",
"None",
"if",
"domain",
".",
"get",
"(",
"'domain'",
",",
"''",
")",
":",
"server_name",
"=",
"domain",
"[",
"'domain'",
"]",
"try",
":",
"env",
"=",
"{",
"}",
"environ",
"=",
"request",
".",
"environ",
"env",
"[",
"'url_scheme'",
"]",
"=",
"environ",
"[",
"'wsgi.url_scheme'",
"]",
"env",
"[",
"'default_method'",
"]",
"=",
"environ",
"[",
"'REQUEST_METHOD'",
"]",
"def",
"_get_wsgi_string",
"(",
"name",
")",
":",
"val",
"=",
"environ",
".",
"get",
"(",
"name",
")",
"if",
"val",
"is",
"not",
"None",
":",
"return",
"wsgi_decoding_dance",
"(",
"val",
",",
"\"utf-8\"",
")",
"env",
"[",
"'script_name'",
"]",
"=",
"_get_wsgi_string",
"(",
"'SCRIPT_NAME'",
")",
"env",
"[",
"'path_info'",
"]",
"=",
"_get_wsgi_string",
"(",
"'PATH_INFO'",
")",
"env",
"[",
"'query_args'",
"]",
"=",
"_get_wsgi_string",
"(",
"'QUERY_STRING'",
")",
"except",
":",
"env",
"=",
"{",
"}",
"adapter",
"=",
"url_map",
".",
"bind",
"(",
"server_name",
",",
"*",
"*",
"env",
")",
"else",
":",
"try",
":",
"env",
"=",
"request",
".",
"environ",
"except",
":",
"#this env if for testing only",
"env",
"=",
"{",
"'HTTP_ACCEPT'",
":",
"'text/html,application/xhtml+xml,application/xml;'",
"'q=0.9,*/*;q=0.8'",
",",
"'HTTP_ACCEPT_CHARSET'",
":",
"'ISO-8859-1,utf-8;q=0.7,*;q=0.3'",
",",
"'HTTP_ACCEPT_ENCODING'",
":",
"'gzip,deflate,sdch'",
",",
"'HTTP_ACCEPT_LANGUAGE'",
":",
"'uk,en-US;q=0.8,en;q=0.6'",
",",
"'HTTP_CACHE_CONTROL'",
":",
"'max-age=0'",
",",
"'HTTP_CONNECTION'",
":",
"'keep-alive'",
",",
"# 'HTTP_HOST': 'localhost:8080',",
"'HTTP_USER_AGENT'",
":",
"'Mozilla/5.0 (X11; Linux i686)'",
",",
"# 'PATH_INFO': '/',",
"# 'QUERY_STRING': '',",
"'REMOTE_ADDR'",
":",
"'127.0.0.1'",
",",
"'REQUEST_METHOD'",
":",
"'GET'",
",",
"'REQUEST_URI'",
":",
"'/'",
",",
"'SCRIPT_NAME'",
":",
"''",
",",
"'SERVER_NAME'",
":",
"'localhost'",
",",
"'SERVER_PORT'",
":",
"'8080'",
",",
"'SERVER_PROTOCOL'",
":",
"'HTTP/1.1'",
",",
"'wsgi.errors'",
":",
"None",
",",
"'wsgi.file_wrapper'",
":",
"None",
",",
"# 'wsgi.input': BytesIO(ntob('', 'utf-8')),",
"'wsgi.multiprocess'",
":",
"False",
",",
"'wsgi.multithread'",
":",
"False",
",",
"'wsgi.run_once'",
":",
"False",
",",
"'wsgi.url_scheme'",
":",
"'http'",
",",
"'wsgi.version'",
":",
"(",
"1",
",",
"0",
")",
",",
"}",
"adapter",
"=",
"url_map",
".",
"bind_to_environ",
"(",
"env",
")",
"return",
"adapter"
] |
Fetch a domain url_adapter object, and bind it to according domain
|
[
"Fetch",
"a",
"domain",
"url_adapter",
"object",
"and",
"bind",
"it",
"to",
"according",
"domain"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/core/SimpleFrame.py#L278-L339
|
train
|
limodou/uliweb
|
uliweb/core/SimpleFrame.py
|
get_app_dir
|
def get_app_dir(app):
"""
Get an app's directory
"""
path = __app_dirs__.get(app)
if path is not None:
return path
else:
p = app.split('.')
try:
path = pkg.resource_filename(p[0], '')
except ImportError as e:
log.error("Can't import app %s" % app)
log.exception(e)
path = ''
if len(p) > 1:
path = os.path.join(path, *p[1:])
__app_dirs__[app] = path
return path
|
python
|
def get_app_dir(app):
"""
Get an app's directory
"""
path = __app_dirs__.get(app)
if path is not None:
return path
else:
p = app.split('.')
try:
path = pkg.resource_filename(p[0], '')
except ImportError as e:
log.error("Can't import app %s" % app)
log.exception(e)
path = ''
if len(p) > 1:
path = os.path.join(path, *p[1:])
__app_dirs__[app] = path
return path
|
[
"def",
"get_app_dir",
"(",
"app",
")",
":",
"path",
"=",
"__app_dirs__",
".",
"get",
"(",
"app",
")",
"if",
"path",
"is",
"not",
"None",
":",
"return",
"path",
"else",
":",
"p",
"=",
"app",
".",
"split",
"(",
"'.'",
")",
"try",
":",
"path",
"=",
"pkg",
".",
"resource_filename",
"(",
"p",
"[",
"0",
"]",
",",
"''",
")",
"except",
"ImportError",
"as",
"e",
":",
"log",
".",
"error",
"(",
"\"Can't import app %s\"",
"%",
"app",
")",
"log",
".",
"exception",
"(",
"e",
")",
"path",
"=",
"''",
"if",
"len",
"(",
"p",
")",
">",
"1",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"*",
"p",
"[",
"1",
":",
"]",
")",
"__app_dirs__",
"[",
"app",
"]",
"=",
"path",
"return",
"path"
] |
Get an app's directory
|
[
"Get",
"an",
"app",
"s",
"directory"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/core/SimpleFrame.py#L405-L424
|
train
|
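The dotted app name is resolved through the package of its first segment, and the remaining segments are joined on as path components. A sketch (pkg is pkg_resources here; the app name is illustrative and assumes the package is installed):

import os
import pkg_resources as pkg

app = 'uliweb.contrib.session'
p = app.split('.')
path = pkg.resource_filename(p[0], '')    # base directory of the 'uliweb' package
if len(p) > 1:
    path = os.path.join(path, *p[1:])     # .../uliweb/contrib/session
print(path)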
limodou/uliweb
|
uliweb/core/SimpleFrame.py
|
Dispatcher.get_file
|
def get_file(self, filename, dir='static'):
"""
get_file will search from apps directory
"""
if os.path.exists(filename):
return filename
dirs = self.apps
if dir:
fname = os.path.join(dir, filename)
else:
fname = filename
for d in reversed(dirs):
path = pkg.resource_filename(d, fname)
if os.path.exists(path):
return path
return None
|
python
|
def get_file(self, filename, dir='static'):
"""
get_file will search from apps directory
"""
if os.path.exists(filename):
return filename
dirs = self.apps
if dir:
fname = os.path.join(dir, filename)
else:
fname = filename
for d in reversed(dirs):
path = pkg.resource_filename(d, fname)
if os.path.exists(path):
return path
return None
|
[
"def",
"get_file",
"(",
"self",
",",
"filename",
",",
"dir",
"=",
"'static'",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"return",
"filename",
"dirs",
"=",
"self",
".",
"apps",
"if",
"dir",
":",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"filename",
")",
"else",
":",
"fname",
"=",
"filename",
"for",
"d",
"in",
"reversed",
"(",
"dirs",
")",
":",
"path",
"=",
"pkg",
".",
"resource_filename",
"(",
"d",
",",
"fname",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"path",
"return",
"None"
] |
get_file will search from apps directory
|
[
"get_file",
"will",
"search",
"from",
"apps",
"directory"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/core/SimpleFrame.py#L842-L857
|
train
|
limodou/uliweb
|
uliweb/core/SimpleFrame.py
|
Dispatcher.get_template_dirs
|
def get_template_dirs(self):
"""
Get templates directory from apps, but in reversed order, so the same named template
        file will be overridden by a later defined app
"""
def if_not_empty(dir):
if not os.path.exists(dir):
return
for root, dirs, files in os.walk(dir):
if dirs:
return True
for f in files:
if f != 'readme.txt':
return True
template_dirs = [os.path.join(self.project_dir, x) for x in settings.GLOBAL.TEMPLATE_DIRS or []]
taglibs_dirs = []
for p in reversed(self.apps):
app_path = get_app_dir(p)
path = os.path.join(app_path, 'templates')
if if_not_empty(path):
template_dirs.append(path)
path = os.path.join(app_path, 'taglibs')
if if_not_empty(path):
taglibs_dirs.append(path)
Dispatcher.template_dirs = template_dirs
Dispatcher.taglibs_dirs = taglibs_dirs
|
python
|
def get_template_dirs(self):
"""
Get templates directory from apps, but in reversed order, so the same named template
        file will be overridden by a later defined app
"""
def if_not_empty(dir):
if not os.path.exists(dir):
return
for root, dirs, files in os.walk(dir):
if dirs:
return True
for f in files:
if f != 'readme.txt':
return True
template_dirs = [os.path.join(self.project_dir, x) for x in settings.GLOBAL.TEMPLATE_DIRS or []]
taglibs_dirs = []
for p in reversed(self.apps):
app_path = get_app_dir(p)
path = os.path.join(app_path, 'templates')
if if_not_empty(path):
template_dirs.append(path)
path = os.path.join(app_path, 'taglibs')
if if_not_empty(path):
taglibs_dirs.append(path)
Dispatcher.template_dirs = template_dirs
Dispatcher.taglibs_dirs = taglibs_dirs
|
[
"def",
"get_template_dirs",
"(",
"self",
")",
":",
"def",
"if_not_empty",
"(",
"dir",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dir",
")",
":",
"return",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"dir",
")",
":",
"if",
"dirs",
":",
"return",
"True",
"for",
"f",
"in",
"files",
":",
"if",
"f",
"!=",
"'readme.txt'",
":",
"return",
"True",
"template_dirs",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"project_dir",
",",
"x",
")",
"for",
"x",
"in",
"settings",
".",
"GLOBAL",
".",
"TEMPLATE_DIRS",
"or",
"[",
"]",
"]",
"taglibs_dirs",
"=",
"[",
"]",
"for",
"p",
"in",
"reversed",
"(",
"self",
".",
"apps",
")",
":",
"app_path",
"=",
"get_app_dir",
"(",
"p",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"app_path",
",",
"'templates'",
")",
"if",
"if_not_empty",
"(",
"path",
")",
":",
"template_dirs",
".",
"append",
"(",
"path",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"app_path",
",",
"'taglibs'",
")",
"if",
"if_not_empty",
"(",
"path",
")",
":",
"taglibs_dirs",
".",
"append",
"(",
"path",
")",
"Dispatcher",
".",
"template_dirs",
"=",
"template_dirs",
"Dispatcher",
".",
"taglibs_dirs",
"=",
"taglibs_dirs"
] |
Get templates directory from apps, but in reversed order, so the same named template
file will be overridden by a later defined app
|
[
"Get",
"templates",
"directory",
"from",
"apps",
"but",
"in",
"reversed",
"order",
"so",
"the",
"same",
"named",
"template",
"file",
"will",
"be",
"overridden",
"by",
"a",
"later",
"defined",
"app"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/core/SimpleFrame.py#L1406-L1434
|
train
|
limodou/uliweb
|
uliweb/contrib/redis_cli/__init__.py
|
get_lock
|
def get_lock(key, value=None, expiry_time=60):
"""Get a distribute lock"""
from uliweb.utils.common import get_uuid
redis = get_redis()
value = value or get_uuid()
return redis.set(key, value, ex=expiry_time, nx=True)
|
python
|
def get_lock(key, value=None, expiry_time=60):
"""Get a distribute lock"""
from uliweb.utils.common import get_uuid
redis = get_redis()
value = value or get_uuid()
return redis.set(key, value, ex=expiry_time, nx=True)
|
[
"def",
"get_lock",
"(",
"key",
",",
"value",
"=",
"None",
",",
"expiry_time",
"=",
"60",
")",
":",
"from",
"uliweb",
".",
"utils",
".",
"common",
"import",
"get_uuid",
"redis",
"=",
"get_redis",
"(",
")",
"value",
"=",
"value",
"or",
"get_uuid",
"(",
")",
"return",
"redis",
".",
"set",
"(",
"key",
",",
"value",
",",
"ex",
"=",
"expiry_time",
",",
"nx",
"=",
"True",
")"
] |
Get a distributed lock
|
[
"Get",
"a",
"distributed",
"lock"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/redis_cli/__init__.py#L40-L46
|
train
|
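SET with nx=True (only if absent) and ex= (auto-expiry) is the standard single-key Redis lock that get_lock wraps. A hedged redis-py sketch, including the usual compare-before-delete release:

import uuid
import redis

r = redis.StrictRedis()
token = uuid.uuid4().hex

# acquire: True only for the first caller; the key expires after 60 seconds
acquired = r.set('lock:report', token, ex=60, nx=True)

# release: delete only if we still own the lock
if acquired and r.get('lock:report') == token.encode():
    r.delete('lock:report')

Note the get/delete pair is not atomic; production code usually releases through a small Lua script instead.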
limodou/uliweb
|
uliweb/contrib/redis_cli/__init__.py
|
set_lock
|
def set_lock(key, value=None, expiry_time=60):
"""Force to set a distribute lock"""
from uliweb.utils.common import get_uuid
redis = get_redis()
value = value or get_uuid()
return redis.set(key, value, ex=expiry_time, xx=True)
|
python
|
def set_lock(key, value=None, expiry_time=60):
"""Force to set a distribute lock"""
from uliweb.utils.common import get_uuid
redis = get_redis()
value = value or get_uuid()
return redis.set(key, value, ex=expiry_time, xx=True)
|
[
"def",
"set_lock",
"(",
"key",
",",
"value",
"=",
"None",
",",
"expiry_time",
"=",
"60",
")",
":",
"from",
"uliweb",
".",
"utils",
".",
"common",
"import",
"get_uuid",
"redis",
"=",
"get_redis",
"(",
")",
"value",
"=",
"value",
"or",
"get_uuid",
"(",
")",
"return",
"redis",
".",
"set",
"(",
"key",
",",
"value",
",",
"ex",
"=",
"expiry_time",
",",
"xx",
"=",
"True",
")"
] |
Force to set a distributed lock
|
[
"Force",
"to",
"set",
"a",
"distributed",
"lock"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/redis_cli/__init__.py#L48-L54
|
train
|
limodou/uliweb
|
uliweb/contrib/redis_cli/__init__.py
|
after_init_apps
|
def after_init_apps(sender):
"""
Check redis version
"""
from uliweb import settings
from uliweb.utils.common import log
check = settings.get_var('REDIS/check_version')
if check:
client = get_redis()
try:
info = client.info()
except Exception as e:
log.exception(e)
log.error('Redis is not started!')
return
redis_version = info['redis_version']
version = tuple(map(int, redis_version.split('.')))
op = re_compare_op.search(check)
if op:
_op = op.group()
_v = check[op.end()+1:].strip()
else:
_op = '='
_v = check
nv = tuple(map(int, _v.split('.')))
if _op == '=':
flag = version[:len(nv)] == nv
elif _op == '>=':
flag = version >= nv
elif _op == '>':
flag = version > nv
elif _op == '<=':
flag = version <= nv
elif _op == '<':
flag = version < nv
        else:
            log.error("Can't support operator %s when checking redis version" % _op)
            return
        if not flag:
            log.error("Redis version %s does not match the required %s" % (redis_version, _v))
|
python
|
def after_init_apps(sender):
"""
Check redis version
"""
from uliweb import settings
from uliweb.utils.common import log
check = settings.get_var('REDIS/check_version')
if check:
client = get_redis()
try:
info = client.info()
except Exception as e:
log.exception(e)
log.error('Redis is not started!')
return
redis_version = info['redis_version']
version = tuple(map(int, redis_version.split('.')))
op = re_compare_op.search(check)
if op:
_op = op.group()
_v = check[op.end()+1:].strip()
else:
_op = '='
_v = check
nv = tuple(map(int, _v.split('.')))
if _op == '=':
flag = version[:len(nv)] == nv
elif _op == '>=':
flag = version >= nv
elif _op == '>':
flag = version > nv
elif _op == '<=':
flag = version <= nv
elif _op == '<':
flag = version < nv
        else:
            log.error("Can't support operator %s when checking redis version" % _op)
            return
        if not flag:
            log.error("Redis version %s does not match the required %s" % (redis_version, _v))
|
[
"def",
"after_init_apps",
"(",
"sender",
")",
":",
"from",
"uliweb",
"import",
"settings",
"from",
"uliweb",
".",
"utils",
".",
"common",
"import",
"log",
"check",
"=",
"settings",
".",
"get_var",
"(",
"'REDIS/check_version'",
")",
"if",
"check",
":",
"client",
"=",
"get_redis",
"(",
")",
"try",
":",
"info",
"=",
"client",
".",
"info",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"exception",
"(",
"e",
")",
"log",
".",
"error",
"(",
"'Redis is not started!'",
")",
"return",
"redis_version",
"=",
"info",
"[",
"'redis_version'",
"]",
"version",
"=",
"tuple",
"(",
"map",
"(",
"int",
",",
"redis_version",
".",
"split",
"(",
"'.'",
")",
")",
")",
"op",
"=",
"re_compare_op",
".",
"search",
"(",
"check",
")",
"if",
"op",
":",
"_op",
"=",
"op",
".",
"group",
"(",
")",
"_v",
"=",
"check",
"[",
"op",
".",
"end",
"(",
")",
"+",
"1",
":",
"]",
".",
"strip",
"(",
")",
"else",
":",
"_op",
"=",
"'='",
"_v",
"=",
"check",
"nv",
"=",
"tuple",
"(",
"map",
"(",
"int",
",",
"_v",
".",
"split",
"(",
"'.'",
")",
")",
")",
"if",
"_op",
"==",
"'='",
":",
"flag",
"=",
"version",
"[",
":",
"len",
"(",
"nv",
")",
"]",
"==",
"nv",
"elif",
"_op",
"==",
"'>='",
":",
"flag",
"=",
"version",
">=",
"nv",
"elif",
"_op",
"==",
"'>'",
":",
"flag",
"=",
"version",
">",
"nv",
"elif",
"_op",
"==",
"'<='",
":",
"flag",
"=",
"version",
"<=",
"nv",
"elif",
"_op",
"==",
"'<'",
":",
"flag",
"=",
"version",
"<",
"nv",
"else",
":",
"log",
".",
"error",
"(",
"\"Can't support operator %s when check redis version\"",
"%",
"_op",
")",
"if",
"not",
"flag",
":",
"log",
".",
"error",
"(",
"\"Redis version %s is not matched what you want %s\"",
"%",
"(",
"redis_version",
",",
"_v",
")",
")"
] |
Check redis version
|
[
"Check",
"redis",
"version"
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/redis_cli/__init__.py#L57-L97
|
train
|
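A note on the `after_init_apps` record above: the REDIS/check_version comparison accepts an optional operator ('>=', '>', '<=', '<', '=') before a dotted version and defaults to equality, where '=' is a prefix match so '2.6' accepts any 2.6.x release. Two things worth flagging when reading it: if an unsupported operator slips through, `flag` is never assigned and the final `if not flag` raises a NameError; and `check[op.end()+1:]` assumes exactly one space after the operator. A minimal standalone sketch of the same comparison follows; `re_compare_op` is not defined in the record, so the pattern below is an assumed equivalent, and the sketch strips whitespace after the operator rather than skipping one character.

import re

# Assumed equivalent of the module-level re_compare_op the record relies on;
# its definition is not included in the record.
re_compare_op = re.compile(r'>=|<=|>|<|=')

def version_matches(current, check):
    # Compare a dotted version string against a spec such as '>= 2.6'.
    version = tuple(map(int, current.split('.')))
    m = re_compare_op.search(check)
    if m:
        op, spec = m.group(), check[m.end():].strip()
    else:
        op, spec = '=', check.strip()
    nv = tuple(map(int, spec.split('.')))
    if op == '=':
        # Prefix match: '= 2.6' accepts any 2.6.x release.
        return version[:len(nv)] == nv
    return {'>=': version >= nv, '>': version > nv,
            '<=': version <= nv, '<': version < nv}[op]

assert version_matches('2.8.4', '>= 2.6')
assert version_matches('2.6.1', '= 2.6')
assert not version_matches('2.5.9', '> 2.6')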
limodou/uliweb
|
uliweb/lib/werkzeug/contrib/atom.py
|
_make_text_block
|
def _make_text_block(name, content, content_type=None):
"""Helper function for the builder that creates an XML text block."""
if content_type == 'xhtml':
return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % \
(name, XHTML_NAMESPACE, content, name)
if not content_type:
return u'<%s>%s</%s>\n' % (name, escape(content), name)
return u'<%s type="%s">%s</%s>\n' % (name, content_type,
escape(content), name)
|
python
|
def _make_text_block(name, content, content_type=None):
"""Helper function for the builder that creates an XML text block."""
if content_type == 'xhtml':
return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % \
(name, XHTML_NAMESPACE, content, name)
if not content_type:
return u'<%s>%s</%s>\n' % (name, escape(content), name)
return u'<%s type="%s">%s</%s>\n' % (name, content_type,
escape(content), name)
|
[
"def",
"_make_text_block",
"(",
"name",
",",
"content",
",",
"content_type",
"=",
"None",
")",
":",
"if",
"content_type",
"==",
"'xhtml'",
":",
"return",
"u'<%s type=\"xhtml\"><div xmlns=\"%s\">%s</div></%s>\\n'",
"%",
"(",
"name",
",",
"XHTML_NAMESPACE",
",",
"content",
",",
"name",
")",
"if",
"not",
"content_type",
":",
"return",
"u'<%s>%s</%s>\\n'",
"%",
"(",
"name",
",",
"escape",
"(",
"content",
")",
",",
"name",
")",
"return",
"u'<%s type=\"%s\">%s</%s>\\n'",
"%",
"(",
"name",
",",
"content_type",
",",
"escape",
"(",
"content",
")",
",",
"name",
")"
] |
Helper function for the builder that creates an XML text block.
|
[
"Helper",
"function",
"for",
"the",
"builder",
"that",
"creates",
"an",
"XML",
"text",
"block",
"."
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/contrib/atom.py#L34-L42
|
train
|
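A hedged usage sketch for the `_make_text_block` record above: the helper picks one of three serializations based on content_type, escaping the payload except in the xhtml branch, which must stay raw markup. The restatement below swaps in xml.sax.saxutils.escape for werkzeug's escape helper and hard-codes the standard XHTML namespace, both assumptions, so the checks run standalone.

from xml.sax.saxutils import escape  # stand-in for werkzeug's escape helper

XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'  # assumed; the record imports it

def make_text_block(name, content, content_type=None):
    # Same three branches as the record's _make_text_block.
    if content_type == 'xhtml':
        return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % (
            name, XHTML_NAMESPACE, content, name)
    if not content_type:
        return u'<%s>%s</%s>\n' % (name, escape(content), name)
    return u'<%s type="%s">%s</%s>\n' % (name, content_type, escape(content), name)

assert make_text_block('title', 'A & B') == u'<title>A &amp; B</title>\n'
assert make_text_block('rights', '(c)', 'text') == u'<rights type="text">(c)</rights>\n'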
limodou/uliweb
|
uliweb/utils/xltools.py
|
SimpleWriter._style_range
|
def _style_range(self, cell, cell_range, border=None, fill=None, font=None, alignment=None):
"""
Apply styles to a range of cells as if they were a single cell.
:param ws: Excel worksheet instance
:param range: An excel range to style (e.g. A1:F20)
:param border: An openpyxl Border
:param fill: An openpyxl PatternFill or GradientFill
:param font: An openpyxl Font object
"""
from openpyxl.styles import Border, Side
top = left = right = bottom = Side(border_style='thin', color=self.border_color)
def border_add(border, top=None, right=None, left=None, bottom=None):
top = top or border.top
left = left or border.left
right = right or border.right
bottom = bottom or border.bottom
return Border(top=top, left=left, right=right, bottom=bottom)
cell.alignment = alignment
cell.fill = fill
rows = list(self.sheet[cell_range])
for cell in rows[0]:
cell.border = border_add(cell.border, top=top)
for cell in rows[-1]:
cell.border = border_add(cell.border, bottom=bottom)
for row in rows:
l = row[0]
r = row[-1]
l.border = border_add(l.border, left=left)
r.border = border_add(r.border, right=right)
|
python
|
def _style_range(self, cell, cell_range, border=None, fill=None, font=None, alignment=None):
"""
Apply styles to a range of cells as if they were a single cell.
:param ws: Excel worksheet instance
:param range: An excel range to style (e.g. A1:F20)
:param border: An openpyxl Border
:param fill: An openpyxl PatternFill or GradientFill
:param font: An openpyxl Font object
"""
from openpyxl.styles import Border, Side
top = left = right = bottom = Side(border_style='thin', color=self.border_color)
def border_add(border, top=None, right=None, left=None, bottom=None):
top = top or border.top
left = left or border.left
right = right or border.right
bottom = bottom or border.bottom
return Border(top=top, left=left, right=right, bottom=bottom)
cell.alignment = alignment
cell.fill = fill
rows = list(self.sheet[cell_range])
for cell in rows[0]:
cell.border = border_add(cell.border, top=top)
for cell in rows[-1]:
cell.border = border_add(cell.border, bottom=bottom)
for row in rows:
l = row[0]
r = row[-1]
l.border = border_add(l.border, left=left)
r.border = border_add(r.border, right=right)
|
[
"def",
"_style_range",
"(",
"self",
",",
"cell",
",",
"cell_range",
",",
"border",
"=",
"None",
",",
"fill",
"=",
"None",
",",
"font",
"=",
"None",
",",
"alignment",
"=",
"None",
")",
":",
"from",
"openpyxl",
".",
"styles",
"import",
"Border",
",",
"Side",
"top",
"=",
"left",
"=",
"right",
"=",
"bottom",
"=",
"Side",
"(",
"border_style",
"=",
"'thin'",
",",
"color",
"=",
"self",
".",
"border_color",
")",
"def",
"border_add",
"(",
"border",
",",
"top",
"=",
"None",
",",
"right",
"=",
"None",
",",
"left",
"=",
"None",
",",
"bottom",
"=",
"None",
")",
":",
"top",
"=",
"top",
"or",
"border",
".",
"top",
"left",
"=",
"left",
"or",
"border",
".",
"left",
"right",
"=",
"right",
"or",
"border",
".",
"right",
"bottom",
"=",
"bottom",
"or",
"border",
".",
"bottom",
"return",
"Border",
"(",
"top",
"=",
"top",
",",
"left",
"=",
"left",
",",
"right",
"=",
"right",
",",
"bottom",
"=",
"bottom",
")",
"cell",
".",
"alignment",
"=",
"alignment",
"cell",
".",
"fill",
"=",
"fill",
"rows",
"=",
"list",
"(",
"self",
".",
"sheet",
"[",
"cell_range",
"]",
")",
"for",
"cell",
"in",
"rows",
"[",
"0",
"]",
":",
"cell",
".",
"border",
"=",
"border_add",
"(",
"cell",
".",
"border",
",",
"top",
"=",
"top",
")",
"for",
"cell",
"in",
"rows",
"[",
"-",
"1",
"]",
":",
"cell",
".",
"border",
"=",
"border_add",
"(",
"cell",
".",
"border",
",",
"bottom",
"=",
"bottom",
")",
"for",
"row",
"in",
"rows",
":",
"l",
"=",
"row",
"[",
"0",
"]",
"r",
"=",
"row",
"[",
"-",
"1",
"]",
"l",
".",
"border",
"=",
"border_add",
"(",
"l",
".",
"border",
",",
"left",
"=",
"left",
")",
"r",
".",
"border",
"=",
"border_add",
"(",
"r",
".",
"border",
",",
"right",
"=",
"right",
")"
] |
Apply styles to a range of cells as if they were a single cell.
:param ws: Excel worksheet instance
:param range: An excel range to style (e.g. A1:F20)
:param border: An openpyxl Border
:param fill: An openpyxl PatternFill or GradientFill
:param font: An openpyxl Font object
|
[
"Apply",
"styles",
"to",
"a",
"range",
"of",
"cells",
"as",
"if",
"they",
"were",
"a",
"single",
"cell",
"."
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/xltools.py#L326-L362
|
train
|
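The core idea in the `_style_range` record above is painting a border on only the edge cells of a range while preserving each cell's other edges, because openpyxl Border objects are replaced wholesale rather than mutated in place. A minimal standalone sketch of that edge-painting, applied here to a plain (unmerged) 3x3 range and assuming only that openpyxl is installed:

from openpyxl import Workbook
from openpyxl.styles import Border, Side

wb = Workbook()
ws = wb.active
side = Side(border_style='thin', color='FF0000')

def with_side(border, **edges):
    # Rebuild a Border, replacing only the edges passed in and keeping the rest.
    return Border(top=edges.get('top', border.top),
                  left=edges.get('left', border.left),
                  right=edges.get('right', border.right),
                  bottom=edges.get('bottom', border.bottom))

rows = list(ws['A1:C3'])          # a 3x3 block of cells
for c in rows[0]:                 # top edge
    c.border = with_side(c.border, top=side)
for c in rows[-1]:                # bottom edge
    c.border = with_side(c.border, bottom=side)
for row in rows:                  # left and right edges
    row[0].border = with_side(row[0].border, left=side)
    row[-1].border = with_side(row[-1].border, right=side)

wb.save('outlined.xlsx')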
limodou/uliweb
|
uliweb/lib/werkzeug/urls.py
|
url_unquote
|
def url_unquote(string, charset='utf-8', errors='replace', unsafe=''):
"""URL decode a single string with a given encoding. If the charset
is set to `None` no unicode decoding is performed and raw bytes
are returned.
:param s: the string to unquote.
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param errors: the error handling for the charset decoding.
"""
rv = _unquote_to_bytes(string, unsafe)
if charset is not None:
rv = rv.decode(charset, errors)
return rv
|
python
|
def url_unquote(string, charset='utf-8', errors='replace', unsafe=''):
"""URL decode a single string with a given encoding. If the charset
is set to `None` no unicode decoding is performed and raw bytes
are returned.
:param s: the string to unquote.
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param errors: the error handling for the charset decoding.
"""
rv = _unquote_to_bytes(string, unsafe)
if charset is not None:
rv = rv.decode(charset, errors)
return rv
|
[
"def",
"url_unquote",
"(",
"string",
",",
"charset",
"=",
"'utf-8'",
",",
"errors",
"=",
"'replace'",
",",
"unsafe",
"=",
"''",
")",
":",
"rv",
"=",
"_unquote_to_bytes",
"(",
"string",
",",
"unsafe",
")",
"if",
"charset",
"is",
"not",
"None",
":",
"rv",
"=",
"rv",
".",
"decode",
"(",
"charset",
",",
"errors",
")",
"return",
"rv"
] |
URL decode a single string with a given encoding. If the charset
is set to `None` no unicode decoding is performed and raw bytes
are returned.
:param s: the string to unquote.
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param errors: the error handling for the charset decoding.
|
[
"URL",
"decode",
"a",
"single",
"string",
"with",
"a",
"given",
"encoding",
".",
"If",
"the",
"charset",
"is",
"set",
"to",
"None",
"no",
"unicode",
"decoding",
"is",
"performed",
"and",
"raw",
"bytes",
"are",
"returned",
"."
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/urls.py#L439-L452
|
train
|
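Usage sketch for the `url_unquote` record above, assuming a werkzeug release that still exposes the function (true for the 0.x line this copy is vendored from): with the default charset the result is text, while charset=None returns the raw percent-decoded bytes.

from werkzeug.urls import url_unquote  # assumed importable from this werkzeug line

assert url_unquote('f%C3%BCr%20Elise') == u'f\xfcr Elise'
assert url_unquote('f%C3%BCr', None) == b'f\xc3\xbcr'   # charset=None: raw bytes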
limodou/uliweb
|
uliweb/lib/werkzeug/urls.py
|
_URLMixin.decode_netloc
|
def decode_netloc(self):
"""Decodes the netloc part into a string."""
rv = _decode_idna(self.host or '')
if ':' in rv:
rv = '[%s]' % rv
port = self.port
if port is not None:
rv = '%s:%d' % (rv, port)
auth = ':'.join(filter(None, [
_url_unquote_legacy(self.raw_username or '', '/:%@'),
_url_unquote_legacy(self.raw_password or '', '/:%@'),
]))
if auth:
rv = '%s@%s' % (auth, rv)
return rv
|
python
|
def decode_netloc(self):
"""Decodes the netloc part into a string."""
rv = _decode_idna(self.host or '')
if ':' in rv:
rv = '[%s]' % rv
port = self.port
if port is not None:
rv = '%s:%d' % (rv, port)
auth = ':'.join(filter(None, [
_url_unquote_legacy(self.raw_username or '', '/:%@'),
_url_unquote_legacy(self.raw_password or '', '/:%@'),
]))
if auth:
rv = '%s@%s' % (auth, rv)
return rv
|
[
"def",
"decode_netloc",
"(",
"self",
")",
":",
"rv",
"=",
"_decode_idna",
"(",
"self",
".",
"host",
"or",
"''",
")",
"if",
"':'",
"in",
"rv",
":",
"rv",
"=",
"'[%s]'",
"%",
"rv",
"port",
"=",
"self",
".",
"port",
"if",
"port",
"is",
"not",
"None",
":",
"rv",
"=",
"'%s:%d'",
"%",
"(",
"rv",
",",
"port",
")",
"auth",
"=",
"':'",
".",
"join",
"(",
"filter",
"(",
"None",
",",
"[",
"_url_unquote_legacy",
"(",
"self",
".",
"raw_username",
"or",
"''",
",",
"'/:%@'",
")",
",",
"_url_unquote_legacy",
"(",
"self",
".",
"raw_password",
"or",
"''",
",",
"'/:%@'",
")",
",",
"]",
")",
")",
"if",
"auth",
":",
"rv",
"=",
"'%s@%s'",
"%",
"(",
"auth",
",",
"rv",
")",
"return",
"rv"
] |
Decodes the netloc part into a string.
|
[
"Decodes",
"the",
"netloc",
"part",
"into",
"a",
"string",
"."
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/urls.py#L139-L154
|
train
|
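The `decode_netloc` record above assembles the netloc in a fixed order: bracket IPv6 hosts, append the port, then prepend 'user:password@' credentials. A standalone sketch of just that ordering, using plain string operations (the real method additionally IDNA-decodes the host and unquotes the credentials):

def format_netloc(host, port=None, user=None, password=None):
    # Sketch of the ordering decode_netloc applies.
    rv = '[%s]' % host if ':' in host else host   # bracket IPv6 literals
    if port is not None:
        rv = '%s:%d' % (rv, port)
    auth = ':'.join(filter(None, [user or '', password or '']))
    if auth:
        rv = '%s@%s' % (auth, rv)
    return rv

assert format_netloc('example.com', 8080, 'me') == 'me@example.com:8080'
assert format_netloc('::1', 80) == '[::1]:80'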
limodou/uliweb
|
uliweb/lib/werkzeug/urls.py
|
BytesURL.decode
|
def decode(self, charset='utf-8', errors='replace'):
"""Decodes the URL to a tuple made out of strings. The charset is
only being used for the path, query and fragment.
"""
return URL(
self.scheme.decode('ascii'),
self.decode_netloc(),
self.path.decode(charset, errors),
self.query.decode(charset, errors),
self.fragment.decode(charset, errors)
)
|
python
|
def decode(self, charset='utf-8', errors='replace'):
"""Decodes the URL to a tuple made out of strings. The charset is
only being used for the path, query and fragment.
"""
return URL(
self.scheme.decode('ascii'),
self.decode_netloc(),
self.path.decode(charset, errors),
self.query.decode(charset, errors),
self.fragment.decode(charset, errors)
)
|
[
"def",
"decode",
"(",
"self",
",",
"charset",
"=",
"'utf-8'",
",",
"errors",
"=",
"'replace'",
")",
":",
"return",
"URL",
"(",
"self",
".",
"scheme",
".",
"decode",
"(",
"'ascii'",
")",
",",
"self",
".",
"decode_netloc",
"(",
")",
",",
"self",
".",
"path",
".",
"decode",
"(",
"charset",
",",
"errors",
")",
",",
"self",
".",
"query",
".",
"decode",
"(",
"charset",
",",
"errors",
")",
",",
"self",
".",
"fragment",
".",
"decode",
"(",
"charset",
",",
"errors",
")",
")"
] |
Decodes the URL to a tuple made out of strings. The charset is
only being used for the path, query and fragment.
|
[
"Decodes",
"the",
"URL",
"to",
"a",
"tuple",
"made",
"out",
"of",
"strings",
".",
"The",
"charset",
"is",
"only",
"being",
"used",
"for",
"the",
"path",
"query",
"and",
"fragment",
"."
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/urls.py#L270-L280
|
train
|
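Usage sketch for the `BytesURL.decode` record above, assuming the vendored werkzeug: url_parse on a bytes URL yields a BytesURL, and .decode() converts every component to text in one step without unquoting anything.

from werkzeug.urls import url_parse  # assumed to return BytesURL for bytes input

url = url_parse(b'http://example.com/p%20ath?q=1#frag').decode()
assert url.scheme == 'http'
assert url.path == '/p%20ath'   # decode() changes the type, it does not unquote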
limodou/uliweb
|
uliweb/lib/werkzeug/contrib/iterio.py
|
_mixed_join
|
def _mixed_join(iterable, sentinel):
"""concatenate any string type in an intelligent way."""
iterator = iter(iterable)
first_item = next(iterator, sentinel)
if isinstance(first_item, bytes):
return first_item + b''.join(iterator)
return first_item + u''.join(iterator)
|
python
|
def _mixed_join(iterable, sentinel):
"""concatenate any string type in an intelligent way."""
iterator = iter(iterable)
first_item = next(iterator, sentinel)
if isinstance(first_item, bytes):
return first_item + b''.join(iterator)
return first_item + u''.join(iterator)
|
[
"def",
"_mixed_join",
"(",
"iterable",
",",
"sentinel",
")",
":",
"iterator",
"=",
"iter",
"(",
"iterable",
")",
"first_item",
"=",
"next",
"(",
"iterator",
",",
"sentinel",
")",
"if",
"isinstance",
"(",
"first_item",
",",
"bytes",
")",
":",
"return",
"first_item",
"+",
"b''",
".",
"join",
"(",
"iterator",
")",
"return",
"first_item",
"+",
"u''",
".",
"join",
"(",
"iterator",
")"
] |
concatenate any string type in an intelligent way.
|
[
"concatenate",
"any",
"string",
"type",
"in",
"an",
"intelligent",
"way",
"."
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/contrib/iterio.py#L50-L56
|
train
|
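The trick in the `_mixed_join` record above is that next(iterator, sentinel) both peeks at the first item to choose bytes vs. text joining and supplies the result for an empty iterable. Restated below so the checks run standalone:

def mixed_join(iterable, sentinel):
    # Join bytes with bytes and text with text, keyed off the first item;
    # the sentinel doubles as the result for an empty iterable.
    iterator = iter(iterable)
    first_item = next(iterator, sentinel)
    if isinstance(first_item, bytes):
        return first_item + b''.join(iterator)
    return first_item + u''.join(iterator)

assert mixed_join([b'a', b'b'], b'') == b'ab'
assert mixed_join(['a', 'b'], '') == 'ab'
assert mixed_join(iter([]), '') == ''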
limodou/uliweb
|
uliweb/lib/werkzeug/contrib/iterio.py
|
IterO._buf_append
|
def _buf_append(self, string):
'''Replace string directly without appending to an empty string,
avoiding type issues.'''
if not self._buf:
self._buf = string
else:
self._buf += string
|
python
|
def _buf_append(self, string):
'''Replace string directly without appending to an empty string,
avoiding type issues.'''
if not self._buf:
self._buf = string
else:
self._buf += string
|
[
"def",
"_buf_append",
"(",
"self",
",",
"string",
")",
":",
"if",
"not",
"self",
".",
"_buf",
":",
"self",
".",
"_buf",
"=",
"string",
"else",
":",
"self",
".",
"_buf",
"+=",
"string"
] |
Replace string directly without appending to an empty string,
avoiding type issues.
|
[
"Replace",
"string",
"directly",
"without",
"appending",
"to",
"an",
"empty",
"string",
"avoiding",
"type",
"issues",
"."
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/contrib/iterio.py#L231-L237
|
train
|
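Why the empty-buffer branch in the `_buf_append` record above matters: if the buffer started life as an empty str, appending bytes would raise a TypeError, so the first chunk replaces the buffer outright and thereby fixes its type. A minimal illustration with a hypothetical Buf class:

class Buf:
    def __init__(self):
        self._buf = None

    def append(self, chunk):
        if not self._buf:
            self._buf = chunk       # adopt the chunk's type (str or bytes)
        else:
            self._buf += chunk      # later chunks must match that type

b = Buf()
b.append(b'hello ')
b.append(b'world')
assert b._buf == b'hello world'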
limodou/uliweb
|
uliweb/lib/werkzeug/http.py
|
quote_etag
|
def quote_etag(etag, weak=False):
"""Quote an etag.
:param etag: the etag to quote.
:param weak: set to `True` to tag it "weak".
"""
if '"' in etag:
raise ValueError('invalid etag')
etag = '"%s"' % etag
if weak:
etag = 'w/' + etag
return etag
|
python
|
def quote_etag(etag, weak=False):
"""Quote an etag.
:param etag: the etag to quote.
:param weak: set to `True` to tag it "weak".
"""
if '"' in etag:
raise ValueError('invalid etag')
etag = '"%s"' % etag
if weak:
etag = 'w/' + etag
return etag
|
[
"def",
"quote_etag",
"(",
"etag",
",",
"weak",
"=",
"False",
")",
":",
"if",
"'\"'",
"in",
"etag",
":",
"raise",
"ValueError",
"(",
"'invalid etag'",
")",
"etag",
"=",
"'\"%s\"'",
"%",
"etag",
"if",
"weak",
":",
"etag",
"=",
"'w/'",
"+",
"etag",
"return",
"etag"
] |
Quote an etag.
:param etag: the etag to quote.
:param weak: set to `True` to tag it "weak".
|
[
"Quote",
"an",
"etag",
"."
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/http.py#L582-L593
|
train
|
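Round-trip sketch for the `quote_etag` record above, assuming the vendored werkzeug.http helpers; note the lowercase 'w/' weak prefix matches this copy (current werkzeug releases emit 'W/'):

from werkzeug.http import quote_etag, unquote_etag  # assumed vendored API

assert quote_etag('abc') == '"abc"'
assert quote_etag('abc', weak=True) == 'w/"abc"'
assert unquote_etag('w/"abc"') == ('abc', True)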
limodou/uliweb
|
uliweb/lib/werkzeug/http.py
|
parse_etags
|
def parse_etags(value):
"""Parse an etag header.
:param value: the tag header to parse
:return: an :class:`~werkzeug.datastructures.ETags` object.
"""
if not value:
return ETags()
strong = []
weak = []
end = len(value)
pos = 0
while pos < end:
match = _etag_re.match(value, pos)
if match is None:
break
is_weak, quoted, raw = match.groups()
if raw == '*':
return ETags(star_tag=True)
elif quoted:
raw = quoted
if is_weak:
weak.append(raw)
else:
strong.append(raw)
pos = match.end()
return ETags(strong, weak)
|
python
|
def parse_etags(value):
"""Parse an etag header.
:param value: the tag header to parse
:return: an :class:`~werkzeug.datastructures.ETags` object.
"""
if not value:
return ETags()
strong = []
weak = []
end = len(value)
pos = 0
while pos < end:
match = _etag_re.match(value, pos)
if match is None:
break
is_weak, quoted, raw = match.groups()
if raw == '*':
return ETags(star_tag=True)
elif quoted:
raw = quoted
if is_weak:
weak.append(raw)
else:
strong.append(raw)
pos = match.end()
return ETags(strong, weak)
|
[
"def",
"parse_etags",
"(",
"value",
")",
":",
"if",
"not",
"value",
":",
"return",
"ETags",
"(",
")",
"strong",
"=",
"[",
"]",
"weak",
"=",
"[",
"]",
"end",
"=",
"len",
"(",
"value",
")",
"pos",
"=",
"0",
"while",
"pos",
"<",
"end",
":",
"match",
"=",
"_etag_re",
".",
"match",
"(",
"value",
",",
"pos",
")",
"if",
"match",
"is",
"None",
":",
"break",
"is_weak",
",",
"quoted",
",",
"raw",
"=",
"match",
".",
"groups",
"(",
")",
"if",
"raw",
"==",
"'*'",
":",
"return",
"ETags",
"(",
"star_tag",
"=",
"True",
")",
"elif",
"quoted",
":",
"raw",
"=",
"quoted",
"if",
"is_weak",
":",
"weak",
".",
"append",
"(",
"raw",
")",
"else",
":",
"strong",
".",
"append",
"(",
"raw",
")",
"pos",
"=",
"match",
".",
"end",
"(",
")",
"return",
"ETags",
"(",
"strong",
",",
"weak",
")"
] |
Parse an etag header.
:param value: the tag header to parse
:return: an :class:`~werkzeug.datastructures.ETags` object.
|
[
"Parse",
"an",
"etag",
"header",
"."
] |
34472f25e4bc0b954a35346672f94e84ef18b076
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/http.py#L619-L645
|
train
|
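Usage sketch for the `parse_etags` record above, assuming the vendored werkzeug: membership on the returned ETags object checks strong tags, contains_weak checks weak ones, and a bare '*' header sets star_tag.

from werkzeug.http import parse_etags  # assumed vendored API

etags = parse_etags('W/"weak-tag", "strong-tag"')
assert 'strong-tag' in etags           # membership covers strong tags only
assert etags.contains_weak('weak-tag')
assert parse_etags('*').star_tag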