docstring (string, 52-499 chars) | function (string, 67-35.2k chars) | __index_level_0__ (int64, 52.6k-1.16M) |
|---|---|---|
Deserializes a `tf.SparseTensor` from `sparse_tensor_proto`.
Args:
sparse_tensor_proto: A proto representing a `tf.SparseTensor`.
process_leafs: A function to be applied to the leaf values of the nested
structure.
Returns:
An instance of `tf.SparseTensor`. | def _from_proto_sparse_tensor(sparse_tensor_proto, process_leafs):
if not sparse_tensor_proto.HasField("named_tuple"):
raise base_errors.ModuleInfoError(
"Error while deserializing a SparseTensor: expected proto tuple.")
if sparse_tensor_proto.named_tuple.name != _SPARSE_TENSOR_NAME:
raise base_e... | 101,356 |
Serializes `nested_value` into `nested_proto`.
Args:
nested_value: A nested Python value.
nested_proto: A `module_pb2.NestedData` instance to be filled from the value
in `nested_value`.
process_leafs: A function to be applied to the leaf values of the nested
structure.
already_processed: ... | def _nested_to_proto(nested_value, nested_proto, process_leafs,
already_processed):
if not isinstance(nested_proto, module_pb2.NestedData):
raise base_errors.ModuleInfoError("Expected module_pb2.NestedData.")
# If this object was already processed, mark as "unserializable"
# to avoid ... | 101,357 |
Serializes `module_info`.
Args:
module_info: An instance of `ModuleInfo`.
export_scope: Optional `string`. Name scope to remove.
Returns:
An instance of `module_pb2.SonnetModule`. | def _module_info_to_proto(module_info, export_scope=None):
def strip_name_scope(name_scope):
return ops.strip_name_scope(name_scope, export_scope)
def process_leafs(value):
return strip_name_scope(_graph_element_to_path(value))
module_info_def = module_pb2.SonnetModule(
module_name=module_info.mo... | 101,358 |
Deserializes `nested_proto`.
Args:
nested_proto: An instance of `module_pb2.NestedData`.
process_leafs: A function to be applied to the leaf values of the nested
structure.
Returns:
An instance of `string`, `tuple`, `dict` or `namedtuple`.
Raises:
base_errors.ModuleInfoError: If the proto... | def _nested_from_proto(nested_proto, process_leafs):
if not isinstance(nested_proto, module_pb2.NestedData):
raise base_errors.ModuleInfoError("Expected module_pb2.NestedData.")
if nested_proto.HasField("value"):
value = nested_proto.value
if not value:
value = _UnserializableObject()
else... | 101,359 |
Deserializes `module_info_def` proto.
Args:
module_info_def: An instance of `module_pb2.SonnetModule`.
import_scope: Optional `string`. Name scope to use.
Returns:
An instance of `ModuleInfo`.
Raises:
base_errors.ModuleInfoError: If the protobuf is of the wrong type or
if some of its fiel... | def _module_info_from_proto(module_info_def, import_scope=None):
graph = tf.get_default_graph()
def prepend_name_scope(name_scope):
return ops.prepend_name_scope(name_scope, import_scope)
def process_leafs(name):
return _path_to_graph_element(prepend_name_scope(name), graph)
connected_subgraphs = []
... | 101,360 |
Deserializes the `module_info_def` proto without raising exceptions.
Args:
module_info_def: An instance of `module_pb2.SonnetModule`.
import_scope: Optional `string`. Name scope to use.
Returns:
An instance of `ModuleInfo`. | def _module_info_from_proto_safe(module_info_def, import_scope=None):
try:
return _module_info_from_proto(module_info_def, import_scope)
except Exception as e: # pylint: disable=broad-except
logging.warning(
"Error encountered when deserializing sonnet ModuleInfo:\n%s", str(e))
return None | 101,361 |
Builds the deep LSTM model sub-graph.
Args:
one_hot_input_sequence: A Tensor with the input sequence encoded as a
one-hot representation. Its dimensions should be `[truncation_length,
batch_size, output_size]`.
Returns:
Tuple of the Tensor of output logits for the batch, with dimen... | def _build(self, one_hot_input_sequence):
input_shape = one_hot_input_sequence.get_shape()
batch_size = input_shape[1]
batch_embed_module = snt.BatchApply(self._embed_module)
input_sequence = batch_embed_module(one_hot_input_sequence)
input_sequence = tf.nn.relu(input_sequence)
initial_s... | 101,367 |
Builds sub-graph to generate a string, sampled from the model.
Args:
initial_logits: Starting logits to sample from.
initial_state: Starting state for the RNN core.
sequence_length: Number of characters to sample.
Returns:
A Tensor of characters, with dimensions `[sequence_length, batc... | def generate_string(self, initial_logits, initial_state, sequence_length):
current_logits = initial_logits
current_state = initial_state
generated_letters = []
for _ in range(sequence_length):
# Sample a character index from distribution.
char_index = tf.squeeze(tf.multinomial(current... | 101,368 |
Connects the core to the graph.
Args:
x: Input `Tensor` of shape `(batch_size, input_size)`.
prev_state: Previous state. This could be a `Tensor`, or a tuple of
`Tensor`s.
Returns:
The tuple `(output, state)` for this core.
Raises:
ValueError: if the `Tensor` `x` does no... | def _build(self, x, prev_state):
x.get_shape().with_rank(2)
self._batch_size = x.get_shape().as_list()[0]
self._dtype = x.dtype
x_zeros = tf.concat(
[x, tf.zeros(
shape=(self._batch_size, 1), dtype=self._dtype)], 1)
x_ones = tf.concat(
[x, tf.ones(
shape... | 101,380 |
Calculate a reasonable embedding size for a vocabulary.
Rule of thumb is 6 * 4th root of vocab_size.
Args:
vocab_size: Size of the input vocabulary.
Returns:
The embedding size to use.
Raises:
ValueError: if `vocab_size` is invalid. | def _embedding_dim(vocab_size):
if not vocab_size or (vocab_size <= 0):
raise ValueError("Invalid vocab_size %g." % vocab_size)
return int(round(6.0 * math.sqrt(math.sqrt(vocab_size)))) | 101,382 |
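A self-contained check of the rule of thumb in the row above; the function body is copied from that row and the example vocabulary sizes are illustrative.

```python
import math

def embedding_dim(vocab_size):
    # Rule of thumb from the row above: 6 * vocab_size ** 0.25, rounded.
    if not vocab_size or vocab_size <= 0:
        raise ValueError("Invalid vocab_size %g." % vocab_size)
    return int(round(6.0 * math.sqrt(math.sqrt(vocab_size))))

print(embedding_dim(256))     # 256 ** 0.25 == 4   -> 24
print(embedding_dim(10000))   # 10000 ** 0.25 == 10 -> 60
```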
Lookup embeddings.
Looks up an embedding vector for each value in `ids`. All ids must be within
[0, vocab_size), else an `InvalidArgumentError` is raised at runtime.
Args:
ids: Tensor of dtype int64.
Returns:
Tensor of tf.shape(ids) + [embedding_dim] and dtype float32. | def _build(self, ids):
# Construct embeddings.
if self._existing_vocab is None:
if self.EMBEDDINGS not in self._initializers:
self._initializers[self.EMBEDDINGS] = tf.initializers.random_normal()
self._embeddings = tf.get_variable(
"embeddings",
shape=[self._vocab_si... | 101,384 |
Assembles the module network and adds it to the graph.
The internal computation graph is assembled according to the set of
constraints provided at construction time.
Args:
inputs: Tensor containing a batch of transformation parameters.
Returns:
A batch of warped grids.
Raises:
... | def _build(self, inputs):
input_shape = tf.shape(inputs)
input_dtype = inputs.dtype.as_numpy_dtype
batch_size = tf.expand_dims(input_shape[0], 0)
number_of_params = inputs.get_shape()[1]
if number_of_params != self._constraints.num_free_params:
raise base.Error('Input size is not consiste... | 101,389 |
Find files matching the given names relative to the given path.
Args:
path (str): The file path to start searching up from.
names (List[str]): The file/directory names to look for.
root (str): The directory at which to stop recursing upwards.
Note:
The path MUST be within the r... | def find_parents(root, path, names):
if not root:
return []
if not os.path.commonprefix((root, path)):
log.warning("Path %s not in %s", path, root)
return []
# Split the relative path by directory, generate all the parent directories, then check each of them.
# This avoids runn... | 102,577 |
Create a new multiprocessing.Manager (or return existing one).
Args:
:authkey: string authorization key
:queues: *INTERNAL_USE*
:mode: 'local' indicates that the manager will only be accessible from the same host, otherwise remotely accessible.
Returns:
A TFManager instance, which is also cached in l... | def start(authkey, queues, mode='local'):
global mgr, qdict, kdict
qdict.clear()
kdict.clear()
for q in queues:
qdict[q] = JoinableQueue()
TFManager.register('get_queue', callable=lambda qname: _get_queue(qname))
TFManager.register('get', callable=lambda key: _get(key))
TFManager.register('set', c... | 103,040 |
Connect to a multiprocessing.Manager.
Args:
:address: unique address to the TFManager, either a unique connection string for 'local', or a (host, port) tuple for remote.
:authkey: string authorization key
Returns:
A TFManager instance referencing the remote TFManager at the supplied address. | def connect(address, authkey):
TFManager.register('get_queue')
TFManager.register('get')
TFManager.register('set')
m = TFManager(address, authkey=authkey)
m.connect()
return m | 103,041 |
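A hedged sketch pairing the `start` and `connect` functions above (TensorFlowOnSpark's `TFManager` module is assumed to be importable as shown); the authorization key and queue names are illustrative, not library defaults.

```python
from tensorflowonspark import TFManager

authkey = b'not-a-real-key'                     # illustrative key
mgr = TFManager.start(authkey, queues=['input', 'output'], mode='local')

# From another process on the same host, reconnect and fetch a shared queue:
remote = TFManager.connect(mgr.address, authkey)
input_queue = remote.get_queue('input')
```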
Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing | def _activation_summary(x):
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(ten... | 103,048 |
Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor | def _variable_on_cpu(name, shape, initializer):
with tf.device('/cpu:0'):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var | 103,049 |
Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: ad... | def _variable_with_weight_decay(name, shape, stddev, wd):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weig... | 103,050 |
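The tail of `_variable_with_weight_decay` is truncated above; this is a hedged reconstruction of the weight-decay pattern it describes, reusing `_variable_on_cpu` from the previous row. The shape and decay factor are illustrative.

```python
import tensorflow as tf  # assumes the TF1-style graph API used throughout

var = _variable_on_cpu('weights', [5, 5, 3, 64],
                       tf.truncated_normal_initializer(stddev=5e-2))
wd = 0.004  # illustrative decay factor
if wd is not None:
    # Scale the variable's L2 norm by wd and stash it in the 'losses'
    # collection so the total loss can pick it up later.
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
```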
Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits. | def inference(images):
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variab... | 103,052 |
Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float. | def loss(logits, labels):
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, na... | 103,053 |
Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses. | def _add_loss_summaries(total_loss):
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to ... | 103,054 |
Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training. | def train(total_loss, global_step):
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponen... | 103,055 |
Adds a variable to the MODEL_VARIABLES collection.
Optionally it will add the variable to the VARIABLES_TO_RESTORE collection.
Args:
var: a variable.
restore: whether the variable should be added to the
VARIABLES_TO_RESTORE collection. | def add_variable(var, restore=True):
collections = [MODEL_VARIABLES]
if restore:
collections.append(VARIABLES_TO_RESTORE)
for collection in collections:
if var not in tf.get_collection(collection):
tf.add_to_collection(collection, var) | 103,057 |
Gets the list of variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a copied list of variables with scope and suffix. | def get_variables(scope=None, suffix=None):
candidates = tf.get_collection(MODEL_VARIABLES, scope)[:]
if suffix is not None:
candidates = [var for var in candidates if var.op.name.endswith(suffix)]
return candidates | 103,058 |
Gets the variable uniquely identified by that name.
Args:
name: a name that uniquely identifies the variable.
Returns:
a tensorflow variable.
Raises:
ValueError: if no variable uniquely identified by the name exists. | def get_unique_variable(name):
candidates = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, name)
if not candidates:
raise ValueError('Couldnt find variable %s' % name)
for candidate in candidates:
if candidate.op.name == name:
return candidate
raise ValueError('Variable %s does not uniquely ... | 103,059 |
Returns the global step variable.
Args:
device: Optional device to place the variable. It can be an string or a
function that is called to get the device for the variable.
Returns:
the tensor representing the global step variable. | def global_step(device=''):
global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)
if global_step_ref:
return global_step_ref[0]
else:
collections = [
VARIABLES_TO_RESTORE,
tf.GraphKeys.GLOBAL_VARIABLES,
tf.GraphKeys.GLOBAL_STEP,
]
# Get the device for the variabl... | 103,061 |
Initialize VariableDeviceChooser.
Args:
num_parameter_servers: number of parameter servers.
ps_device: string representing the parameter server device.
placement: string representing the placement of the variable either CPU:0
or GPU:0. When using parameter servers forced to CPU:0. | def __init__(self,
num_parameter_servers=0,
ps_device='/job:ps',
placement='CPU:0'):
self._num_ps = num_parameter_servers
self._ps_device = ps_device
self._placement = placement if num_parameter_servers == 0 else 'CPU:0'
self._next_task_id = 0 | 103,063 |
Runs Eval once.
Args:
saver: Saver.
summary_writer: Summary writer.
top_1_op: Top 1 op.
top_5_op: Top 5 op.
summary_op: Summary op. | def _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op):
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
print("ckpt.model_checkpoint_path: {0}".format(ckpt.model_checkpoint_path))
saver.restore(sess, ck... | 103,067 |
mapPartitions function to run single-node inferencing from a checkpoint/saved_model, using the model's input/output mappings.
Args:
:iterator: input RDD partition iterator.
:args: arguments for TFModel, in argparse format
:tf_args: arguments for TensorFlow inferencing code, in argparse or ARGV format.
... | def _run_model(iterator, args, tf_args):
single_node_env(tf_args)
logging.info("===== input_mapping: {}".format(args.input_mapping))
logging.info("===== output_mapping: {}".format(args.output_mapping))
input_tensor_names = [tensor for col, tensor in sorted(args.input_mapping.items())]
output_tensor_names ... | 103,069 |
Sets up environment for a single-node TF session.
Args:
:args: command line arguments as either argparse args or argv list | def single_node_env(args):
# setup ARGV for the TF process
if isinstance(args, list):
sys.argv = args
elif args.argv:
sys.argv = args.argv
# setup ENV for Hadoop-compatibility and/or GPU allocation
num_gpus = args.num_gpus if 'num_gpus' in args else 1
util.single_node_env(num_gpus) | 103,070 |
Utility function to read a meta_graph_def from disk.
From `saved_model_cli.py <https://github.com/tensorflow/tensorflow/blob/8e0e8d41a3a8f2d4a6100c2ea1dc9d6c6c4ad382/tensorflow/python/tools/saved_model_cli.py#L186>`_
Args:
:saved_model_dir: path to saved_model.
:tag_set: list of string tags identifying th... | def get_meta_graph_def(saved_model_dir, tag_set):
saved_model = reader.read_saved_model(saved_model_dir)
set_of_tags = set(tag_set.split(','))
for meta_graph_def in saved_model.meta_graphs:
if set(meta_graph_def.meta_info_def.tags) == set_of_tags:
return meta_graph_def
raise RuntimeError("MetaGraph... | 103,071 |
Generator that yields batches of a DataFrame iterator.
Args:
:iterable: Spark partition iterator.
:batch_size: number of items to retrieve per invocation.
:num_tensors: number of tensors (columns) expected in each item.
Returns:
An array of ``num_tensors`` arrays, each of length `batch_size` | def yield_batch(iterable, batch_size, num_tensors=1):
tensors = [[] for i in range(num_tensors)]
for item in iterable:
if item is None:
break
for i in range(num_tensors):
tmp = str(item[i]) if type(item[i]) is bytearray else item[i]
tensors[i].append(tmp)
if len(tensors[0]) >= batch... | 103,072 |
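The generator body above is cut off mid-condition; below is a hedged, self-contained reconstruction (the trailing partial-batch flush is an assumption) together with a tiny usage example.

```python
def yield_batch(iterable, batch_size, num_tensors=1):
    tensors = [[] for _ in range(num_tensors)]
    for item in iterable:
        if item is None:              # a None item signals end-of-feed
            break
        for i in range(num_tensors):
            tmp = str(item[i]) if type(item[i]) is bytearray else item[i]
            tensors[i].append(tmp)
        if len(tensors[0]) >= batch_size:
            yield tensors
            tensors = [[] for _ in range(num_tensors)]
    if len(tensors[0]) > 0:           # assumed: emit the trailing partial batch
        yield tensors

rows = [(1, 'a'), (2, 'b'), (3, 'c')]
for batch in yield_batch(iter(rows), batch_size=2, num_tensors=2):
    print(batch)                      # [[1, 2], ['a', 'b']] then [[3], ['c']]
```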
Trains a TensorFlow model and returns a TFModel instance with the same args/params pointing to a checkpoint or saved_model on disk.
Args:
:dataset: A Spark DataFrame with columns that will be mapped to TensorFlow tensors.
Returns:
A TFModel representing the trained model, backed on disk by a Tenso... | def _fit(self, dataset):
sc = SparkContext.getOrCreate()
logging.info("===== 1. train args: {0}".format(self.args))
logging.info("===== 2. train params: {0}".format(self._paramMap))
local_args = self.merge_args_params()
logging.info("===== 3. train args + params: {0}".format(local_args))
... | 103,078 |
Transforms the input DataFrame by applying the _run_model() mapPartitions function.
Args:
:dataset: A Spark DataFrame for TensorFlow inferencing. | def _transform(self, dataset):
spark = SparkSession.builder.getOrCreate()
# set a deterministic order for input/output columns (lexicographic by key)
input_cols = [col for col, tensor in sorted(self.getInputMapping().items())] # input col => input tensor
output_cols = [col for tensor, col in ... | 103,080 |
Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, im... | def _process_image(filename, coder):
# Read the image file.
with tf.gfile.FastGFile(filename, 'r') as f:
image_data = f.read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif... | 103,090 |
Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset. | def _find_human_readable_labels(synsets, synset_to_human):
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans | 103,091 |
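A self-contained usage sketch of the lookup above; the two-entry mapping stands in for the real WordNet-ID-to-human-label metadata.

```python
def _find_human_readable_labels(synsets, synset_to_human):
    humans = []
    for s in synsets:
        assert s in synset_to_human, ('Failed to find: %s' % s)
        humans.append(synset_to_human[s])
    return humans

synset_to_human = {'n02119022': 'red fox, Vulpes vulpes',
                   'n02510455': 'giant panda'}  # illustrative entries
print(_find_human_readable_labels(['n02510455', 'n02119022'], synset_to_human))
# -> ['giant panda', 'red fox, Vulpes vulpes']
```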
Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
List of bounding boxes for each image. Note th... | def _find_image_bounding_boxes(filenames, image_to_bboxes):
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d imag... | 103,092 |
Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red... | def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
... | 103,093 |
Construct distorted input for CIFAR training using the Reader ops.
Args:
data_dir: Path to the CIFAR-10 data directory.
batch_size: Number of images per batch.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size. | def distorted_inputs(data_dir, batch_size):
filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
for i in xrange(1, 6)]
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError('Failed to find file: ' + f)
# Create a queue that produces the filenames to read.
filename... | 103,105 |
Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
data_dir: Path to the CIFAR-10 data directory.
batch_size: Number of images per batch.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZ... | def inputs(eval_data, data_dir, batch_size):
if not eval_data:
filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
for i in xrange(1, 6)]
num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
else:
filenames = [os.path.join(data_dir, 'test_batch.bin')]
num_examples_p... | 103,106 |
Save a Spark DataFrame as TFRecords.
This will convert the DataFrame rows to TFRecords prior to saving.
Args:
:df: Spark DataFrame
:output_dir: Path to save TFRecords | def saveAsTFRecords(df, output_dir):
tf_rdd = df.rdd.mapPartitions(toTFExample(df.dtypes))
tf_rdd.saveAsNewAPIHadoopFile(output_dir, "org.tensorflow.hadoop.io.TFRecordFileOutputFormat",
keyClass="org.apache.hadoop.io.BytesWritable",
valueClass="org.... | 103,108 |
Construct all necessary functions and call run_loop.
Args:
flags_obj: Object containing user specified flags. | def run_census(flags_obj, ctx):
train_file = os.path.join(flags_obj.data_dir, census_dataset.TRAINING_FILE)
test_file = os.path.join(flags_obj.data_dir, census_dataset.EVAL_FILE)
# Train and evaluate the model every `flags.epochs_between_evals` epochs.
def train_input_fn():
return census_dataset.input_f... | 103,116 |
Yields the scope with the default parameters for inception_v3.
Args:
weight_decay: the weight decay for weights variables.
stddev: standard deviation of the truncated Gaussian weight distribution.
batch_norm_decay: decay for the moving average of batch_norm momentums.
batch_norm_epsilon: small float ... | def inception_v3_parameters(weight_decay=0.00004, stddev=0.1,
batch_norm_decay=0.9997, batch_norm_epsilon=0.001):
# Set weight_decay for weights in Conv and FC layers.
with scopes.arg_scope([ops.conv2d, ops.fc],
weight_decay=weight_decay):
# Set stddev, act... | 103,119 |
Define an L1 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function. | def l1_regularizer(weight=1.0, scope=None):
def regularizer(tensor):
with tf.name_scope(scope, 'L1Regularizer', [tensor]):
l1_weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='weight')
return tf.m... | 103,123 |
Define an L2 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function. | def l2_regularizer(weight=1.0, scope=None):
def regularizer(tensor):
with tf.name_scope(scope, 'L2Regularizer', [tensor]):
l2_weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='weight')
return tf.m... | 103,124 |
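A hedged usage sketch for the factory above: the returned closure is applied to a variable and the resulting scalar is typically collected so it can be summed into the total loss. The variable name, shape, and decay weight are illustrative.

```python
import tensorflow as tf  # TF1-style graph API, as elsewhere in this file

regularizer = l2_regularizer(weight=0.0004)
weights = tf.get_variable(
    'fc_weights', shape=[256, 10],
    initializer=tf.truncated_normal_initializer(stddev=0.01))
penalty = regularizer(weights)            # scalar regularization term
tf.add_to_collection('losses', penalty)   # summed into the total loss later
```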
Define an L1L2 regularizer.
Args:
weight_l1: scale the L1 loss by this factor.
weight_l2: scale the L2 loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function. | def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
def regularizer(tensor):
with tf.name_scope(scope, 'L1L2Regularizer', [tensor]):
weight_l1_t = tf.convert_to_tensor(weight_l1,
dtype=tensor.dtype.base_dtype,
... | 103,125 |
Define an L1Loss, useful for regularization, e.g. lasso.
Args:
tensor: tensor to regularize.
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
the L1 loss op. | def l1_loss(tensor, weight=1.0, scope=None):
with tf.name_scope(scope, 'L1Loss', [tensor]):
weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='loss_weight')
loss = tf.multiply(weight, tf.reduce_sum(tf.abs(tensor)), ... | 103,126 |
Define an L2Loss, useful for regularization, e.g. weight decay.
Args:
tensor: tensor to regularize.
weight: an optional weight to modulate the loss.
scope: Optional scope for name_scope.
Returns:
the L2 loss op. | def l2_loss(tensor, weight=1.0, scope=None):
with tf.name_scope(scope, 'L2Loss', [tensor]):
weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='loss_weight')
loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='va... | 103,127 |
Decorates a function with args so it can be used within an arg_scope.
Args:
func: function to decorate.
Returns:
A tuple with the decorated function func_with_args(). | def add_arg_scope(func):
@functools.wraps(func)
def func_with_args(*args, **kwargs):
current_scope = _current_arg_scope()
current_args = kwargs
key_func = (func.__module__, func.__name__)
if key_func in current_scope:
current_args = current_scope[key_func].copy()
current_args.update(k... | 103,132 |
Returns this executor's "singleton" instance of the multiprocessing.Manager, reconnecting per python-worker if needed.
Args:
:cluster_info: cluster node reservations
:host: host IP address
:executor_id: unique id per executor (created during initial call to run())
Returns:
TFManager instance for t... | def _get_manager(cluster_info, host, executor_id):
for node in cluster_info:
if node['host'] == host and node['executor_id'] == executor_id:
addr = node['addr']
authkey = node['authkey']
TFSparkNode.mgr = TFManager.connect(addr, authkey)
break
if TFSparkNode.mgr is None:
msg = "N... | 103,134 |
Feeds Spark partitions into the shared multiprocessing.Queue.
Args:
:cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc)
:cluster_meta: dictionary of cluster metadata (e.g. cluster_id, reservation.Server address, etc)
:feed_timeout: number of seconds after... | def train(cluster_info, cluster_meta, feed_timeout=600, qname='input'):
def _train(iter):
# get shared queue, reconnecting if necessary
mgr = _get_manager(cluster_info, util.get_ip_address(), util.read_executor_id())
try:
queue = mgr.get_queue(qname)
equeue = mgr.get_queue('error')
exce... | 103,136 |
Feeds Spark partitions into the shared multiprocessing.Queue and returns inference results.
Args:
:cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc)
:feed_timeout: number of seconds after which data feeding times out (600 sec default)
:qname: *INTERNAL_U... | def inference(cluster_info, feed_timeout=600, qname='input'):
def _inference(iter):
# get shared queue, reconnecting if necessary
mgr = _get_manager(cluster_info, util.get_ip_address(), util.read_executor_id())
try:
queue_in = mgr.get_queue(qname)
equeue = mgr.get_queue('error')
except ... | 103,137 |
Stops all TensorFlow nodes by feeding ``None`` into the multiprocessing.Queues.
Args:
:cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc).
:queues: *INTERNAL_USE*
Returns:
A nodeRDD.mapPartitions() function | def shutdown(cluster_info, queues=['input']):
def _shutdown(iter):
host = util.get_ip_address()
executor_id = util.read_executor_id()
# reconnect to shared queue
mgr = _get_manager(cluster_info, host, executor_id)
# send SIGTERM to Tensorboard proc (if running)
for node in cluster_info:
... | 103,138 |
Adds all losses for the model.
Note the final loss is not returned. Instead, the list of losses is collected
by slim.losses. The losses are accumulated in tower_loss() and summed to
calculate the total loss.
Args:
logits: List of logits from inference(). Each entry is a 2-D float Tensor.
labels: Labe... | def loss(logits, labels, batch_size=None):
if not batch_size:
batch_size = FLAGS.batch_size
# Reshape the labels into a dense Tensor of
# shape [FLAGS.batch_size, num_classes].
sparse_labels = tf.reshape(labels, [batch_size, 1])
indices = tf.reshape(tf.range(batch_size), [batch_size, 1])
concated = ... | 103,167 |
Convenience function to create a TensorFlow-compatible absolute HDFS path from a relative path
Args:
:ctx: TFNodeContext containing the metadata specific to this node in the cluster.
:path: path to convert
Returns:
An absolute path prefixed with the correct filesystem scheme. | def hdfs_path(ctx, path):
# All Hadoop-Compatible File System Schemes (as of Hadoop 3.0.x):
HADOOP_SCHEMES = ['adl://',
'file://',
'hdfs://',
'oss://',
's3://',
's3a://',
's3n://',
... | 103,168 |
Push a batch of output results to the Spark output RDD of ``TFCluster.inference()``.
Note: this currently expects a one-to-one mapping of input to output data, so the length of the ``results`` array should match the length of
the previously retrieved batch of input data.
Args:
:results: array of out... | def batch_results(self, results):
logging.debug("batch_results() invoked")
queue = self.mgr.get_queue(self.qname_out)
for item in results:
queue.put(item, block=True)
logging.debug("batch_results() returning data") | 103,173 |
Get list of free GPUs according to nvidia-smi.
This will retry for ``MAX_RETRIES`` times until the requested number of GPUs are available.
Args:
:num_gpu: number of GPUs desired.
:worker_index: index "hint" for allocation of available GPUs.
Returns:
Comma-delimited string of GPU ids, or raises an E... | def get_gpus(num_gpu=1, worker_index=-1):
# get list of gpus (index, uuid)
list_gpus = subprocess.check_output(["nvidia-smi", "--list-gpus"]).decode()
logging.debug("all GPUs:\n{0}".format(list_gpus))
# parse index and guid
gpus = [x for x in list_gpus.split('\n') if len(x) > 0]
def parse_gpu(gpu_str):... | 103,182 |
Get available GPUs according to utilization thresholds.
Args:
:max_gpu_utilization: percent utilization threshold to consider a GPU "free"
:min_free_memory: percent free memory to consider a GPU "free"
:num_gpu: number of requested GPUs
Returns:
A tuple of (available_gpus, minimum_free_memory), wh... | def _get_free_gpu(max_gpu_utilization=40, min_free_memory=0.5, num_gpu=1):
def get_gpu_info():
# Get the gpu information
gpu_info = subprocess.check_output(["nvidia-smi", "--format=csv,noheader,nounits", "--query-gpu=index,memory.total,memory.free,memory.used,utilization.gpu"]).decode()
gpu_info = gpu_... | 103,183 |
Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
text: string, unique human-readable, e.g. 'dog'
height: integer, imag... | def _convert_to_example(filename, image_buffer, label, text, height, width):
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspac... | 103,184 |
Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer ident... | def _process_image_files(name, filenames, texts, labels, num_shards):
assert len(filenames) == len(texts)
assert len(filenames) == len(labels)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
f... | 103,186 |
Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
labels_file: string, path to the labels file. | def _process_dataset(name, directory, num_shards, labels_file):
filenames, texts, labels = _find_image_files(directory, labels_file)
_process_image_files(name, filenames, texts, labels, num_shards) | 103,188 |
Decode a JPEG string into one 3-D float image Tensor.
Args:
image_buffer: scalar string Tensor.
scope: Optional scope for name_scope.
Returns:
3-D float Tensor with values ranging from [0, 1). | def decode_jpeg(image_buffer, scope=None):
with tf.name_scope(values=[image_buffer], name=scope,
default_name='decode_jpeg'):
# Decode the string as an RGB JPEG.
# Note that the resulting image contains an unknown height and width
# that is set dynamically by decode_jpeg. In other ... | 103,193 |
Distort the color of the image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:... | def distort_color(image, thread_id=0, scope=None):
with tf.name_scope(values=[image], name=scope, default_name='distort_color'):
color_ordering = thread_id % 2
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lowe... | 103,194 |
Prepare one image for evaluation.
Args:
image: 3-D float Tensor
height: integer
width: integer
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of prepared image. | def eval_image(image, height, width, scope=None):
with tf.name_scope(values=[image, height, width], name=scope,
default_name='eval_image'):
# Crop the central region of the image with an area containing 87.5% of
# the original image.
image = tf.image.central_crop(image, central_fra... | 103,196 |
Export to SavedModel format.
Args:
model: Estimator object
model_type: string indicating model type. "wide", "deep" or "wide_deep"
export_dir: directory to export the model.
model_column_fn: Function to generate model feature columns. | def export_model(model, model_type, export_dir, model_column_fn):
wide_columns, deep_columns = model_column_fn()
if model_type == 'wide':
columns = wide_columns
elif model_type == 'deep':
columns = deep_columns
else:
columns = wide_columns + deep_columns
feature_spec = tf.feature_column.make_pa... | 103,229 |
Converts `int_or_tuple` to height, width.
Several of the functions that follow accept arguments as either
a tuple of 2 integers or a single integer. A single integer
indicates that the 2 values of the tuple are the same.
This function normalizes the input value by always returning a tuple.
Args:
int_... | def _two_element_tuple(int_or_tuple):
if isinstance(int_or_tuple, (list, tuple)):
if len(int_or_tuple) != 2:
raise ValueError('Must be a list with 2 elements: %s' % int_or_tuple)
return int(int_or_tuple[0]), int(int_or_tuple[1])
if isinstance(int_or_tuple, int):
return int(int_or_tuple), int(in... | 103,235 |
Transform numeric labels into onehot_labels.
Args:
labels: [batch_size] target labels.
num_classes: total number of classes.
scope: Optional scope for name_scope.
Returns:
one hot encoding of the labels. | def one_hot_encoding(labels, num_classes, scope=None):
with tf.name_scope(scope, 'OneHotEncoding', [labels]):
batch_size = labels.get_shape()[0]
indices = tf.expand_dims(tf.range(0, batch_size), 1)
labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
concated = tf.concat(axis=1, values=[indic... | 103,238 |
Returns a dropout layer applied to the input.
Args:
inputs: the tensor to pass to the Dropout layer.
keep_prob: the probability of keeping each input unit.
is_training: whether or not the model is in training mode. If so, dropout is
applied and values scaled. Otherwise, inputs is returned.
scope:... | def dropout(inputs, keep_prob=0.5, is_training=True, scope=None):
if is_training and keep_prob > 0:
with tf.name_scope(scope, 'Dropout', [inputs]):
return tf.nn.dropout(inputs, keep_prob)
else:
return inputs | 103,240 |
Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: a tensor of size [batch_size, ...].
scope: Optional scope for name_scope.
Returns:
a flattened tensor with shape [batch_size, k].
Raises:
ValueError: if inputs.shape is ... | def flatten(inputs, scope=None):
if len(inputs.get_shape()) < 2:
raise ValueError('Inputs must have at least 2 dimensions')
dims = inputs.get_shape()[1:]
k = dims.num_elements()
with tf.name_scope(scope, 'Flatten', [inputs]):
return tf.reshape(inputs, [-1, k]) | 103,241 |
Constructor.
Args:
channel: A grpc.Channel. | def __init__(self, channel):
self.CheckConfig = channel.unary_unary(
'/pulumirpc.ResourceProvider/CheckConfig',
request_serializer=provider__pb2.CheckRequest.SerializeToString,
response_deserializer=provider__pb2.CheckResponse.FromString,
)
self.DiffConfig = channel.unary_un... | 103,246 |
Constructor.
Args:
channel: A grpc.Channel. | def __init__(self, channel):
self.Log = channel.unary_unary(
'/pulumirpc.Engine/Log',
request_serializer=engine__pb2.LogRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GetRootResource = channel.unary_unary(
... | 103,249 |
Constructor.
Args:
channel: A grpc.Channel. | def __init__(self, channel):
self.Analyze = channel.unary_unary(
'/pulumirpc.Analyzer/Analyze',
request_serializer=analyzer__pb2.AnalyzeRequest.SerializeToString,
response_deserializer=analyzer__pb2.AnalyzeResponse.FromString,
)
self.GetPluginInfo = channel.unary_unary(
... | 103,261 |
Constructor.
Args:
channel: A grpc.Channel. | def __init__(self, channel):
self.Invoke = channel.unary_unary(
'/pulumirpc.ResourceMonitor/Invoke',
request_serializer=provider__pb2.InvokeRequest.SerializeToString,
response_deserializer=provider__pb2.InvokeResponse.FromString,
)
self.ReadResource = channel.unary_unary(
... | 103,304 |
Constructor.
Args:
channel: A grpc.Channel. | def __init__(self, channel):
self.GetRequiredPlugins = channel.unary_unary(
'/pulumirpc.LanguageRuntime/GetRequiredPlugins',
request_serializer=language__pb2.GetRequiredPluginsRequest.SerializeToString,
response_deserializer=language__pb2.GetRequiredPluginsResponse.FromString,
)... | 103,312 |
Check if the specified event has the specified handler.
Args:
handler (callable): the callable event handler.
event_name: The event the handler attached to. Set this
to ``None`` to search all events. | def has_event_handler(self, handler, event_name=None):
if event_name is not None:
if event_name not in self._event_handlers:
return False
events = [event_name]
else:
events = self._event_handlers
for e in events:
for h, _, ... | 103,322 |
Remove event handler `handler` from registered handlers of the engine
Args:
handler (callable): the callable event handler that should be removed
event_name: The event the handler attached to. | def remove_event_handler(self, handler, event_name):
if event_name not in self._event_handlers:
raise ValueError("Input event name '{}' does not exist".format(event_name))
new_event_handlers = [(h, args, kwargs) for h, args, kwargs in self._event_handlers[event_name]
... | 103,323 |
Decorator shortcut for add_event_handler.
Args:
event_name: An event to attach the handler to. Valid events are from :class:`~ignite.engine.Events` or
any `event_name` added by :meth:`~ignite.engine.Engine.register_events`.
*args: optional args to be passed to `handler`.... | def on(self, event_name, *args, **kwargs):
def decorator(f):
self.add_event_handler(event_name, f, *args, **kwargs)
return f
return decorator | 103,325 |
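A short usage sketch for the decorator above, using the public ignite API; the process function and data are trivial placeholders.

```python
from ignite.engine import Engine, Events

def update(engine, batch):
    return batch                          # placeholder process_function

trainer = Engine(update)

@trainer.on(Events.EPOCH_COMPLETED)
def log_epoch(engine):
    print("finished epoch", engine.state.epoch)

trainer.run([1, 2, 3], max_epochs=2)      # handler fires after each epoch
```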
Runs the process_function over the passed data.
Args:
data (Iterable): Collection of batches allowing repeated iteration (e.g., list or `DataLoader`).
max_epochs (int, optional): max epochs to run for (default: 1).
Returns:
State: output state. | def run(self, data, max_epochs=1):
self.state = State(dataloader=data, epoch=0, max_epochs=max_epochs, metrics={})
try:
self._logger.info("Engine run starting with max_epochs={}.".format(max_epochs))
start_time = time.time()
self._fire_event(Events.STARTED)... | 103,329 |
Calculates accuracy using :class:`~ignite.metrics.ConfusionMatrix` metric.
Args:
cm (ConfusionMatrix): instance of confusion matrix metric
Returns:
MetricsLambda | def cmAccuracy(cm):
# Increase floating point precision
cm = cm.type(torch.float64)
return cm.diag().sum() / (cm.sum() + 1e-15) | 103,359 |
Calculates precision using :class:`~ignite.metrics.ConfusionMatrix` metric.
Args:
cm (ConfusionMatrix): instance of confusion matrix metric
average (bool, optional): if True metric value is averaged over all classes
Returns:
MetricsLambda | def cmPrecision(cm, average=True):
# Increase floating point precision
cm = cm.type(torch.float64)
precision = cm.diag() / (cm.sum(dim=0) + 1e-15)
if average:
return precision.mean()
return precision | 103,360 |
Calculates recall using :class:`~ignite.metrics.ConfusionMatrix` metric.
Args:
cm (ConfusionMatrix): instance of confusion matrix metric
average (bool, optional): if True metric value is averaged over all classes
Returns:
MetricsLambda | def cmRecall(cm, average=True):
# Increase floating point precision
cm = cm.type(torch.float64)
recall = cm.diag() / (cm.sum(dim=1) + 1e-15)
if average:
return recall.mean()
return recall | 103,361 |
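The three ConfusionMatrix helpers compose through ignite's metric arithmetic; a hedged sketch deriving a macro-averaged F1 as a `MetricsLambda` (the epsilon mirrors the ones used above).

```python
from ignite.metrics import ConfusionMatrix

cm = ConfusionMatrix(num_classes=10)
precision = cmPrecision(cm, average=False)   # per-class precision
recall = cmRecall(cm, average=False)         # per-class recall

# Arithmetic on metrics yields a MetricsLambda that is recomputed from the
# live confusion matrix; .mean() macro-averages the per-class F1 scores.
f1 = (precision * recall * 2 / (precision + recall + 1e-15)).mean()
```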
Method to simulate scheduled values during num_events events.
Args:
num_events (int): number of events during the simulation.
lr_scheduler (subclass of `torch.optim.lr_scheduler._LRScheduler`): lr_scheduler object to wrap.
Returns:
list of pairs: [event_index, value... | def simulate_values(cls, num_events, lr_scheduler, **kwargs):
# This scheduler uses `torch.optim.lr_scheduler._LRScheduler` which
# should be replicated in order to simulate LR values and
# not perturb original scheduler.
copy_lr_scheduler = LRScheduler._replicate_lr_scheduler(l... | 103,419 |
Attach the logger to the engine and execute `log_handler` function at `event_name` events.
Args:
engine (Engine): engine object.
log_handler (callable): a logging handler to execute
event_name: event to attach the logging handler to. Valid events are from :class:`~ignite.eng... | def attach(self, engine, log_handler, event_name):
if event_name not in State.event_to_attr:
raise RuntimeError("Unknown event name '{}'".format(event_name))
engine.add_event_handler(event_name, log_handler, self, event_name) | 103,457 |
Iterate until condition is met, with optional timeout in seconds.
The yielded value is that of the condition or False when timed out.
Args:
condition: Predicate function that is tested after every network
update.
timeout: Maximum time in seconds to wait.
... | def loopUntil(
self, condition=None, timeout: float = 0) -> Iterator[object]:
endTime = time.time() + timeout
while True:
test = condition and condition()
if test:
yield test
return
elif timeout and time.time() > en... | 104,822 |
List of account values for the given account,
or of all accounts if account is left blank.
Args:
account: If specified, filter for this account name. | def accountValues(self, account: str = '') -> List[AccountValue]:
if account:
return [v for v in self.wrapper.accountValues.values()
if v.account == account]
else:
return list(self.wrapper.accountValues.values()) | 104,823 |
List of account values for the given account,
or of all accounts if account is left blank.
This method is blocking on first run, non-blocking after that.
Args:
account: If specified, filter for this account name. | def accountSummary(self, account: str = '') -> List[AccountValue]:
if not self.wrapper.acctSummary:
# loaded on demand since it takes ca. 250 ms
self.reqAccountSummary()
if account:
return [v for v in self.wrapper.acctSummary.values()
if v... | 104,824 |
List of positions for the given account,
or of all accounts if account is left blank.
Args:
account: If specified, filter for this account name. | def positions(self, account: str = '') -> List[Position]:
if account:
return list(self.wrapper.positions[account].values())
else:
return [v for d in self.wrapper.positions.values()
for v in d.values()] | 104,826 |
List of subscribed :class:`.PnL` objects (profit and loss),
optionally filtered by account and/or modelCode.
The :class:`.PnL` objects are kept live updated.
Args:
account: If specified, filter for this account name.
modelCode: If specified, filter for this account mode... | def pnl(self, account='', modelCode='') -> List[PnL]:
return [v for v in self.wrapper.pnls.values() if
(not account or v.account == account) and
(not modelCode or v.modelCode == modelCode)] | 104,827 |
List of subscribed :class:`.PnLSingle` objects (profit and loss for
single positions).
The :class:`.PnLSingle` objects are kept live updated.
Args:
account: If specified, filter for this account name.
modelCode: If specified, filter for this account model.
c... | def pnlSingle(
self, account: str = '', modelCode: str = '',
conId: int = 0) -> List[PnLSingle]:
return [v for v in self.wrapper.pnlSingles.values() if
(not account or v.account == account) and
(not modelCode or v.modelCode == modelCode) and
... | 104,828 |
Get ticker of the given contract. It must have been requested before
with reqMktData with the same contract object. The ticker may not be
ready yet if called directly after :meth:`.reqMktData`.
Args:
contract: Contract to get ticker for. | def ticker(self, contract: Contract) -> Ticker:
return self.wrapper.tickers.get(id(contract)) | 104,833 |
Request and return a list of snapshot tickers.
The list is returned when all tickers are ready.
This method is blocking.
Args:
contracts: Contracts to get tickers for.
regulatorySnapshot: Request NBBO snapshots (may incur a fee). | def reqTickers(
self, *contracts: List[Contract],
regulatorySnapshot: bool = False) -> List[Ticker]:
return self._run(
self.reqTickersAsync(
*contracts, regulatorySnapshot=regulatorySnapshot)) | 104,834 |
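A short ib_insync usage sketch for the blocking snapshot call above; the connection details and symbol are placeholders and assume a running TWS or gateway.

```python
from ib_insync import IB, Stock

ib = IB()
ib.connect('127.0.0.1', 7497, clientId=1)   # placeholder connection details

contract = Stock('TSLA', 'SMART', 'USD')
ib.qualifyContracts(contract)               # fill in conId etc. first
[ticker] = ib.reqTickers(contract)          # blocks until the snapshot is ready
print(ticker.marketPrice())

ib.disconnect()
```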
Fully qualify the given contracts in-place. This will fill in
the missing fields in the contract, especially the conId.
Returns a list of contracts that have been successfully qualified.
This method is blocking.
Args:
contracts: Contracts to qualify. | def qualifyContracts(self, *contracts: List[Contract]) -> List[Contract]:
return self._run(self.qualifyContractsAsync(*contracts)) | 104,835 |
Place the trades in the same One Cancels All (OCA) group.
https://interactivebrokers.github.io/tws-api/oca.html
Args:
orders: The orders that are to be placed together. | def oneCancelsAll(
orders: List[Order], ocaGroup: str, ocaType: int) -> List[Order]:
for o in orders:
o.ocaGroup = ocaGroup
o.ocaType = ocaType
return orders | 104,837 |
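A hedged sketch of the OCA grouping above: a take-profit and a stop-loss for the same position share one group, so a fill on either cancels the other. Quantities, prices, and the group name are illustrative.

```python
from ib_insync import IB, LimitOrder, StopOrder

take_profit = LimitOrder('SELL', 100, 120.0)
stop_loss = StopOrder('SELL', 100, 95.0)

# ocaType=1 cancels the remaining orders (with block) once one of them fills.
orders = IB.oneCancelsAll([take_profit, stop_loss],
                          ocaGroup='oca-demo-1', ocaType=1)
# Each order would then be submitted with ib.placeOrder(contract, order).
```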
Retrieve commission and margin impact without actually
placing the order. The given order will not be modified in any way.
This method is blocking.
Args:
contract: Contract to test.
order: Order to test. | def whatIfOrder(self, contract: Contract, order: Order) -> OrderState:
return self._run(self.whatIfOrderAsync(contract, order)) | 104,838 |
Place a new order or modify an existing order.
Returns a Trade that is kept live updated with
status changes, fills, etc.
Args:
contract: Contract to use for order.
order: The order to be placed. | def placeOrder(self, contract: Contract, order: Order) -> Trade:
orderId = order.orderId or self.client.getReqId()
self.client.placeOrder(orderId, contract, order)
now = datetime.datetime.now(datetime.timezone.utc)
key = self.wrapper.orderKey(
self.wrapper.clientId, ... | 104,839 |
Cancel the order and return the Trade it belongs to.
Args:
order: The order to be canceled. | def cancelOrder(self, order: Order) -> Trade:
self.client.cancelOrder(order.orderId)
now = datetime.datetime.now(datetime.timezone.utc)
key = self.wrapper.orderKey(
order.clientId, order.orderId, order.permId)
trade = self.wrapper.trades.get(key)
if trade:
... | 104,840 |
It is recommended to use :meth:`.accountValues` instead.
Request account values of multiple accounts and keep updated.
This method is blocking.
Args:
account: If specified, filter for this account name.
modelCode: If specified, filter for this account model. | def reqAccountUpdatesMulti(
self, account: str = '', modelCode: str = ''):
self._run(self.reqAccountUpdatesMultiAsync(account, modelCode)) | 104,841 |
It is recommended to use :meth:`.fills` or
:meth:`.executions` instead.
Request and return a list of fills.
This method is blocking.
Args:
execFilter: If specified, return executions that match the filter. | def reqExecutions(
self, execFilter: ExecutionFilter = None) -> List[Fill]:
return self._run(self.reqExecutionsAsync(execFilter)) | 104,842 |
Start a subscription for profit and loss events.
Returns a :class:`.PnL` object that is kept live updated.
The result can also be queried from :meth:`.pnl`.
https://interactivebrokers.github.io/tws-api/pnl.html
Args:
account: Subscribe to this account.
modelCod... | def reqPnL(self, account: str, modelCode: str = '') -> PnL:
key = (account, modelCode)
assert key not in self.wrapper.pnlKey2ReqId
reqId = self.client.getReqId()
self.wrapper.pnlKey2ReqId[key] = reqId
pnl = PnL(account, modelCode)
self.wrapper.pnls[reqId] = pnl
... | 104,843 |
Cancel PnL subscription.
Args:
account: Cancel for this account.
modelCode: If specified, cancel for this account model. | def cancelPnL(self, account, modelCode: str = ''):
key = (account, modelCode)
reqId = self.wrapper.pnlKey2ReqId.pop(key, None)
if reqId:
self.client.cancelPnL(reqId)
self.wrapper.pnls.pop(reqId, None)
else:
self._logger.error(
... | 104,844 |