docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Encode a set of proposals with respect to some reference boxes Arguments: reference_boxes (Tensor): reference boxes proposals (Tensor): boxes to be encoded
def encode(self, reference_boxes, proposals): TO_REMOVE = 1 # TODO remove ex_widths = proposals[:, 2] - proposals[:, 0] + TO_REMOVE ex_heights = proposals[:, 3] - proposals[:, 1] + TO_REMOVE ex_ctr_x = proposals[:, 0] + 0.5 * ex_widths ex_ctr_y = proposals[:, 1] + 0.5 ...
123,156
From a set of original boxes and encoded relative box offsets, get the decoded boxes. Arguments: rel_codes (Tensor): encoded boxes boxes (Tensor): reference boxes.
def decode(self, rel_codes, boxes): boxes = boxes.to(rel_codes.dtype) TO_REMOVE = 1 # TODO remove widths = boxes[:, 2] - boxes[:, 0] + TO_REMOVE heights = boxes[:, 3] - boxes[:, 1] + TO_REMOVE ctr_x = boxes[:, 0] + 0.5 * widths ctr_y = boxes[:, 1] + 0.5 * heig...
123,157
Create a single numpy array from a dataset. The dataset must have only one dimension, that is, the length of its `output_shapes` and `output_types` is 1, and its output shape must be `[]`, that is, every tensor in the dataset must be a scalar. Args: ds: a TF Dataset. batch_size: how ...
def make_single_array(ds, batch_size=8*1024): if isinstance(ds.output_types, tuple) or isinstance(ds.output_shapes, tuple): raise ValueError('Dataset must have a single type and shape') nshapes = len(ds.output_shapes) if nshapes > 0: raise ValueError('Dataset must be comprised of scalar...
123,179
Given dataset of key names, return histogram of moves/game. Move counts are written by the game players, so this is mostly useful for repair or backfill. Args: sess: TF session ds: TF dataset containing game move keys. batch_size: performance tuning parameter
def _histogram_move_keys_by_game(sess, ds, batch_size=8*1024): ds = ds.batch(batch_size) # Turns 'g_0000001234_m_133' into 'g_0000001234' ds = ds.map(lambda x: tf.strings.substr(x, 0, 12)) iterator = ds.make_initializable_iterator() sess.run(iterator.initializer) get_next = iterator.get_nex...
123,180
Constructor. Args: project_name: string name of GCP project having table. instance_name: string name of CBT instance in project. table_name: string name of CBT table in instance.
def __init__(self, project_name, instance_name, table_name): self.btspec = BigtableSpec(project_name, instance_name, table_name) self.bt_table = bigtable.Client( self.btspec.project, admin=True).instance( self.btspec.instance).table(self.btspec.table) self.tf...
123,186
Delete rows related to the given game range. Args: format_str: a string to `.format()` by the game numbers in order to create the row prefixes. start_game: the starting game number of the deletion. end_game: the ending game number of the deletion.
def delete_row_range(self, format_str, start_game, end_game): row_keys = make_single_array( self.tf_table.keys_by_range_dataset( format_str.format(start_game), format_str.format(end_game))) row_keys = list(row_keys) if not row_keys: ...
123,191
Require a given number of fresh games to be played. Args: number_fresh: integer, number of new fresh games needed Increments the cell `table_state=metadata:wait_for_game_number` by the given number of games. This will cause `self.wait_for_fresh_games()` to block until the g...
def require_fresh_games(self, number_fresh): latest = self.latest_game_number table_state = self.bt_table.row(TABLE_STATE) table_state.set_cell(METADATA, WAIT_CELL, int(latest + number_fresh)) table_state.commit() print("== Setting wait cell to ", int(latest + number_fre...
123,194
Block caller until required new games have been played. Args: poll_interval: number of seconds to wait between checks If the cell `table_state=metadata:wait_for_game_number` exists, then block the caller, checking every `poll_interval` seconds, until `table_state=metadata:ga...
def wait_for_fresh_games(self, poll_interval=15.0): wait_until_game = self.read_wait_cell() if not wait_until_game: return latest_game = self.latest_game_number last_latest = latest_game while latest_game < wait_until_game: utils.dbg('Latest game ...
123,195
Count the total moves in a game range. Args: game_begin: integer, starting game game_end: integer, ending game Uses the `ct_` keyspace for rapid move summary.
def count_moves_in_game_range(self, game_begin, game_end): rows = self.bt_table.read_rows( ROWCOUNT_PREFIX.format(game_begin), ROWCOUNT_PREFIX.format(game_end), filter_=bigtable_row_filters.ColumnRangeFilter( METADATA, MOVE_COUNT, MOVE_COUNT)) ...
123,197
Randomly choose a given number of moves from the last n games. Args: n: number of games at the end of this GameQueue to source. moves: number of moves to be sampled from `n` games. shuffle: if True, shuffle the selected moves. column_family: name of the column family...
def moves_from_last_n_games(self, n, moves, shuffle, column_family, column): self.wait_for_fresh_games() latest_game = self.latest_game_number utils.dbg('Latest game in %s: %s' % (self.btspec.table, latest_game)) if latest_game == 0: r...
123,199
Add move counts from the given histogram to the table. Used to update the move counts in an existing table. Should not be needed except for backfill or repair. Args: sess: TF session to use for doing a Bigtable write. tf_table: TF Cloud Bigtable to use for writing. ...
def _write_move_counts(self, sess, h): def gen(): for k, v in h.items(): # The keys in the histogram may be of type 'bytes' k = str(k, 'utf-8') vs = str(v) yield (k.replace('g_', 'ct_') + '_%d' % v, vs) yield (k...
123,200
Input function which provides batches for train or eval. Args: is_training: A boolean denoting whether the input is for training. data_dir: The directory containing the input data. batch_size: The number of samples per batch. num_epochs: The number of epochs to repeat the dataset. num_gpus: The n...
def input_fn(is_training, data_dir, batch_size, num_epochs=1, num_gpus=None, dtype=tf.float32): mlperf_log.resnet_print(key=mlperf_log.INPUT_ORDER) filenames = get_filenames(is_training, data_dir) dataset = tf.data.Dataset.from_tensor_slices(filenames) if is_training: # Shuffle the input fi...
123,205
Retrieve the size of each block_layer in the ResNet model. The number of block layers used for the Resnet model varies according to the size of the model. This helper grabs the layer set we want, throwing an error if a non-standard size has been selected. Args: resnet_size: The number of convolutional lay...
def _get_block_sizes(resnet_size): choices = { 18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], 200: [3, 24, 36, 3] } try: return choices[resnet_size] except KeyError: err = ('Could not find layers for selected Resnet s...
123,206
Builds just the inference part of the model graph. Args: features: input features tensor. training: True if the model is training. params: A dictionary Returns: (policy_output, value_output, logits) tuple of tensors.
def model_inference_fn(features, training, params): mg_batchn = functools.partial( tf.layers.batch_normalization, axis=-1, momentum=.95, epsilon=1e-5, center=True, scale=True, fused=True, training=training) mg_conv2d = functools.partial( ...
123,248
Take the latest checkpoint and copy it to model_path. Assumes that all relevant model files are prefixed by the same name. (For example, foo.index, foo.meta and foo.data-00000-of-00001). Args: model_path: The path (can be a gs:// path) to export model
def export_model(model_path): estimator = tf.estimator.Estimator(model_fn, model_dir=FLAGS.work_dir, params=FLAGS.flag_values_dict()) latest_checkpoint = estimator.latest_checkpoint() all_checkpoint_files = tf.gfile.Glob(latest_checkpoint + '*') for filename i...
123,254
Parses the output of --helpfull. Args: help_output: str, the full output of --helpfull. Returns: A set of flags that are valid flags.
def parse_helpfull_output(help_output, regex=FLAG_HELP_RE_PY): valid_flags = set() for _, no_prefix, flag_name in regex.findall(help_output): valid_flags.add('--' + flag_name) if no_prefix: valid_flags.add('--no' + flag_name) return valid_flags
123,274
Prepares a subprocess command by running --helpfull and masking flags. Args: subprocess_cmd: List[str], what would be passed into subprocess.call() i.e. ['python', 'train.py', '--flagfile=flags'] Returns: ['python', 'train.py', '--train_flag=blah', '--more_flags']
def prepare_subprocess_cmd(subprocess_cmd): help_cmd = subprocess_cmd + ['--helpfull'] help_output = subprocess.run(help_cmd, stdout=subprocess.PIPE).stdout help_output = help_output.decode('ascii') if 'python' in subprocess_cmd[0]: valid_flags = parse_helpfull_output(help_output) else:...
123,276
Split x into different heads, and transpose the resulting value. The tensor is transposed to insure the inner dimensions hold the correct values during the matrix multiplication. Args: x: A tensor with shape [batch_size, length, hidden_size] Returns: A tensor with shape [batch_size, num_h...
def split_heads(self, x): with tf.name_scope("split_heads"): batch_size = tf.shape(x)[0] length = tf.shape(x)[1] # Calculate depth of last dimension after it has been split. depth = (self.hidden_size // self.num_heads) # Split the last dimension x = tf.reshape(x, [batch_si...
123,343
Combine tensor that has been split. Args: x: A tensor [batch_size, num_heads, length, hidden_size/num_heads] Returns: A tensor with shape [batch_size, length, hidden_size]
def combine_heads(self, x): with tf.name_scope("combine_heads"): batch_size = tf.shape(x)[0] length = tf.shape(x)[2] x = tf.transpose(x, [0, 2, 1, 3]) # --> [batch, length, num_heads, depth] return tf.reshape(x, [batch_size, length, self.hidden_size])
123,344
r"""Replace characters that aren't in the alphabet and append "_" to token. Apply three transformations to the token: 1. Replace underline character "_" with "\u", and backslash "\" with "\\". 2. Replace characters outside of the alphabet with "\###;", where ### is the character's Unicode code point. ...
def _escape_token(token, alphabet): r token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u") ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token] return u"".join(ret) + "_"
123,352
r"""Replaces escaped characters in the token with their unescaped versions. Applies inverse transformations as _escape_token(): 1. Replace "\u" with "_", and "\\" with "\". 2. Replace "\###;" with the unicode character the ### refers to. Args: token: escaped string Returns: unescaped string
def _unescape_token(token): r def match(m): r # Check if the matched strings are '\u' or '\\'. if m.group(1) is None: return u"_" if m.group(0) == u"\\u" else u"\\" # If m.group(1) exists, try and return unicode character. try: return six.unichr(int(m.group(1))) except (ValueEr...
123,353
Return token counts of words in the files. Samples file_byte_limit bytes from each file, and counts the words that appear in the samples. The samples are semi-evenly distributed across the file. Args: files: List of filepaths file_byte_limit: Max number of bytes that will be read from each file. Retu...
def _count_tokens(files, file_byte_limit=1e6): token_counts = collections.defaultdict(int) for filepath in files: with tf.gfile.Open(filepath, mode="r") as reader: file_byte_budget = file_byte_limit counter = 0 lines_to_skip = int(reader.size() / (file_byte_budget * 2)) for line in r...
123,354
Return a bucketed list of subtokens that are filtered by count. Args: subtoken_counts: defaultdict mapping subtokens to their counts min_count: int count used to filter subtokens Returns: List of subtoken sets, where subtokens in set i have the same length=i.
def _filter_and_bucket_subtokens(subtoken_counts, min_count): # Create list of buckets, where subtokens in bucket i have length i. subtoken_buckets = [] for subtoken, count in six.iteritems(subtoken_counts): if count < min_count: # Filter out subtokens that don't appear enough continue while len...
123,359
Add operations to classify a batch of input images. Args: inputs: A Tensor representing a batch of input images. training: A boolean. Set to True to add operations required only when training the classifier. Returns: A logits Tensor with shape [<batch_size>, self.num_classes].
def __call__(self, inputs, training): # Drop batch size from shape logging. mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_INITIAL_SHAPE, value=inputs.shape.as_list()[1:]) with self._model_variable_scope(): if self.data_format == 'channels_first': # Convert ...
123,431
Compile raw files into a single file for each language. Args: raw_dir: Directory containing downloaded raw files. raw_files: Dict containing filenames of input and target data. {"inputs": list of files containing data in input language "targets": list of files containing corresponding data in ta...
def compile_files(raw_dir, raw_files, tag): tf.logging.info("Compiling files with tag %s." % tag) filename = "%s-%s" % (_PREFIX, tag) input_compiled_file = os.path.join(raw_dir, filename + ".lang1") target_compiled_file = os.path.join(raw_dir, filename + ".lang2") with tf.gfile.Open(input_compiled_file, m...
123,465
Initialize layers to build Transformer model. Args: params: hyperparameter object defining layer sizes, dropout values, etc. train: boolean indicating whether the model is in training mode. Used to determine if dropout layers should be added.
def __init__(self, params, train): self.train = train self.params = params self.embedding_softmax_layer = embedding_layer.EmbeddingSharedWeights( params.vocab_size, params.hidden_size) self.encoder_stack = EncoderStack(params, train) self.decoder_stack = DecoderStack(params, train)
123,474
Generate continuous representation for inputs. Args: inputs: int tensor with shape [batch_size, input_length]. attention_bias: float tensor with shape [batch_size, 1, 1, input_length] Returns: float tensor with shape [batch_size, input_length, hidden_size]
def encode(self, inputs, attention_bias): with tf.name_scope("encode"): # Prepare inputs to the layer stack by adding positional encodings and # applying dropout. embedded_inputs = self.embedding_softmax_layer(inputs) inputs_padding = model_utils.get_padding(inputs) with tf.name_...
123,476
Generate logits for each value in the target sequence. Args: targets: target values for the output sequence. int tensor with shape [batch_size, target_length] encoder_outputs: continuous representation of input sequence. float tensor with shape [batch_size, input_length, hidden_size] ...
def decode(self, targets, encoder_outputs, attention_bias): with tf.name_scope("decode"): # Prepare inputs to decoder layers by shifting targets, adding positional # encoding and applying dropout. decoder_inputs = self.embedding_softmax_layer(targets) with tf.name_scope("shift_targets")...
123,477
Separate the rating datafram into train and test sets. Filters out users with less than two distinct timestamps. Creates train set and test set. The test set contains all the last interactions of users with more than two distinct timestamps. Args: ratings_df: pandas dataframe with columns 'userId', 'movie...
def _preprocess_movie_lens(ratings_df): ratings_df["data"] = 1.0 num_timestamps = ratings_df[["userId", "timestamp"]].groupby( "userId").nunique() last_user_timestamp = ratings_df[["userId", "timestamp"]].groupby( "userId").max() ratings_df["numberOfTimestamps"] = ratings_df["userId"].apply( ...
123,490
Get token embeddings of x. Args: x: An int64 tensor with shape [batch_size, length] Returns: embeddings: float32 tensor with shape [batch_size, length, embedding_size] padding: float32 tensor with shape [batch_size, length] indicating the locations of the padding tokens in x.
def call(self, x): with tf.name_scope("embedding"): embeddings = tf.gather(self.shared_weights, x) # Scale embedding by the sqrt of the hidden size embeddings *= self.hidden_size ** 0.5 # Create binary array of size [batch_size, length] # where 1 = padding, 0 = not padding ...
123,516
Computes logits by running x through a linear layer. Args: x: A float32 tensor with shape [batch_size, length, hidden_size] Returns: float32 tensor with shape [batch_size, length, vocab_size].
def linear(self, x): with tf.name_scope("presoftmax_linear"): batch_size = tf.shape(x)[0] length = tf.shape(x)[1] x = tf.reshape(x, [-1, self.hidden_size]) logits = tf.matmul(x, self.shared_weights, transpose_b=True) return tf.reshape(logits, [batch_size, length, self.vocab_size...
123,517
Given a set of BoxList containing the `labels` field, return a set of BoxList for which `labels > 0`. Arguments: boxes (list of BoxList)
def keep_only_positive_boxes(boxes): assert isinstance(boxes, (list, tuple)) assert isinstance(boxes[0], BoxList) assert boxes[0].has_field("labels") positive_boxes = [] positive_inds = [] num_boxes = 0 for boxes_per_image in boxes: labels = boxes_per_image.get_field("labels") ...
123,526
Prints the current MCTS search status to stderr. Reports the current search path, root node's child_Q, root node's child_N, the most visited path in a format that can be parsed by one of the STDERR_HANDLERS in minigui.ts. Args: leaves: list of leaf MCTSNodes returned by tree_...
def _minigui_report_search_status(self, leaves): root = self._player.get_root() msg = { "id": hex(id(root)), "n": int(root.N), "q": float(root.Q), } msg["childQ"] = [int(round(q * 1000)) for q in root.child_Q] msg["childN"] = [int(n...
123,552
Given a list of strings, removes blanks and replace space character with space. Option to remove repetitions (e.g. 'abbca' -> 'abca'). Arguments: sequences: list of 1-d array of integers remove_repetitions (boolean, optional): If true, repeating characters are re...
def process_strings(self, sequences, remove_repetitions=False): processed_strings = [] for sequence in sequences: string = self.process_string(remove_repetitions, sequence).strip() processed_strings.append(string) return processed_strings
123,589
Computes the Word Error Rate, defined as the edit distance between the two provided sentences after tokenizing to words. Arguments: s1 (string): space-separated sentence s2 (string): space-separated sentence
def wer(self, s1, s2): # build mapping of words to integers b = set(s1.split() + s2.split()) word2char = dict(zip(b, range(len(b)))) # map the words to a char array (Levenshtein packages only accepts # strings) w1 = [chr(word2char[w]) for w in s1.split()] ...
123,591
Returns the argmax decoding given the probability matrix. Removes repeated elements in the sequence, as well as blanks. Arguments: probs: Tensor of character probabilities from the network. Expected shape of seq_length x batch x output_dim sizes(optional): Size of each sequence ...
def decode(self, probs, sizes=None): _, max_probs = torch.max(probs.transpose(0, 1), 2) strings = self.convert_to_strings(max_probs.view(max_probs.size(0), max_probs.size(1)), sizes) return self.process_strings(strings, remove_repetitions=True)
123,594
Returns an iterable of tf.Examples. Args: data_extracts: An iterable of (position, pi, result) tuples
def make_dataset_from_selfplay(data_extracts): tf_examples = (make_tf_example(features_lib.extract_features(pos), pi, result) for pos, pi, result in data_extracts) return tf_examples
123,638
Return a boolean representing whether a model should be stopped. Args: stop_threshold: float, the threshold above which a model should stop training. eval_metric: float, the current value of the relevant metric to check. Returns: True if training should stop, False otherwise. Raises: Valu...
def past_stop_threshold(stop_threshold, eval_metric): if stop_threshold is None: return False if not isinstance(stop_threshold, numbers.Number): raise ValueError("Threshold for checking stop conditions must be a number.") if not isinstance(eval_metric, numbers.Number): raise ValueError("Eval metri...
123,649
Run all_gather on arbitrary picklable data (not necessarily tensors) Args: data: any picklable object Returns: list[data]: list of data gathered from each rank
def all_gather(data): world_size = get_world_size() if world_size == 1: return [data] # serialized to a Tensor buffer = pickle.dumps(data) storage = torch.ByteStorage.from_buffer(buffer) tensor = torch.ByteTensor(storage).to("cuda") # obtain Tensor size of each rank local_...
123,655
evaluate dataset using different methods based on dataset type. Args: dataset: Dataset object predictions(list[BoxList]): each item in the list represents the prediction results for one image. output_folder: output folder, to save evaluation files or results. **kwargs: ot...
def evaluate(dataset, predictions, output_folder, **kwargs): args = dict( dataset=dataset, predictions=predictions, output_folder=output_folder, **kwargs ) if isinstance(dataset, datasets.COCODataset): return coco_evaluation(**args) elif isinstance(dataset, datasets.PascalVOCDataset...
123,669
Factory for getting a list of TensorFlow hooks for training by name. Args: name_list: a list of strings to name desired hook classes. Allowed: LoggingTensorHook, ProfilerHook, ExamplesPerSecondHook, which are defined as keys in HOOKS **kwargs: a dictionary of arguments to the hooks. Returns: ...
def get_train_hooks(name_list, **kwargs): if not name_list: return [] train_hooks = [] for name in name_list: hook_name = HOOKS.get(name.strip().lower()) if hook_name is None: raise ValueError('Unrecognized training hook requested: {}'.format(name)) else: train_hooks.append(hook_n...
123,672
Function to get LoggingTensorHook. Args: every_n_iter: `int`, print the values of `tensors` once every N local steps taken on the current worker. tensors_to_log: List of tensor names or dictionary mapping labels to tensor names. If not set, log _TENSORS_TO_LOG by default. **kwargs: a dictiona...
def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs): # pylint: disable=unused-argument if tensors_to_log is None: tensors_to_log = _TENSORS_TO_LOG return tf.train.LoggingTensorHook( tensors=tensors_to_log, every_n_iter=every_n_iter)
123,673
Function to get LoggingMetricHook. Args: benchmark_log_dir: `string`, directory path to save the metric log. tensors_to_log: List of tensor names or dictionary mapping labels to tensor names. If not set, log _TENSORS_TO_LOG by default. every_n_secs: `int`, the frequency for logging the metric. Defa...
def get_logging_metric_hook(benchmark_log_dir=None, tensors_to_log=None, every_n_secs=600, **kwargs): # pylint: disable=unused-argument if benchmark_log_dir is None: raise ValueError("metric_log_dir should be provided to use m...
123,675
Called after each call to run(). Args: run_context: A SessionRunContext object. run_values: A SessionRunValues object.
def after_run(self, run_context, run_values): # pylint: disable=unused-argument global_step = run_values.results if self._timer.should_trigger_for_step( global_step) and global_step > self._warm_steps: elapsed_time, elapsed_steps = self._timer.update_last_triggered_step( global_st...
123,750
Adds the predicted boxes on top of the image Arguments: image (np.ndarray): an image as returned by OpenCV predictions (BoxList): the result of the computation by the model. It should contain the field `labels`.
def overlay_boxes(self, image, predictions): labels = predictions.get_field("labels") boxes = predictions.bbox colors = self.compute_colors_for_labels(labels).tolist() for box, color in zip(boxes, colors): box = box.to(torch.int64) top_left, bottom_righ...
123,758
Adds the instances contours for each predicted object. Each label has a different color. Arguments: image (np.ndarray): an image as returned by OpenCV predictions (BoxList): the result of the computation by the model. It should contain the field `mask` and `label...
def overlay_mask(self, image, predictions): masks = predictions.get_field("mask").numpy() labels = predictions.get_field("labels") colors = self.compute_colors_for_labels(labels).tolist() for mask, color in zip(masks, colors): thresh = mask[0, :, :, None] ...
123,759
Create a montage showing the probability heatmaps for each one one of the detected objects Arguments: image (np.ndarray): an image as returned by OpenCV predictions (BoxList): the result of the computation by the model. It should contain the field `mask`.
def create_mask_montage(self, image, predictions): masks = predictions.get_field("mask") masks_per_dim = self.masks_per_dim masks = L.interpolate( masks.float(), scale_factor=1 / masks_per_dim ).byte() height, width = masks.shape[-2:] max_masks = mask...
123,761
Adds detected class names and scores in the positions defined by the top-left corner of the predicted bounding box Arguments: image (np.ndarray): an image as returned by OpenCV predictions (BoxList): the result of the computation by the model. It should contain t...
def overlay_class_names(self, image, predictions): scores = predictions.get_field("scores").tolist() labels = predictions.get_field("labels").tolist() labels = [self.CATEGORIES[i] for i in labels] boxes = predictions.bbox template = "{}: {:.2f}" for box, score, ...
123,762
Log the evaluation result for a estimator. The evaluate result is a directory that contains metrics defined in model_fn. It also contains a entry for global_step which contains the value of the global step when evaluation was performed. Args: eval_results: dict, the result of evaluate() from a e...
def log_estimator_evaluation_result(self, eval_results): if not isinstance(eval_results, dict): tf.logging.warning("eval_results should be directory for logging. Got %s", type(eval_results)) return global_step = eval_results[tf.GraphKeys.GLOBAL_STEP] for key in sort...
123,772
Collect most of the TF runtime information for the local env. The schema of the run info follows official/benchmark/datastore/schema. Args: model_name: string, the name of the model.
def log_run_info(self, model_name): run_info = { "model_name": model_name, "machine_config": {}, "run_date": datetime.datetime.now().strftime(_DATE_TIME_FORMAT_PATTERN)} _collect_tensorflow_info(run_info) _collect_tensorflow_environment_variables(run_info) _collect_cpu_info(...
123,774
Write all eval_records to eval_table In addition to writing new rows table_state must be updated in row `table_state` columns `metadata:eval_game_counter` Args: bt_table: bigtable table to add rows to. game_data: metadata pairs (column name, value) for each eval record. last_game: last...
def write_eval_records(bt_table, game_data, last_game): eval_num = last_game # Each column counts as a mutation so max rows is ~10000 GAMES_PER_COMMIT = 2000 for games in grouper(tqdm(game_data), GAMES_PER_COMMIT): assert bt_table.read_row(EVAL_PREFIX.format(eval_num)), "Prev row doesn't e...
123,781
Run the given subprocess command in a coroutine. Args: *cmd: the command to run and its arguments. Returns: The output that the command wrote to stdout as a list of strings, one line per element (stderr output is piped to stdout). Raises: RuntimeError: if the command returns a non-zero result.
async def run(*cmd): stdout = await checked_run(*cmd) log_path = os.path.join(FLAGS.base_dir, get_cmd_name(cmd) + '.log') with gfile.Open(log_path, 'a') as f: f.write(expand_cmd_str(cmd)) f.write('\n') f.write(stdout) f.write('\n') # Split stdout into lines. return stdout.split('\n')
123,785
Run selfplay and write a training chunk to the fsdb golden_chunk_dir. Args: state: the RL loop State instance. flagfile: the name of the flagfile to use for selfplay, either 'selfplay' (the default) or 'boostrap'.
async def selfplay(state, flagfile='selfplay'): output_dir = os.path.join(fsdb.selfplay_dir(), state.output_model_name) holdout_dir = os.path.join(fsdb.holdout_dir(), state.output_model_name) lines = await run( 'bazel-bin/cc/selfplay', '--flagfile={}.flags'.format(os.path.join(FLAGS.flags_dir, fl...
123,787
Run training and write a new model to the fsdb models_dir. Args: state: the RL loop State instance. tf_records: a list of paths to TensorFlow records to train on.
async def train(state, tf_records): model_path = os.path.join(fsdb.models_dir(), state.train_model_name) await run( 'python3', 'train.py', *tf_records, '--flagfile={}'.format(os.path.join(FLAGS.flags_dir, 'train.flags')), '--work_dir={}'.format(fsdb.working_dir()), '--export_path={}'.for...
123,788
Validate the trained model against holdout games. Args: state: the RL loop State instance. holdout_glob: a glob that matches holdout games.
async def validate(state, holdout_glob): if not glob.glob(holdout_glob): print('Glob "{}" didn\'t match any files, skipping validation'.format( holdout_glob)) else: await run( 'python3', 'validate.py', holdout_glob, '--flagfile={}'.format(os.path.join(FLAGS.flags_dir, 'validate...
123,789
Evaluate one model against a target. Args: eval_model_path: the path to the model to evaluate. target_model_path: the path to the model to compare to. sgf_dif: directory path to write SGF output to. seed: random seed to use when running eval. Returns: The win-rate of eval_model against target_...
async def evaluate_model(eval_model_path, target_model_path, sgf_dir, seed): lines = await run( 'bazel-bin/cc/eval', '--flagfile={}'.format(os.path.join(FLAGS.flags_dir, 'eval.flags')), '--model={}'.format(eval_model_path), '--model_two={}'.format(target_model_path), '--sgf_dir={}'.f...
123,790
Evaluate the most recently trained model against the current best model. Args: state: the RL loop State instance.
async def evaluate_trained_model(state): return await evaluate_model( state.train_model_path, state.best_model_path, os.path.join(fsdb.eval_dir(), state.train_model_name), state.seed)
123,791
Turn a game into SGF. Doesn't handle handicap games or positions with incomplete history. Args: move_history: iterable of PlayerMoves result_string: "B+R", "W+0.5", etc. comments: iterable of string/None. Will be zipped with move_history.
def make_sgf( move_history, result_string, ruleset="Chinese", komi=7.5, white_name=PROGRAM_IDENTIFIER, black_name=PROGRAM_IDENTIFIER, comments=[] ): boardsize = go.N game_moves = ''.join(translate_sgf_move(*z) for z in itertools.zip_longest(move_history,...
123,836
Download content from a url. Args: path: string directory where file will be downloaded url: string url Returns: Full path to downloaded file
def download_from_url(path, url): filename = url.split("/")[-1] found_file = find_file(path, filename, max_depth=0) if found_file is None: filename = os.path.join(path, filename) tf.logging.info("Downloading from %s to %s." % (url, filename)) inprogress_filepath = filename + ".incomplete" inpro...
123,889
Given segmentation masks and the bounding boxes corresponding to the location of the masks in the image, this function crops and resizes the masks in the position defined by the boxes. This prepares the masks for them to be fed to the loss computation as the targets. Arguments: segmentation...
def project_masks_on_boxes(segmentation_masks, proposals, discretization_size): masks = [] M = discretization_size device = proposals.bbox.device proposals = proposals.convert("xyxy") assert segmentation_masks.size == proposals.size, "{}, {}".format( segmentation_masks, proposals ) ...
123,891
Crops the given image to a random part of the image, and randomly flips. We use the fused decode_and_crop op, which performs better than the two ops used separately in series, but note that this requires that the image be passed in as an un-decoded string Tensor. Args: image_buffer: scalar string Tensor r...
def _decode_crop_and_flip(image_buffer, num_channels): # A large fraction of image datasets contain a human-annotated bounding box # delineating the region of the image containing the object of interest. We # choose to create a new bounding box for the object which is a randomly # distorted version of the h...
123,910
Performs central crops of the given image list. Args: image: a 3-D image tensor crop_height: the height of the image following the crop. crop_width: the width of the image following the crop. Returns: 3-D tensor with cropped image.
def _central_crop(image, crop_height, crop_width): shape = tf.shape(image) height, width = shape[0], shape[1] mlperf_log.resnet_print(key=mlperf_log.INPUT_CENTRAL_CROP, value=[crop_height, crop_width]) amount_to_be_cropped_h = (height - crop_height) crop_top = amount_to_be_cropp...
123,911
Resize images preserving the original aspect ratio. Args: image: A 3-D image `Tensor`. resize_min: A python integer or scalar `Tensor` indicating the size of the smallest side after resize. Returns: resized_image: A 3-D tensor containing the resized image.
def _aspect_preserving_resize(image, resize_min): mlperf_log.resnet_print(key=mlperf_log.INPUT_RESIZE_ASPECT_PRESERVING, value={"min": resize_min}) shape = tf.shape(image) height, width = shape[0], shape[1] new_height, new_width = _smallest_size_at_least(height, width, resize_min)...
123,914
Simple wrapper around tf.resize_images. This is primarily to make sure we use the same `ResizeMethod` and other details each time. Args: image: A 3-D image `Tensor`. height: The target height for the resized image. width: The target width for the resized image. Returns: resized_image: A 3-D t...
def _resize_image(image, height, width):
  """Resize `image` to exactly `height` x `width` with bilinear sampling.

  Thin wrapper around tf.image.resize_images so that every call site shares
  the same `ResizeMethod` and `align_corners` setting.

  Args:
    image: A 3-D image `Tensor`.
    height: The target height for the resized image.
    width: The target width for the resized image.

  Returns:
    resized_image: A 3-D tensor containing the resized image.
  """
  target_size = [height, width]
  return tf.image.resize_images(image,
                                target_size,
                                method=tf.image.ResizeMethod.BILINEAR,
                                align_corners=False)
123,915
Reshapes first two dimensions in to single dimension. Args: tensor: Tensor to reshape of shape [A, B, ...] Returns: Reshaped tensor of shape [A*B, ...]
def _flatten_beam_dim(tensor):
  """Collapse the leading batch and beam dimensions into a single dimension.

  Args:
    tensor: Tensor to reshape of shape [A, B, ...].

  Returns:
    Reshaped tensor of shape [A*B, ...].
  """
  dims = _shape_list(tensor)
  # Merge dims 0 and 1 (batch and beam) into one leading dimension.
  merged_shape = [dims[0] * dims[1]] + dims[2:]
  return tf.reshape(tensor, merged_shape)
123,920
Reshapes first dimension back to [batch_size, beam_size]. Args: tensor: Tensor to reshape of shape [batch_size*beam_size, ...] batch_size: Tensor, original batch size. beam_size: int, original beam size. Returns: Reshaped tensor of shape [batch_size, beam_size, ...]
def _unflatten_beam_dim(tensor, batch_size, beam_size):
  """Split the flattened first dimension back into [batch_size, beam_size].

  Inverse of `_flatten_beam_dim`.

  Args:
    tensor: Tensor to reshape of shape [batch_size*beam_size, ...].
    batch_size: Tensor, original batch size.
    beam_size: int, original beam size.

  Returns:
    Reshaped tensor of shape [batch_size, beam_size, ...].
  """
  trailing_dims = _shape_list(tensor)[1:]
  return tf.reshape(tensor, [batch_size, beam_size] + trailing_dims)
123,921
Return initial state dictionary and its shape invariants. Args: initial_ids: initial ids to pass into the symbols_to_logits_fn. int tensor with shape [batch_size, 1] initial_cache: dictionary storing values to be passed into the symbols_to_logits_fn. Returns: state and shap...
def _create_initial_state(self, initial_ids, initial_cache): # Current loop index (starts at 0) cur_index = tf.constant(0) # Create alive sequence with shape [batch_size, beam_size, 1] alive_seq = _expand_to_beam_size(initial_ids, self.beam_size) alive_seq = tf.expand_dims(alive_seq, axis=2) ...
123,926
Return whether to continue the search loop. The loops should terminate when 1) when decode length has been reached, or 2) when the worst score in the finished sequences is better than the best score in the alive sequences (i.e. the finished sequences are provably unchanging) Args...
def _continue_search(self, state): i = state[_StateKeys.CUR_INDEX] alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS] finished_scores = state[_StateKeys.FINISHED_SCORES] finished_flags = state[_StateKeys.FINISHED_FLAGS] not_at_max_decode_length = tf.less(i, self.max_decode_length) # Calc...
123,927
Return positional encoding. Calculates the position encoding as a mix of sine and cosine functions with geometrically increasing wavelengths. Defined and formalized in Attention is All You Need, section 3.5. Args: length: Sequence length. hidden_size: Size of the hidden dimension. min_timescale: Minimum scale that ...
def get_position_encoding( length, hidden_size, min_timescale=1.0, max_timescale=1.0e4): position = tf.to_float(tf.range(length)) num_timescales = hidden_size // 2 log_timescale_increment = ( math.log(float(max_timescale) / float(min_timescale)) / (tf.to_float(num_timescales) - 1)) inv_timesc...
123,941
Calculate bias for decoder that maintains model's autoregressive property. Creates a tensor that masks out locations that correspond to illegal connections, so prediction at position i cannot draw information from future positions. Args: length: int length of sequences in batch. Returns: float tens...
def get_decoder_self_attention_bias(length):
  """Build the causal attention bias preserving autoregressive decoding.

  Masks out connections to future positions: the prediction at position i
  receives a large negative bias on every location j > i, so after softmax
  those locations contribute effectively nothing.

  Args:
    length: int length of sequences in batch.

  Returns:
    float tensor of shape [1, 1, length, length]; 0 at allowed locations,
    _NEG_INF at disallowed (future) locations.
  """
  with tf.name_scope("decoder_self_attention_bias"):
    # Lower-triangular ones mark the legal (self and past) connections.
    lower_triangle = tf.matrix_band_part(tf.ones([length, length]), -1, 0)
    lower_triangle = tf.reshape(lower_triangle, [1, 1, length, length])
    return _NEG_INF * (1.0 - lower_triangle)
123,942
Return float tensor representing the padding values in x. Args: x: int tensor with any shape padding_value: int value that signifies padding Returns: float tensor with same shape as x containing values 0 or 1. 0 -> non-padding, 1 -> padding
def get_padding(x, padding_value=0):
  """Return a float mask marking the padding entries of `x`.

  Args:
    x: int tensor with any shape.
    padding_value: int value that is treated as padding (default 0).

  Returns:
    float tensor with the same shape as x, containing 1.0 where
    x == padding_value (padding) and 0.0 elsewhere (non-padding).
  """
  with tf.name_scope("padding"):
    is_padding = tf.equal(x, padding_value)
    return tf.to_float(is_padding)
123,943
Calculate bias tensor from padding values in tensor. Bias tensor that is added to the pre-softmax multi-headed attention logits, which has shape [batch_size, num_heads, length, length]. The tensor is zero at non-padding locations, and -1e9 (negative infinity) at padding locations. Args: x: int tensor with...
def get_padding_bias(x):
  """Calculate an attention bias tensor from the padding values in `x`.

  The bias is added to the pre-softmax multi-headed attention logits
  (shape [batch_size, num_heads, length, length]): zero at non-padding
  locations and _NEG_INF at padding locations.

  Args:
    x: int tensor whose entries equal to 0 are treated as padding.

  Returns:
    float tensor of shape [batch_size, 1, 1, length] that broadcasts
    across heads and query positions.
  """
  with tf.name_scope("attention_bias"):
    bias = get_padding(x) * _NEG_INF
    # Insert two singleton axes so the bias broadcasts over the
    # num_heads and query-length dimensions of the logits.
    bias = tf.expand_dims(tf.expand_dims(bias, axis=1), axis=1)
  return bias
123,944
Adds a result to the dictionary. Args: dict_entry: main dict to add entry entry: slot for this entry (likely an integer) dt: the timing for the entry start_time: when the entry started unix time float
def _add_result(self, dict_entry, entry, dt, start_time): time_entry = {} time_entry['dt'] = dt time_entry['start_time'] = start_time dict_entry[entry] = time_entry
123,953
Sorts dicts of results based on logged start_time. Sorts the results and returns a list containing only the timing values, ordered by oldest start_time first. Args: results_dicts: List of result dicts Returns: List of only the times, sorted oldest first.
def _sorted_results(self, results_dicts): print('results dicts:', results_dicts) sorted_dict = sorted(results_dicts, key=lambda k: k['start_time']) results = [] for entry in sorted_dict: results.append(entry['dt']) return results
123,954
Verifies and result and returns timing. Uses submodule mlp_compliance (https://github.com/bitfort/mlp_compliance) Args: log_file: Absolute path to result file. division: open, closed result_name: name of the benchmark, ncf, ssd, etc Returns: Time for the result or `INFINITE_TIME` ...
def verify_and_extract_time(self, log_file, division, result_name): expected_level = constants.DIVISION_COMPLIANCE_CHECK_LEVEL.get( division, None) print(result_name) if expected_level is None: raise Exception('Unknown division: {}'.format(division)) start_time, level, dt, _, success ...
123,959
Performs non-maximum suppression on a boxlist, with scores specified in a boxlist field via score_field. Arguments: boxlist(BoxList) nms_thresh (float) max_proposals (int): if > 0, then only the top max_proposals are kept after non-maximum suppression score_field (st...
def boxlist_nms(boxlist, nms_thresh, max_proposals=-1, score_field="scores"): if nms_thresh <= 0: return boxlist mode = boxlist.mode boxlist = boxlist.convert("xyxy") boxes = boxlist.bbox score = boxlist.get_field(score_field) keep = _box_nms(boxes, score, nms_thresh) if max_pro...
123,991
Only keep boxes with both sides >= min_size Arguments: boxlist (Boxlist) min_size (int)
def remove_small_boxes(boxlist, min_size):
    """Keep only boxes whose width and height are both >= min_size.

    Arguments:
        boxlist (Boxlist): boxes to filter.
        min_size (int): minimum allowed side length.

    Returns:
        Boxlist restricted to the boxes that are large enough.
    """
    # TODO maybe add an API for querying the ws / hs
    # Convert to (x, y, w, h) so widths/heights can be read off directly.
    _, _, widths, heights = boxlist.convert("xywh").bbox.unbind(dim=1)
    large_enough = (widths >= min_size) & (heights >= min_size)
    keep = large_enough.nonzero().squeeze(1)
    return boxlist[keep]
123,992
Compute the intersection over union of two set of boxes. The box order must be (xmin, ymin, xmax, ymax). Arguments: box1: (BoxList) bounding boxes, sized [N,4]. box2: (BoxList) bounding boxes, sized [M,4]. Returns: (tensor) iou, sized [N,M]. Reference: https://github.com/chain...
def boxlist_iou(boxlist1, boxlist2): if boxlist1.size != boxlist2.size: raise RuntimeError( "boxlists should have same image size, got {}, {}".format(boxlist1, boxlist2)) N = len(boxlist1) M = len(boxlist2) area1 = boxlist1.area() area2 = boxlist2.area() box1, box...
123,993
Concatenates a list of BoxList (having the same image size) into a single BoxList Arguments: bboxes (list[BoxList])
def cat_boxlist(bboxes): assert isinstance(bboxes, (list, tuple)) assert all(isinstance(bbox, BoxList) for bbox in bboxes) size = bboxes[0].size assert all(bbox.size == size for bbox in bboxes) mode = bboxes[0].mode assert all(bbox.mode == mode for bbox in bboxes) fields = set(bboxes...
123,994
Create min and max boundary lists up to max_length. For example, when max_length=24, min_boundary=4 and boundary_scale=2, the returned values will be: buckets_min = [0, 4, 8, 16, 24] buckets_max = [4, 8, 16, 24, 25] Args: max_length: The maximum length of example in dataset. min_boundary: Minimu...
def _create_min_max_boundaries( max_length, min_boundary=_MIN_BOUNDARY, boundary_scale=_BOUNDARY_SCALE): # Create bucket boundaries list by scaling the previous boundary or adding 1 # (to ensure increasing boundary sizes). bucket_boundaries = [] x = min_boundary while x < max_length: bucket_boundar...
124,025
Convert dtype string to tf dtype, and set loss_scale default as needed. Args: flags: namespace object returned by arg parser. Raises: ValueError: If an invalid dtype is provided.
def parse_dtype_info(flags): if flags.dtype in (i[0] for i in DTYPE_MAP.values()): return # Make function idempotent try: flags.dtype, default_loss_scale = DTYPE_MAP[flags.dtype] except KeyError: raise ValueError("Invalid dtype: {}".format(flags.dtype)) flags.loss_scale = flags.loss_scale or d...
124,045
Propagate a virtual loss up to the root node. Args: up_to: The node to propagate until. (Keep track of this! You'll need it to reverse the virtual loss later.)
def add_virtual_loss(self, up_to): self.losses_applied += 1 # This is a "win" for the current node; hence a loss for its parent node # who will be deciding whether to investigate this node again. loss = self.position.to_play self.W += loss if self.parent is None ...
124,102
Propagates a value estimation up to the root node. Args: value: the value to be propagated (1 = black wins, -1 = white wins) up_to: the node to propagate until.
def backup_value(self, value, up_to):
    """Propagate a value estimation from this node up to `up_to`.

    Iterative equivalent of the recursive walk: each node on the path
    gets its visit count and total value updated, stopping at the root
    (parent is None) or at `up_to`.

    Args:
        value: the value to be propagated (1 = black wins, -1 = white wins)
        up_to: the node to propagate until.
    """
    node = self
    while True:
        node.N += 1
        node.W += value
        if node.parent is None or node is up_to:
            return
        node = node.parent
124,105
Convert a list of command arguments to types specified by the handler. Args: handler: a command handler function. args: the list of string arguments to pass to handler. Returns: A new list containing `args` that have been converted to the expected type for `handler`. For each function ...
def _convert_args(handler, args): args = list(args) params = inspect.signature(handler).parameters for i, (arg, name) in enumerate(zip(args, params)): default = params[name].default annotation = params[name].annotation if annotation != inspect.Parameter.empty: if i...
124,120
Registers a new command handler object. All methods on `handler_obj` whose name starts with "cmd_" are registered as a GTP command. For example, the method cmd_genmove will be invoked when the engine receives a genmove command. Args: handler_obj: the handler object to registe...
def add_cmd_handler(self, handler_obj): for field in dir(handler_obj): if field.startswith("cmd_"): cmd = field[4:] fn = getattr(handler_obj, field) if cmd in self.cmds: print('Replacing {} with {}'.format( ...
124,121
Calculate cross entropy loss while ignoring padding. Args: logits: Tensor of size [batch_size, length_logits, vocab_size] labels: Tensor of size [batch_size, length_labels] smoothing: Label smoothing constant, used to determine the on and off values vocab_size: int size of the vocabulary Returns: ...
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size): with tf.name_scope("loss", [logits, labels]): logits, labels = _pad_tensors_to_same_length(logits, labels) # Calculate smoothing cross entropy with tf.name_scope("smoothing_cross_entropy", [logits, labels]): confidence = 1.0 -...
124,133
Wrap a metric fn that returns scores and weights as an eval metric fn. The input metric_fn returns values for the current batch. The wrapper aggregates the return values collected over all of the batches evaluated. Args: metric_fn: function that returns scores and weights for the current batch's logit...
def _convert_to_eval_metric(metric_fn):
  """Wrap a per-batch (scores, weights) metric fn as an eval metric fn.

  The input metric_fn returns values for the current batch; the wrapper
  feeds them into tf.metrics.mean, which aggregates the results correctly
  over all evaluated batches.

  Args:
    metric_fn: function that returns scores and weights for the current
      batch's logits and targets.

  Returns:
    Function that returns the aggregated (mean) metric value.
  """
  def problem_metric_fn(*args):
    scores, weights = metric_fn(*args)
    # tf.metrics.mean assures correct aggregation across batches.
    return tf.metrics.mean(scores, weights)

  return problem_metric_fn
124,134
Approximate BLEU score computation between labels and predictions. An approximate BLEU scoring method since we do not glue word pieces or decode the ids and tokenize the output. By default, we use ngram order of 4 and use brevity penalty. Also, this does not have beam search. Args: logits: Tensor of size ...
def bleu_score(logits, labels):
  """Approximate BLEU between `labels` and the argmax of `logits`.

  Approximate because word pieces are not glued back together and the ids
  are not decoded/tokenized; ngram order 4 with brevity penalty, no beam
  search (see compute_bleu).

  Args:
    logits: Tensor of model predictions.
    labels: Tensor of gold outputs.

  Returns:
    Tuple of (bleu score tensor, constant 1.0 weight).
  """
  hypotheses = tf.to_int32(tf.argmax(logits, axis=-1))
  # TODO: Look into removing use of py_func
  score = tf.py_func(compute_bleu, (labels, hypotheses), tf.float32)
  return score, tf.constant(1.0)
124,140
Extracts all n-grams up to a given maximum order from an input segment. Args: segment: text segment from which n-grams will be extracted. max_order: maximum length in tokens of the n-grams returned by this methods. Returns: The Counter containing all n-grams upto max_order in segment with ...
def _get_ngrams_with_counter(segment, max_order): ngram_counts = collections.Counter() for order in xrange(1, max_order + 1): for i in xrange(0, len(segment) - order + 1): ngram = tuple(segment[i:i + order]) ngram_counts[ngram] += 1 return ngram_counts
124,141
Computes BLEU score of translated segments against one or more references. Args: reference_corpus: list of references for each translation. Each reference should be tokenized into a list of tokens. translation_corpus: list of translations to score. Each translation should be tokenized into a ...
def compute_bleu(reference_corpus, translation_corpus, max_order=4, use_bp=True): reference_length = 0 translation_length = 0 bp = 1.0 geo_mean = 0 matches_by_order = [0] * max_order possible_matches_by_order = [0] * max_order precisions = [] for (references, translations) in zip(r...
124,142
ROUGE-2 F1 score computation between labels and predictions. This is an approximate ROUGE scoring method since we do not glue word pieces or decode the ids and tokenize the output. Args: logits: tensor, model predictions labels: tensor, gold output. Returns: rouge2_fscore: approx rouge-2 f1 score...
def rouge_2_fscore(logits, labels):
  """Approximate ROUGE-2 F1 between `labels` and the argmax of `logits`.

  Approximate because word pieces are not glued back together and the ids
  are not decoded/tokenized before scoring.

  Args:
    logits: tensor, model predictions.
    labels: tensor, gold output.

  Returns:
    Tuple of (approx rouge-2 f1 score tensor, constant 1.0 weight).
  """
  hypotheses = tf.to_int32(tf.argmax(logits, axis=-1))
  # TODO: Look into removing use of py_func
  f_score = tf.py_func(rouge_n, (hypotheses, labels), tf.float32)
  return f_score, tf.constant(1.0)
124,143
Computes ROUGE-N f1 score of two text collections of sentences. Source: https://www.microsoft.com/en-us/research/publication/ rouge-a-package-for-automatic-evaluation-of-summaries/ Args: eval_sentences: Predicted sentences. ref_sentences: Sentences from the reference set n: Size of ngram. Defaults ...
def rouge_n(eval_sentences, ref_sentences, n=2): f1_scores = [] for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences): eval_ngrams = _get_ngrams(n, eval_sentence) ref_ngrams = _get_ngrams(n, ref_sentence) ref_count = len(ref_ngrams) eval_count = len(eval_ngrams) # Count the o...
124,144
ROUGE scores computation between labels and predictions. This is an approximate ROUGE scoring method since we do not glue word pieces or decode the ids and tokenize the output. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: rouge_l_fscore: approx rouge-l f1 sco...
def rouge_l_fscore(predictions, labels):
  """Approximate ROUGE-L F1 between `labels` and the argmax of `predictions`.

  Approximate because word pieces are not glued back together and the ids
  are not decoded/tokenized before scoring.

  Args:
    predictions: tensor, model predictions (logits).
    labels: tensor, gold output.

  Returns:
    Tuple of (approx rouge-l f1 score tensor, constant 1.0 weight).
  """
  hypothesis_ids = tf.to_int32(tf.argmax(predictions, axis=-1))
  score = tf.py_func(rouge_l_sentence_level, (hypothesis_ids, labels),
                     tf.float32)
  return score, tf.constant(1.0)
124,145
Extract files from downloaded compressed archive file. Args: path: string directory where the files will be downloaded url: url containing the compressed input and target files input_filename: name of file containing data in source language target_filename: name of file containing data in target lang...
def download_and_extract(path, url, input_filename, target_filename): logging.info('Downloading and extracting data to: %s' % path) # Check if extracted files already exist in path input_file = find_file(path, input_filename) target_file = find_file(path, target_filename) if input_file and target_file: ...
124,151
This method performs the positive/negative sampling, and return the sampled proposals. Note: this function keeps a state. Arguments: proposals (list[BoxList]) targets (list[BoxList])
def subsample(self, proposals, targets): labels, regression_targets = self.prepare_targets(proposals, targets) sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels) proposals = list(proposals) # add corresponding label and regression_targets information to the boundi...
124,164
Computes the loss for Faster R-CNN. This requires that the subsample method has been called beforehand. Arguments: class_logits (list[Tensor]) box_regression (list[Tensor]) Returns: classification_loss (Tensor) box_loss (Tensor)
def __call__(self, class_logits, box_regression): class_logits = cat(class_logits, dim=0) box_regression = cat(box_regression, dim=0) device = class_logits.device if not hasattr(self, "_proposals"): raise RuntimeError("subsample needs to be called before") ...
124,165
Run the given subprocess command in a coroutine. Args: *cmd: the command to run and its arguments. Returns: The output that the command wrote to stdout. Raises: RuntimeError: if the command returns a non-zero result.
async def checked_run(*cmd): # Start the subprocess. logging.info('Running: %s', expand_cmd_str(cmd)) with logged_timer('{} finished'.format(get_cmd_name(cmd))): p = await asyncio.create_subprocess_exec( *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT) # Stream output f...
124,169
Get frame by index. Args: frame_id (int): Index of the expected frame, 0-based. Returns: ndarray or None: Return the frame if successful, otherwise None.
def get_frame(self, frame_id): if frame_id < 0 or frame_id >= self._frame_cnt: raise IndexError( '"frame_id" must be between 0 and {}'.format(self._frame_cnt - 1)) if frame_id == self._position: ...
126,334
Convert a video to frame images Args: frame_dir (str): Output directory to store all the frame images. file_start (int): Filenames will start from the specified number. filename_tmpl (str): Filename template with the index as the placeholder. star...
def cvt2frames(self, frame_dir, file_start=0, filename_tmpl='{:06d}.jpg', start=0, max_num=0, show_progress=True): mkdir_or_exist(frame_dir) if max_num == 0: task_num = ...
126,335
Track the progress of tasks execution with a progress bar. Tasks are done with a simple for-loop. Args: func (callable): The function to be applied to each task. tasks (list or tuple[Iterable, int]): A list of tasks or (tasks, total num). bar_width (int): Width of progress ...
def track_progress(func, tasks, bar_width=50, **kwargs): if isinstance(tasks, tuple): assert len(tasks) == 2 assert isinstance(tasks[0], collections_abc.Iterable) assert isinstance(tasks[1], int) task_num = tasks[1] tasks = tasks[0] elif isinstance(tasks, collections...
126,341