python_code | repo_name | file_path
|---|---|---|
# Copyright 2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
nvidia-gcp-samples-master
|
dataflow-samples/bert-qa-trt-dataflow/helpers/__init__.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding='utf-8') as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
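# Illustrative usage sketch (not part of the original sample), assuming a
# BERT-style `vocab.txt` is available locally:
#
#   tokenizer = FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)
#   tokens = tokenizer.tokenize("TensorRT accelerates inference")
#   ids = tokenizer.convert_tokens_to_ids(tokens)
#   assert tokenizer.convert_ids_to_tokens(ids) == tokens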
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
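# Illustrative sketch (not part of the original sample): greedy
# longest-match-first lookup against a tiny in-memory vocab.
#
#   vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
#   wp = WordpieceTokenizer(vocab=vocab)
#   wp.tokenize("unaffable")  # -> ["un", "##aff", "##able"]
#   wp.tokenize("xyzzy")      # -> ["[UNK]"] (no matching pieces in the vocab)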
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
|
nvidia-gcp-samples-master
|
dataflow-samples/bert-qa-trt-dataflow/helpers/tokenization.py
|
# Copyright 2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import helpers.tokenization as tokenization
import collections
import numpy as np
import six
import math
import json
def convert_doc_tokens(paragraph_text):
""" Return the list of tokens from the doc text """
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
doc_tokens = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
return doc_tokens
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
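# Illustrative sketch (not part of the original sample): with two overlapping
# spans, the token at position 5 gets score min(left, right) + 0.01 * length,
# so the span giving it the most balanced context is chosen.
#
#   DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
#   spans = [DocSpan(start=0, length=6), DocSpan(start=3, length=6)]
#   _check_is_max_context(spans, 0, 5)  # -> False (left 5, right 0)
#   _check_is_max_context(spans, 1, 5)  # -> True  (left 2, right 3)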
def convert_example_to_features(doc_tokens, question_text, tokenizer, max_seq_length,
doc_stride, max_query_length):
"""Loads a data file into a list of `InputBatch`s."""
query_tokens = tokenizer.tokenize(question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
_Feature = collections.namedtuple( # pylint: disable=invalid-name
"Feature",
["input_ids", "input_mask", "segment_ids", "tokens", "token_to_orig_map", "token_is_max_context"])
features = []
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
def create_int_feature(values):
feature = np.asarray(values, dtype=np.int32, order=None)
return feature
features.append(_Feature(
input_ids = create_int_feature(input_ids),
input_mask = create_int_feature(input_mask),
segment_ids = create_int_feature(segment_ids),
tokens = tokens,
token_to_orig_map = token_to_orig_map,
token_is_max_context = token_is_max_context
))
return features
def read_squad_json(input_file):
"""read from squad json into a list of examples"""
with open(input_file, "r", encoding='utf-8') as reader:
input_data = json.load(reader)["data"]
_Example = collections.namedtuple( # pylint: disable=invalid-name
"Example",
["id", "question_text", "doc_tokens"])
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = convert_doc_tokens(paragraph_text)
for qa in paragraph["qas"]:
examples.append(_Example(
id = qa["id"],
question_text = qa["question"],
doc_tokens = doc_tokens
))
return examples
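# Illustrative sketch (not part of the original sample): turning a SQuAD-style
# file into model-ready features, assuming `dev-v1.1.json` and `vocab.txt`
# exist locally.
#
#   tokenizer = tokenization.FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)
#   examples = read_squad_json("dev-v1.1.json")
#   features = convert_example_to_features(examples[0].doc_tokens,
#                                           examples[0].question_text,
#                                           tokenizer, max_seq_length=384,
#                                           doc_stride=128, max_query_length=64)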
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def get_final_text(pred_text, orig_text, do_lower_case):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
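# Illustrative sketch (not part of the original sample): projecting the
# normalized prediction back onto the original, un-normalized text.
#
#   get_final_text("steve smith", "Steve Smith's", do_lower_case=True)
#   # -> "Steve Smith"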
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
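# Illustrative sketch (not part of the original sample): the softmax is
# computed with the max-score shift for numerical stability, and the
# probabilities sum to 1.
#
#   probs = _compute_softmax([2.0, 1.0, 0.1])
#   # probs[0] > probs[1] > probs[2] and abs(sum(probs) - 1.0) < 1e-9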
def get_predictions(doc_tokens, features, results, n_best_size, max_answer_length):
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
prediction = ""
scores_diff_json = 0.0
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
version_2_with_negative = False
for result in results:
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
feature = features[result.feature_index]
# if we could have irrelevant answers, get the min score of irrelevant
if version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = 0
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=result.feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=result.feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
if pred.start_index > 0: # this is a non-null prediction
feature = features[pred.feature_index]
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, True)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
if len(final_text):
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# if we didn't include the empty option in the n-best, include it
if version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="", start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
null_score_diff_threshold = 0.0
if not version_2_with_negative:
prediction = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json = score_diff
if score_diff > null_score_diff_threshold:
prediction = ""
else:
prediction = best_non_null_entry.text
return prediction, nbest_json, scores_diff_json
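# Illustrative sketch (not part of the original sample): `results` is expected
# to be an iterable of objects exposing `start_logits`, `end_logits` and
# `feature_index`, e.g. a namedtuple filled from per-feature model outputs
# (here `start` and `end` are assumed logit arrays, one row per feature).
#
#   NetworkOutput = collections.namedtuple(
#       "NetworkOutput", ["start_logits", "end_logits", "feature_index"])
#   results = [NetworkOutput(start_logits=start[i], end_logits=end[i], feature_index=i)
#              for i in range(len(features))]
#   answer, nbest, _ = get_predictions(doc_tokens, features, results,
#                                      n_best_size=20, max_answer_length=30)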
|
nvidia-gcp-samples-master
|
dataflow-samples/bert-qa-trt-dataflow/helpers/data_processing.py
|
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Text, List, Any
import copy
import os
import sys
import numpy as np
import apache_beam as beam
from apache_beam.utils import shared
from apache_beam import pvalue
import tensorflow as tf
import tensorflow_text
import logging
from apache_beam.utils import shared
from apache_beam.options.pipeline_options import PipelineOptions
import time
class MyModel():
def __init__(self, model):
self.model = model
class DoManualInference(beam.DoFn):
def __init__(self, shared_handle, saved_model_path):
self._shared_handle = shared_handle
self._saved_model_path = saved_model_path
def setup(self):
# setup is a good place to initialize transient in-memory resources.
def initialize_model():
import tensorflow as tf
import os
src='/usr/local/cuda-11.1/targets/x86_64-linux/lib/libcusolver.so.11'
dst='/usr/local/lib/python3.6/dist-packages/tensorflow/python/libcusolver.so.10'
try:
os.symlink(src, dst)
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
except:
pass
# Load a potentially large model in memory. Executed once per process.
return MyModel(tf.saved_model.load(self._saved_model_path, ["serve"]))
self._model = self._shared_handle.acquire(initialize_model)
def process(self, element: List[Text]) -> List[Any]:
yield (self.predict(element))
def predict(self, inputs: List[Text]) -> List[Any]:
batch = list(inputs)
batch_size = len(batch)
bs = 16
if batch_size < bs:
# Pad the input batch to `bs` elements to match the model's expected input.
pad = [''] * (bs - batch_size)
batch.extend(pad)
inference = self._model.model.signatures['serving_default'](tf.constant(batch))['outputs'].numpy()
return inference[0:batch_size]
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
pipeline_options = PipelineOptions(save_main_session=True)
questions = ["nq question: who is the ceo of nvidia",
"nq question: what is the population of the north varolina state",
"nq question: what is the capital city of turkey",
"nq question: when do babies start teeting"] * 4000
bs = 16
saved_model_path = 'model_b'+str(bs)
start_time = time.time()
with beam.Pipeline(options=pipeline_options) as p:
shared_handle = shared.Shared()
_ = (p | beam.Create(questions)
| beam.BatchElements(min_batch_size=bs, max_batch_size=bs)
| beam.ParDo(DoManualInference(shared_handle=shared_handle, saved_model_path=saved_model_path))
| beam.Map(print))
|
nvidia-gcp-samples-master
|
dataflow-samples/t5-dataflow-gpu-cpu/t5_gpu.py
|
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Text, List, Any
import copy
import os
import sys
import numpy as np
import apache_beam as beam
from apache_beam.utils import shared
from apache_beam import pvalue
import tensorflow as tf
import tensorflow_text
import logging
from apache_beam.utils import shared
from apache_beam.options.pipeline_options import PipelineOptions
import time
class MyModel():
def __init__(self, model):
self.model = model
class DoManualInference(beam.DoFn):
def __init__(self, shared_handle, saved_model_path):
self._shared_handle = shared_handle
self._saved_model_path = saved_model_path
def setup(self):
# setup is a good place to initialize transient in-memory resources.
def initialize_model():
import tensorflow as tf
# Load a potentially large model in memory. Executed once per process.
return MyModel(tf.saved_model.load(self._saved_model_path, ["serve"]))
self._model = self._shared_handle.acquire(initialize_model)
def process(self, element: List[Text]) -> List[Any]:
yield (self.predict(element))
def predict(self, inputs: List[Text]) -> List[Any]:
batch = list(inputs)
batch_size = len(batch)
bs = 16
if batch_size < bs:
# Pad the input batch to `bs` elements to match the model's expected input.
pad = [''] * (bs - batch_size)
batch.extend(pad)
inference = self._model.model.signatures['serving_default'](tf.constant(batch))['outputs'].numpy()
return inference[0:batch_size]
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
pipeline_options = PipelineOptions(save_main_session=True)
questions = ["nq question: who is the ceo of nvidia",
"nq question: what is the population of the north varolina state",
"nq question: what is the capital city of turkey",
"nq question: when do babies start teeting"] * 4000
bs = 16
saved_model_path = 'model_b'+str(bs)
start_time = time.time()
with beam.Pipeline(options=pipeline_options) as p:
shared_handle = shared.Shared()
_ = (p | beam.Create(questions)
| beam.BatchElements(min_batch_size=bs, max_batch_size=bs)
| beam.ParDo(DoManualInference(shared_handle=shared_handle, saved_model_path=saved_model_path))
| beam.Map(print))
|
nvidia-gcp-samples-master
|
dataflow-samples/t5-dataflow-gpu-cpu/t5_cpu.py
|
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Any, Text, Tuple
import time
import logging
import os
import apache_beam as beam
from apache_beam.utils import shared
from apache_beam.options.pipeline_options import PipelineOptions
import tensorflow as tf
from tensorflow.compat.v1.saved_model import tag_constants
def singleton(cls):
instances = {}
def getinstance(*args, **kwargs):
if cls not in instances:
instances[cls] = cls(*args, **kwargs)
return instances[cls]
return getinstance
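# Illustrative sketch (not part of the original sample): the decorator caches
# the first instance per process, so repeated construction returns the same
# object (later constructor arguments are ignored).
#
#   @singleton
#   class Expensive:
#       def __init__(self):
#           print("built once")
#
#   assert Expensive() is Expensive()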
@singleton
class TfModel():
def __init__(self, model, vocab_file="vocab.txt"):
import helpers.tokenization as tokenization
with tf.Graph().as_default() as graph:
self.vocab_file = vocab_file
self.tokenizer = tokenization.FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)
self.do_lower_case = True
self.max_seq_length = 384
self.doc_stride = 128
self.max_query_length = 64
self.verbose_logging = True
self.version_2_with_negative = False
self.n_best_size = 20
self.max_answer_length = 30
self.model = model
class DoManualInference(beam.DoFn):
def __init__(self, shared_handle, engine_path, batch_size):
import collections
self._shared_handle = shared_handle
self._engine_path = engine_path
self._batch_size = batch_size
self._NetworkOutput = collections.namedtuple(
"NetworkOutput",
["start_logits", "end_logits", "feature_index"])
def setup(self):
def initialize_model():
import tensorflow as tf
# Load a potentially large model in memory. Executed once per process.
return TfModel(tf.saved_model.load(self._engine_path, ["serve"]))
self._model = self._shared_handle.acquire(initialize_model)
def process(self, element: Tuple[Text, List[Text]]) -> List[Any]:
yield (self.predict(element))
def predict(self, inputs: Tuple[Text, List[Text]]) -> List[Any]:
import helpers.data_processing as dp
import numpy as np
import collections
import time
def question_features(tokens, question):
# Extract features from the paragraph and question
return dp.convert_example_to_features(tokens, question,
self._model.tokenizer,
self._model.max_seq_length,
self._model.doc_stride,
self._model.max_query_length)
features = []
doc_tokens = dp.convert_doc_tokens(inputs[0])
ques_list = inputs[1]
batch_size = len(ques_list)
if batch_size < self._batch_size:
# Pad the input batch to batch_size to match the model's expected input.
pad = [ques_list[0]] * (self._batch_size - batch_size)
ques_list.extend(pad)
for question_text in ques_list:
features.append(question_features(doc_tokens, question_text)[0])
input_ids_batch = np.array([feature.input_ids for feature in features]).squeeze()
segment_ids_batch = np.array([feature.segment_ids for feature in features]).squeeze()
input_mask_batch = np.array([feature.input_mask for feature in features]).squeeze()
start_time = int(time.time())
uids = np.array([ (start_time+i) for i in range(0, batch_size)], dtype=np.int32).squeeze()
inputs = {
"input_ids": input_ids_batch,
"input_mask": input_mask_batch,
"segment_ids": segment_ids_batch,
"unique_ids": uids
}
inference_func = self._model.model.signatures["serving_default"]
outputs = inference_func(**({k: tf.convert_to_tensor(v) for k, v in inputs.items()}))
return ["results"]
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
pipeline_options = PipelineOptions(save_main_session=True)
question_list = [("""TensorRT is a high performance deep learning inference platform
that delivers low latency and high throughput for apps such as
recommenders, speech and image/video on NVIDIA GPUs. It includes
parsers to import models, and plugins to support novel ops and
layers before applying optimizations for inference. Today NVIDIA
is open-sourcing parsers and plugins in TensorRT so that the deep
learning community can customize and extend these components to
take advantage of powerful TensorRT optimizations for your apps.""",
["What is TensorRT?", "Is TensorRT open sourced?", "Who is open sourcing TensorRT?",
"What does TensorRT deliver?"] * 2)] * 1000
engine_path='model.savedmodel'
with beam.Pipeline(options=pipeline_options) as p:
shared_handle = shared.Shared()
_ = (p | beam.Create(question_list)
| beam.ParDo(DoManualInference(shared_handle=shared_handle, engine_path=engine_path, batch_size=16))
)
|
nvidia-gcp-samples-master
|
dataflow-samples/bert-qa-tf-dataflow/bert_squad2_qa_cpu.py
|
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Any, Text, Tuple
import time
import logging
import os
import apache_beam as beam
from apache_beam.utils import shared
from apache_beam.options.pipeline_options import PipelineOptions
import tensorflow as tf
from tensorflow.compat.v1.saved_model import tag_constants
def singleton(cls):
instances = {}
def getinstance(*args, **kwargs):
if cls not in instances:
instances[cls] = cls(*args, **kwargs)
return instances[cls]
return getinstance
@singleton
class TfModel():
def __init__(self, model, vocab_file="vocab.txt"):
import helpers.tokenization as tokenization
with tf.Graph().as_default() as graph:
self.vocab_file = vocab_file
self.tokenizer = tokenization.FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)
self.do_lower_case = True
self.max_seq_length = 384
self.doc_stride = 128
self.max_query_length = 64
self.verbose_logging = True
self.version_2_with_negative = False
self.n_best_size = 20
self.max_answer_length = 30
self.model = model
class DoManualInference(beam.DoFn):
def __init__(self, shared_handle, engine_path, batch_size):
import collections
self._shared_handle = shared_handle
self._engine_path = engine_path
self._batch_size = batch_size
self._NetworkOutput = collections.namedtuple(
"NetworkOutput",
["start_logits", "end_logits", "feature_index"])
def setup(self):
def initialize_model():
import tensorflow as tf
import os
src='/usr/local/cuda-11.1/targets/x86_64-linux/lib/libcusolver.so.11'
dst='/usr/local/lib/python3.6/dist-packages/tensorflow/python/libcusolver.so.10'
try:
os.symlink(src, dst)
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
except:
pass
# Load a potentially large model in memory. Executed once per process.
return TfModel(tf.saved_model.load(self._engine_path, ["serve"]))
self._model = self._shared_handle.acquire(initialize_model)
def process(self, element: Tuple[Text, List[Text]]) -> List[Any]:
yield (self.predict(element))
def predict(self, inputs: Tuple[Text, List[Text]]) -> List[Any]:
import helpers.data_processing as dp
import numpy as np
import collections
import time
def question_features(tokens, question):
# Extract features from the paragraph and question
return dp.convert_example_to_features(tokens, question,
self._model.tokenizer,
self._model.max_seq_length,
self._model.doc_stride,
self._model.max_query_length)
features = []
doc_tokens = dp.convert_doc_tokens(inputs[0])
ques_list = inputs[1]
batch_size = len(ques_list)
if batch_size < self._batch_size:
# Pad the input batch to batch_size to match the model's expected input.
pad = [ques_list[0]] * (self._batch_size - batch_size)
ques_list.extend(pad)
for question_text in ques_list:
features.append(question_features(doc_tokens, question_text)[0])
input_ids_batch = np.array([feature.input_ids for feature in features]).squeeze()
segment_ids_batch = np.array([feature.segment_ids for feature in features]).squeeze()
input_mask_batch = np.array([feature.input_mask for feature in features]).squeeze()
start_time = int(time.time())
uids = np.array([ (start_time+i) for i in range(0, batch_size)], dtype=np.int32).squeeze()
inputs = {
"input_ids": input_ids_batch,
"input_mask": input_mask_batch,
"segment_ids": segment_ids_batch,
"unique_ids": uids
}
inference_func = self._model.model.signatures["serving_default"]
outputs = inference_func(**({k: tf.convert_to_tensor(v) for k, v in inputs.items()}))
return ["results"]
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
pipeline_options = PipelineOptions(save_main_session=True)
question_list = [("""TensorRT is a high performance deep learning inference platform
that delivers low latency and high throughput for apps such as
recommenders, speech and image/video on NVIDIA GPUs. It includes
parsers to import models, and plugins to support novel ops and
layers before applying optimizations for inference. Today NVIDIA
is open-sourcing parsers and plugins in TensorRT so that the deep
learning community can customize and extend these components to
take advantage of powerful TensorRT optimizations for your apps.""",
["What is TensorRT?", "Is TensorRT open sourced?", "Who is open sourcing TensorRT?",
"What does TensorRT deliver?"] * 4)] * 1000
engine_path='model.savedmodel'
with beam.Pipeline(options=pipeline_options) as p:
shared_handle = shared.Shared()
_ = (p | beam.Create(question_list)
| beam.ParDo(DoManualInference(shared_handle=shared_handle, engine_path=engine_path, batch_size=16))
)
|
nvidia-gcp-samples-master
|
dataflow-samples/bert-qa-tf-dataflow/bert_squad2_qa_gpu.py
|
# Copyright 2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xgboost as xgb
from xgboost.dask import DaskDMatrix, DaskDeviceQuantileDMatrix
from dask.distributed import Client, wait
from dask.distributed import LocalCluster
from dask import array as da
import dask.dataframe as dd
import argparse
import gcsfs
import time
import os, json
def main(client, train_dir, model_file, fs, do_wait=False ):
colnames = ['label'] + ['feature-%02d' % i for i in range(1, 29)]
df = dd.read_csv(train_dir, header=None, names=colnames)
X = df[df.columns.difference(['label'])]
y = df['label']
print("[INFO]: ------ CSV files are read")
if do_wait is True:
df = df.persist()
X = X.persist()
wait(df)
wait(X)
print("[INFO]: ------ Long waited but the data is ready now")
start_time = time.time()
dtrain = DaskDMatrix(client, X, y)
print("[INFO]: ------ QuantileDMatrix is formed in {} seconds ---".format((time.time() - start_time)))
del df
del X
del y
start_time = time.time()
output = xgb.dask.train(client,
{ 'verbosity': 2,
'learning_rate': 0.1,
'max_depth': 8,
'objective': 'reg:squarederror',
'subsample': 0.5,
'gamma': 0.9,
'verbose_eval': True,
'tree_method':'hist',
},
dtrain,
num_boost_round=100, evals=[(dtrain, 'train')])
print("[INFO]: ------ Training is completed in {} seconds ---".format((time.time() - start_time)))
history = output['history']
print('[INFO]: ------ Training evaluation history:', history)
output['booster'].save_model('/tmp/tmp.model')
fs.put('/tmp/tmp.model', model_file)
print("[INFO]: ------ Model saved here:{}".format( model_file))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--gcp-project', type=str, help='user gcp project',
default='crisp-sa')
parser.add_argument(
'--train-files', type=str, help='Training files local or GCS',
default='gs://crisp-sa/rapids/higgs_csv/*.csv')
parser.add_argument(
'--model-file', type=str,
help="""GCS or local dir for checkpoints, exports, and summaries.
Use an existing directory to load a trained model, or a new directory
to retrain""",
default='gs://crisp-sa/rapids/models/001.model')
parser.add_argument(
'--num-worker', type=int, help='num of workers',
default=2)
parser.add_argument(
'--threads-per-worker', type=int, help='num of threads per worker',
default=4)
parser.add_argument(
'--do-wait', action='store_true', help='do persist/wait data')
args = parser.parse_args()
print("[INFO]: ------ Arguments parsed")
print(args)
fs = gcsfs.GCSFileSystem(project=args.gcp_project, token='cloud')
print("[INFO]: ------ gcsfs object is created")
print("[INFO]: ------ LocalCluster is being formed")
# or use other clusters for scaling
with LocalCluster(n_workers=args.num_worker, threads_per_worker=args.threads_per_worker) as cluster:
with Client(cluster) as client:
main(client, args.train_files, args.model_file, fs, args.do_wait)
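# Illustrative sketch (not part of the original sample): the saved booster can
# be loaded back for single-node prediction with the plain XGBoost API
# (`X_test` is an assumed feature DataFrame with the same 28 feature columns).
#
#   booster = xgb.Booster()
#   booster.load_model('/tmp/tmp.model')
#   preds = booster.predict(xgb.DMatrix(X_test))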
|
nvidia-gcp-samples-master
|
ai-platform-samples/xgboost_single_node/gcsfs_local_cpu/rapids_opt2_cpu.py
|
# Copyright 2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dask_cuda import LocalCUDACluster
from dask.distributed import Client
from dask.distributed import wait
from dask import array as da
import xgboost as xgb
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
import cupy as cp
import argparse
import time
import gcsfs
import dask_cudf
import os, json
import subprocess
def using_quantile_device_dmatrix(client: Client, train_dir, model_file, fs, do_wait=False, parquet=False):
'''`DaskDeviceQuantileDMatrix` is a data type specialized for the `gpu_hist` tree
method that reduces memory overhead. When training on a GPU pipeline, it is
preferred over `DaskDMatrix`.
.. versionadded:: 1.2.0
'''
colnames = ['label'] + ['feature-%02d' % i for i in range(1, 29)]
if parquet is True:
df = dask_cudf.read_parquet(train_dir, columns=colnames)
else:
df = dask_cudf.read_csv(train_dir, header=None, names=colnames, chunksize=None)
X = df[df.columns.difference(['label'])]
y = df['label']
print("[INFO]: ------ CSV files are read")
if do_wait is True:
df = df.persist()
X = X.persist()
wait(df)
wait(X)
print("[INFO]: ------ Long waited but the data is ready now")
# `DaskDeviceQuantileDMatrix` is used instead of `DaskDMatrix`; be careful
# that it cannot be used for anything other than training.
start_time = time.time()
dtrain = dxgb.DaskDeviceQuantileDMatrix(client, X, y)
print("[INFO]: ------ QuantileDMatrix is formed in {} seconds ---".format((time.time() - start_time)))
del df
del X
del y
start_time = time.time()
output = xgb.dask.train(client,
{ 'verbosity': 2,
'learning_rate': 0.1,
'max_depth': 8,
'objective': 'reg:squarederror',
'subsample': 0.5,
'gamma': 0.9,
'verbose_eval': True,
'tree_method':'gpu_hist',
'nthread':1
},
dtrain,
num_boost_round=100, evals=[(dtrain, 'train')])
print("[INFO]: ------ Training is completed in {} seconds ---".format((time.time() - start_time)))
history = output['history']
print('[INFO]: ------ Training evaluation history:', history)
output['booster'].save_model('/tmp/tmp.model')
fs.put('/tmp/tmp.model', model_file)
print("[INFO]: ------ Model saved here:{}".format( model_file))
def get_scheduler_info():
scheduler_ip = subprocess.check_output(['hostname','--all-ip-addresses'])
scheduler_ip = scheduler_ip.decode('UTF-8').split()[0]
scheduler_port = '8786'
scheduler_uri = '{}:{}'.format(scheduler_ip, scheduler_port)
return scheduler_ip, scheduler_uri
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--gcp-project', type=str, help='user gcp project',
default='crisp-sa')
parser.add_argument(
'--train-files', type=str, help='Training files local or GCS',
default='gs://crisp-sa/rapids/higgs_csv/*.csv')
parser.add_argument(
'--model-file', type=str,
help="""GCS or local dir for checkpoints, exports, and summaries.
Use an existing directory to load a trained model, or a new directory
to retrain""",
default='gs://crisp-sa/rapids/models/001.model')
parser.add_argument(
'--num-worker', type=int, help='num of workers',
default=2)
parser.add_argument(
'--threads-per-worker', type=int, help='num of threads per worker',
default=4)
parser.add_argument(
'--do-wait', action='store_true', help='do persist/wait data')
parser.add_argument(
'--parquet', action='store_true', help='parquet files are used')
args = parser.parse_args()
print("[INFO]: ------ Arguments parsed")
print(args)
fs = gcsfs.GCSFileSystem(project=args.gcp_project, token='cloud')
print("[INFO]: ------ gcsfs object is created")
sched_ip, sched_uri = get_scheduler_info()
print("[INFO]: ------ LocalCUDACluster is being formed")
# `LocalCUDACluster` is used for assigning GPU to XGBoost processes. Here
# `n_workers` represents the number of GPUs since we use one GPU per worker
# process.
with LocalCUDACluster(ip=sched_ip, n_workers=args.num_worker, threads_per_worker=args.threads_per_worker) as cluster:
#with LocalCUDACluster(n_workers=args.num_gpu_per_worker, threads_per_worker=args.threads_per_worker) as cluster:
with Client(cluster) as client:
# generate some random data for demonstration
print('[INFO]: ------ Calling main function')
using_quantile_device_dmatrix(client, args.train_files, args.model_file, fs, args.do_wait, args.parquet)
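# Illustrative sketch (not part of the original sample): distributed prediction
# can reuse the same Dask client and the dictionary returned by `xgb.dask.train`
# (called `output` inside the training function; `X` is an assumed dask_cudf
# DataFrame of feature columns).
#
#   predictions = xgb.dask.predict(client, output, X)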
|
nvidia-gcp-samples-master
|
ai-platform-samples/xgboost_single_node/gcsfs_localcuda/rapids_opt2.py
|
# coding=utf-8
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run BERT on SQuAD 1.1 and SQuAD 2.0."""
from __future__ import absolute_import, division, print_function
import collections
import json
import math
import os
import random
import shutil
import time
import numpy as np
import six
import tensorflow as tf
from tensorflow.python.client import device_lib
import tokenization
from utils.create_squad_data import *
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
def get_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length,
do_lower_case, version_2_with_negative, verbose_logging):
"""Get final predictions"""
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# if we didn't include the empty option in the n-best, include it
if version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="", start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > FLAGS.null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
return all_predictions, all_nbest_json, scores_diff_json
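# Illustrative note (not in the original file): with the SQuAD v2 logic above,
# if the best non-null span scores start_logit + end_logit = 5.0 while the
# null score is 7.5, then score_diff = 2.5; with a null_score_diff_threshold
# of 0.0 this exceeds the threshold and the prediction becomes the empty
# string, i.e. "no answer".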
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file,
version_2_with_negative, verbose_logging):
"""Write final predictions to the json file and log-odds of null if needed."""
tf.compat.v1.logging.info("Writing predictions to: %s" % (output_prediction_file))
tf.compat.v1.logging.info("Writing nbest to: %s" % (output_nbest_file))
all_predictions, all_nbest_json, scores_diff_json = get_predictions(all_examples, all_features,
all_results, n_best_size, max_answer_length, do_lower_case, version_2_with_negative, verbose_logging)
with tf.io.gfile.GFile(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with tf.io.gfile.GFile(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
with tf.io.gfile.GFile(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
tf.compat.v1.logging.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
tf.compat.v1.logging.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
tf.compat.v1.logging.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
tf.compat.v1.logging.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
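# Quick illustration (not in the original file): for logits [0.1, 3.2, -1.0, 2.5]
# and n_best_size=2, the indexes sorted by logit are [1, 3, 0, 2], so the
# function returns [1, 3].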
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
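# Worked example (not in the original file): _compute_softmax([1.0, 2.0, 3.0])
# subtracts the max score (3.0) for numerical stability, giving
# exp(-2) ~= 0.135, exp(-1) ~= 0.368, exp(0) = 1.0, and then normalizes to
# roughly [0.090, 0.245, 0.665].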
def validate_flags_or_throw(bert_config):
"""Validate the input FLAGS or throw an exception."""
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_predict and not FLAGS.export_triton:
raise ValueError("At least one of `do_train` or `do_predict` or `export_SavedModel` must be True.")
if FLAGS.do_train:
if not FLAGS.train_file:
raise ValueError(
"If `do_train` is True, then `train_file` must be specified.")
if FLAGS.do_predict:
if not FLAGS.predict_file:
raise ValueError(
"If `do_predict` is True, then `predict_file` must be specified.")
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
raise ValueError(
"The max_seq_length (%d) must be greater than max_query_length "
"(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length))
def export_model(estimator, export_dir, init_checkpoint):
"""Exports a checkpoint in SavedModel format in a directory structure compatible with Triton."""
def serving_input_fn():
label_ids = tf.placeholder(tf.int32, [None,], name='unique_ids')
input_ids = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_ids')
input_mask = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_mask')
segment_ids = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='segment_ids')
input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
'unique_ids': label_ids,
'input_ids': input_ids,
'input_mask': input_mask,
'segment_ids': segment_ids,
})()
return input_fn
saved_dir = estimator.export_savedmodel(
export_dir,
serving_input_fn,
assets_extra=None,
as_text=False,
checkpoint_path=init_checkpoint,
strip_default_attrs=False)
model_name = FLAGS.triton_model_name
model_folder = export_dir + "/triton_models/" + model_name
version_folder = model_folder + "/" + str(FLAGS.triton_model_version)
final_model_folder = version_folder + "/model.savedmodel"
if not os.path.exists(version_folder):
os.makedirs(version_folder)
if (not os.path.exists(final_model_folder)):
os.rename(saved_dir, final_model_folder)
print("Model saved to dir", final_model_folder)
else:
if (FLAGS.triton_model_overwrite):
shutil.rmtree(final_model_folder)
os.rename(saved_dir, final_model_folder)
print("WARNING: Existing model was overwritten. Model dir: {}".format(final_model_folder))
else:
print("ERROR: Could not save Triton model. Folder already exists. Use '--triton_model_overwrite=True' if you would like to overwrite an existing model. Model dir: {}".format(final_model_folder))
return
# Now build the config for Triton. Check to make sure we can overwrite it, if it exists
config_filename = os.path.join(model_folder, "config.pbtxt")
if (os.path.exists(config_filename) and not FLAGS.triton_model_overwrite):
print("ERROR: Could not save Triton model config. Config file already exists. Use '--triton_model_overwrite=True' if you would like to overwrite an existing model config. Model config: {}".format(config_filename))
return
config_template = r"""
name: "{model_name}"
platform: "tensorflow_savedmodel"
max_batch_size: {max_batch_size}
input [
{{
name: "unique_ids"
data_type: TYPE_INT32
dims: [ 1 ]
reshape: {{ shape: [ ] }}
}},
{{
name: "segment_ids"
data_type: TYPE_INT32
dims: {seq_length}
}},
{{
name: "input_ids"
data_type: TYPE_INT32
dims: {seq_length}
}},
{{
name: "input_mask"
data_type: TYPE_INT32
dims: {seq_length}
}}
]
output [
{{
name: "end_logits"
data_type: TYPE_FP32
dims: {seq_length}
}},
{{
name: "start_logits"
data_type: TYPE_FP32
dims: {seq_length}
}}
]
{dynamic_batching}
instance_group [
{{
count: {engine_count}
kind: KIND_GPU
gpus: [{gpu_list}]
}}
]"""
batching_str = ""
max_batch_size = FLAGS.triton_max_batch_size
if (FLAGS.triton_dyn_batching_delay > 0):
# Use only full and half full batches
pref_batch_size = [int(max_batch_size / 2.0), max_batch_size]
batching_str = r"""
dynamic_batching {{
preferred_batch_size: [{0}]
max_queue_delay_microseconds: {1}
}}""".format(", ".join([str(x) for x in pref_batch_size]), int(FLAGS.triton_dyn_batching_delay * 1000.0))
config_values = {
"model_name": model_name,
"max_batch_size": max_batch_size,
"seq_length": FLAGS.max_seq_length,
"dynamic_batching": batching_str,
"gpu_list": ", ".join([x.name.split(":")[-1] for x in device_lib.list_local_devices() if x.device_type == "GPU"]),
"engine_count": FLAGS.triton_engine_count
}
with open(config_filename, "w") as file:
final_config_str = config_template.format_map(config_values)
file.write(final_config_str)
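# For reference (illustrative, not generated by the original file): with
# hypothetical flag values triton_model_name="bert", max_seq_length=384,
# triton_max_batch_size=8, triton_engine_count=1, one visible GPU and dynamic
# batching disabled, the template above renders a config.pbtxt along these lines:
#
# name: "bert"
# platform: "tensorflow_savedmodel"
# max_batch_size: 8
# input [
#   { name: "unique_ids" data_type: TYPE_INT32 dims: [ 1 ] reshape: { shape: [ ] } },
#   { name: "segment_ids" data_type: TYPE_INT32 dims: 384 },
#   { name: "input_ids"   data_type: TYPE_INT32 dims: 384 },
#   { name: "input_mask"  data_type: TYPE_INT32 dims: 384 }
# ]
# output [
#   { name: "end_logits"   data_type: TYPE_FP32 dims: 384 },
#   { name: "start_logits" data_type: TYPE_FP32 dims: 384 }
# ]
# instance_group [ { count: 1 kind: KIND_GPU gpus: [0] } ]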
|
nvidia-gcp-samples-master
|
ai-platform-samples/bert_on_caip/ai_platform_prediction_triton/run_squad.py
|
nvidia-gcp-samples-master
|
ai-platform-samples/bert_on_caip/ai_platform_prediction_triton/__init__.py
|
|
# coding=utf-8
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import re
import os
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
}
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a peice of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True):
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
@classmethod
def from_pretrained(cls, pretrained_model_name, do_lower_case=True):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name]
else:
vocab_file = pretrained_model_name
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file)
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, do_lower_case)
except FileNotFoundError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
pretrained_model_name))
tokenizer = None
return tokenizer
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
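# Minimal usage sketch (illustrative; the tiny vocabulary below is made up and
# not part of the original file):
#
#   toy_vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
#   wp = WordpieceTokenizer(vocab=toy_vocab)
#   wp.tokenize("unaffable")  # -> ["un", "##aff", "##able"]
#   wp.tokenize("xyz")        # -> ["[UNK]"], since no prefix of "xyz" is in the vocab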
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
|
nvidia-gcp-samples-master
|
ai-platform-samples/bert_on_caip/ai_platform_prediction_triton/tokenization.py
|
#!/usr/bin/env python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Usage:
Context and questions are to be put into json file respecting SQuAD format.
ex)
python get_request_body_bert.py -f <filename>.json
curl \
-X GET -k -H "Content-Type: application/json" \
-H "Authorization: Bearer `gcloud auth print-access-token`" \
"${ENDPOINT}/projects/${PROJECT_NAME}/models/${MODEL_NAME}"
curl \
-X POST <triton_host_ip>:8000/v2/models/bert/infer \
-k -H "Content-Type: application/json" \
-d @bert_squad.json
expected output:
"""
from utils.create_squad_data import read_squad_examples, convert_examples_to_features
import argparse
import json
import numpy as np
import os
import random
import run_squad
import sys
import struct
import tensorflow as tf
import time
import tokenization
class InferInput:
"""An object of InferInput class is used to describe
input tensor for an inference request.
Parameters
----------
name : str
The name of input whose data will be described by this object
shape : list
The shape of the associated input.
datatype : str
The datatype of the associated input.
"""
def __init__(self, name, shape, datatype):
self._name = name
self._shape = shape
self._datatype = datatype
self._parameters = {}
self._data = None
self._raw_data = None
def name(self):
"""Get the name of input associated with this object.
Returns
-------
str
The name of input
"""
return self._name
def datatype(self):
"""Get the datatype of input associated with this object.
Returns
-------
str
The datatype of input
"""
return self._datatype
def shape(self):
"""Get the shape of input associated with this object.
Returns
-------
list
The shape of input
"""
return self._shape
def set_shape(self, shape):
"""Set the shape of input.
Parameters
----------
shape : list
The shape of the associated input.
"""
self._shape = shape
def set_data_from_numpy(self, input_tensor, binary_data=True):
"""Set the tensor data from the specified numpy array for
input associated with this object.
Parameters
----------
input_tensor : numpy array
The tensor data in numpy array format
binary_data : bool
Indicates whether to set data for the input in binary format
or explicit tensor within JSON. The default value is True,
which means the data will be delivered as binary data in the
HTTP body after the JSON object.
Raises
------
InferenceServerException
If failed to set data for the tensor.
"""
if not isinstance(input_tensor, (np.ndarray,)):
raise_error("input_tensor must be a numpy array")
dtype = np_to_triton_dtype(input_tensor.dtype)
if self._datatype != dtype:
raise_error(
"got unexpected datatype {} from numpy array, expected {}".
format(dtype, self._datatype))
valid_shape = True
if len(self._shape) != len(input_tensor.shape):
valid_shape = False
else:
for i in range(len(self._shape)):
if self._shape[i] != input_tensor.shape[i]:
valid_shape = False
if not valid_shape:
raise_error(
"got unexpected numpy array shape [{}], expected [{}]".format(
str(input_tensor.shape)[1:-1],
str(self._shape)[1:-1]))
self._parameters.pop('shared_memory_region', None)
self._parameters.pop('shared_memory_byte_size', None)
self._parameters.pop('shared_memory_offset', None)
if not binary_data:
self._parameters.pop('binary_data_size', None)
self._raw_data = None
if self._datatype == "BYTES":
self._data = [val for val in input_tensor.flatten()]
else:
self._data = [val.item() for val in input_tensor.flatten()]
else:
self._data = None
if self._datatype == "BYTES":
self._raw_data = serialize_byte_tensor(input_tensor).tobytes()
else:
self._raw_data = input_tensor.tobytes()
self._parameters['binary_data_size'] = len(self._raw_data)
def set_shared_memory(self, region_name, byte_size, offset=0):
"""Set the tensor data from the specified shared memory region.
Parameters
----------
region_name : str
The name of the shared memory region holding tensor data.
byte_size : int
The size of the shared memory region holding tensor data.
offset : int
The offset, in bytes, into the region where the data for
the tensor starts. The default value is 0.
"""
self._data = None
self._raw_data = None
self._parameters.pop('binary_data_size', None)
self._parameters['shared_memory_region'] = region_name
self._parameters['shared_memory_byte_size'] = byte_size
if offset != 0:
self._parameters['shared_memory_offset'].int64_param = offset
def _get_binary_data(self):
"""Returns the raw binary data if available
Returns
-------
bytes
The raw data for the input tensor
"""
return self._raw_data
def _get_tensor(self):
"""Retrieve the underlying input as json dict.
Returns
-------
dict
The underlying tensor specification as dict
"""
if self._parameters.get('shared_memory_region') is not None or \
self._raw_data is not None:
return {
'name': self._name,
'shape': self._shape,
'datatype': self._datatype,
'parameters': self._parameters,
}
else:
return {
'name': self._name,
'shape': self._shape,
'datatype': self._datatype,
'parameters': self._parameters,
'data': self._data
}
class InferRequestedOutput:
"""An object of InferRequestedOutput class is used to describe a
requested output tensor for an inference request.
Parameters
----------
name : str
The name of output tensor to associate with this object.
binary_data : bool
Indicates whether to return result data for the output in
binary format or explicit tensor within JSON. The default
value is True, which means the data will be delivered as
binary data in the HTTP body after JSON object. This field
will be unset if shared memory is set for the output.
class_count : int
The number of classifications to be requested. The default
value is 0 which means the classification results are not
requested.
"""
def __init__(self, name, binary_data=True, class_count=0):
self._name = name
self._parameters = {}
if class_count != 0:
self._parameters['classification'] = class_count
self._binary = binary_data
self._parameters['binary_data'] = binary_data
def name(self):
"""Get the name of output associated with this object.
Returns
-------
str
The name of output
"""
return self._name
def set_shared_memory(self, region_name, byte_size, offset=0):
"""Marks the output to return the inference result in
specified shared memory region.
Parameters
----------
region_name : str
The name of the shared memory region to hold tensor data.
byte_size : int
The size of the shared memory region to hold tensor data.
offset : int
The offset, in bytes, into the region where the data for
the tensor starts. The default value is 0.
"""
if 'classification' in self._parameters:
raise_error("shared memory can't be set on classification output")
if self._binary:
self._parameters['binary_data'] = False
self._parameters['shared_memory_region'] = region_name
self._parameters['shared_memory_byte_size'] = byte_size
if offset != 0:
self._parameters['shared_memory_offset'] = offset
def unset_shared_memory(self):
"""Clears the shared memory option set by the last call to
InferRequestedOutput.set_shared_memory(). After call to this
function requested output will no longer be returned in a
shared memory region.
"""
self._parameters['binary_data'] = self._binary
self._parameters.pop('shared_memory_region', None)
self._parameters.pop('shared_memory_byte_size', None)
self._parameters.pop('shared_memory_offset', None)
def _get_tensor(self):
"""Retrieve the underlying input as json dict.
Returns
-------
dict
The underlying tensor as a dict
"""
return {'name': self._name, 'parameters': self._parameters}
class InferResult:
"""An object of InferResult class holds the response of
an inference request and provide methods to retrieve
inference results.
Parameters
----------
result : dict
The inference response from the server
verbose : bool
If True generate verbose output. Default value is False.
"""
def __init__(self, response, verbose):
header_length = response.get('Inference-Header-Content-Length')
header_length = None
if header_length is None:
content = response.read()
if verbose:
print(content)
self._result = json.loads(content)
else:
header_length = int(header_length)
content = response.read(length=header_length)
if verbose:
print(content)
self._result = json.loads(content)
# Maps the output name to the index in buffer for quick retrieval
self._output_name_to_buffer_map = {}
# Read the remaining data off the response body.
self._buffer = response.read()
buffer_index = 0
for output in self._result['outputs']:
parameters = output.get("parameters")
if parameters is not None:
this_data_size = parameters.get("binary_data_size")
if this_data_size is not None:
self._output_name_to_buffer_map[
output['name']] = buffer_index
buffer_index = buffer_index + this_data_size
def as_numpy(self, name):
"""Get the tensor data for output associated with this object
in numpy format
Parameters
----------
name : str
The name of the output tensor whose result is to be retrieved.
Returns
-------
numpy array
The numpy array containing the response data for the tensor or
None if the data for specified tensor name is not found.
"""
if self._result.get('outputs') is not None:
for output in self._result['outputs']:
if output['name'] == name:
datatype = output['datatype']
has_binary_data = False
parameters = output.get("parameters")
if parameters is not None:
this_data_size = parameters.get("binary_data_size")
if this_data_size is not None:
has_binary_data = True
if this_data_size != 0:
start_index = self._output_name_to_buffer_map[
name]
end_index = start_index + this_data_size
if datatype == 'BYTES':
# String results contain a 4-byte string length
# followed by the actual string characters. Hence,
# need to decode the raw bytes to convert into
# array elements.
np_array = deserialize_bytes_tensor(
self._buffer[start_index:end_index])
else:
np_array = np.frombuffer(
self._buffer[start_index:end_index],
dtype=triton_to_np_dtype(datatype))
else:
np_array = np.empty(0)
if not has_binary_data:
np_array = np.array(output['data'],
dtype=triton_to_np_dtype(datatype))
np_array = np.resize(np_array, output['shape'])
return np_array
return None
def get_output(self, name):
"""Retrieves the output tensor corresponding to the named ouput.
Parameters
----------
name : str
The name of the tensor for which Output is to be
retrieved.
Returns
-------
Dict
If an output tensor with specified name is present in
the infer response then returns it as a json dict,
otherwise returns None.
"""
for output in self._result['outputs']:
if output['name'] == name:
return output
return None
def get_response(self):
"""Retrieves the complete response
Returns
-------
dict
The underlying response dict.
"""
return self._result
def np_to_triton_dtype(np_dtype):
if np_dtype == np.bool:
return "BOOL"
elif np_dtype == np.int8:
return "INT8"
elif np_dtype == np.int16:
return "INT16"
elif np_dtype == np.int32:
return "INT32"
elif np_dtype == np.int64:
return "INT64"
elif np_dtype == np.uint8:
return "UINT8"
elif np_dtype == np.uint16:
return "UINT16"
elif np_dtype == np.uint32:
return "UINT32"
elif np_dtype == np.uint64:
return "UINT64"
elif np_dtype == np.float16:
return "FP16"
elif np_dtype == np.float32:
return "FP32"
elif np_dtype == np.float64:
return "FP64"
elif np_dtype == np.object or np_dtype.type == np.bytes_:
return "BYTES"
return None
def triton_to_np_dtype(dtype):
if dtype == "BOOL":
return np.bool
elif dtype == "INT8":
return np.int8
elif dtype == "INT16":
return np.int16
elif dtype == "INT32":
return np.int32
elif dtype == "INT64":
return np.int64
elif dtype == "UINT8":
return np.uint8
elif dtype == "UINT16":
return np.uint16
elif dtype == "UINT32":
return np.uint32
elif dtype == "UINT64":
return np.uint64
elif dtype == "FP16":
return np.float16
elif dtype == "FP32":
return np.float32
elif dtype == "FP64":
return np.float64
elif dtype == "BYTES":
return np.object
return None
def batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
label_ids_data = ()
input_ids_data = ()
input_mask_data = ()
segment_ids_data = ()
for i in range(0, min(n, l-ndx)):
label_ids_data = label_ids_data + (np.array([iterable[ndx + i].unique_id], dtype=np.int32),)
input_ids_data = input_ids_data+ (np.array(iterable[ndx + i].input_ids, dtype=np.int32),)
input_mask_data = input_mask_data+ (np.array(iterable[ndx + i].input_mask, dtype=np.int32),)
segment_ids_data = segment_ids_data+ (np.array(iterable[ndx + i].segment_ids, dtype=np.int32),)
inputs_dict = {'unique_ids': label_ids_data,
'input_ids': input_ids_data,
'input_mask': input_mask_data,
'segment_ids': segment_ids_data}
return inputs_dict
def get_bert_inference_request(inputs, request_id, outputs, sequence_id,
sequence_start, sequence_end, priority, timeout):
infer_request = {}
parameters = {}
if request_id != "":
infer_request['id'] = request_id
if sequence_id != 0:
parameters['sequence_id'] = sequence_id
parameters['sequence_start'] = sequence_start
parameters['sequence_end'] = sequence_end
if priority != 0:
parameters['priority'] = priority
if timeout is not None:
parameters['timeout'] = timeout
infer_request['inputs'] = [
this_input._get_tensor() for this_input in inputs
]
if outputs:
infer_request['outputs'] = [
this_output._get_tensor() for this_output in outputs
]
if parameters:
infer_request['parameters'] = parameters
request_body = json.dumps(infer_request)
json_size = len(request_body)
binary_data = None
for input_tensor in inputs:
raw_data = input_tensor._get_binary_data()
if raw_data is not None:
if binary_data is not None:
binary_data += raw_data
else:
binary_data = raw_data
if binary_data is not None:
request_body = struct.pack(
'{}s{}s'.format(len(request_body), len(binary_data)),
request_body.encode(), binary_data)
return request_body, json_size
return request_body, None
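# Illustrative note (not in the original file): with binary_data=False on every
# input, as in get_bert_request_body below, the request body produced here is
# plain JSON in the Triton/KFServing v2 inference format, roughly:
#
#   {"inputs": [{"name": "input_ids", "shape": [1, 384], "datatype": "INT32",
#                "parameters": {}, "data": [...]},
#               ...],
#    "outputs": [{"name": "cls_squad_logits", "parameters": {"binary_data": false}}]}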
def get_bert_request_body(input_data, version_2_with_negative,
tokenizer, max_seq_length, doc_stride,
max_query_length):
eval_examples = read_squad_examples(input_file=None, is_training=False,
version_2_with_negative=version_2_with_negative,
input_data=input_data)
eval_features = []
def append_feature(feature):
eval_features.append(feature)
convert_examples_to_features(
examples=eval_examples[0:],
tokenizer=tokenizer,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_training=False,
output_fn=append_feature)
inputs_dict = batch(eval_features)
label_ids_data = np.stack(inputs_dict['unique_ids'])
input_ids_data = np.stack(inputs_dict['input_ids'])
input_mask_data = np.stack(inputs_dict['input_mask'])
segment_ids_data = np.stack(inputs_dict['segment_ids'])
inputs = []
inputs.append(InferInput('input_ids', input_ids_data.shape, "INT32"))
inputs[0].set_data_from_numpy(input_ids_data, binary_data=False)
inputs.append(InferInput('input_mask', input_mask_data.shape, "INT32"))
inputs[1].set_data_from_numpy(input_mask_data, binary_data=False)
inputs.append(InferInput('segment_ids', segment_ids_data.shape, "INT32"))
inputs[2].set_data_from_numpy(segment_ids_data, binary_data=False)
outputs = []
outputs.append(InferRequestedOutput('cls_squad_logits', binary_data=False))
request_id = ''
sequence_id = 0
sequence_start = False
sequence_end = False
priority = 0
timeout = None
headers=None
query_params=None
request_body, json_size = get_bert_inference_request(
inputs=inputs,
request_id=request_id,
outputs=outputs,
sequence_id=sequence_id,
sequence_start=sequence_start,
sequence_end=sequence_end,
priority=priority,
timeout=timeout)
return request_body, inputs_dict, eval_examples, eval_features
|
nvidia-gcp-samples-master
|
ai-platform-samples/bert_on_caip/ai_platform_prediction_triton/get_request_body_bert.py
|
nvidia-gcp-samples-master
|
ai-platform-samples/bert_on_caip/ai_platform_prediction_triton/utils/__init__.py
|
|
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import math
import os
import random
import tokenization
import six
import tensorflow as tf
import time
flags = tf.flags
FLAGS = None
def extract_flags():
flags.DEFINE_integer(
"max_seq_length", 384,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer(
"doc_stride", 128,
"When splitting up a long document into chunks, how much stride to "
"take between chunks.")
flags.DEFINE_integer(
"max_query_length", 64,
"The maximum number of tokens for the question. Questions longer than "
"this will be truncated to this length.")
flags.DEFINE_bool(
"version_2_with_negative", False,
"If true, the SQuAD examples contain some that do not have an answer.")
flags.DEFINE_string("train_file", None,
"SQuAD json for training. E.g., train-v1.1.json")
flags.DEFINE_string(
"predict_file", None,
"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
flags.DEFINE_string(
"squad_dir", None,
"The output directory where the model checkpoints will be written.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
flags.mark_flag_as_required("train_file")
flags.mark_flag_as_required("predict_file")
flags.mark_flag_as_required("squad_dir")
flags.mark_flag_as_required("vocab_file")
return flags.FLAGS
class SquadExample(object):
"""A single training/test example for simple sequence classification.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
s += ", question_text: %s" % (
tokenization.printable_text(self.question_text))
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.end_position:
s += ", end_position: %d" % (self.end_position)
if self.is_impossible:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def read_squad_examples(input_file, is_training, version_2_with_negative=False, input_data=None):
"""Return list of SquadExample from input_data or input_file (SQuAD json file)"""
if input_data is None:
with tf.gfile.Open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length -
1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(
doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
tokenization.whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
tf.logging.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
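# Small worked example (not in the original file): suppose a token at position
# 10 appears in span A (start=0, length=12) and span B (start=8, length=12).
# Its score in A is min(10, 1) + 0.01 * 12 = 1.12 and in B is
# min(2, 9) + 0.01 * 12 = 2.12, so span B is treated as the "max context" span.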
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electronics?
# Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
output_fn, verbose_logging=False):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
if is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and example.is_impossible:
start_position = 0
end_position = 0
if verbose_logging and example_index < 20:
tf.compat.v1.logging.info("*** Example ***")
tf.compat.v1.logging.info("unique_id: %s" % (unique_id))
tf.compat.v1.logging.info("example_index: %s" % (example_index))
tf.compat.v1.logging.info("doc_span_index: %s" % (doc_span_index))
tf.compat.v1.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.compat.v1.logging.info("token_to_orig_map: %s" % " ".join(
["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
tf.compat.v1.logging.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
]))
tf.compat.v1.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.compat.v1.logging.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.compat.v1.logging.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and example.is_impossible:
tf.compat.v1.logging.info("impossible example")
if is_training and not example.is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
tf.compat.v1.logging.info("start_position: %d" % (start_position))
tf.compat.v1.logging.info("end_position: %d" % (end_position))
tf.compat.v1.logging.info(
"answer: %s" % (tokenization.printable_text(answer_text)))
feature = InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible)
# Run callback
output_fn(feature)
unique_id += 1
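# Illustrative note on the sliding window above (not in the original file):
# with max_tokens_for_doc=100 and doc_stride=50, a document of 220 WordPiece
# tokens is split into spans (start=0, length=100), (start=50, length=100),
# (start=100, length=100) and (start=150, length=70); each token is then
# featurized in every span that contains it, and _check_is_max_context picks
# the span whose prediction is kept for that token.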
class FeatureWriter(object):
"""Writes InputFeature to TF example file."""
def __init__(self, filename, is_training):
self.filename = filename
self.is_training = is_training
self.num_features = 0
self._writer = tf.python_io.TFRecordWriter(filename)
def process_feature(self, feature):
"""Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
self.num_features += 1
def create_int_feature(values):
feature = tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
return feature
features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([feature.unique_id])
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
if self.is_training:
features["start_positions"] = create_int_feature([feature.start_position])
features["end_positions"] = create_int_feature([feature.end_position])
impossible = 0
if feature.is_impossible:
impossible = 1
features["is_impossible"] = create_int_feature([impossible])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
self._writer.write(tf_example.SerializeToString())
def close(self):
self._writer.close()
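# A record written by FeatureWriter can be read back with a feature schema
# along these lines (sketch only; `max_seq_length` must match the value used
# when the records were written):
#
#   name_to_features = {
#       "unique_ids": tf.io.FixedLenFeature([], tf.int64),
#       "input_ids": tf.io.FixedLenFeature([max_seq_length], tf.int64),
#       "input_mask": tf.io.FixedLenFeature([max_seq_length], tf.int64),
#       "segment_ids": tf.io.FixedLenFeature([max_seq_length], tf.int64),
#   }
#   parsed = tf.io.parse_single_example(serialized_record, name_to_features)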
def main():
FLAGS = extract_flags()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tf.gfile.MakeDirs(FLAGS.squad_dir + "/final_tfrecords_sharded")
# We write to a temporary file to avoid storing very large constant tensors
# in memory.
train_examples = read_squad_examples(
input_file=FLAGS.train_file, is_training=True,
version_2_with_negative=FLAGS.version_2_with_negative)
train_writer = FeatureWriter(
filename=os.path.join(FLAGS.squad_dir, "final_tfrecords_sharded/train.tf_record"),
is_training=True)
convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=True,
output_fn=train_writer.process_feature,
verbose_logging=FLAGS.verbose_logging)
train_writer.close()
eval_examples = read_squad_examples(
input_file=FLAGS.predict_file, is_training=False,
version_2_with_negative=FLAGS.version_2_with_negative)
eval_writer = FeatureWriter(
filename=os.path.join(FLAGS.squad_dir, "final_tfrecords_sharded/eval.tf_record"),
is_training=False)
eval_features = []
def append_feature(feature):
eval_features.append(feature)
eval_writer.process_feature(feature)
convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=False,
output_fn=append_feature,
verbose_logging=FLAGS.verbose_logging)
eval_writer.close()
if __name__ == "__main__":
main()
|
nvidia-gcp-samples-master
|
ai-platform-samples/bert_on_caip/ai_platform_prediction_triton/utils/create_squad_data.py
|
# Copyright 2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time, argparse
import subprocess, sys, os, json
import dask, dask_cudf, asyncio
import socket, gcsfs
from dask.distributed import Client
from dask.distributed import wait
import xgboost as xgb
import logging
import datetime
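# start_client (below) drives the whole training run: it connects to the Dask
# scheduler, waits for the requested number of dask-cuda workers, loads the
# training data with dask_cudf, builds a DaskDeviceQuantileDMatrix, trains a
# GPU-accelerated XGBoost model, and copies the saved model to GCS via gcsfs.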
async def start_client(scheduler_addr, train_dir, model_file, num_workers, fs, parquet=False):
async with Client(scheduler_addr, asynchronous=True) as client:
dask.config.set({'distributed.scheduler.work-stealing': False})
print(dask.config.get('distributed.scheduler.work-stealing'))
dask.config.set({'distributed.scheduler.bandwidth': 1})
print(dask.config.get('distributed.scheduler.bandwidth'))
await client.wait_for_workers(num_workers)
colnames = ['label'] + ['feature-%02d' % i for i in range(1, 29)]
if parquet is True:
df = dask_cudf.read_parquet(train_dir, columns=colnames)
else:
df = dask_cudf.read_csv(train_dir, header=None, names=colnames, chunksize=None)
X = df[df.columns.difference(['label'])]
y = df['label']
df = df.persist()
X = X.persist()
wait(df)
wait(X)
print("[INFO]: ------ Long waited but the data is ready now")
start_time = time.time()
dtrain = await xgb.dask.DaskDeviceQuantileDMatrix(client, X, y)
del df
del X
del y
output = await xgb.dask.train(client,
{ 'verbosity': 1,
'learning_rate': 0.1,
'max_depth': 8,
'objective': 'reg:squarederror',
'subsample': 0.6,
'gamma': 1,
'verbose_eval': True,
'tree_method':'gpu_hist',
'nthread': 1
},
dtrain,
num_boost_round=100, evals=[(dtrain, 'train')])
logging.info("[debug:leader]: ------ training finished")
output['booster'].save_model('/tmp/tmp.model')
history = output['history']
logging.info('[debug:leader]: ------ Training evaluation history: %s', history)
fs.put('/tmp/tmp.model', model_file)
logging.info("[debug:leader]: ------model saved")
logging.info("[debug:leader]: ------ %s seconds ---" % (time.time() - start_time))
predictions = await xgb.dask.predict(client, output, dtrain)
print(type(predictions))
await client.shutdown()
def launch_dask(cmd, is_shell):
return subprocess.Popen(cmd,
stdout=None,
stderr=None,
shell=is_shell)
def launch_worker(cmd):
return subprocess.check_call(cmd,
stdout=sys.stdout,
stderr=sys.stderr)
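# The cluster topology is taken from the TF_CONFIG environment variable set by
# the training service. A minimal example (hypothetical addresses) looks like:
#
#   {"cluster": {"master": ["10.0.0.2:2222"], "worker": ["10.0.0.3:2222"]},
#    "task": {"type": "master", "index": 0}}
#
# Only task.type is used below: the "master" task runs the Dask scheduler and
# the async training client, while each "worker" task joins as a dask-cuda
# worker once the scheduler IP has been published to GCS.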
if __name__=='__main__':
now = datetime.datetime.utcnow()
timestamp = now.strftime("%m%d%Y%H%M%S%f")
parser = argparse.ArgumentParser()
logging.basicConfig(format='%(message)s')
logging.getLogger().setLevel(logging.INFO)
parser.add_argument(
'--gcp-project',
type=str,
help='User gcp project',
required=True)
parser.add_argument(
'--train-files',
type=str,
help='Training files local or GCS',
required=True)
parser.add_argument(
'--scheduler-ip-file',
type=str,
help='Scratch temp file to storage scheduler ip in GCS',
required=True)
parser.add_argument(
'--model-file',
type=str,
help="""GCS or local dir for checkpoints, exports, and summaries.
Use an existing directory to load a trained model, or a new directory
to retrain""",
required=True)
parser.add_argument(
'--num-workers',
type=int,
help='num of workers for rabit')
parser.add_argument(
'--rmm-pool-size',
type=str,
help='RMM pool size',
default='8G')
parser.add_argument(
'--nthreads',
type=str,
help='nthreads for master and worker',
default='4')
parser.add_argument(
'--parquet',
action='store_true', help='parquet files are used')
args, _ = parser.parse_known_args()
tf_config_str = os.environ.get('TF_CONFIG')
tf_config_json = json.loads(tf_config_str)
logging.info(tf_config_json)
task_name = tf_config_json.get('task', {}).get('type')
fs = gcsfs.GCSFileSystem(project=args.gcp_project, token='cloud')
if task_name == 'master':
host_name = socket.gethostname()
host_ip = socket.gethostbyname(host_name)
with fs.open(args.scheduler_ip_file, 'w') as f:
f.write(host_ip)
scheduler_addr = host_ip + ':2222'
logging.info('[INFO]: The scheduler IP is %s', scheduler_addr)
proc_scheduler = launch_dask(f'dask-scheduler --protocol tcp > /tmp/scheduler.log 2>&1 &', True)
logging.info('[debug:leader]: ------ start scheduler')
proc_worker = launch_dask(['dask-cuda-worker', '--rmm-pool-size', args.rmm_pool_size, '--nthreads', args.nthreads , scheduler_addr], False)
logging.info('[debug:leader]: ------ start worker')
asyncio.get_event_loop().run_until_complete(start_client(scheduler_addr,
args.train_files,
args.model_file,
args.num_workers,
fs,
args.parquet))
# launch dask worker, redirect output to sys stdout/err
elif task_name == 'worker':
while not fs.exists(args.scheduler_ip_file):
time.sleep(1)
with fs.open(args.scheduler_ip_file, 'r') as f:
scheduler_ip = f.read().rstrip("\n")
logging.info('[debug:scheduler_ip]: ------'+scheduler_ip)
scheduler_addr = scheduler_ip + ':2222'
proc_worker = launch_worker(['dask-cuda-worker', '--rmm-pool-size', args.rmm_pool_size, '--nthreads' , args.nthreads, scheduler_addr])
|
nvidia-gcp-samples-master
|
ai-platform-samples/xgboost_multi_node/rapids.py
|
#!/usr/bin/env python
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from prometheus_client import Gauge
import requests
import time
import urllib3
import sys
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
metric_map = {}
created = False
metric_map["status"] = -1
os_ssd_count = "os_count"
data_ssd_count = "data_count"
health = "drive_health"
avg_data = "avg_data_used"
avg_os = "avg_os_used"
def init():
"""
Initialize the module.
Creates the Prometheus metrics that are later used by this module.
Args:
None
Returns:
None
"""
# Check if metric already present in the metric_map
if os_ssd_count not in metric_map:
# Create metric and add it to metric_map
metric_map[os_ssd_count] = Gauge(os_ssd_count, "Number of OS Drives")
if data_ssd_count not in metric_map:
metric_map[data_ssd_count] = Gauge(data_ssd_count, "Number of Data Drives")
if health not in metric_map:
metric_map[health] = Gauge(health, "Drive Health")
if avg_data not in metric_map:
metric_map[avg_data] = Gauge(avg_data, "Average Percent used Data Drives")
if avg_os not in metric_map:
metric_map[avg_os] = Gauge(avg_os, "Average Percent Used OS Drives")
print("Initialized Storage Exporter...")
def ExportMetric(ip="localhost", port= "273"):
"""
ExportMetric: This function requests data from the NVSM APIs over HTTP. Upon getting valid JSON data, it traverses the data and creates and sets values for the metrics.
The metrics include:
1. Number of OS Drives
2. Number of Data Drives
3. Overall Drive Health
4. Average Percent Used for Data Drives
5. Average Percent Used for OS Drives
6. Per Drive Disk Capacity
7. Per Drive Percent Used
Args:
ip : IP address of the NVSM server
port: Port number of the NVSM server
Returns:
None
"""
global metric_map
avg_os_used = 0
avg_data_used = 0
os_count = 0
data_count = 0
# Read JWT token for NVSM-APIs
with open ('/etc/nvsm-apis/nvsm-apis-perpetual.jwt', 'r') as jwt_file:
tokenstring = jwt_file.read()
# Request to URL to get the data
r = requests.get('https://' + str(ip) + ':' + str(port) + '/redfish/v1/Systems/1/Storage', timeout=120, verify=False, headers={'Authorization': 'Bearer '+tokenstring})
# Read data returned by URL
storage_collection = r.json()
# Iterate over the storage collection to get the storage information
for data_id in storage_collection["Members"]:
# Request to URL to get the data
r = requests.get('https://' + str(ip) + ':' + str(port) + '/' + data_id["@odata.id"], timeout=120, verify=False, headers={'Authorization': 'Bearer '+tokenstring})
# Read data returned by URL
try:
nvme_storage_subsys = r.json()
except:
continue
# Iterate over the storage information to get the drive information
for nvme_id in nvme_storage_subsys["Drives"]:
# Request to URL to get the data for each drive
r = requests.get('https://' + str(ip) + ':' + str(port) +nvme_id["@odata.id"], timeout=120, verify=False, headers={'Authorization': 'Bearer '+tokenstring})
try:
drive = r.json()
except:
continue
if "nvme0n1" in drive["@odata.id"] or "nvme1n1" in drive["@odata.id"]:
name = "os_" + drive["@odata.id"][-7:] + "_capacity"
else:
name = "data_" + drive["@odata.id"][-7:] + "_capacity"
if name not in metric_map:
metric_map[name] = Gauge(name, "Disk Capacity")
c = metric_map[name]
c.set(float(drive["Capacity"]))
if "nvme0n1" in drive["@odata.id"] or "nvme1n1" in drive["@odata.id"]:
usage_name = "os_" + drive["@odata.id"][-7:] + "_percent_used"
avg_os_used += float(drive["Oem"]["Nvidia_HM"]["Metrics"]["PercentUsed"])
else:
usage_name = "data_" + drive["@odata.id"][-7:] + "_percent_used"
avg_data_used += float(drive["Oem"]["Nvidia_HM"]["Metrics"]["PercentUsed"])
if usage_name not in metric_map:
metric_map[usage_name] = Gauge(usage_name, "Percent Used")
g = metric_map[usage_name]
g.set(float(drive["Oem"]["Nvidia_HM"]["Metrics"]["PercentUsed"]))
if "nvme0n1" in drive["@odata.id"] or "nvme1n1" in drive["@odata.id"]:
error_name = "os_" + drive["@odata.id"][-7:] + "_media_errors"
else:
error_name = "data_" + drive["@odata.id"][-7:] + "_media_errors"
if error_name not in metric_map:
metric_map[error_name] = Gauge(error_name, "Media Errors")
d = metric_map[error_name]
d.set(int(drive["Oem"]["Nvidia_HM"]["Errors"]["Media"]["Count"]))
h = drive["Status"]["Health"]
if "nvme0n1" in drive["@odata.id"] or "nvme1n1" in drive["@odata.id"]:
os_count += 1
else:
data_count += 1
status = metric_map["status"]
if(h == "OK"):
temp = 0
elif(h == "Warning"):
temp = 1
elif(h == "Critical"):
temp = 2
if(status < temp):
status = temp
metric_map["status"] = status
# Set values to metrics
status = metric_map["status"]
health_metric = metric_map[health]
health_metric.set(status)
os_ssd_count_metric = metric_map[os_ssd_count]
os_ssd_count_metric.set(os_count)
data_ssd_count_metric = metric_map[data_ssd_count]
data_ssd_count_metric.set(data_count)
avg_os_metric = metric_map[avg_os]
avg_os_metric.set(avg_os_used/os_count)
avg_data_metric = metric_map[avg_data]
avg_data_metric.set(avg_data_used/data_count)
|
NVSM-master
|
NVSM-Prometheus/nvsm_exporters/storage_exporter.py
|
#!/usr/bin/env python
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from prometheus_client import start_http_server
from prometheus_client import Gauge, Summary, Counter
from prometheus_client.core import GaugeMetricFamily, REGISTRY
import requests
import time
import urllib3
import sys
import traceback
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
metric_map = {}
health_created = False
created = False
gpu_count = "GPU_count"
gpu_healthrollup = "gpu_healthrollup"
def init():
"""
Initialize the module.
Creates the Prometheus metrics that are later used by this module.
Args:
None
Returns:
None
"""
# Check if metric already present in the metric_map
if gpu_count not in metric_map:
# Create metric and add it to metric_map
metric_map[gpu_count] = Gauge(gpu_count, "Number of GPUs")
if not created:
metric_map[gpu_healthrollup] = Gauge(gpu_healthrollup, "GPU HealthRollup")
print("Initialized GPU Exporter...")
def ExportMetric(ip="localhost", port="273"):
"""
ExportMetric: This function requests data from the NVSM APIs over HTTP. Upon getting valid JSON data, it traverses the data and creates and sets values for the metrics.
The metrics include:
1. Number of GPUs - gpu_count
2. GPU HealthrollUp - gpu_healthrollup
3. Per GPU PCIe link width
4. Per GPU PCIe link generation
5. Per GPU Health
6. Per GPU Retired Pages
Args:
ip : IP address of the NVSM server
port: Port number of the NVSM server
Returns:
None
"""
global metric_map
# Read JWT token for NVSM-APIs
with open ('/etc/nvsm-apis/nvsm-apis-perpetual.jwt', 'r') as jwt_file:
tokenstring = jwt_file.read()
# Request to URL to get the data
r = requests.get('https://' + str(ip) + ':' + str(port) + '/nvsm/v1/Systems/1/GPUs', verify=False, timeout=10, headers={'Authorization': 'Bearer '+tokenstring})
# Read data returned by URL
data = r.json()
# Set GPU count metric
c = metric_map[gpu_count]
c.set(int(data["Members@odata.count"]))
health_endpoint = data["Health"]["@odata.id"]
time.sleep(5)
# Request to URL to get the data
r = requests.get('https://' + str(ip) + ':' + str(port) + health_endpoint, verify=False, timeout=10, headers={'Authorization': 'Bearer '+tokenstring})
# Read data returned by URL
health_data = r.json()
health = health_data["Health"]
status = 2
if health == "Ok":
status = 0
elif health == "Warning":
status = 1
# Set GPU healthrollup metric
healthrollup_metric = metric_map[gpu_healthrollup]
healthrollup_metric.set(status)
for gpu_endpoint in data["Members"]:
time.sleep(5)
# Request to URL to get the data
r = requests.get('https://' + str(ip) + ':' + str(port) + gpu_endpoint["@odata.id"], verify=False, timeout=10, headers={'Authorization': 'Bearer '+tokenstring})
# Read data returned by URL
gpu = r.json()
# Create Per GPU pcie link width metric
name = "pcie_link_width_" + str(gpu["Inventory"]["UUID"])
name = name.replace('-','_')
if name not in metric_map:
metric_map[name] = Gauge(name, "PCIe Link Width")
c = metric_map[name]
c.set(int(gpu["Connections"]["PCIeLinkWidth"][:-1]))
# Create Per GPU pcie link gen info metric
name = "pcie_link_gen_info_" + str(gpu["Inventory"]["UUID"])
name = name.replace('-','_')
if name not in metric_map:
metric_map[name] = Gauge(name, "PCIe Generation Info")
c = metric_map[name]
c.set(int(gpu["Connections"]["PCIeGen"]))
h = gpu["Status"]["Health"]
# Create Per GPU health metric
name = "gpu_health_" + str(gpu["Inventory"]["UUID"])
name = name.replace('-','_')
if name not in metric_map:
metric_map[name] = Gauge(name, "Per GPU Health")
c = metric_map[name]
if(h == "OK"):
status = 0
elif(h == "Warning"):
status = 1
elif(h == "Critical"):
status = 2
c.set(status)
# Create Per GPU retired pages metric
name = "retired_pages_" + str(gpu["Inventory"]["UUID"])
name = name.replace('-','_')
if name not in metric_map:
metric_map[name] = Gauge(name, "Per GPU Retired Pages")
c = metric_map[name]
single_bit_errors = int(gpu["Stats"]["ErrorStats"]["RetiredPages"]["DueToMultipleSingleBitErrors"]["PageCount"])
double_bit_errors = int(gpu["Stats"]["ErrorStats"]["RetiredPages"]["DueToDoubleBitErrors"]["PageCount"])
pending_retirement = int(gpu["Stats"]["ErrorStats"]["RetiredPages"]["PendingRetirementCount"])
c.set(single_bit_errors + double_bit_errors + pending_retirement )
|
NVSM-master
|
NVSM-Prometheus/nvsm_exporters/gpu_exporter.py
|
#!/usr/bin/env python
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from prometheus_client import Gauge
import requests
import time
import urllib3
import sys
ip = "localhost"
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
metric_map = {}
coreCount = "core_count"
threadCount = "thread_count"
cpuCount = "cpu_count"
def init():
"""
Initialize the module.
Creates the Prometheus metrics that are later used by this module.
Args:
None
Returns:
None
"""
# Check if metric already present in the metric_map
if coreCount not in metric_map:
# Create metric and add it to metric_map
metric_map[coreCount] = Gauge(coreCount, "Total Number of Core in CPUs")
if threadCount not in metric_map:
metric_map[threadCount] = Gauge(threadCount, "Total Number of threads in CPUs")
if cpuCount not in metric_map:
metric_map[cpuCount] = Gauge(cpuCount, "Total Number of CPUs")
print("Initialized Processor Exporter...")
def ExportMetric(ip="localhost", port="273"):
"""
ExportMetric: This function requests data from the NVSM APIs over HTTP. Upon getting valid JSON data, it traverses the data and creates and sets values for the metrics.
The metrics include:
1. Total Number of Cores in CPUs
2. Total Number of Threads in CPUs
3. Total Number of CPUs
Args:
ip : IP address of the NVSM server
port: Port number of the NVSM server
Returns:
None
"""
core_count = 0
thread_count = 0
cpu_count = 0
# Read JWT token for NVSM-APIs
with open ('/etc/nvsm-apis/nvsm-apis-perpetual.jwt', 'r') as jwt_file:
tokenstring = jwt_file.read()
# Request to URL to get the data
r = requests.get('https://' + str(ip) + ':' + str(port) + '/redfish/v1/Systems/1/Processors', timeout=5, verify=False, headers={'Authorization': 'Bearer '+tokenstring})
# Read data returned by URL
data = r.json()
# Iterate over the processor collection to get the processor information
for processor in data["Members"]:
r = requests.get('https://' + str(ip) + ':' + str(port) + processor["@odata.id"], timeout=5, verify=False, headers={'Authorization': 'Bearer '+tokenstring})
proc_data = r.json()
core_count += proc_data["TotalCores"]
thread_count += proc_data["TotalThreads"]
cpu_count += 1
# Set values to metrics
coreCount_metric = metric_map[coreCount]
threadCount_metric = metric_map[threadCount]
cpuCount_metric = metric_map[cpuCount]
coreCount_metric.set(core_count)
threadCount_metric.set(thread_count)
cpuCount_metric.set(cpu_count)
|
NVSM-master
|
NVSM-Prometheus/nvsm_exporters/processor_exporter.py
|
#!/usr/bin/env python
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from prometheus_client import Gauge, Summary, Counter
import requests
import time
import urllib3
import sys
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
metric_map = {}
correctable = "correctable_memory_errors"
uncorrectable = "uncorrectable_memory_errors"
dimm_count = "dimm_count"
dimm_health = "dimm_health"
def init():
"""
Initialize the module.
Creates the Prometheus metrics that are later used by this module.
Args:
None
Returns:
None
"""
# Check if metric already present in the metric_map
if correctable not in metric_map:
# Create metric and add it to metric_map
metric_map[correctable] = Gauge(correctable, "Correctable Memory Errors")
if uncorrectable not in metric_map:
metric_map[uncorrectable] = Gauge(uncorrectable, "Uncorrectable Memory Errors")
if dimm_count not in metric_map:
metric_map[dimm_count] = Gauge(dimm_count, "Number of DIMMs")
if dimm_health not in metric_map:
metric_map[dimm_health] = Gauge(dimm_health, "DIMM Health")
print("Initialized Memory Exporter...")
def ExportMetric(ip="localhost", port="273"):
"""
ExportMetric: This function requests data from the NVSM APIs over HTTP. Upon getting valid JSON data, it traverses the data and creates and sets values for the metrics.
The metrics include:
1. Correctable Memory Errors
2. Uncorrectable Memory Errors
3. Number of DIMMs
Args:
ip : IP address of the NVSM server
port: Port number of the NVSM server
Returns:
None
"""
global metric_map
dimms = 0
# Read JWT token for NVSM-APIs
with open ('/etc/nvsm-apis/nvsm-apis-perpetual.jwt', 'r') as jwt_file:
tokenstring = jwt_file.read()
# Request to URL to get the data
r = requests.get('https://' + str(ip) + ':' + str(port) + '/redfish/v1/Systems/1/Memory', timeout=5, verify=False, headers={'Authorization': 'Bearer '+tokenstring})
# Read data returned by URL
data = r.json()
correctable_total = 0
uncorrectable_total = 0
for data_id,val in enumerate(data["Members"]):
# Request to URL to get the data
r = requests.get('https://' + str(ip) + ':' + str(port) + val["@odata.id"], timeout=5, verify=False, headers={'Authorization': 'Bearer '+tokenstring})
# Read data returned by URL
data_cpu = r.json()
dimms += 1
correctable_total += int(data_cpu["Oem"]["Error"]["CorrectableCount"])
uncorrectable_total += int(data_cpu["Oem"]["Error"]["UncorrectableCount"])
name = data_cpu["Id"] + "_capacity"
if name not in metric_map:
metric_map[name] = Gauge(name, "DIMM Capacity")
c = metric_map[name]
cap = (float(data_cpu["CapacityMiB"]))
c.set(cap)
h = data_cpu["Status"]["Health"]
health_metric = metric_map[dimm_health]
status = 2
if(h == "OK"):
status = 0
elif(h == "Warning"):
status = 1
health_metric.set(status)
# Set values to metrics
correctable_metric = metric_map[correctable]
uncorrectable_metric = metric_map[uncorrectable]
count_metric = metric_map[dimm_count]
count_metric.set(dimms)
correctable_metric.set(correctable_total)
uncorrectable_metric.set(uncorrectable_total)
|
NVSM-master
|
NVSM-Prometheus/nvsm_exporters/memory_exporter.py
|
#!/usr/bin/env python
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from prometheus_client import Gauge, Summary, Counter
import requests
import time
import urllib3
import sys
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
metric_map = {}
correctable = "PCIE_correctable_errors"
uncorrectable = "PCIE_uncorrectable_errors"
def init():
"""
Initialize the module.
Creates the Prometheus metrics that are later used by this module.
Args:
None
Returns:
None
"""
# Check if metric already present in the metric_map
if correctable not in metric_map:
# Create metric and add it to metric_map
metric_map[correctable] = Gauge(correctable, "Correctable PCIe Errors")
if uncorrectable not in metric_map:
metric_map[uncorrectable] = Gauge(uncorrectable, "Uncorrectable PCIe Errors")
print("Initialized PCIe Exporter...")
def ExportMetric(ip="localhost", port="273"):
"""
ExportMetric: This function requests data from the NVSM APIs over HTTP. Upon getting valid JSON data, it traverses the data and creates and sets values for the metrics.
The metrics include:
1. Correctable PCIe Errors
2. Uncorrectable PCIe Errors
Args:
ip : IP address of the NVSM server
port: Port number of the NVSM server
Returns:
None
"""
# Read JWT token for NVSM-APIs
with open ('/etc/nvsm-apis/nvsm-apis-perpetual.jwt', 'r') as jwt_file:
tokenstring = jwt_file.read()
# Request to URL to get the data
r = requests.get('https://' + str(ip) + ':' + str(port) + '/nvsm/v1/Systems/1/Pcie/Errors', timeout=120, verify=False, headers={'Authorization': 'Bearer '+tokenstring})
# Read data returned by URL
data = r.json()
# Set values to metrics
correctable_metric = metric_map[correctable]
uncorrectable_metric = metric_map[uncorrectable]
correctable_metric.set(int(data["Correctable"]["ErrorCount"]))
uncorrectable_metric.set(int(data["Uncorrectable"]["ErrorCount"]))
|
NVSM-master
|
NVSM-Prometheus/nvsm_exporters/pcie_exporter.py
|
#!/usr/bin/env python
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from prometheus_client import Gauge
import requests
import time
import urllib3
import sys
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
ip = "localhost"
metric_map = {}
metric_map["status"] = -1
count = 0
system_power_consumption = "system_power_consumption"
psu_health = "psu_health"
def init():
"""
Initialize the module.
Creates the Prometheus metrics that are later used by this module.
Args:
None
Returns:
None
"""
# Check if metric already present in the metric_map
if system_power_consumption not in metric_map:
# Create metric and add it to metric_map
metric_map[system_power_consumption] = Gauge(system_power_consumption, "System Power Consumption")
if psu_health not in metric_map:
metric_map[psu_health] = Gauge(psu_health, "PSU Overall Health")
print("Initialized Power Exporter...")
def ExportMetric(ip="localhost", port="273"):
"""
ExportMetric: This function requests data from the NVSM APIs over HTTP. Upon getting valid JSON data, it traverses the data and creates and sets values for the metrics.
The metrics include:
1. System Overall Power Consumption
2. PSU Overall Health Status
3. Per PSU Power Consumption
4. Per PSU Health Status
Args:
ip : IP address of the NVSM server
port: Port number of the NVSM server
Returns:
None
"""
count = 0
power_usage = 0
global metric_map
# Read JWT token for NVSM-APIs
with open ('/etc/nvsm-apis/nvsm-apis-perpetual.jwt', 'r') as jwt_file:
tokenstring = jwt_file.read()
# Request to URL to get the data
r = requests.get('https://' + str(ip) + ':' + str(port) + '/redfish/v1/Chassis/1/Power', timeout=5, verify=False, headers={'Authorization': 'Bearer '+tokenstring})
# Read data returned by URL
data = r.json()
# Iterate over the PowerSupplies collection to get the PowerSupply information
for powersupply in data['PowerSupplies']:
# Create new metrics for each power supply
name = powersupply["Name"]
if name not in metric_map:
metric_map[name] = Gauge(name, "Power Consumption")
c = metric_map[name]
# Set value to each metric
if(powersupply["LastPowerOutputWatts"] == "na"):
c.set(0)
else:
c.set(int(powersupply["LastPowerOutputWatts"]))
h = powersupply["Status"]["Health"]
status = metric_map["status"]
if h == "Ok":
temp = 0
count += 1
elif h == "Warning":
temp = 1
else:
temp = 2
if count >= 5:
status = 0
elif status < temp:
status = temp
# Set value to metric
metric_map["status"] = status
# Create metric for each power supply
name = powersupply["Name"] + "_Health"
if name not in metric_map:
metric_map[name] = Gauge(name, "PSU Health")
c = metric_map[name]
c.set(temp)
power_usage += 0 if powersupply["LastPowerOutputWatts"] == "na" else int(powersupply["LastPowerOutputWatts"])  # "na" readings contribute nothing
# Set data to metrics
status = metric_map["status"]
system_power_metric = metric_map[system_power_consumption]
health_metric = metric_map[psu_health]
system_power_metric.set(power_usage)
health_metric.set(status)
|
NVSM-master
|
NVSM-Prometheus/nvsm_exporters/power_exporter.py
|
#!/usr/bin/env python
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from prometheus_client import Gauge
import requests
import time
import urllib3
import traceback
import sys
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
metric_map = {}
alerts_count = "alerts_count"
def init():
"""
Initialize the module.
Creates the Prometheus metrics that are later used by this module.
Args:
None
Returns:
None
"""
# Check if metric already present in the metric_map
if alerts_count not in metric_map:
# Create metric and add it to metric_map
metric_map[alerts_count] = Gauge(alerts_count, "Number of Alerts")
print("Initialized Alert Exporter...")
def ExportMetric(ip="localhost", port="273"):
"""
ExportMetric: This function requests data from the NVSM APIs over HTTP. Upon getting valid JSON data, it traverses the data and creates and sets values for the metrics.
The metrics include:
1. Number of Alerts
Args:
ip : IP address of the NVSM server
port: Port number of the NVSM server
Returns:
None
"""
alerts = 0
# Read JWT token for NVSM-APIs
with open ('/etc/nvsm-apis/nvsm-apis-perpetual.jwt', 'r') as jwt_file:
tokenstring = jwt_file.read()
# Request to URL to get the data
r = requests.get('https://' + str(ip) + ':' + str(port) + '/nvsm/v1/Chassis/1/Alerts', timeout=120, verify=False, headers={'Authorization': 'Bearer '+tokenstring})
# Read data returned by URL
alerts_collection = r.json()
# Check the MemberCount, state of alert and severity of alert
if alerts_collection["MemberCount"] !=0:
for alert in alerts_collection["Alerts"]:
if alert["state"] != "cleared" and alert["severity"] == "1":
# Increment count of alerts
alerts += 1
# Request to URL to get the data
r = requests.get('https://' + str(ip) + ':' + str(port) + '/nvsm/v1/Systems/1/Alerts', timeout=120, verify=False, headers={'Authorization': 'Bearer '+tokenstring})
# Read data returned by URL
alerts_collection = r.json()
# Check the MemberCount, state of alert and severity of alert
if alerts_collection["MemberCount"] !=0:
for alert in alerts_collection["Alerts"]:
if alert["state"] != "cleared" and alert["severity"] == "1":
# Increment count of alerts
alerts += 1
# Set data to metrics
alert_metric = metric_map[alerts_count]
alert_metric.set(alerts)
|
NVSM-master
|
NVSM-Prometheus/nvsm_exporters/alerts_exporter.py
|
#!/usr/bin/env python
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import importlib
import os
import urllib3
import time
import sys
import yaml
import alerts_exporter
import gpu_exporter
import memory_exporter
import pcie_exporter
import processor_exporter
import storage_exporter
import thermal_exporter
import power_exporter
from prometheus_client import start_http_server
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
modules = []
metric_exporter_port = 8000
metric_exporters = []
nvsm_host = "localhost"
nvsm_port = 273
exporters_interval = 20
DefaultConfigDir = "/etc/nvsm-apis/"
DefaultConfigFile = "nvsm-prometheus-exporters.config"
DefaultConfigFilePath = DefaultConfigDir + DefaultConfigFile
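# Expected layout of the config file (sketch; values are illustrative only):
#
#   metric_exporter:
#     port: 8000
#     interval: 20
#     exporters:
#       - gpu_exporter.py
#       - memory_exporter.py
#   nvsm:
#     host: localhost
#     port: 273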
# Check if config file exists
if not os.path.exists(DefaultConfigFilePath):
sys.exit("Config file does not exist")
# Open config file
try:
with open(DefaultConfigFilePath) as ymlfile:
configyml = yaml.safe_load(ymlfile)
except:
sys.exit("Error in config file")
# Check if config file contains metric_exporter info
if "metric_exporter" in configyml:
metric_exporter_port = int(configyml['metric_exporter']['port'])
metric_exporters = configyml['metric_exporter']['exporters']
exporters_interval = configyml['metric_exporter']['interval']
else:
sys.exit("Error in config file")
# Check if config file contains NVSM info
if "nvsm" in configyml:
nvsm_host = configyml['nvsm']['host']
nvsm_port = int(configyml['nvsm']['port'])
# Start http_server for metric exporters
start_http_server(metric_exporter_port)
# Import all the modules and call the init functions for each one of them
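# Each exporter module listed in the config is expected to expose two
# callables: init(), which creates its Gauge objects once at startup, and
# ExportMetric(ip, port), which refreshes the gauge values from NVSM.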
for e in metric_exporters:
mod = e.replace(".py", "")
module = __import__(mod)
module.init()
modules.append(module)
# Run the exporters at exporters_interval intervals
while(True):
for module in modules:
ExportMetric = getattr(module, 'ExportMetric')
try:
ExportMetric(nvsm_host, nvsm_port)
except KeyboardInterrupt:
sys.exit(1)
except Exception as e:
print(e)
print("Failed")
continue
time.sleep(exporters_interval)
|
NVSM-master
|
NVSM-Prometheus/nvsm_exporters/main.py
|
#!/usr/bin/env python
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from prometheus_client import Gauge
import requests
import time
import urllib3
import sys
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
metric_map = {}
metric_map["fan_status"] = -1
metric_map["temp_status"] = -1
temp_health = "temperature_health"
fan_health = "fan_health"
def init():
"""
Initialize the module.
Creates the Prometheus metrics that are later used by this module.
Args:
None
Returns:
None
"""
# Check if metric already present in the metric_map
if temp_health not in metric_map:
# Create metric and add it to metric_map
metric_map[temp_health] = Gauge(temp_health, "Temperature Overall Health")
# Check if metric already present in the metric_map
if fan_health not in metric_map:
# Create metric and add it to metric_map
metric_map[fan_health] = Gauge(fan_health, "Fan Overall Health")
print("Initialized Thermal Exporter...")
def ExportMetric(ip="localhost", port="273"):
"""
ExportMetric: This function requests data from the NVSM APIs over HTTP. Upon getting valid JSON data, it traverses the data and creates and sets values for the metrics.
The metrics include:
1. PDB Temperature Reading
2. PDB Temperature Overall Health Status
3. Per PDB Temperature Health Status
4. Fan Speeds
5. Fan Overall Health Status
6. Per Fan Health Status
Args:
ip : IP address of the NVSM server
port: Port number of the NVSM server
Returns:
None
"""
global metric_map
# Read JWT token for NVSM-APIs
with open ('/etc/nvsm-apis/nvsm-apis-perpetual.jwt', 'r') as jwt_file:
tokenstring = jwt_file.read()
# Request to URL to get the data
r = requests.get('https://' + str(ip) + ':' + str(port) + '/redfish/v1/Chassis/1/Thermal', timeout=5, verify=False, headers={'Authorization': 'Bearer '+tokenstring})
# Read data returned by URL
data = r.json()
# Iterate over the temperature sensors to create metrics and set values to the metrics
for temperatures in data['Temperatures']:
temp_name = str(temperatures["Name"]).split()[0]
name = temp_name + "_temp"
if name not in metric_map:
metric_map[name] = Gauge(name, "PDB Temperature")
c = metric_map[name]
c.set(temperatures["ReadingCelsius"])
h = temperatures["Status"]["Health"]
status = metric_map["temp_status"]
if(h == "OK"):
temp = 0
elif(h == "Warning"):
temp = 1
elif(h == "Critical"):
temp = 2
if(status < temp):
status = temp
metric_map["temp_status"] = status
name = temp_name + "_status"
if name not in metric_map:
metric_map[name] = Gauge(name, "PDB Health Status")
c = metric_map[name]
c.set(temp)
# Iterate over fan sensors to create metrics and set values to the metrics
for fans in data["Fans"]:
name = fans["Name"] + "_speed"
if name not in metric_map:
metric_map[name] = Gauge(name, "Fan Speed")
c = metric_map[name]
c.set(fans["Reading"])
h = fans["Status"]["Health"]
name = fans["Name"] + "_health"
if name not in metric_map:
metric_map[name] = Gauge(name, "Fan Health Status")
c = metric_map[name]
status = metric_map["fan_status"]
if(h == "OK"):
temp = 0
elif(h == "Warning"):
temp = 1
else:
temp = 2
if(status < temp):
status = temp
metric_map["fan_status"] = status
c.set(temp)
# Set values to metrics
temp_health_metric = metric_map[temp_health]
fan_health_metric = metric_map[fan_health]
status = metric_map["temp_status"]
temp_health_metric.set(status)
status = metric_map["fan_status"]
fan_health_metric.set(status)
|
NVSM-master
|
NVSM-Prometheus/nvsm_exporters/thermal_exporter.py
|
# SPDX-License-Identifier: MIT
from bobber import __version__
from setuptools import setup
with open('README.md', 'r') as f:
long_description = f.read()
setup(
name='nvidia-bobber',
version=__version__,
description='Containerized testing of system components that impact AI workload performance',
long_description=long_description,
packages=['bobber',
'bobber/lib',
'bobber/lib/analysis',
'bobber/lib/docker',
'bobber/lib/system',
'bobber/lib/tests'],
include_package_data=True,
package_data={'': ['lib/docker/Dockerfile',
'test_scripts/call_dali_multi.sh',
'test_scripts/dali_multi.sh',
'test_scripts/fio_fill_single.sh',
'test_scripts/fio_multi.sh',
'test_scripts/mdtest_multi.sh',
'test_scripts/nccl_multi.sh',
'test_scripts/setup_fio.sh']},
license='MIT',
python_requires='>=3.6',
entry_points={
'console_scripts': ['bobber=bobber.bobber:main']
},
install_requires=[
'docker >= 4.3.1',
'numpy >= 1.9.5',
'pyyaml >= 5.4.0',
'tabulate >= 0.8.7',
'six>=1.15.0'
]
)
|
Bobber-main
|
setup.py
|
# SPDX-License-Identifier: MIT
import bobber.lib.docker
import json
from argparse import ArgumentParser, ArgumentTypeError, Namespace
from copy import copy
from bobber import __version__
from bobber.lib.constants import (
BASELINES,
BUILD,
DGX_2,
DGX_A100_DUAL,
DGX_A100_SINGLE,
EXPORT,
CAST,
LOAD,
PARSE_RESULTS,
RUN_ALL,
RUN_DALI,
RUN_NCCL,
RUN_STG_BW,
RUN_STG_IOPS,
RUN_STG_125K,
RUN_STG_META,
SYSTEMS,
READ_PATTERNS,
WRITE_PATTERNS
)
from bobber.lib.analysis import parse_results
from bobber.lib.system.file_handler import create_directory
from bobber.lib.tests import run_tests
from typing import NoReturn
def unique_hosts(hosts: str) -> str:
"""
Verify all hosts are unique.
If more than one host is passed to the hosts parameter, ensure no two hosts
are identical as Bobber can't be run concurrently on the same host.
Parameters
----------
hosts : str
A ``string`` of the comma-separated hosts from the user, such as
'host1,host2,host3,...'
Returns
-------
str
Returns a ``string`` of the original hosts list if all hosts are
unique.
Raises
------
ArgumentTypeError
Raises an ``ArgumentTypeError`` if any of the passed hosts are
identical.
"""
host_list = hosts.split(',')
if len(host_list) != len(list(set(host_list))):
raise ArgumentTypeError('Hosts must be unique')
return hosts
def parse_args(version: str) -> Namespace:
"""
Parse arguments passed to the application.
A custom argument parser handles multiple commands and options to launch
the desired function.
Parameters
----------
version : string
A ``string`` of the Bobber version.
Returns
-------
Namespace
Returns a ``Namespace`` of all of the arguments that were parsed from
the application during runtime.
"""
parser = ArgumentParser(f'Bobber Version: {version}')
parser.add_argument('--version', action='version', version=__version__)
# Required positional command subparser which should be specified first
commands = parser.add_subparsers(dest='command', metavar='command')
commands_parent = ArgumentParser(add_help=False)
# More general options which apply to a majority of the running commands
# Note that all arguments prepended with '--' are optional
commands_parent.add_argument('log_path', metavar='log-path', help='Path '
'used to store log files on the head node')
commands_parent.add_argument('hosts', help='Comma-separated list of '
'hostnames or IP addresses',
type=unique_hosts)
commands_parent.add_argument('--config-path', help='Read a JSON config '
'file with expected parameters and use those '
'values for testing. Ignores all other '
'optional flags')
commands_parent.add_argument('--gpus', help='Number of GPUs contained '
'within a system or systems under test '
'(heterogeneous counts not supported)',
type=int)
commands_parent.add_argument('--compute-gid', help='The compute gid. '
'Defaults to 0 - check with "show_gids" '
'command. A non-default gid is needed for '
'Ethernet (frequently gid 3)', type=int,
default=0)
commands_parent.add_argument('--nccl-tc', help='NCCL setting required to '
'use prio3 traffic for Ethernet. Set to 106 '
'for Ethernet, and do not set for IB.',
type=int)
commands_parent.add_argument('--batch-size-sm', help='Batch size to use '
'with DALI data ingest tests for small '
'images', type=int)
commands_parent.add_argument('--batch-size-lg', help='Batch size to use '
'with DALI data ingest tests for large '
'images', type=int)
commands_parent.add_argument('--nccl-max', help='Specify the maximum data '
'size to test with NCCL, in Gigabytes '
'(default is 1 GB)', type=int)
commands_parent.add_argument('--nccl-ib-hcas', help='Specify the list of '
'interfaces to use for NCCL test multinode '
'communication', default='')
commands_parent.add_argument('--ssh-iface', help='Specify ssh interface '
'for the system(s) under test ', default='')
commands_parent.add_argument('--no-direct', help='Disable running with '
'direct IO for applications that support it',
action='store_true')
commands_parent.add_argument('--io-depth', help='Customize the IO depth '
'for direct IO testing', type=int, default=16)
commands_parent.add_argument('--bw-threads', help='Maximum number of '
'threads to use for bandwidth tests',
type=int)
commands_parent.add_argument('--125k-threads', dest='stg_125k_threads',
help='Maximum number of threads to use for '
'125K IO size tests', type=int)
commands_parent.add_argument('--iops-threads', help='Maximum number of '
'threads to use for iops tests', type=int)
commands_parent.add_argument('--read-pattern', help='Specify IO pattern '
'for fio read tests. Supported values: '
'read, randread. Defaults to read.',
default='read',
choices=READ_PATTERNS)
commands_parent.add_argument('--write-pattern', help='Specify IO pattern '
'for fio write tests. Supported values: '
'write, randwrite. Defaults to write.',
default='write',
choices=WRITE_PATTERNS)
commands_parent.add_argument('--iterations', help='Number of iterations to'
' execute per test - a separate log file will'
' be generated for each iteration', type=int,
default=10)
commands_parent.add_argument('--sweep', help='If present, will run all '
'tests for all specified iterations from a '
'single system to the number of systems '
'specified in the --hosts flag, with a step '
'of a single system (so, 3 systems specified '
'would result in tests for 1, 2, and 3 '
'systems)', action='store_true')
commands_parent.add_argument('--system', help='If system is specified, '
'iops-threads, 125k-threads, bw-threads, '
'gpus, batch size, and network interface '
'names are given default values - override '
'by specifying the flags you\'d prefer to '
'override, ignore the flags you are ok with '
'using defaults for. Supported '
'systems: dgx-a100-single, '
'dgx-a100-dual, and dgx-2 for now. -single '
'is used for a system with a single storage '
'NIC, and -dual is used for a system with two'
' storage NICs', choices=SYSTEMS.keys())
commands_parent.add_argument('--stg-extra-flags', help='Experimental - '
'add extra flags to stg tests (currently '
'supported - stg-bw and stg-iops). If '
'providing more than one flag, wrap entire '
'set in quotes')
commands_parent.add_argument('--pause', help='Pause between tests for N '
'seconds to ensure any activity is finished '
'before the next test begins. Defaults to 0 '
'(no pause).', type=int, default=0)
# Create the test initiation commands with the general options above
commands.add_parser(RUN_ALL, help='Run all tests',
parents=[commands_parent])
commands.add_parser(RUN_DALI, help='Run DALI tests only',
parents=[commands_parent])
commands.add_parser(RUN_NCCL, help='Run NCCL tests only',
parents=[commands_parent])
commands.add_parser(RUN_STG_BW, help='Run storage bandwidth test only',
parents=[commands_parent])
commands.add_parser(RUN_STG_125K, help='Run storage 125K IO size test only',
parents=[commands_parent])
commands.add_parser(RUN_STG_IOPS, help='Run storage IOPS test only',
parents=[commands_parent])
commands.add_parser(RUN_STG_META, help='Run storage metadata test only',
parents=[commands_parent])
# Options specific to exporting the containers
export = commands.add_parser(EXPORT, help='Export the container for '
'multisystem tests')
# Options specific to parsing the results
parse = commands.add_parser(PARSE_RESULTS, help='Parse and display results '
'from the log files')
parse.add_argument('log_path', metavar='log-path', help='Path to saved '
'logfile location')
parse.add_argument('--json-filename', help='Specify the filename to use '
'for saving the JSON data. If not specified, the JSON '
'data will not be saved.', default=None, type=str)
parse.add_argument('--override-version-check', help='Optionally skip the '
'version check to ensure the same version of Bobber '
'was used for all tests.', action='store_true')
parse.add_argument('--compare-baseline', help='Compare the values produced'
' by a test run against a pre-defined baseline to '
'verify performance meets an acceptable threshold. '
'This command is ignored if the --custom-baseline flag '
'is used.',
choices=BASELINES)
parse.add_argument('--custom-baseline', help='Compare against a custom '
'baseline to verify performance meets an acceptable '
'threshold. This command overrides the '
'--compare-baseline flag.', type=str)
parse.add_argument('--baseline-tolerance', help='The percentage of '
'tolerance to include while comparing results against '
'a baseline. For example, if the desire is to allow '
'results to be within 5%% of the baseline and still '
'pass, enter "5" for the tolerance. This will only '
'measure tolerance below the result and will not punish'
' if numbers are higher than the baseline above the '
'tolerance level. This value is ignored if not running '
'the baseline comparison. Defaults to 0 tolerance.',
type=int, default=0)
parse.add_argument('--verbose', help='Display text-based information for '
'each system count in addition to the table.',
action='store_true')
# Options specific to building the containers
build = commands.add_parser(BUILD, help='Build the container')
# Options specific to casting the containers
cast = commands.add_parser(CAST, help='Start the container')
cast.add_argument('storage_path', metavar='storage-path', help='Path at '
'which the filesystem under test is mounted')
cast.add_argument('--ignore-gpu', help='Start the Bobber container '
'without GPUs', action='store_true')
# Options specific to loading a Docker image from a local binary
load = commands.add_parser(LOAD, help='Load a container from a local '
'binary')
load.add_argument('filename', help='Filename of local *.tar file of '
'the image to load')
return parser.parse_args()
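# Example invocation (hypothetical log path and host names), assuming the
# package is installed and exposes the `bobber` console script:
#
#   bobber run-all /tmp/bobber-logs host1,host2 --system dgx-a100-single --iterations 3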
def bobber_version() -> str:
"""
Find the Bobber version.
Read the version of Bobber from the VERSION file and return it as a
trimmed string.
Returns
-------
string
Returns a ``string`` of the version number without any whitespace.
"""
return __version__.strip()
def load_from_config(config_path: str) -> Namespace:
"""
Load a JSON config file and use those values.
If the --config-path flag is passed, the values should be read directly
from that file and used for a new test.
Parameters
----------
config_path : string
A ``string`` pointing to the JSON config file used during a previous
test.
Returns
-------
Namespace
Returns a ``Namespace`` of all previous settings to use for a new test
pass.
"""
with open(config_path, 'r') as config:
settings = json.loads(config.read())
return Namespace(**settings)
def save_config(args: Namespace) -> NoReturn:
"""
Save the settings as JSON.
The settings should be saved in the log directory as a JSON object to allow
a test to be reproduced later on with identical parameters.
Parameters
----------
args : Namespace
A ``Namespace`` of all settings that are used for a test pass.
"""
settings = vars(args)
with open(f'{args.log_path}/command_parameters.json', 'w') as fp:
fp.write(json.dumps(settings))
def load_settings(args: Namespace) -> Namespace:
"""
Load default settings prior to overriding requested flags.
If the --config-path flag is specified pointing to a JSON file with
settings used from a previous run, all values will be read directly from
that run and used for a new run.
If the --config-path flag is not specified, new settings will be pulled from
the CLI. When the --system flag is specified, several default parameters are
populated. These values need to be loaded into the args object first to
avoid them from being lost. By reading all other values specified in the
args object, the new arguments object should be updated. By default, the
--system variables are used, but in case of any collisions, the values
passed by the user are taken.
Parameters
----------
args : Namespace
The arguments that were passed by the user from the CLI.
Returns
-------
Namespace
Returns a ``Namespace`` of the final settings to use for all flags
based on the system defaults and the user-specified values.
"""
if args.config_path:
return load_from_config(args.config_path)
# Create a copy of the arguments so they aren't lost while setting the
# defaults from the --system flag.
args_copy = copy(args)
# First set the default values provided by the system.
if args.system:
for key, value in SYSTEMS[args.system].items():
setattr(args_copy, key, value)
# Capture any other arguments that were passed, and override the defaults
# if specified.
for arg in vars(args):
if getattr(args, arg):
setattr(args_copy, arg, getattr(args, arg))
args = copy(args_copy)
if args.no_direct:
setattr(args, 'direct', 0)
else:
setattr(args, 'direct', 1)
return args
def execute_command(args: Namespace, version: str) -> NoReturn:
"""
Execute a specific command from Bobber.
Call and run the command that was passed from the user via the CLI.
Parameters
----------
args : Namespace
A ``Namespace`` of all of the flags that were passed via the CLI.
version : string
A ``string`` of the Bobber version.
"""
if args.command == PARSE_RESULTS:
parse_results.main(args.log_path, args.compare_baseline,
args.custom_baseline, args.baseline_tolerance,
args.verbose, args.override_version_check,
args.json_filename)
elif args.command == BUILD:
bobber.lib.docker.build(version)
elif args.command == EXPORT:
bobber.lib.docker.export(version)
elif args.command == CAST:
bobber.lib.docker.cast(args.storage_path, args.ignore_gpu, version)
elif args.command == LOAD:
bobber.lib.docker.load(args.filename)
else:
# Update the version to be used in filenames
version_underscore = version.replace('.', '_')
args = load_settings(args)
create_directory(args.log_path)
save_config(args)
run_tests.test_selector(args, version_underscore)
def main() -> NoReturn:
"""
Launch a test with the Bobber framework.
Launch a number of quick benchmarks to measure single and multi-node
performance for AI-related systems.
"""
version = bobber_version()
args = parse_args(version)
execute_command(args, version)
if __name__ == "__main__":
main()
|
Bobber-main
|
bobber/bobber.py
|
# SPDX-License-Identifier: MIT
from bobber.__version__ import __version__
|
Bobber-main
|
bobber/__init__.py
|
# SPDX-License-Identifier: MIT
__version__ = '6.3.1'
|
Bobber-main
|
bobber/__version__.py
|
# SPDX-License-Identifier: MIT
BUILD = 'build'
EXPORT = 'export'
CAST = 'cast'
LOAD = 'load'
PARSE_RESULTS = 'parse-results'
RUN_ALL = 'run-all'
RUN_DALI = 'run-dali'
RUN_NCCL = 'run-nccl'
RUN_STG_BW = 'run-stg-bw'
RUN_STG_IOPS = 'run-stg-iops'
RUN_STG_125K = 'run-stg-125k'
RUN_STG_META = 'run-stg-meta'
DGX_A100_SINGLE = {
'gpus': 8,
'bw_threads': 16,
'stg_125k_threads': 16,
'iops_threads': 200,
'batch_size_sm': 512,
'batch_size_lg': 256,
'ssh_iface': 'enp226s0',
'nccl_ib_hcas': 'mlx5_0,mlx5_1,mlx5_2,mlx5_3,mlx5_4,mlx5_5,mlx5_6,mlx5_7',
'nccl_max': 4
}
DGX_A100_DUAL = {
'gpus': 8,
'bw_threads': 16,
'stg_125k_threads': 16,
'iops_threads': 200,
'batch_size_sm': 512,
'batch_size_lg': 256,
'ssh_iface': 'enp226s0',
'nccl_ib_hcas': 'mlx5_0,mlx5_1,mlx5_2,mlx5_3,mlx5_6,mlx5_7,mlx5_8,mlx5_9',
'nccl_max': 4
}
DGX_2 = {
'gpus': 16,
'bw_threads': 16,
'stg_125k_threads': 16,
'batch_size_sm': 150,
'batch_size_lg': 75,
'iops_threads': 80,
'ssh_iface': 'enp6s0',
'nccl_ib_hcas':
'mlx5_13,mlx5_15,mlx5_17,mlx5_19,mlx5_3,mlx5_5,mlx5_7,mlx5_9',
'nccl_max': 1
}
SYSTEMS = {
'dgx-a100-single': DGX_A100_SINGLE,
'dgx-a100-dual': DGX_A100_DUAL,
'dgx-2': DGX_2
}
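# Note: the keys in each system profile correspond to argparse destination
# names in bobber.py (e.g. 'bw_threads' for --bw-threads), since load_settings()
# applies them with setattr() on the parsed arguments; they therefore need to
# use underscores rather than hyphens.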
READ_PATTERNS = {
'read',
'randread'
}
WRITE_PATTERNS = {
'write',
'randwrite'
}
# Baseline Results
# This is considered a minimum value that tests should hit in order to verify
# the system has been configured properly for HPC and AI workloads.
SINGLE_DGX_STATION_BASELINE = {
'systems': {
'1': {
'bandwidth': {
# FIO BW speed in bytes/second
'read': 1200000000,
'write': 1000000000
},
'iops': {
# FIO IOPS speed in ops/second
'read': 100000,
'write': 100000
},
'nccl': {
# NCCL maximum bus bandwidth in GB/s
'max_bus_bw': 70
},
'dali': {
# DALI average speed in images/second
'800x600 standard jpg': 2000,
'3840x2160 standard jpg': 300,
'800x600 tfrecord': 2000,
'3840x2160 tfrecord': 250
}
}
}
}
DGX_A100_POD_BASELINE = {
'systems': {
'1': {
'bandwidth': {
# FIO BW speed in bytes/second
'read': 2250000000,
'write': 875000000
},
'iops': {
# FIO IOPS speed in ops/second
'read': 87500,
'write': 16250
},
'nccl': {
# NCCL maximum bus bandwidth in GB/s
'max_bus_bw': 230
},
'dali': {
# DALI average speed in images/second
'800x600 standard jpg': 2000,
'3840x2160 standard jpg': 1000,
'800x600 tfrecord': 4000,
'3840x2160 tfrecord': 1000
}
},
'2': {
'bandwidth': {
# FIO BW speed in bytes/second
'read': 4500000000,
'write': 1750000000
},
'iops': {
# FIO IOPS speed in ops/second
'read': 175000,
'write': 32500
},
'nccl': {
# NCCL maximum bus bandwidth in GB/s
'max_bus_bw': 180
},
'dali': {
# DALI average speed in images/second
'800x600 standard jpg': 4000,
'3840x2160 standard jpg': 2000,
'800x600 tfrecord': 8000,
'3840x2160 tfrecord': 2000
}
},
'3': {
'bandwidth': {
# FIO BW speed in bytes/second
'read': 6750000000,
'write': 2625000000
},
'iops': {
# FIO IOPS speed in ops/second
'read': 262500,
'write': 48750
},
'nccl': {
# NCCL maximum bus bandwidth in GB/s
'max_bus_bw': 180
},
'dali': {
# DALI average speed in images/second
'800x600 standard jpg': 6000,
'3840x2160 standard jpg': 3000,
'800x600 tfrecord': 12000,
'3840x2160 tfrecord': 3000
}
},
'4': {
'bandwidth': {
# FIO BW speed in bytes/second
'read': 9000000000,
'write': 3500000000
},
'iops': {
# FIO IOPS speed in ops/second
'read': 350000,
'write': 65000
},
'nccl': {
# NCCL maximum bus bandwidth in GB/s
'max_bus_bw': 180
},
'dali': {
# DALI average speed in images/second
'800x600 standard jpg': 8000,
'3840x2160 standard jpg': 4000,
'800x600 tfrecord': 16000,
'3840x2160 tfrecord': 4000
}
},
'5': {
'bandwidth': {
# FIO BW speed in bytes/second
'read': 11250000000,
'write': 4375000000
},
'iops': {
# FIO IOPS speed in ops/second
'read': 437500,
'write': 81250
},
'nccl': {
# NCCL maximum bus bandwidth in GB/s
'max_bus_bw': 180
},
'dali': {
# DALI average speed in images/second
'800x600 standard jpg': 20000,
'3840x2160 standard jpg': 5000,
'800x600 tfrecord': 20000,
'3840x2160 tfrecord': 5000
}
},
'6': {
'bandwidth': {
# FIO BW speed in bytes/second
'read': 13500000000,
'write': 5250000000
},
'iops': {
# FIO IOPS speed in ops/second
'read': 525000,
'write': 97500
},
'nccl': {
# NCCL maximum bus bandwidth in GB/s
'max_bus_bw': 180
},
'dali': {
# DALI average speed in images/second
'800x600 standard jpg': 24000,
'3840x2160 standard jpg': 6000,
'800x600 tfrecord': 24000,
'3840x2160 tfrecord': 6000
}
},
'7': {
'bandwidth': {
# FIO BW speed in bytes/second
'read': 15750000000,
'write': 6125000000
},
'iops': {
# FIO IOPS speed in ops/second
'read': 612500,
'write': 113750
},
'nccl': {
# NCCL maximum bus bandwidth in GB/s
'max_bus_bw': 180
},
'dali': {
# DALI average speed in images/second
'800x600 standard jpg': 28000,
'3840x2160 standard jpg': 7000,
'800x600 tfrecord': 28000,
'3840x2160 tfrecord': 7000
}
},
'8': {
'bandwidth': {
# FIO BW speed in bytes/second
'read': 18000000000,
'write': 7000000000
},
'iops': {
# FIO IOPS speed in ops/second
'read': 700000,
'write': 130000
},
'nccl': {
# NCCL maximum bus bandwidth in GB/s
'max_bus_bw': 180
},
'dali': {
# DALI average speed in images/second
'800x600 standard jpg': 32000,
'3840x2160 standard jpg': 8000,
'800x600 tfrecord': 32000,
'3840x2160 tfrecord': 8000
}
}
}
}
BASELINES = {
'single-dgx-station-baseline': SINGLE_DGX_STATION_BASELINE,
'dgx-a100-pod-baseline': DGX_A100_POD_BASELINE
}
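# Illustrative usage sketch (not part of the original module): SYSTEMS and
# BASELINES are plain lookup tables, so callers can resolve a hardware
# profile or a baseline by key. The keys used below exist in the
# dictionaries above; the printed fields are only an example.
if __name__ == '__main__':
    profile = SYSTEMS['dgx-a100-single']
    baseline = BASELINES['single-dgx-station-baseline']
    print(f"GPUs per node: {profile['gpus']}")
    print('Minimum single-node read bandwidth (bytes/second): '
          f"{baseline['systems']['1']['bandwidth']['read']}")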
|
Bobber-main
|
bobber/lib/constants.py
|
# SPDX-License-Identifier: MIT
# This file contains a list of exit codes for debugging
SUCCESS = 0 # Successful termination
BASELINE_FAILURE = 10 # Performance did not meet criteria
MISSING_LOG_FILES = 20 # Parsing directory with no logs
DOCKER_BUILD_FAILURE = 30 # Failure building Docker image
DOCKER_COMMUNICATION_ERROR = 31 # Unable to communicate with Docker
CONTAINER_NOT_RUNNING = 32 # Bobber container not running
NVIDIA_RUNTIME_ERROR = 33 # NVIDIA container runtime not found
CONTAINER_VERSION_MISMATCH = 34 # Container different from application
|
Bobber-main
|
bobber/lib/exit_codes.py
|
# SPDX-License-Identifier: MIT
|
Bobber-main
|
bobber/lib/__init__.py
|
# SPDX-License-Identifier: MIT
import docker
from bobber.lib.docker.management import DockerManager
manager = DockerManager()
# Map the instance methods to allow importing them as
# "bobber.lib.docker.<method>" in other modules.
build = manager.build
cast = manager.cast
execute = manager.execute
export = manager.export
load = manager.load
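# Illustrative usage sketch (not part of the original module): because the
# DockerManager methods are re-exported at module level, other modules can
# call them directly without instantiating the class themselves, e.g.:
#
#     import bobber.lib.docker
#
#     bobber.lib.docker.build('6.3.1')
#     bobber.lib.docker.cast('/mnt/storage', False, '6.3.1')
#
# The storage path and version string above are hypothetical.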
|
Bobber-main
|
bobber/lib/docker/__init__.py
|
# SPDX-License-Identifier: MIT
import docker
import os
import sys
from bobber.__version__ import __version__ as version
from bobber.lib.exit_codes import (CONTAINER_NOT_RUNNING,
CONTAINER_VERSION_MISMATCH,
DOCKER_BUILD_FAILURE,
DOCKER_COMMUNICATION_ERROR,
NVIDIA_RUNTIME_ERROR)
from bobber.lib.system.file_handler import update_log
from docker.models.containers import Container
from typing import NoReturn, Optional
class DockerManager:
"""
Build, launch, and execute commands for Docker containers.
The DockerManager provides a single interface accessible from the entire
Bobber package in which to communicate with Docker containers. The class
provides the ability to build new containers based on the provided
Dockerfile, launch the container with necessary settings for tests, and
execute commands inside the launched container to run tests. An instance
of this class is created in the bobber.lib.docker.__init__.py module and
can be accessed from other modules without re-instantiating the class.
"""
def __init__(self) -> NoReturn:
try:
self.client = docker.from_env()
self.cli = docker.APIClient(timeout=600)
except docker.errors.DockerException as e:
if 'error while fetching server api version' in str(e).lower():
print('Error: Could not communicate with the Docker daemon.')
print('Ensure Docker is running with "systemctl start docker"')
sys.exit(DOCKER_COMMUNICATION_ERROR)
def _build_if_not_built(self, tag: str, bobber_version: str) -> NoReturn:
"""
Build the image if not built already.
Check if an image exists for the local version of Bobber. If not, build
the image immediately.
Parameters
----------
tag : string
A ``string`` of the Bobber image name, such as
'nvidia/bobber:5.0.0'.
bobber_version : string
A ``string`` of the local version of Bobber, such as '5.0.0'.
"""
try:
self.client.images.get(tag)
except docker.errors.ImageNotFound:
print(f'Image {tag} not built, building now...')
self.build(bobber_version)
def get_tag(self, bobber_version: str) -> str:
"""
Create the image name.
Build the full image name including the tag, such as
'nvidia/bobber:5.0.0'.
Parameters
----------
bobber_version : string
A ``string`` of the local version of Bobber, such as '5.0.0'.
Returns
-------
str
Returns a ``string`` of the full image name plus tag, such as
'nvidia/bobber:5.0.0'.
"""
return f'nvidia/bobber:{bobber_version}'
def cast(self, storage_path: str, ignore_gpu: bool,
bobber_version: str) -> NoReturn:
"""
Launch a container with necessary settings.
Launch a Bobber image with various settings required to initiate the
testing framework, including attaching GPUs, starting an SSH daemon,
setting the container to privileged mode, and attaching a filesystem
to be accessible inside the container.
The launched container will be based on the Bobber image for the
current version of the application. If the image does not yet exist,
it will be built automatically. The launched container is named
'bobber'.
Parameters
----------
storage_path : string
A ``string`` of the absolute path to the storage location to test
against, such as `/mnt/storage`.
ignore_gpu : boolean
When `True`, launches the container without GPU resources. Defaults
to `False`.
bobber_version : string
A ``string`` of the local version of Bobber, such as '5.0.0'.
"""
tag = self.get_tag(bobber_version)
self._build_if_not_built(tag, bobber_version)
runtime = None
if not ignore_gpu:
runtime = 'nvidia'
try:
self.client.containers.run(
tag,
'bash -c "/usr/sbin/sshd; sleep infinity"',
detach=True,
auto_remove=True,
ipc_mode='host',
name='bobber',
network_mode='host',
privileged=True,
shm_size='1G',
runtime=runtime,
ulimits=[
docker.types.Ulimit(name='memlock',
soft=-1,
hard=-1),
docker.types.Ulimit(name='stack',
soft=67108864,
hard=67108864)
],
volumes={
f'{storage_path}': {
'bind': '/mnt/fs_under_test',
'mode': 'rw'
}
}
)
except docker.errors.APIError as e:
if 'Unknown runtime specified nvidia' in str(e):
print('NVIDIA container runtime not found. Ensure the latest '
'nvidia-docker libraries and NVIDIA drivers are '
'installed.')
sys.exit(NVIDIA_RUNTIME_ERROR)
def export(self, bobber_version: str) -> NoReturn:
"""
Save an image as a tarball.
To make it easy to transfer an image to multiple machines, the image
can be saved as a tarball which can be copied directly to a remote
device. On the other device, run the "load" command to load the copied
tarball.
Parameters
----------
bobber_version : string
A ``string`` of the local version of Bobber, such as '5.0.0'.
"""
tag = self.get_tag(bobber_version)
self._build_if_not_built(tag, bobber_version)
filename = tag.replace('/', '_').replace(':', '_')
print(f'Exporting {tag} to "{filename}.tar". This may take a while...')
image = self.cli.get_image(tag)
with open(f'{filename}.tar', 'wb') as image_file:
for chunk in image:
image_file.write(chunk)
print(f'{tag} saved to {filename}.tar')
def build(self, bobber_version: str) -> NoReturn:
"""
Build the image from the Dockerfile.
Build a new image based on the Dockerfile named
'nvidia/bobber:{version}'.
Parameters
----------
bobber_version : string
A ``string`` of the local version of Bobber, such as '5.0.0'.
"""
tag = self.get_tag(bobber_version)
print('Building a new image. This may take a while...')
# Set the build context to the root of the Bobber package directory.
path = os.path.dirname(os.path.abspath(__file__))
path = '/'.join(path.split('/')[:-2])
output = self.cli.build(path=path,
dockerfile='lib/docker/Dockerfile',
tag=tag,
decode=True)
for line in output:
if 'error' in line.keys():
print(line['error'].rstrip())
print(f'{tag} build failed. See error above.')
sys.exit(DOCKER_BUILD_FAILURE)
if 'stream' in line.keys() and line['stream'].strip() != '':
print(line['stream'].rstrip())
print(f'{tag} successfully built')
def load(self, filename: str) -> NoReturn:
"""
Load a Docker image from a tarball.
If a Bobber image was saved as a tarball using the "export" command, it
can be loaded on the system using the "load" command.
Parameters
----------
filename : string
A ``string`` of the filename for the local tarball to load, such as
'./nvidia_bobber_5.0.0.tar'.
"""
print(f'Importing {filename}. This may take a while...')
with open(filename, 'rb') as image_file:
self.client.images.load(image_file)
def execute(self, command: str, environment: Optional[dict] = None,
log_file: Optional[str] = None) -> NoReturn:
"""
Execute a command against the running container.
Assuming the Bobber container is already launched from the "cast"
command, execute a specific command and stream the output to the
terminal. Optionally specify a dictionary with any necessary
environment variables and a log file to save the output to.
Parameters
----------
command : string
A ``string`` of the command to run inside the container.
environment : dict (Optional)
A ``dictionary`` of environment variables to use where the keys are
the name of the variable and the values are the corresponding value
to set.
log_file : string (Optional)
A ``string`` of the path and filename to optionally save output to.
"""
if not self.running:
print('Bobber container not running. Launch a container with '
'"bobber cast" prior to running any tests.')
sys.exit(CONTAINER_NOT_RUNNING)
bobber = self.client.containers.get('bobber')
if not self.version_match(bobber):
print('Bobber container version mismatch.')
print('Kill the running Bobber container with "docker kill bobber"'
' and re-cast a new container with "bobber cast" prior to '
'running any tests.')
sys.exit(CONTAINER_VERSION_MISMATCH)
result = bobber.exec_run(
command,
demux=False,
environment=environment,
stream=True
)
# Continually print STDOUT and STDERR until there is nothing left
while True:
try:
output = next(result.output).decode('ascii')
print(output.rstrip())
if log_file:
update_log(log_file, output)
# Usually only happens for terminating characters at the end of
# streams
except UnicodeDecodeError:
print(result.output)
except StopIteration:
break
def version_match(self, container: Container) -> bool:
"""
Determine if the Bobber container version matches the application.
The running Bobber container version must match the local Bobber
application version to ensure all tests will function properly.
Parameters
----------
container : Container
A ``Container`` object representing the running Bobber image.
Returns
-------
bool
Returns `True` when the versions match and `False` when not.
"""
if f'nvidia/bobber:{version}' not in container.image.tags:
return False
return True
@property
def running(self) -> bool:
"""
Determine if the Bobber container is running on the system.
Check to see if the current version of the Bobber container is running
on the local machine and return the status. This method can be used to
determine whether or not to run a command that depends on the container
being launched.
Returns
-------
bool
Returns `True` when the container is running and `False` when not.
"""
try:
bobber = self.client.containers.get('bobber')
except docker.errors.NotFound:
return False
else:
return True
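# Illustrative usage sketch (not part of the original module): the shared
# instance created in bobber/lib/docker/__init__.py is the intended entry
# point, but the class can also be used directly. The paths and version
# below are hypothetical.
#
#     manager = DockerManager()
#     manager.cast('/mnt/storage', ignore_gpu=False, bobber_version='6.3.1')
#     manager.execute('nvidia-smi', log_file='/tmp/nvidia-smi.log')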
|
Bobber-main
|
bobber/lib/docker/management.py
|
# SPDX-License-Identifier: MIT
from functools import wraps
from typing import Callable, NoReturn
def average_decorator(func: Callable) -> Callable:
"""
A simple wrapper to calculate the average of a list.
This wrapper can be used on any function or method which returns a list of
ints or floats and calculates the average of those values. If the average
can't be calculated for any reason, the value will default to 0.0.
Parameters
----------
func : function/method
A function to be wrapped with the average decorator.
Returns
-------
Callable
Returns the wrapped function, which returns a ``float`` of the average
of the list produced by the original function, defaulting to 0.0.
"""
@wraps(func)
def wrapper(*args):
value = func(*args)
try:
return sum(value) / len(value)
except (TypeError, ValueError, ZeroDivisionError):
return 0.0
return wrapper
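# Illustrative example of the decorator's behaviour (not part of the original
# module): wrapping a function that returns a list of numbers yields their
# mean, while an empty or non-list return value falls back to 0.0.
#
#     @average_decorator
#     def _sample():
#         return [1.0, 2.0, 3.0]
#
#     _sample()  # -> 2.0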
class AggregateResults:
"""
Determine the aggregate values for all results.
Bobber test runs typically include multiple iterations of all tests in an
attempt to eliminate noise. In order to find the true result, all
iterations from a single test pass are averaged together. This is done on a
per-system count level where all N-iterations of the single-node tests are
aggregated together, then all N-iterations of the two-node tests (if
applicable) are aggregated together, and so on.
This class has a few helper methods to make it easy to output all data to
both JSON format and a string representing the results.
Parameters
----------
read_bw : dict
A ``dictionary`` containing all of the fio read bandwidth results for
N-systems.
write_bw : dict
A ``dictionary`` containing all of the fio write bandwidth results for
N-systems.
read_bw_params : dict
A ``dictionary`` of the parameters used during the fio read bandwidth
tests.
write_bw_params : dict
A ``dictionary`` of the parameters used during the fio write bandwidth
tests.
read_iops : dict
A ``dictionary`` containing all of the fio read iops results for
N-systems.
write_iops : dict
A ``dictionary`` containing all of the fio write iops results for
N-systems.
read_iops_params : dict
A ``dictionary`` of the parameters used during the fio read iops tests.
write_iops_params : dict
A ``dictionary`` of the parameters used during the fio write iops
tests.
read_125k_bw : dict
A ``dictionary`` containing all of the fio 125k read bandwidth results
for N-systems.
write_125k_bw : dict
A ``dictionary`` containing all of the fio 125k write bandwidth results
for N-systems.
read_125k_bw_params : dict
A ``dictionary`` of the parameters used during the fio 125k read
bandwidth tests.
write_125k_bw_params : dict
A ``dictionary`` of the parameters used during the fio 125k write
bandwidth tests.
max_bw : dict
A ``dictionary`` of the maximum bus bandwidth achieved from NCCL tests.
bytes_sizes : dict
A ``dictionary`` of the byte size used when the maximum bus bandwidth
was achieved for NCCL tests.
dali_results : dict
A ``dictionary`` of the DALI throughput for all image sizes and types
in images/second.
metadata : dict
A ``dictionary`` of the max, min, and mean values for all metadata
operations.
systems : int
An ``int`` for the number of systems the current results represent.
"""
def __init__(self,
read_bw: dict,
write_bw: dict,
read_bw_params: dict,
write_bw_params: dict,
read_iops: dict,
write_iops: dict,
read_iops_params: dict,
write_iops_params: dict,
read_125k_bw: dict,
write_125k_bw: dict,
read_125k_bw_params: dict,
write_125k_bw_params: dict,
max_bw: dict,
bytes_sizes: dict,
dali_results: dict,
metadata: dict,
systems: int) -> NoReturn:
self._read_bw = read_bw
self._read_bw_params = read_bw_params
self._read_iops = read_iops
self._read_iops_params = read_iops_params
self._125k_read_bw = read_125k_bw
self._125k_read_bw_params = read_125k_bw_params
self._write_bw = write_bw
self._write_bw_params = write_bw_params
self._write_iops = write_iops
self._write_iops_params = write_iops_params
self._125k_write_bw = write_125k_bw
self._125k_write_bw_params = write_125k_bw_params
self._max_bw = max_bw
self._bytes_sizes = bytes_sizes
self._dali_results = dali_results
self._metadata = metadata
self._num_systems = systems
def __str__(self) -> str:
"""
A helper function to display results in human-readable text.
Find the aggregate results for each test for N-systems and return the
final output as a string, similar to the following:
Systems tested: 1
Aggregate Read Bandwidth: 1.595 GB/s
Aggregate Write Bandwidth: 1.232 GB/s
Aggregate Read IOPS: 136.5 k IOPS
Aggregate Write IOPS: 135.0 k IOPS
Aggregate 125k Read Bandwidth: 1.595 GB/s
Aggregate 125k Write Bandwidth: 1.232 GB/s
NCCL Max Bus Bandwidth: 79.865 at 512.0 MB
Mdtest
Directory creation: 71406.29550000001 ops
Directory stat: 2698234.1525 ops
Directory removal: 16016.5275 ops
File creation: 137218.586 ops
File stat: 2705405.084 ops
File read: 2230275.9365 ops
File removal: 175736.5435 ops
Tree creation: 1546.792 ops
Tree removal: 5878.747 ops
DALI Standard 800x600
Min Speed: 2509.35 images/second (0.727 GB/s)
Avg Speed: 2694.595 images/second (0.78 GB/s)
DALI Standard 3840x2160
Min Speed: 344.078 images/second (1.712 GB/s)
Avg Speed: 430.854 images/second (2.144 GB/s)
DALI TFRecord 800x600
Min Speed: 2508.069 images/second (0.726 GB/s)
Avg Speed: 2665.653 images/second (0.772 GB/s)
DALI TFRecord 3840x2160
Min Speed: 317.276 images/second (1.579 GB/s)
Avg Speed: 376.862 images/second (1.875 GB/s)
Returns
-------
str
Returns a ``string`` of the final aggregate results for N-systems.
"""
values_to_print = [
# [Field name, value, unit]
['Systems tested:', self._num_systems, ''],
['Aggregate Read Bandwidth:', self.average_read_bw, ' GB/s'],
['Aggregate Write Bandwidth:', self.average_write_bw, ' GB/s'],
['Aggregate 125k Read Bandwidth:', self.average_125k_read_bw,
' GB/s'],
['Aggregate 125k Write Bandwidth:', self.average_125k_write_bw,
' GB/s'],
['Aggregate Read IOPS:', self.average_read_iops, 'k IOPS'],
['Aggregate Write IOPS:', self.average_write_iops, 'k IOPS'],
]
output = ''
for item in values_to_print:
field, value, unit = item
if value:
output += f'{field} {value} {unit}\n'
if round(self.max_bus_bandwidth, 3) != 0.0:
output += ('NCCL Max Bus Bandwidth: '
f'{round(self.max_bus_bandwidth, 3)} '
f'at {self.max_bus_bytes / 1024 / 1024} MB')
if self._metadata:
output += '\n'
output += self._metadata_print()
if self._dali_results_print('800x600 standard jpg'):
output += (f"""
DALI Standard 800x600{self._dali_results_print('800x600 standard jpg')}
DALI Standard 3840x2160{self._dali_results_print('3840x2160 standard jpg')}
DALI TFRecord 800x600{self._dali_results_print('800x600 tfrecord')}
DALI TFRecord 3840x2160{self._dali_results_print('3840x2160 tfrecord')}
""")
else:
output += '\n'
return output
def _metadata_print(self) -> str:
"""
Determine and return the metadata results.
Iterate through all of the final metadata results for each operation
type and generate the aggregate number of operations for all
iterations.
Returns
-------
str
Returns a ``string`` of the formatted metadata results.
"""
output = 'Mdtest\n'
# Guard against missing or empty metadata for this system count.
if not self._metadata.get(self._num_systems):
return ''
for key, values in self._metadata[self._num_systems].items():
output += (f" {key}: {values['mean']} ops\n")
return output
def _dali_results_print(self, size: str) -> str:
"""
Determine and return the DALI results.
Calculate the minimum and average speed in images/second and the
resulting bandwidth for each image type and format and return the
result as a string.
Parameters
----------
size : str
The size and type of image to parse, such as '800x600 tfrecord'.
Returns
-------
str
Returns a ``string`` of the formatted DALI results.
"""
try:
dali_results = self._dali_results[self._num_systems]
except KeyError:
return ''
min_speed = round(dali_results[size]['min images/second'], 3)
min_bw = round(dali_results[size]['min bandwidth'] * 1e-9, 3)
avg_speed = round(dali_results[size]['average images/second'], 3)
avg_bw = round(dali_results[size]['average bandwidth'] * 1e-9, 3)
output = (f"""
Min Speed: {min_speed} images/second ({min_bw} GB/s)
Avg Speed: {avg_speed} images/second ({avg_bw} GB/s)""")
return output
@property
def json(self) -> dict:
"""
Generate a JSON representation of the results.
Creating a JSON dump of the results makes it easier for remote tools to
archive or display results in an easily ingestible format, such as
webpages or databases.
Returns
-------
dict
Returns a JSON-parsable ``dictionary`` representation of all of the
results including parameters and units where applicable.
"""
results = {
'systems_tested': self._num_systems,
'bandwidth': {
'read': self._average_read_bw(),
'write': self._average_write_bw(),
'unit': 'bytes/second',
'parameters': {
'read': self._read_bw_params,
'write': self._write_bw_params
}
},
'iops': {
'read': self._average_read_iops(),
'write': self._average_write_iops(),
'unit': 'operations/second',
'parameters': {
'read': self._read_iops_params,
'write': self._write_iops_params
}
},
'125k_bandwidth': {
'read': self._average_125k_read_bw(),
'write': self._average_125k_write_bw(),
'unit': 'bytes/second',
'parameters': {
'read': self._125k_read_bw_params,
'write': self._125k_write_bw_params
}
},
'nccl': {
'max_bus_bw': self.max_bus_bandwidth,
'max_bus_bytes': self.max_bus_bytes,
'max_bus_bw_units': 'GB/s'
}
}
try:
results['dali'] = self._dali_results[self._num_systems]
except KeyError:
results['dali'] = {}
return results
@average_decorator
def _average_read_bw(self) -> float:
"""
Returns the average read bandwidth as a ``float`` for all iterations
in B/s. Defaults to 0.0.
"""
try:
return self._read_bw[self._num_systems]
except KeyError:
return 0.0
@property
def average_read_bw(self) -> float:
"""
Returns the average read bandwidth as a ``float`` for all iterations
in GB/s, rounded to the nearest thousandth.
"""
return round(self._average_read_bw() * 1e-9, 3)
@average_decorator
def _average_write_bw(self) -> float:
"""
Returns the average write bandwidth as a ``float`` for all iterations
in B/s. Defaults to 0.0
"""
try:
return self._write_bw[self._num_systems]
except KeyError:
return 0.0
@property
def average_write_bw(self) -> float:
"""
Returns the average write bandwidth as a ``float`` for all iterations
in GB/s, rounded to the nearest thousandth.
"""
return round(self._average_write_bw() * 1e-9, 3)
@average_decorator
def _average_125k_read_bw(self) -> float:
"""
Returns the average 125k read bandwidth as a ``float`` for all
iterations in B/s. Defaults to 0.0.
"""
try:
return self._125k_read_bw[self._num_systems]
except KeyError:
return 0.0
@property
def average_125k_read_bw(self) -> float:
"""
Returns the average 125k read bandwidth as a ``float`` for all
iterations in GB/s, rounded to the nearest thousandth.
"""
return round(self._average_125k_read_bw() * 1e-9, 3)
@average_decorator
def _average_125k_write_bw(self) -> float:
"""
Returns the average 125k write bandwidth as a ``float`` for all
iterations in B/s. Defaults to 0.0
"""
try:
return self._125k_write_bw[self._num_systems]
except KeyError:
return 0.0
@property
def average_125k_write_bw(self) -> float:
"""
Returns the average 125k write bandwidth as a ``float`` for all
iterations in GB/s, rounded to the nearest thousandth.
"""
return round(self._average_125k_write_bw() * 1e-9, 3)
@average_decorator
def _average_read_iops(self) -> float:
"""
Returns the average read IOPS as a ``float`` for all iterations in
ops/second. Defaults to 0.0.
"""
try:
return self._read_iops[self._num_systems]
except KeyError:
return 0.0
@property
def average_read_iops(self) -> float:
"""
Returns the average read IOPS as a ``float`` for all iterations in K
ops/second.
"""
return round(self._average_read_iops() * 1e-3, 3)
@average_decorator
def _average_write_iops(self) -> float:
"""
Returns the average write IOPS as a ``float`` for all iterations in
ops/second. Defaults to 0.0.
"""
try:
return self._write_iops[self._num_systems]
except KeyError:
return 0.0
@property
def average_write_iops(self) -> float:
"""
Returns the average write IOPS as a ``float`` for all iterations in K
ops/second.
"""
return round(self._average_write_iops() * 1e-3, 3)
@property
@average_decorator
def max_bus_bandwidth(self) -> float:
"""
Returns the average of the maximum bandwidth achieved as a ``float``
in NCCL in GB/s. Defaults to 0.0
"""
try:
return self._max_bw[self._num_systems]
except KeyError:
return 0.0
@property
def max_bus_bytes(self) -> float:
"""
Returns the associated byte size for the maximum bandwidth achieved in
NCCL as a ``float``. Defaults to 0.0
"""
try:
return int(max(self._bytes_sizes[self._num_systems],
key=self._bytes_sizes[self._num_systems].count))
except (ValueError, KeyError):
return 0.0
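# Illustrative usage sketch (not part of the original module): parse_results
# constructs this class once per system count and consumes both the string
# and JSON representations. The variable names below are hypothetical.
#
#     aggregate = AggregateResults(read_bw, write_bw, read_bw_params,
#                                  write_bw_params, read_iops, write_iops,
#                                  read_iops_params, write_iops_params,
#                                  read_125k_bw, write_125k_bw,
#                                  read_125k_bw_params, write_125k_bw_params,
#                                  max_bw, bytes_sizes, dali_results,
#                                  metadata, systems=1)
#     print(aggregate)        # human-readable summary
#     data = aggregate.json   # dictionary suitable for json.dump()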
|
Bobber-main
|
bobber/lib/analysis/aggregate_results.py
|
# SPDX-License-Identifier: MIT
import json
import sys
from collections import defaultdict
from glob import glob
from os.path import join
from bobber.lib.exit_codes import MISSING_LOG_FILES, SUCCESS
from bobber.lib.analysis.aggregate_results import AggregateResults
from bobber.lib.analysis.common import (check_bobber_version,
divide_logs_by_systems)
from bobber.lib.analysis.compare_baseline import compare_baseline
from bobber.lib.analysis.dali import parse_dali_file
from bobber.lib.analysis.fio import parse_fio_bw_file, parse_fio_iops_file
from bobber.lib.analysis.meta import parse_meta_file
from bobber.lib.analysis.nccl import parse_nccl_file
from bobber.lib.analysis.table import display_table
from bobber.lib.system.file_handler import write_file
from typing import NoReturn, Optional, Tuple
def get_files(directory: str) -> list:
"""
Read all log files.
Given an input directory as a string, read all log files and return the
filenames including the directory as a list.
Parameters
----------
directory : str
A ``string`` pointing to the results directory.
Returns
-------
list
Returns a ``list`` of ``strings`` of the paths to each log file in the
results directory.
"""
return glob(join(directory, '*.log'))
def parse_fio_bw(log_files: list) -> Tuple[dict, dict, dict, dict]:
"""
Parse all FIO bandwidth logs.
Find each FIO bandwidth log in the results directory and parse the read and
write results and parameters from each log for all system counts.
Parameters
----------
log_files : list
A ``list`` of ``strings`` of the paths to each log file in the results
directory.
Returns
-------
tuple
A ``tuple`` of four dictionaries containing the read results, write
results, read parameters, and write parameters, respectively for all
system counts.
"""
read_sys_results = defaultdict(list)
write_sys_results = defaultdict(list)
read_params, write_params = None, None
fio_logs_by_systems = divide_logs_by_systems(log_files, 'stg_bw_iteration')
for systems, files in fio_logs_by_systems.items():
read_sys_results, write_sys_results, read_params, write_params = \
parse_fio_bw_file(files,
systems,
read_sys_results,
write_sys_results)
return read_sys_results, write_sys_results, read_params, write_params
def parse_fio_iops(log_files: list) -> Tuple[dict, dict, dict, dict]:
"""
Parse all FIO IOPS logs.
Find each FIO IOPS log in the results directory and parse the read and
write results and parameters from each log for all system counts.
Parameters
----------
log_files : list
A ``list`` of ``strings`` of the paths to each log file in the results
directory.
Returns
-------
tuple
A ``tuple`` of four dictionaries containing the read results, write
results, read parameters, and write parameters, respectively for all
system counts.
"""
read_sys_results = defaultdict(list)
write_sys_results = defaultdict(list)
read_params, write_params = None, None
fio_logs_by_systems = divide_logs_by_systems(log_files,
'stg_iops_iteration')
for systems, files in fio_logs_by_systems.items():
read_sys_results, write_sys_results, read_params, write_params = \
parse_fio_iops_file(files,
systems,
read_sys_results,
write_sys_results)
return read_sys_results, write_sys_results, read_params, write_params
def parse_fio_125k_bw(log_files: list) -> Tuple[dict, dict, dict, dict]:
"""
Parse all FIO 125k bandwidth logs.
Find each FIO 125k bandwidth log in the results directory and parse the
read and write results and parameters from each log for all system counts.
Parameters
----------
log_files : list
A ``list`` of ``strings`` of the paths to each log file in the results
directory.
Returns
-------
tuple
A ``tuple`` of four dictionaries containing the 125k read results, 125k
write results, 125k read parameters, and 125k write parameters for all
system counts.
"""
read_sys_results = defaultdict(list)
write_sys_results = defaultdict(list)
read_params, write_params = None, None
fio_logs_by_systems = divide_logs_by_systems(log_files,
'stg_125k_iteration')
for systems, files in fio_logs_by_systems.items():
read_sys_results, write_sys_results, read_params, write_params = \
parse_fio_bw_file(files,
systems,
read_sys_results,
write_sys_results)
return read_sys_results, write_sys_results, read_params, write_params
def parse_nccl(log_files: list) -> Tuple[dict, dict]:
"""
Parse all NCCL logs.
Find the maximum bus bandwidth and resulting byte size for all NCCL files
for all system counts.
Parameters
----------
log_files : list
A ``list`` of ``strings`` of the paths to each log file in the results
directory.
Returns
-------
tuple
Returns a ``tuple`` of (``dict``, ``dict``) representing the maximum
bus bandwidth and corresponding byte size for all system counts.
"""
bw_results = defaultdict(list)
bytes_results = defaultdict(list)
nccl_logs_by_systems = divide_logs_by_systems(log_files, 'nccl')
for systems, files in nccl_logs_by_systems.items():
max_bw, byte_size = parse_nccl_file(files, systems)
bw_results[systems] = max_bw
bytes_results[systems] = byte_size
return bw_results, bytes_results
def parse_dali(log_files: list) -> dict:
"""
Parse all DALI logs.
Parse the bandwidth and throughput for all image types and sizes from all
DALI log files.
Parameters
----------
log_files : list
A ``list`` of ``strings`` of the paths to each log file in the results
directory.
Returns
-------
dict
Returns a ``dictionary`` of the throughput and bandwidth for all system
counts.
"""
results_dict = {}
dali_logs_by_systems = divide_logs_by_systems(log_files, 'dali')
for systems, files in dali_logs_by_systems.items():
results_dict = parse_dali_file(files, systems, results_dict)
return results_dict
def parse_meta(log_files: list) -> dict:
"""
Parse all metadata logs.
Parse the minimum, maximum, and mean values for all operations in the
metadata log files.
Parameters
----------
log_files : list
A ``list`` of ``strings`` of the paths to each log file in the results
directory.
Returns
-------
dict
Returns a ``dictionary`` of the results from various metadata
operations for all system counts.
"""
results_dict = {}
meta_logs_by_systems = divide_logs_by_systems(log_files, 'stg_meta')
for systems, files in meta_logs_by_systems.items():
results_dict = parse_meta_file(files, systems, results_dict)
return results_dict
def save_json(final_dictionary_output: dict, filename: str) -> NoReturn:
"""
Save results to a file.
Save the final JSON data to a file for future reference. If the filename is
not specified, don't save the file.
Parameters
----------
final_dictionary_output : dict
A ``dictionary`` of the final JSON output to save.
filename : str
A ``string`` of the filename to write the JSON data to.
"""
if not filename:
return
with open(filename, 'w') as json_file:
json.dump(final_dictionary_output, json_file)
print(f'JSON data saved to {filename}')
def save_yaml_baseline(final_dictionary_output: dict,
directory: str) -> NoReturn:
"""
Save results as a YAML baseline file.
The parsed results should be saved as a YAML baseline file which can be
used to compare similar systems against existing results. The YAML file
will be saved in the results directory as "baseline.yaml".
Parameters
----------
final_dictionary_output : dict
A ``dictionary`` of the parsed results on a per-system level.
directory : str
A ``string`` of the directory where results are saved.
"""
contents = 'systems:\n'
for systems, results in final_dictionary_output['systems'].items():
dali = results.get('dali', {})
small_jpg = dali.get('800x600 standard jpg', {})
large_jpg = dali.get('3840x2160 standard jpg', {})
small_tf = dali.get('800x600 tfrecord', {})
large_tf = dali.get('3840x2160 tfrecord', {})
contents += f""" {systems}:
bandwidth:
# FIO BW speed in bytes/second
read: {results.get('bandwidth', {}).get('read', 0)}
write: {results.get('bandwidth', {}).get('write', 0)}
iops:
# FIO IOPS speed in ops/second
read: {results.get('iops', {}).get('read', 0)}
write: {results.get('iops', {}).get('write', 0)}
125k_bandwidth:
# FIO 125k BW speed in bytes/second
read: {results.get('125k_bandwidth', {}).get('read', 0)}
write: {results.get('125k_bandwidth', {}).get('write', 0)}
nccl:
# NCCL maximum bus bandwidth in GB/s
max_bus_bw: {results.get('nccl', {}).get('max_bus_bw', 0)}
dali:
# DALI average speed in images/second
800x600 standard jpg: {small_jpg.get('average images/second', 0)}
3840x2160 standard jpg: {large_jpg.get('average images/second', 0)}
800x600 tfrecord: {small_tf.get('average images/second', 0)}
3840x2160 tfrecord: {large_tf.get('average images/second', 0)}
"""
write_file(f'{directory}/baseline.yaml', contents)
def main(directory: str,
baseline: Optional[str] = None,
custom_baseline: Optional[str] = None,
tolerance: Optional[int] = 0,
verbose: Optional[bool] = False,
override_version_check: Optional[bool] = False,
json_filename: Optional[str] = None) -> NoReturn:
"""
Parse all results on a per-system level.
Read all log files from a results directory and iterate through the results
on a per-system level. The results displayed are of the aggregate value for
each system count.
A baseline can be optionally included to compare the results in the output
directory against pre-configured results to verify performance meets
desired levels.
Parameters
----------
directory : str
A ``string`` of the directory where results are located.
baseline : str (optional)
A ``string`` representing the key from the included baselines to
compare results to.
custom_baseline : str (optional)
A ``string`` of the filename to a custom YAML config file to read and
compare results to.
tolerance : int (optional)
An ``integer`` of the tolerance as a percentage below the baseline to
allow results to still be marked as passing.
verbose : bool (optional)
A ``boolean`` that prints additional textual output when `True`.
override_version_check : bool (optional)
A ``boolean`` which skips checking the Bobber version tested when
`True`.
json_filename : str (optional)
A ``string`` of the filename to save JSON data to.
"""
final_dictionary_output = {'systems': {}}
log_files = get_files(directory)
if len(log_files) < 1:
print('No log files found. Please specify a directory containing '
'valid logs.')
print('Exiting...')
sys.exit(MISSING_LOG_FILES)
bobber_version = check_bobber_version(log_files,
override_version_check)
bw_results = parse_fio_bw(log_files)
read_bw, write_bw, read_bw_params, write_bw_params = bw_results
bw_125k_results = parse_fio_125k_bw(log_files)
read_125k_bw, write_125k_bw, read_125k_bw_params, write_125k_bw_params = \
bw_125k_results
iops_results = parse_fio_iops(log_files)
read_iops, write_iops, read_iops_params, write_iops_params = iops_results
metadata = parse_meta(log_files)
max_bw, bytes_sizes = parse_nccl(log_files)
dali_results = parse_dali(log_files)
total_systems = 0
systems = []
for result in [read_bw, read_iops, read_125k_bw, max_bw, dali_results,
metadata]:
try:
total_systems = max(result.keys())
systems = sorted(result.keys())
except ValueError:
continue
else:
break
for system_num in systems:
aggregate = AggregateResults(read_bw,
write_bw,
read_bw_params,
write_bw_params,
read_iops,
write_iops,
read_iops_params,
write_iops_params,
read_125k_bw,
write_125k_bw,
read_125k_bw_params,
write_125k_bw_params,
max_bw,
bytes_sizes,
dali_results,
metadata,
system_num)
final_dictionary_output['systems'][str(system_num)] = aggregate.json
if verbose:
print(aggregate)
final_dictionary_output['total_systems'] = total_systems
final_dictionary_output['bobber_version'] = bobber_version
display_table(final_dictionary_output)
save_yaml_baseline(final_dictionary_output, directory)
save_json(final_dictionary_output, json_filename)
if custom_baseline:
compare_baseline(final_dictionary_output, custom_baseline, tolerance,
custom=True)
elif baseline:
compare_baseline(final_dictionary_output, baseline, tolerance)
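# Illustrative usage sketch (not part of the original module): the Bobber CLI
# wires this up from parsed arguments, but main() can also be called
# directly. The directory and options below are hypothetical.
#
#     main('/path/to/results',
#          baseline='single-dgx-station-baseline',
#          tolerance=5,
#          verbose=True)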
|
Bobber-main
|
bobber/lib/analysis/parse_results.py
|
# SPDX-License-Identifier: MIT
import sys
from bobber.lib.constants import BASELINES
from bobber.lib.exit_codes import BASELINE_FAILURE
from bobber.lib.analysis.common import bcolors
from bobber.lib.system.file_handler import read_yaml
from typing import NoReturn, Optional, Tuple
# Map the dictionary keys in the baseline to human-readable names.
TEST_MAPPING = {
'bandwidth': 'FIO Bandwidth',
'iops': 'FIO IOPS',
'nccl': 'NCCL',
'dali': 'DALI'
}
def metric_passes(expected: float, got: float, tolerance: int) -> bool:
"""
Determine if a test result meets a particular threshold.
Compare the parsed value with the requested baseline for the same test and
return a boolean indicating whether it is greater than expected. If a
tolerance of N percent is passed, any value that falls no more than
N percent below the baseline will still be marked as passing.
Parameters
----------
expected : float
A ``float`` of the baseline value to compare against.
got : float
A ``float`` of the test result that was parsed.
tolerance : int
An ``int`` of the percentage below the threshold to still mark as
passing.
Returns
-------
bool
Returns a ``boolean`` which evaluates to `True` when the parsed value
is greater than the baseline and `False` otherwise.
"""
if tolerance > 0:
# If the user passes a 5% tolerance, reduce the expected value by 5% so
# results slightly below the baseline can still pass.
expected = (1 - tolerance / 100) * expected
return got > expected
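# Worked example (illustrative, not part of the original module): with a
# baseline of 100.0 and a 5% tolerance, the effective threshold becomes
# (1 - 5 / 100) * 100.0 = 95.0, so a parsed result of 96.0 passes while a
# result of 94.0 fails.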
def result_text(result: bool, failures: int) -> Tuple[str, int]:
"""
Color-code the result output.
If the result passes the threshold, it will be marked as PASSing in green
text. Otherwise, it will be marked as FAILing in red text.
Parameters
----------
result : bool
A ``boolean`` which evaluates to `True` when the value meets the
threshold and `False` if not.
failures : int
An ``integer`` of the number of results that have not met the
threshold.
Returns
-------
tuple
Returns a ``tuple`` of (``str``, ``int``) representing the color-coded
text and the number of failures found, respectively.
"""
if result:
output = f'{bcolors.PASS}PASS{bcolors.ENDC}'
else:
failures += 1
output = f'{bcolors.FAIL}FAIL{bcolors.ENDC}'
return output, failures
def evaluate_fio(baselines: dict, results: dict, test_name: str, failures: int,
tolerance: int) -> int:
"""
Evaluate the fio test results against the baseline.
Determine if the fio test results meet the expected threshold and display
the outcome with appropriate units.
Parameters
----------
baselines : dict
A ``dictionary`` of the baseline to compare results against.
results : dict
A ``dictionary`` of the parsed results.
test_name : str
A ``string`` of the name of the test being parsed.
failures : int
An ``integer`` of the number of results that have not met the
threshold.
tolerance : int
An ``int`` of the percentage below the threshold to still mark as
passing.
Returns
-------
int
Returns an ``integer`` of the number of results that have not met the
threshold.
"""
for test, value in baselines.items():
if test_name not in results.keys():
continue
if test_name == 'bandwidth':
unit = '(GB/s)'
expected = value / 1000000000
got = round(results[test_name][test] / 1000000000, 3)
elif test_name == 'iops':
unit = '(k IOPS)'
expected = value / 1000
got = round(results[test_name][test] / 1000, 3)
print(f' {TEST_MAPPING[test_name]} {test.title()} {unit}')
text = f' Expected: {expected}, Got: {got}'
result = metric_passes(expected, got, tolerance)
output, failures = result_text(result, failures)
text += f', Result: {output}'
print(text)
return failures
def evaluate_nccl(baseline: dict, results: dict, failures: int,
tolerance: int) -> int:
"""
Evaluate the NCCL test results against the baseline.
Determine if the NCCL test results meet the expected threshold and display
the outcome with appropriate units.
Parameters
----------
baseline : dict
A ``dictionary`` of the baseline to compare results against.
results : dict
A ``dictionary`` of the parsed results.
failures : int
An ``integer`` of the number of results that have not met the
threshold.
tolerance : int
An ``int`` of the percentage below the threshold to still mark as
passing.
Returns
-------
int
Returns an ``integer`` of the number of results that have not met the
threshold.
"""
if 'max_bus_bw' not in baseline.keys():
return failures
print(' NCCL Max Bus Bandwidth (GB/s)')
expected = baseline['max_bus_bw']
got = results['nccl']['max_bus_bw']
text = f' Expected: {expected}, Got: {got}'
result = metric_passes(expected, got, tolerance)
output, failures = result_text(result, failures)
text += f', Result: {output}'
print(text)
return failures
def evaluate_dali(baselines: dict, results: dict, test_name: str,
failures: int, tolerance: int) -> int:
"""
Evaluate the DALI test results against the baseline.
Determine if the DALI test results meet the expected threshold and display
the outcome with appropriate units.
Parameters
----------
baselines : dict
A ``dictionary`` of the baseline to compare results against.
results : dict
A ``dictionary`` of the parsed results.
test_name : str
A ``string`` of the name of the test being parsed.
failures : int
An ``integer`` of the number of results that have not met the
threshold.
tolerance : int
An ``int`` of the percentage below the threshold to still mark as
passing.
Returns
-------
int
Returns an ``integer`` of the number of results that have not met the
threshold.
"""
for test, value in baselines.items():
if test not in results.keys():
continue
print(f' DALI {test} (images/second)')
expected = value
got = round(results[test]['average images/second'], 3)
text = f' Expected: {expected}, Got: {got}'
result = metric_passes(expected, got, tolerance)
output, failures = result_text(result, failures)
text += f', Result: {output}'
print(text)
return failures
def evaluate_test(baseline: dict, results: dict, system_count: int,
tolerance: int, failures: int) -> int:
"""
Evaluate all tests for N-nodes and compare against the baseline.
The comparison verifies results meet a certain threshold for each system
count in a sweep. For example, in an 8-node sweep, compare the one-node
results to the baseline before comparing the two-node results and so on.
Parameters
----------
baseline : dict
A ``dictionary`` of the baseline to compare results against.
results : dict
A ``dictionary`` of the parsed results.
system_count : int
An ``int`` of the number of systems that were tested for each
comparison level.
tolerance : int
An ``int`` of the percentage below the threshold to still mark as
passing.
failures : int
An ``integer`` of the number of results that have not met the
threshold.
Returns
-------
int
Returns an ``integer`` of the number of results that have not met the
threshold.
"""
for test_name, test_values in baseline.items():
print('-' * 80)
if test_name in ['bandwidth', 'iops']:
failures = evaluate_fio(test_values, results, test_name, failures,
tolerance)
elif test_name == 'nccl':
failures = evaluate_nccl(test_values, results, failures, tolerance)
elif test_name == 'dali':
failures = evaluate_dali(test_values,
results['dali'],
test_name,
failures,
tolerance)
return failures
def compare_baseline(results: dict, baseline: str, tolerance: int,
custom: Optional[bool] = False) -> NoReturn:
"""
Compare a baseline against parsed results.
Pull the requested baseline either from a custom YAML file or one of the
existing baselines included with the application and compare against the
parsed results by checking if the parsed result is greater than the
baseline on a per-system basis.
Parameters
----------
results : dict
A ``dictionary`` of the complete set of results from a parsed
dictionary.
baseline : str
A ``string`` of the baseline to use. This either represents a key from
the included baselines, or a filename to a custom YAML config file to
read.
tolerance : int
An ``int`` of the tolerance as a percentage below the baseline to allow
results to still be marked as passing.
custom : bool (optional)
An optional ``boolean`` that, when `True`, will read in a baseline
passed from a YAML file. If `False`, it will compare against an
included baseline.
"""
failures = 0
print('=' * 80)
print('Baseline assessment')
if custom:
print('Comparing against a custom config')
baseline = read_yaml(baseline)
else:
print(f'Comparing against "{baseline}"')
baseline = BASELINES[baseline]
if tolerance > 0:
print(f'Allowing a tolerance of {tolerance}% below expected to PASS')
for system_count, baseline_results in baseline['systems'].items():
print('=' * 80)
if str(system_count) not in results['systems'].keys():
print(f'No results found for {system_count} system(s)')
print('Skipping...')
continue
print(f' {system_count} System(s)')
failures = evaluate_test(baseline_results,
results['systems'][str(system_count)],
system_count,
tolerance,
failures)
if failures > 0:
print('-' * 80)
print(f'{failures} test(s) did not meet the suggested criteria!')
print('See results above for failed tests and verify setup.')
# Throw a non-zero exit status so any tools that read codes will catch
# that the baseline was not met.
sys.exit(BASELINE_FAILURE)
print('=' * 80)
|
Bobber-main
|
bobber/lib/analysis/compare_baseline.py
|
# SPDX-License-Identifier: MIT
import re
from typing import Tuple
def parse_nccl_file(log_files: list, systems: int) -> Tuple[list, list]:
"""
Find the maximum bus bandwidth and bus bytes from NCCL tests.
Parse the bandwidth at all byte sizes achieved during NCCL tests and match
the maximum bus bandwidth with the corresponding byte size from the
results. Only the maximum and corresponding byte size from each log are
returned to later find the overall average.
Parameters
----------
log_files : list
A ``list`` of ``strings`` of the filenames for all NCCL log files in
the results directory.
systems : int
An ``integer`` of the number of systems used during the current test.
Returns
-------
tuple
Returns a ``tuple`` of (``list``, ``list``) containing the maximum bus
bandwidth and the bus bytes, respectively.
"""
max_bus_bw_list = []
bus_bytes_list = []
for log in log_files:
with open(log, 'r') as f:
log_contents = f.read()
out_of_place_results = re.findall('.*float sum.*', log_contents)
results = [line.split() for line in out_of_place_results]
bytes_array = [float(result[0]) for result in results]
bus_bw_array = [float(result[6]) for result in results]
max_bus_bw_list.append(max(bus_bw_array))
max_index = bus_bw_array.index(max(bus_bw_array))
bus_bytes_list.append(bytes_array[max_index])
return max_bus_bw_list, bus_bytes_list
|
Bobber-main
|
bobber/lib/analysis/nccl.py
|
# SPDX-License-Identifier: MIT
|
Bobber-main
|
bobber/lib/analysis/__init__.py
|
# SPDX-License-Identifier: MIT
import re
def _clean_sizes(sizes: list) -> list:
"""
Remove all text from sizes.
The parser to capture sizes of various objects includes 'in bytes: ' in the
string which should be stripped, leaving only numbers.
Parameters
----------
sizes : list
A ``list`` of ``strings`` of sizes of various objects.
Returns
-------
list
Returns a ``list`` of ``integers`` of sizes of various objects.
"""
return [int(size.replace('in bytes: ', '')) for size in sizes]
def _size_parsing(log_contents: str) -> dict:
"""
Capture the image and directory size for image data.
Parse the image and directory size for all images generated using
Imageinary. It is assumed that the image and directory size are identical
for both the TFRecord and standard JPEG images of similar sizes.
Parameters
----------
log_contents : str
A ``string`` of the contents from a DALI log file.
Returns
-------
dict
Returns a ``dictionary`` of image size information for all image sizes
and formats.
Raises
------
ValueError
Raises a ``ValueError`` if the log file does not contain size
information.
"""
results_sub_dict = {
'image size': 0,
'size unit': 'B',
'directory size': 0,
'min images/second': 0,
'average images/second': 0,
'min bandwidth': 0,
'average bandwidth': 0,
'bandwidth unit': 'bytes/second'
}
results = {
'800x600 standard jpg': results_sub_dict.copy(),
'3840x2160 standard jpg': results_sub_dict.copy(),
'800x600 tfrecord': results_sub_dict.copy(),
'3840x2160 tfrecord': results_sub_dict.copy()
}
image_size = re.findall('First image size from .*\n.*', log_contents)
if len(image_size) != 4:
raise ValueError('Error: Incomplete DALI file. Missing information on'
' file sizes')
for line in image_size:
sizes = re.findall(r'in bytes: \d+', line)
if len(sizes) != 2:
raise ValueError('Error: Missing data sizes in DALI log file.')
image_size, directory_size = _clean_sizes(sizes)
if '3840x2160' in line:
results['3840x2160 standard jpg']['image size'] = image_size
results['3840x2160 standard jpg']['directory size'] = \
directory_size
results['3840x2160 tfrecord']['image size'] = image_size
results['3840x2160 tfrecord']['directory size'] = directory_size
elif '800x600' in line:
results['800x600 standard jpg']['image size'] = image_size
results['800x600 standard jpg']['directory size'] = directory_size
results['800x600 tfrecord']['image size'] = image_size
results['800x600 tfrecord']['directory size'] = directory_size
return results
def _average(input_list: list) -> float:
"""
Find the average of a list.
Given a list of numbers, calculate the average of all values in the list.
If the list is empty, default to 0.0.
Parameters
----------
input_list : list
A ``list`` of ``floats`` to find an average of.
Returns
-------
float
Returns a ``float`` of the average value of the list.
"""
try:
return float(sum(input_list) / len(input_list))
except ZeroDivisionError:
return 0.0
def _update_results(image_type_match: dict, results: list) -> dict:
"""
Update image dictionary with throughput and bandwidth.
Find the minimum and average throughput and bandwidth for a particular
image size and type by processing a list of all corresponding results.
Parameters
----------
image_type_match : dict
A ``dictionary`` of the throughput and bandwidth for a particular image
size and type.
results : list
A ``list`` of ``floats`` representing results from the experiment runs.
Returns
-------
dict
An updated ``dictionary`` of the throughput and bandwidth for a
particular image size and type.
"""
size = image_type_match['image size']
image_type_match['min images/second'] = min(results)
image_type_match['average images/second'] = _average(results)
image_type_match['min bandwidth'] = size * min(results)
image_type_match['average bandwidth'] = size * _average(results)
return image_type_match
def _result_parsing(log_contents: str, systems: int, image_results: dict,
log_file: str) -> dict:
"""
Parse the throughput results from the log file.
Given a log file, find all of the results for each of the four test runs
including both standard JPEG and TFRecord formats for 800x600 and 4K
images. Each section starts with 'RUN 1/1' and runs for 11 epochs before
printing 'OK' once complete. The result sections are in a strict order,
allowing us to deterministically match results with the corresponding
image size and type:
0: 800x600 Standard File Read
1: 3840x2160 Standard File Read
2: 800x600 TFRecord
3: 3840x2160 TFRecord
Parameters
----------
log_contents : str
A ``string`` of the contents from a DALI log file.
systems : int
An ``integer`` of the number of systems used during the current test.
image_results : dict
A ``dictionary`` of image size information for all image sizes and
formats.
log_file : str
A ``string`` of the name of the log file being parsed.
Returns
-------
dict
Returns an updated ``dictionary`` of image size information for all
image sizes and formats.
"""
# The result sections are in a strict order, allowing us to
# deterministically match results with the corresponding image size and
# type:
# 0: 800x600 Standard File Read
# 1: 3840x2160 Standard File Read
# 2: 800x600 TFRecord
# 3: 3840x2160 TFRecord
image_type_match = [
image_results['800x600 standard jpg'],
image_results['3840x2160 standard jpg'],
image_results['800x600 tfrecord'],
image_results['3840x2160 tfrecord']
]
test_sections = re.findall(r'RUN 1/1.*?OK', log_contents, re.DOTALL)
if len(test_sections) != 4:
print(f'Warning: Invalid number of results found in {log_file} log '
'file. Skipping...')
return {}
for num, section in enumerate(test_sections):
result_lines = re.findall('.*img/s', section)
all_speeds = []
for line in result_lines:
speed = re.sub('.*speed: ', '', line)
speed = float(speed.replace(' [img/s', ''))
all_speeds.append(speed)
# Per standard practice, the first N results for N systems are treated
# as a warmup and discarded. Occasionally, the timing of results will
# be off, and one node will report its second test pass before all
# nodes have finished the first. To accommodate this, the lowest N
# results are assumed to be the first test pass and are dropped.
all_speeds = sorted(all_speeds)[systems:]
image_type_match[num] = _update_results(image_type_match[num],
all_speeds)
# Rebuild the dictionary based on the updated results.
image_results = {
'800x600 standard jpg': image_type_match[0],
'3840x2160 standard jpg': image_type_match[1],
'800x600 tfrecord': image_type_match[2],
'3840x2160 tfrecord': image_type_match[3]
}
return image_results
def _combine_results(results: list, systems: int) -> dict:
"""
Aggregate all results for N-systems.
Find the average throughput, bandwidth, and size for all iterations
combined and create a single object which can be used to easily reference
results.
Parameters
----------
results : list
A ``list`` of ``dicts`` for all results from a particular test.
systems : int
An ``integer`` of the number of systems used during the current test.
Returns
-------
dict
Returns a ``dictionary`` of the final aggregate results for all
iterations for N-nodes for all image types and sizes.
"""
system_results = {}
for image_type in ['800x600 standard jpg',
'3840x2160 standard jpg',
'800x600 tfrecord',
'3840x2160 tfrecord']:
avg_min_speed, avg_avg_speed = [], []
avg_min_bw, avg_avg_bw = [], []
avg_img_size, avg_dir_size = [], []
for result in results:
if image_type not in result:
continue
avg_min_speed.append(result[image_type]['min images/second'])
avg_avg_speed.append(result[image_type]['average images/second'])
avg_min_bw.append(result[image_type]['min bandwidth'])
avg_avg_bw.append(result[image_type]['average bandwidth'])
avg_img_size.append(result[image_type]['image size'])
avg_dir_size.append(result[image_type]['directory size'])
# Multiply the average in all performance categories by the number of
# systems tested to get an average aggregate throughput result for the
# cluster.
system_results[image_type] = {
'image size': _average(avg_img_size),
'size unit': 'B',
'directory size': _average(avg_dir_size),
'min images/second': _average(avg_min_speed) * systems,
'average images/second': _average(avg_avg_speed) * systems,
'min bandwidth': _average(avg_min_bw) * systems,
'average bandwidth': _average(avg_avg_bw) * systems,
'bandwidth unit': 'bytes/second'
}
return system_results
def parse_dali_file(log_files: list, systems: int, results_dict: dict) -> dict:
"""
Parse the aggregate DALI results for N-systems.
Search through each DALI log for N-systems and find the minimum and average
throughput and bandwidth for all four of the DALI tests of various image
sizes and formats.
Parameters
----------
log_files : list
A ``list`` of ``strings`` where each element is a filepath to a log
file.
systems : int
An ``integer`` of the current number of systems to aggregate results
for.
results_dict : dict
A ``dictionary`` of the aggregate test results for all system counts.
Returns
-------
dict
An updated ``dictionary`` of the aggregate test results including the
newly-parsed results for N-systems.
"""
results = []
for log in log_files:
with open(log, 'r') as f:
log_contents = f.read()
image_results = _size_parsing(log_contents)
results.append(_result_parsing(log_contents,
systems,
image_results,
log))
results_dict[systems] = _combine_results(results, systems)
return results_dict
|
Bobber-main
|
bobber/lib/analysis/dali.py
|
# SPDX-License-Identifier: MIT
import re
from collections import defaultdict
from typing import Tuple
class bcolors:
"""
A helper class to annotate text with colors.
"""
PASS = '\033[92m' # nosec
WARNING = '\033[93m'
FAIL = '\033[91m'
BOLD = '\033[1m'
ENDC = '\033[0m'
def num_systems(log: str) -> int:
"""
Returns an ``integer`` of the number of systems that were tested during a
particular run.
Parameters
----------
log : str
A ``string`` of the filename for a single log.
Returns
-------
int
Returns an ``int`` of the number of systems that were tested for the
given logfile. Defaults to None if not found.
"""
try:
systems = re.findall(r'systems_\d+_', log)
systems = re.findall(r'\d+', systems[0])
return int(systems[0])
    except (IndexError, ValueError):
return None
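# --- Editorial sketch (not part of the original module) ----------------------
# A hypothetical log filename showing the `systems_<N>_` token that
# num_systems() extracts; the name is illustrative only.
def _num_systems_sketch() -> int:
    example_log = 'stg_iops_iteration_1_systems_4_version_6_3_1.log'
    return num_systems(example_log)  # -> 4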
def _bobber_version(log: str) -> str:
"""
Returns a ``string`` representation of the Bobber version tested, such as
'6.3.1'.
Parameters
----------
log : str
A ``string`` of the filename for a single log.
Returns
-------
str
Returns a ``string`` of the Bobber version tested, such as '6.3.1'.
Raises
------
ValueError
Raises a ``ValueError`` if the version cannot be parsed from the log
file.
"""
version = re.findall(r'version_\d+_\d+_\d+', log)
if len(version) != 1:
raise ValueError(f'Could not parse Bobber version from {log} file!')
version = version[0].replace('version_', '')
return version.replace('_', '.')
def check_bobber_version(logs: list, override: bool) -> str:
"""
Ensure the Bobber version matches in all logs being parsed.
    As a safeguard against mixing results from different Bobber versions, the
    version needs to be checked for all logs to ensure they are equal. Each
    log's version is compared with the previously captured version; if every
    comparison matches, all of the logs are guaranteed to use the same
    version.
Parameters
----------
logs : list
A ``list`` of strings of all of the log filenames in the directory that
was passed.
override : bool
A ``boolean`` which evaluates to ``True`` when the version-checking
should be skipped.
Returns
-------
str
Returns a ``string`` of the Bobber version being tested.
Raises
------
ValueError
Raises a ``ValueError`` when any log versions don't match.
"""
last_version = None
for log in logs:
version = _bobber_version(log)
if override:
return version
if last_version and version != last_version:
raise ValueError('Error: Only logs using the same Bobber version '
'are allowed in the results directory.')
last_version = version
return version
def _convert_to_bytes(value: str) -> float:
"""
Convert a number to bytes.
Convert a passed number to bytes by parsing the number from the passed
string and multiplying by the appropriate multiplier to convert from a
larger unit to bytes.
Parameters
----------
value : str
A ``string`` of the value to convert to bytes.
Returns
-------
float
Returns a ``float`` of the final value in bytes.
"""
number = float(re.sub('[a-zA-Z]*', '', value))
if 'gib' in value.lower():
return number * 1024 * 1024 * 1024
elif 'g' in value.lower():
return number * 1e9
elif 'mib' in value.lower():
return number * 1024 * 1024
elif 'm' in value.lower():
return number * 1e6
elif 'kib' in value.lower():
return number * 1024
    elif 'k' in value.lower():
        return number * 1e3
    # Values without a recognized unit suffix are assumed to already be in
    # bytes.
    return number
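# --- Editorial sketch (not part of the original module) ----------------------
# Illustrative conversions performed by _convert_to_bytes(); the inputs are
# hypothetical and simply show the binary vs. decimal unit handling above.
def _convert_to_bytes_sketch() -> dict:
    return {
        '1GiB': _convert_to_bytes('1GiB'),  # 1073741824.0 (binary multiple)
        '1G': _convert_to_bytes('1G'),      # 1000000000.0 (decimal multiple)
        '4M': _convert_to_bytes('4M'),      # 4000000.0
        '125k': _convert_to_bytes('125k'),  # 125000.0
    }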
def _fio_command_parse(command: str) -> dict:
"""
Parse the command parameters for fio.
Pull all of the flags and parameters used during a fio run and save them as
a dictionary to make it easier to reference what was used during a test.
Parameters
----------
command : str
A ``string`` of the command used during the fio run.
Returns
-------
dict
Returns a ``dictionary`` of the parameters used during the fio run.
"""
parameter_dict = {}
for parameter in command.split():
# Skip the following parameters as they don't provide meaningful data.
if parameter == '/usr/bin/fio':
continue
key, value = parameter.split('=')
key = key.replace('--', '')
if key in ['blocksize', 'size']:
value = _convert_to_bytes(value)
else:
# Attempt to convert to a int for numerical values. If it fails,
# keep as a string as that's likely intended type.
try:
value = int(value)
except ValueError:
value = str(value)
parameter_dict[key] = value
return parameter_dict
def _compare_dicts(old_results: dict, new_results: dict) -> bool:
"""
Compare testing dictionaries for equality.
Compare the dictionaries for equality while ignoring the 'directory' and
'command' keys since these will always differ amongst tests. If all other
parameters are equal, it is assumed the tests used the same parameters.
Parameters
----------
old_results : dict
A ``dictionary`` of the test parameters used during the
previously-parsed test log.
new_results : dict
A ``dictionary`` of the test parameters used during the test log
currently being parsed.
Returns
-------
bool
Returns a ``boolean`` which evaluates to `True` when all of the
parameters are equal between the two dictionaries and `False` if at
        least one parameter is different.
"""
ignore_keys = ['directory', 'command']
old = dict((k, v) for k, v in old_results.items() if k not in ignore_keys)
new = dict((k, v) for k, v in new_results.items() if k not in ignore_keys)
return old == new
def fio_command_details(log_contents: str, old_reads: dict,
old_writes: dict) -> Tuple[dict, dict]:
"""
Parse the command parameters and compare with the previous log.
Pull the fio parameters used for both the read and write commands during
the tests and compare them with the previous log file that was parsed to
ensure all tests being parsed are using the same parameters.
Parameters
----------
log_contents : str
A ``string`` of all the output inside a log file.
old_reads : dict
A ``dictionary`` of the previous read test parameters that were parsed.
old_writes : dict
A ``dictionary`` of the previous write test parameters that were
parsed.
Returns
-------
tuple
Returns a ``tuple`` of (``dict``, ``dict``) where each dictionary are
the parsed read and write parameters, respectively, from the tests.
Raises
------
ValueError
Raises a ``ValueError`` if the fio command type is unexpected or the
parameters differ between two or more tests.
"""
commands = re.findall(r'/usr/bin/fio --rw.*', log_contents)
if len(commands) < 2:
        raise ValueError('FIO command not found in log file!')
for command in commands:
if '--rw=read' in command:
read_params = _fio_command_parse(command)
read_params['command'] = command
elif '--rw=write' in command:
write_params = _fio_command_parse(command)
write_params['command'] = command
elif '--rw=randread' in command:
read_params = _fio_command_parse(command)
read_params['command'] = command
elif '--rw=randwrite' in command:
write_params = _fio_command_parse(command)
write_params['command'] = command
else:
raise ValueError('Unexpected FIO test type. Expected '
'read, write, randread, or randwrite.')
if old_reads and old_writes:
if not _compare_dicts(old_reads, read_params) or \
not _compare_dicts(old_writes, write_params):
raise ValueError('Parameters differ between tests. Ensure only '
'tests with the same parameters are used.')
return read_params, write_params
def divide_logs_by_systems(log_files: list, log_to_match: str) -> dict:
"""
Extract logs on a per-system basis.
Given a list of all logs in a directory and a string to match for the log
files, extract all of the requested logs and group them together on a
per-system basis. For example, matching 'stg_iops' will pull all of the
IOPS test logs and combine all of the one-node IOPS logs in a list, then
all of the two-node IOPS logs in another list, and so on.
Parameters
----------
log_files : list
A ``list`` of log filenames from the passed directory to parse.
log_to_match : str
A ``string`` of the logs to match in the directory. 'stg_iops' will
match all logs that begin with 'stg_iops'.
Returns
-------
dict
Returns a ``dictionary`` of all results where the key is the number of
nodes being tested and the value is a list of all of the logs that
match the filter for that system count.
"""
# Divide the results based on the number of systems tested.
num_systems_dict = defaultdict(list)
for log in log_files:
if log_to_match not in log:
continue
systems = num_systems(log)
num_systems_dict[systems].append(log)
return num_systems_dict
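# --- Editorial sketch (not part of the original module) ----------------------
# Hypothetical filenames showing how divide_logs_by_systems() groups logs by
# the node count embedded in each name; the names are illustrative only.
def _divide_logs_sketch() -> dict:
    logs = [
        'stg_iops_iteration_1_systems_1_version_6_3_1.log',
        'stg_iops_iteration_2_systems_1_version_6_3_1.log',
        'stg_iops_iteration_1_systems_2_version_6_3_1.log',
        'stg_bw_iteration_1_systems_2_version_6_3_1.log',  # filtered out
    ]
    # Expected grouping: {1: [<two 1-node logs>], 2: [<one 2-node log>]}
    return divide_logs_by_systems(logs, 'stg_iops')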
|
Bobber-main
|
bobber/lib/analysis/common.py
|
# SPDX-License-Identifier: MIT
import numpy as np
import operator
from bobber.lib.analysis.common import bcolors
from tabulate import tabulate
from typing import NoReturn, Tuple
FIO_READ_BW = f'{bcolors.BOLD}FIO Read (GB/s) - 1MB BS{bcolors.ENDC}'
FIO_WRITE_BW = f'{bcolors.BOLD}FIO Write (GB/s) - 1MB BS{bcolors.ENDC}'
FIO_READ_IOP = f'{bcolors.BOLD}FIO Read (k IOPS) - 4K BS{bcolors.ENDC}'
FIO_WRITE_IOP = f'{bcolors.BOLD}FIO Write (k IOPS) - 4K BS{bcolors.ENDC}'
FIO_125K_READ_BW = f'{bcolors.BOLD}FIO Read (GB/s) - 125K BS{bcolors.ENDC}'
FIO_125K_WRITE_BW = f'{bcolors.BOLD}FIO Write (GB/s) - 125K BS{bcolors.ENDC}'
NCCL = f'{bcolors.BOLD}NCCL Max BW (GB/s){bcolors.ENDC}'
DALI_IMG_SM = (f'{bcolors.BOLD}DALI Standard 800x600 throughput '
f'(images/second){bcolors.ENDC}')
DALI_IMG_SM_BW = (f'{bcolors.BOLD}DALI Standard 800x600 bandwidth '
f'(GB/s){bcolors.ENDC}')
DALI_IMG_LG = (f'{bcolors.BOLD}DALI Standard 3840x2160 throughput '
f'(images/second){bcolors.ENDC}')
DALI_IMG_LG_BW = (f'{bcolors.BOLD}DALI Standard 3840x2160 bandwidth '
f'(GB/s){bcolors.ENDC}')
DALI_TF_SM = (f'{bcolors.BOLD}DALI TFRecord 800x600 throughput '
f'(images/second){bcolors.ENDC}')
DALI_TF_SM_BW = (f'{bcolors.BOLD}DALI TFRecord 800x600 bandwidth '
f'(GB/s){bcolors.ENDC}')
DALI_TF_LG = (f'{bcolors.BOLD}DALI TFRecord 3840x2160 throughput '
f'(images/second){bcolors.ENDC}')
DALI_TF_LG_BW = (f'{bcolors.BOLD}DALI TFRecord 3840x2160 bandwidth '
f'(GB/s){bcolors.ENDC}')
def bytes_to_gb(number: float) -> float:
"""
Convert bytes to gigabytes.
Parameters
----------
number : float
A ``float`` in bytes.
Returns
-------
float
Returns a ``float`` of the number in gigabytes.
"""
return round(number * 1e-9, 3)
def iops_to_kiops(number: float) -> float:
"""
Convert iops to k-iops.
Parameters
----------
number : float
A ``float`` in iops.
Returns
-------
float
Returns a ``float`` of the number in k-iops.
"""
return round(number * 1e-3, 3)
def scale(values: list) -> float:
"""
Calculate the scaling factor of results.
    Calculate the scale factor by finding the slope of the line of best fit,
    dividing it by the first value in the results, and adding 1.
Parameters
----------
values : list
A ``list`` of ``floats`` to calculate the scale factor for.
Returns
-------
float
Returns a ``float`` of the scaling factor.
"""
x = np.array(range(1, len(values) + 1))
y = np.array(values)
slope, _ = np.polyfit(x, y, 1)
return slope / values[0] + 1.0
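# --- Editorial sketch (not part of the original module) ----------------------
# Hypothetical throughput values showing the scale-factor formula above: a
# perfectly linear series gains the single-node value with every added node,
# so the slope equals values[0] and scale() returns roughly 2.0.
def _scale_sketch() -> float:
    perfectly_linear = [10.0, 20.0, 30.0, 40.0]  # 1 to 4 nodes
    return round(scale(perfectly_linear), 2)     # -> 2.0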
def fio_bw(results: list) -> Tuple[list, list]:
"""
Save the FIO bandwidth read and write results.
Save the read and write results from the FIO bandwidth tests on an
increasing per-system basis with the first element in the list being the
column header.
Parameters
----------
results : list
A ``list`` of ``dictionaries`` containing all results from the tests.
Returns
-------
tuple
Returns a ``tuple`` of (``list``, ``list``) containing the read and
write bandwidth results, respectively.
"""
try:
read = [FIO_READ_BW] + [bytes_to_gb(result[1]['bandwidth']['read'])
for result in results]
write = [FIO_WRITE_BW] + [bytes_to_gb(result[1]['bandwidth']['write'])
for result in results]
except KeyError:
return []
else:
return [read, write]
def fio_iops(results: list) -> Tuple[list, list]:
"""
Save the FIO IOPS read and write results.
Save the read and write results from the FIO IOPS tests on an increasing
per-system basis with the first element in the list being the column
header.
Parameters
----------
results : list
A ``list`` of ``dictionaries`` containing all results from the tests.
Returns
-------
tuple
Returns a ``tuple`` of (``list``, ``list``) containing the read and
write IOPS results, respectively.
"""
try:
read = [FIO_READ_IOP] + [iops_to_kiops(result[1]['iops']['read'])
for result in results]
write = [FIO_WRITE_IOP] + [iops_to_kiops(result[1]['iops']['write'])
for result in results]
except KeyError:
return []
else:
return [read, write]
def fio_125k_bw(results: list) -> Tuple[list, list]:
"""
Save the FIO 125k bandwidth read and write results.
Save the read and write results from the FIO 125k bandwidth tests on an
increasing per-system basis with the first element in the list being the
column header.
Parameters
----------
results : list
A ``list`` of ``dictionaries`` containing all results from the tests.
Returns
-------
tuple
Returns a ``tuple`` of (``list``, ``list``) containing the read and
write 125k bandwidth results, respectively.
"""
try:
read = [FIO_125K_READ_BW] + [bytes_to_gb(result[1]['125k_bandwidth']
['read'])
for result in results]
write = [FIO_125K_WRITE_BW] + [bytes_to_gb(result[1]['125k_bandwidth']
['write'])
for result in results]
except KeyError:
return []
else:
return [read, write]
def nccl(results: list) -> list:
"""
Save the NCCL results.
Save the maximum bus bandwidth results from the NCCL tests on an increasing
per-system basis with the first element in the list being the column
header.
Parameters
----------
results : list
A ``list`` of dictionaries containing all results from the tests.
Returns
-------
list
Returns a ``list`` of the NCCL max bus bandwidth results.
"""
try:
nccl = [NCCL] + [round(result[1]['nccl']['max_bus_bw'], 3)
for result in results]
except KeyError:
return []
else:
return [nccl]
def dali(results: list) -> Tuple[list, list, list, list, list, list, list,
list]:
"""
Save the DALI results.
Save the throughput and bandwidth results from the DALI tests on an
increasing per-system basis with the first element in the list being the
column header.
Parameters
----------
results : list
A ``list`` of dictionaries containing all results from the tests.
Returns
-------
tuple
Returns a ``tuple`` of eight ``lists`` containing the throughput
followed by bandwidth for small and large standard images, then small
and large TFRecords.
"""
try:
img_sm = [DALI_IMG_SM] + [result[1]['dali']['800x600 standard jpg']
['average images/second']
for result in results]
img_sm_bw = [DALI_IMG_SM_BW] + [bytes_to_gb(result[1]['dali']
['800x600 standard jpg']
['average bandwidth'])
for result in results]
img_lg = [DALI_IMG_LG] + [result[1]['dali']['3840x2160 standard jpg']
['average images/second']
for result in results]
img_lg_bw = [DALI_IMG_LG_BW] + [bytes_to_gb(result[1]['dali']
['3840x2160 standard jpg']
['average bandwidth'])
for result in results]
tf_sm = [DALI_TF_SM] + [result[1]['dali']['800x600 tfrecord']
['average images/second']
for result in results]
tf_sm_bw = [DALI_TF_SM_BW] + [bytes_to_gb(result[1]['dali']
['800x600 tfrecord']
['average bandwidth'])
for result in results]
tf_lg = [DALI_TF_LG] + [result[1]['dali']['3840x2160 tfrecord']
['average images/second']
for result in results]
tf_lg_bw = [DALI_TF_LG_BW] + [bytes_to_gb(result[1]['dali'][
'3840x2160 tfrecord']
['average bandwidth'])
for result in results]
except KeyError:
return []
else:
return [img_sm, img_sm_bw, img_lg, img_lg_bw, tf_sm, tf_sm_bw, tf_lg,
tf_lg_bw]
def add_scale(data: list) -> NoReturn:
"""
Add the scaling factor to results.
Iterate through all results and append the scaling factor to each of the
categories, if applicable. Results that have a scaling factor greater than
1.9x are marked GREEN, results greater than 1.5 are marked YELLOW, and all
other results are RED.
Parameters
----------
data : list
A ``list`` of ``lists`` of all categories of results.
"""
for subset in data:
# No results in the data - just the test category name
if len(subset) < 2:
continue
# Scaling can't be calculated for NCCL as it has a different behavior
# from other tests. For single-node only tests, there is nothing to
# measure for scaling. Both scenarios should be ignored for calculating
# scale factor.
if 'nccl' in subset[0].lower() or len(subset) == 2:
subset += ['N/A']
continue
values = subset[1:]
scale_val = round(scale(values), 2)
if scale_val > 1.9:
scale_text = f'{bcolors.PASS}{scale_val}X{bcolors.ENDC}'
elif scale_val > 1.5:
scale_text = f'{bcolors.WARNING}{scale_val}X{bcolors.ENDC}'
else:
scale_text = f'{bcolors.FAIL}{scale_val}X{bcolors.ENDC}'
subset += [scale_text]
def display_table(json_results: dict) -> NoReturn:
"""
Display results in tabular format.
Find the results on a per-system basis for all categories and display the
resulting scaling factor.
Parameters
----------
json_results : dict
A ``dictionary`` of the final results that have been parsed from the
results directory.
"""
data = []
headers = [f'{bcolors.BOLD}Test{bcolors.ENDC}'] + \
[f'{bcolors.BOLD}{num} Node(s){bcolors.ENDC}'
for num in sorted(json_results['systems'])] + \
[f'{bcolors.BOLD}Scale{bcolors.ENDC}']
results = sorted(json_results['systems'].items())
data += fio_bw(results)
data += fio_iops(results)
data += fio_125k_bw(results)
data += nccl(results)
data += dali(results)
add_scale(data)
print(tabulate(data, headers=headers, tablefmt='grid', numalign='right'))
print()
|
Bobber-main
|
bobber/lib/analysis/table.py
|
# SPDX-License-Identifier: MIT
import re
from bobber.lib.analysis.common import fio_command_details
from typing import Tuple
def clean_iops(iops: str) -> float:
"""
Convert the IOPS into an equivalent operations/second result.
Parse the IOPS value from the input string and convert the value from a
larger unit to an equivalent operations/second, if applicable.
Parameters
----------
iops : str
A ``string`` of the number of operations/second and resulting unit.
Returns
-------
float
Returns a ``float`` of the final IOPS value in operations/second.
"""
    number = float(re.findall(r'\d+(?:\.\d+)?', iops)[0])
if 'G' in iops:
ops_per_second = number * 1e9
elif 'M' in iops:
ops_per_second = number * 1e6
elif 'k' in iops:
ops_per_second = number * 1e3
else:
ops_per_second = number
return ops_per_second
def clean_bw(bandwidth: str) -> float:
"""
Convert the bandwidth into an equivalent bytes/second result.
Parse the bandwidth value from the input string and convert the value from
    a larger unit to an equivalent bytes/second, if applicable.
Parameters
----------
bandwidth : str
A ``string`` of the bandwidth and unit from the test.
Returns
-------
float
Returns a ``float`` of the final bandwidth in bytes/second.
"""
number = float(re.findall(r'(\d+(?:\.\d+)?)', bandwidth)[0])
if 'GB/s' in bandwidth:
bytes_per_second = number * 1e9
elif 'MB/s' in bandwidth:
bytes_per_second = number * 1e6
elif 'kb/s' in bandwidth.lower():
bytes_per_second = number * 1e3
else:
bytes_per_second = number
return bytes_per_second
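# --- Editorial sketch (not part of the original module) ----------------------
# Illustrative inputs for clean_iops() and clean_bw(); the strings mimic the
# tokens pulled from FIO output but the values are hypothetical.
def _clean_units_sketch() -> tuple:
    iops = clean_iops('150k')          # -> 150000.0 operations/second
    bandwidth = clean_bw('(3.2GB/s)')  # -> ~3.2e9 bytes/second
    return iops, bandwidth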
def fio_bw_results(log_contents: str, systems: int, string_to_match: str,
log: str) -> list:
"""
Capture the bandwidth results from the log files.
Search the log for any lines containing a bandwidth value and return a
final list of all of the parsed values.
Parameters
----------
log_contents : str
A ``string`` of the contents from an FIO log file.
systems : int
An ``integer`` of the number of systems used during the current test.
string_to_match : str
A regex ``string`` of the line to pull from the log file to match any
bandwidth lines.
log : str
A ``string`` of the name of the log file being parsed.
Returns
-------
list
Returns a ``list`` of ``floats`` representing all of the bandwidth
values parsed from the log.
Raises
------
ValueError
Raises a ``ValueError`` if the bandwidth cannot be parsed from the log
file.
"""
final_bw = []
match = re.findall(string_to_match, log_contents)
if len(match) != systems:
print(f'Warning: Invalid number of results found in {log} log file. '
'Skipping...')
return []
for result in match:
bw = re.findall(r'\(\d+[kMG]B/s\)', result)
if len(bw) != 1:
bw = re.findall(r'\(\d+\.\d+[kMG]B/s\)', result)
if len(bw) != 1:
raise ValueError('Bandwidth cannot be parsed from FIO log!')
bw = clean_bw(bw[0])
final_bw.append(bw)
return final_bw
def fio_iops_results(log_contents: str, systems: int, string_to_match: str,
log: str) -> list:
"""
Capture the IOPS results from the log files.
Search the log for any lines containing IOPS values and return a final list
of all of the parsed values. The FIO IOPS tests print an extra line for
    multi-node tests, which is subsequently dropped.
Parameters
----------
log_contents : str
A ``string`` of the contents from an FIO log file.
systems : int
An ``integer`` of the number of systems used during the current test.
string_to_match : str
A regex ``string`` of the line to pull from the log file to match any
IOPS lines.
log : str
A ``string`` of the name of the log file being parsed.
Returns
-------
list
Returns a ``list`` of ``floats`` representing all of the IOPS values
parsed from the log.
Raises
------
ValueError
Raises a ``ValueError`` if the IOPS cannot be parsed from the log
file.
"""
final_iops = []
match = re.findall(string_to_match, log_contents)
if (systems == 1 and len(match) != systems) or \
(systems != 1 and len(match) != systems + 1):
print(f'Warning: Invalid number of results found in {log} log file. '
'Skipping...')
return []
for result in match:
iops = re.findall(r'[-+]?\d*\.\d+[kMG]|\d+[kMG]|\d+', result)
if len(iops) not in [5, 6]:
raise ValueError('IOPS cannot be parsed from FIO log!')
iops = clean_iops(iops[0])
final_iops.append(iops)
# For multi-system benchmarks, an extra IOPS line is included with
# semi-aggregate results, but needs to be dropped from our results for a
# more accurate analysis.
if systems != 1:
final_iops = final_iops[:-1]
return final_iops
def parse_fio_bw_file(log_files: list, systems: int, read_system_results: dict,
write_system_results: dict) -> Tuple[dict, dict, dict,
dict]:
"""
Parse the FIO bandwidth results and test parameters.
Search all log files for read and write parameters used to initiate the
test and the final results and return the resulting objects.
Parameters
----------
log_files : list
A ``list`` of ``strings`` of the filenames of all FIO bandwidth logs in
the results directory.
systems : int
An ``integer`` of the number of systems used during the current test.
read_system_results : dict
A ``dictionary`` of the final read results for N-systems.
write_system_results : dict
A ``dictionary`` of the final write results for N-systems.
Returns
-------
tuple
A ``tuple`` of four dictionaries containing the read results, write
results, read parameters, and write parameters, respectively.
"""
read_params, write_params = None, None
for log in log_files:
with open(log, 'r') as f:
log_contents = f.read()
read_params, write_params = fio_command_details(log_contents,
read_params,
write_params)
write_bw = fio_bw_results(log_contents, systems, 'WRITE: bw=.*', log)
if write_bw == []:
continue
read_bw = fio_bw_results(log_contents, systems, 'READ: bw=.*', log)
write_system_results[systems].append(sum(write_bw))
read_system_results[systems].append(sum(read_bw))
return read_system_results, write_system_results, read_params, write_params
def parse_fio_iops_file(log_files: list, systems: int,
read_system_results: dict,
write_system_results: dict) -> Tuple[dict, dict, dict,
dict]:
"""
Parse the FIO IOPS results and test parameters.
Search all log files for read and write parameters used to initiate the
test and the final results and return the resulting objects.
Parameters
----------
log_files : list
A ``list`` of ``strings`` of the filenames of all FIO IOPS logs in the
results directory.
systems : int
An ``integer`` of the number of systems used during the current test.
read_system_results : dict
A ``dictionary`` of the final read results for N-systems.
write_system_results : dict
A ``dictionary`` of the final write results for N-systems.
Returns
-------
tuple
A ``tuple`` of four dictionaries containing the read results, write
results, read parameters, and write parameters, respectively.
"""
read_params, write_params = None, None
for log in log_files:
with open(log, 'r') as f:
log_contents = f.read()
read_params, write_params = fio_command_details(log_contents,
read_params,
write_params)
write_iops = fio_iops_results(log_contents, systems, 'write: IOPS=.*',
log)
read_iops = fio_iops_results(log_contents, systems, 'read: IOPS=.*',
log)
write_system_results[systems].append(sum(write_iops))
read_system_results[systems].append(sum(read_iops))
return read_system_results, write_system_results, read_params, write_params
|
Bobber-main
|
bobber/lib/analysis/fio.py
|
# SPDX-License-Identifier: MIT
import re
from argparse import ArgumentParser, Namespace
from glob import glob
from os.path import join
from typing import NoReturn, Tuple
class Aggregate:
"""
    Find the aggregate results from multiple iterations.
Parameters
----------
epoch_zero_speeds : list
A ``list`` of ``floats`` of the first epoch speeds.
epoch_zero_times : list
A ``list`` of ``floats`` of the epoch zero times.
elapsed_times : list
A ``list`` of ``floats`` of the overall elapsed time.
average_speeds : list
A ``list`` of ``floats`` of the overall average speeds.
"""
def __init__(self, epoch_zero_speeds: list, epoch_zero_times: list,
elapsed_times: list, average_speeds: list) -> NoReturn:
self.epoch_zero_speeds = epoch_zero_speeds
self.epoch_zero_times = epoch_zero_times
self.elapsed_times = elapsed_times
self.average_speeds = average_speeds
class Results:
"""
The results from a single test run.
Parameters
----------
epoch_zero_speed : float
A ``float`` of the first epoch speed.
epoch_zero_time : float
A ``float`` of the epoch zero time.
elapsed_time : float
A ``float`` of the overall elapsed time.
average_speed : float
A ``float`` of the overall average speed.
"""
def __init__(self, epoch_zero_speed: float, epoch_zero_time: float,
elapsed_time: float, average_speed: float) -> NoReturn:
self.epoch_zero_speed = epoch_zero_speed
self.epoch_zero_time = epoch_zero_time
self.elapsed_time = elapsed_time
self.average_speed = average_speed
def parse_args() -> Namespace:
"""
Parse arguments passed to the MLPerf parser.
Returns
-------
Namespace
Returns a ``Namespace`` of all of the arguments that were parsed from
the application during runtime.
"""
parser = ArgumentParser(description='Parse MLPerf results')
parser.add_argument('directory', type=str, help='The directory where '
'MLPerf log results are saved.')
return parser.parse_args()
def average(list_to_average: list) -> float:
"""
Find the average of a list.
Given a list of numbers, calculate the average of all values in the list.
If the list is empty, default to 0.0.
Parameters
----------
list_to_average : list
A ``list`` of ``floats`` to find an average of.
Returns
-------
float
Returns a ``float`` of the average value of the list.
"""
try:
return round(sum(list_to_average) / len(list_to_average), 3)
except ZeroDivisionError:
return 0.0
def ms_to_seconds(time: float) -> float:
"""
Convert milliseconds to seconds.
Parameters
----------
time : float
A ``float`` of time in milliseconds.
Returns
-------
float
Returns a ``float`` of the converted time in seconds.
"""
return round(time / 1000, 3)
def ms_to_minutes(time: float) -> float:
"""
Convert milliseconds to minutes.
Parameters
----------
time : float
A ``float`` of time in milliseconds.
Returns
-------
float
Returns a ``float`` of the converted time in minutes.
"""
return round(time / 1000 / 60, 3)
def get_files(directory: str) -> list:
"""
Read all log files.
Given an input directory as a string, read all log files and return the
filenames including the directory as a list.
Parameters
----------
directory : str
A ``string`` pointing to the results directory.
Returns
-------
list
Returns a ``list`` of ``strings`` of the paths to each log file in the
results directory.
"""
return glob(join(directory, '*.log'))
def parse_epoch_line(line: str) -> Tuple[int, float]:
"""
Parse the throughput for each epoch.
Pull the images/second and epoch for each results line in an MLPerf log.
Parameters
----------
line : str
A ``string`` of a results line in an MLPerf log.
Returns
-------
tuple
Returns a ``tuple`` of (``int``, ``float``) of the epoch number and
resulting speed in images/second.
"""
# Lines are in the format:
# "Epoch[NUM] Batch [NUM-NUM] Speed: NUM.NUM samples/sec accuracy=NUM.NUM"
epoch = re.findall(r'\[\d+\]', line)[0].replace('[', '').replace(']', '')
speed = re.findall(r'Speed: .* samples', line)
if len(speed) == 1:
speed = speed[0].replace('Speed: ', '').replace(' samples', '')
return int(epoch), float(speed)
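# --- Editorial sketch (not part of the original module) ----------------------
# A hypothetical MLPerf results line in the format documented above;
# parse_epoch_line() yields the epoch number and the samples/second value.
def _parse_epoch_line_sketch() -> Tuple[int, float]:
    line = 'Epoch[2] Batch [100-200] Speed: 1450.25 samples/sec accuracy=0.76'
    return parse_epoch_line(line)  # -> (2, 1450.25)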
def parse_time(line: str) -> int:
"""
Parse the timestamp from a line in the log.
Parameters
----------
line : str
A ``string`` of a line in an MLPerf log file.
Returns
-------
int
Returns an ``int`` of the parsed timestamp.
"""
return int(re.findall(r'\d+', line)[0])
def parse_epoch_values(logfile: str) -> Tuple[list, list]:
"""
Parse the epoch and throughput lines.
Find all of the lines that contain a throughput and save the first epoch
and overall epoch results in lists.
Parameters
----------
logfile : str
A ``string`` of all contents from a logfile.
Returns
-------
tuple
Returns a ``tuple`` of (``list``, ``list``) containing the first epoch
results followed by all results.
"""
epoch_zero_vals, all_epoch_vals = [], []
epoch_values = re.findall(r'Epoch\[\d+\] Batch.*', logfile)
for value in epoch_values:
epoch, speed = parse_epoch_line(value)
all_epoch_vals.append(speed)
if epoch == 0:
epoch_zero_vals.append(speed)
return epoch_zero_vals, all_epoch_vals
def parse_epoch_times(logfile: str) -> Tuple[int, int]:
"""
Parse the time for each epoch.
Find the overall time it takes to complete each epoch by finding the
difference in milliseconds.
Parameters
----------
logfile : str
A ``string`` of all contents from a logfile.
Returns
-------
tuple
        Returns a ``tuple`` of (``int``, ``int``) of the time, in milliseconds,
        taken during the first epoch and the overall elapsed time for the test.
"""
epoch_start_times = re.findall(r'time_ms.*?epoch_start', logfile)
epoch_stop_times = re.findall(r'time_ms.*?epoch_stop', logfile)
# The epoch 0 time is the difference between the timestamp where epoch 0
# ended, and the timestamp where epoch 0 began.
epoch_zero_time = parse_time(epoch_stop_times[0]) - \
parse_time(epoch_start_times[0])
# The total elapsed time is the difference between the timestamp of when
# the final epoch ended, and the timestamp where epoch 0 began.
elapsed_time = parse_time(epoch_stop_times[-1]) - \
parse_time(epoch_start_times[0])
return epoch_zero_time, elapsed_time
def parse_file(logfile: str) -> object:
"""
Parse a single MLPerf file.
Find the first epoch and overall results for a single MLPerf file and
create a singular object to represent the results.
Parameters
----------
logfile : str
A ``string`` of all contents from a logfile.
Returns
-------
Results instance
Returns an instance of the Results class.
"""
epoch_zero_vals, all_epoch_vals = parse_epoch_values(logfile)
epoch_zero_time, elapsed_time = parse_epoch_times(logfile)
results = Results(average(epoch_zero_vals),
epoch_zero_time,
elapsed_time,
average(all_epoch_vals))
return results
def find_num_nodes(logfile: str) -> int:
"""
Find the number of nodes tested.
Parameters
----------
logfile : str
A ``string`` of all contents from a logfile.
Returns
-------
int
Returns an ``integer`` of the number of nodes tested.
"""
clear_cache_command = re.findall(r'srun.*Clearing cache on ', logfile)
if len(clear_cache_command) == 0:
print('Unable to find number of nodes tested. Assuming single node.')
return 1
n_tasks = re.findall(r'ntasks=\d+', clear_cache_command[0])
num_nodes = n_tasks[0].replace('ntasks=', '')
    return int(num_nodes)
def find_filesystem_test_path(logfile: str) -> str:
"""
Parse the filesystem path from the log file.
The 'container-mounts=...' line in each log file contains the location of
the shared filesystem.
Parameters
----------
    logfile : str
A ``string`` of all contents from a logfile.
Returns
-------
str
Returns a ``string`` of the location of the filesystem.
"""
container_mounts_line = re.findall(r'container-mounts=\S*:/data', logfile)
if len(container_mounts_line) == 0:
print('Unable to find container mount directory. Leaving empty.')
return '<Unknown>'
container_data_mount = container_mounts_line[0].replace(
'container-mounts=', '')
return container_data_mount
def read_files(logfiles: list) -> Tuple[object, int, str]:
"""
Read all MLPerf files and find aggregate results.
Read all log files in a directory and determine the average speed and time
taken to process images for both the first epoch and all results combined.
Parameters
----------
logfiles : list
A ``list`` of the filepaths for all log files in an input directory.
Returns
-------
tuple
Returns a ``tuple`` of an instance of the Aggregate class, the number
of nodes tested, and the path to the filesystem under test.
"""
all_results = []
prev_nodes_found = None
prev_filesystem_test_path = None
for filename in logfiles:
with open(filename, 'r') as logpointer:
log = logpointer.read()
results = parse_file(log)
all_results.append(results)
nodes_tested = find_num_nodes(log)
filesystem_test_path = find_filesystem_test_path(log)
if prev_nodes_found and nodes_tested != prev_nodes_found:
raise ValueError('Error: Mixed node sizes found in log files!')
if prev_filesystem_test_path and \
filesystem_test_path != prev_filesystem_test_path:
raise ValueError('Error: Mixed test paths found in log files!')
prev_nodes_found = nodes_tested
prev_filesystem_test_path = filesystem_test_path
aggregate = Aggregate(
[result.epoch_zero_speed for result in all_results],
[result.epoch_zero_time for result in all_results],
[result.elapsed_time for result in all_results],
[result.average_speed for result in all_results]
)
return aggregate, nodes_tested, filesystem_test_path
def print_averages(results: object, directory: str, nodes_tested: int,
filesystem_test_path: str) -> NoReturn:
"""
Print the average results.
Print the average time and speed for epoch 0 and all results, plus test
information including the log directory and the location of the filesystem
under test.
Parameters
----------
results : object
        An instance of the Aggregate class containing the combined results
        from all parsed test logs.
directory : str
A ``string`` of the passed directory where results were saved.
nodes_tested : int
An ``int`` of the number of nodes that were tested for a file.
filesystem_test_path : str
A ``string`` of the path to the filesystem under test.
"""
e_zero_speed = average(results.epoch_zero_speeds)
e_zero_time = ms_to_seconds(average(results.epoch_zero_times))
overall_speed = average(results.average_speeds)
overall_time = ms_to_minutes(average(results.elapsed_times))
output = f"""MLPerf Results:
Log directory name: {directory}
Filesystem test path: {filesystem_test_path}
Number of iterations: {len(results.epoch_zero_speeds)}
Nodes tested: {nodes_tested}
Epoch 0:
Speed: {e_zero_speed} images/second
Average time: {e_zero_time} seconds
Overall:
Speed: {overall_speed} images/second
Average time: {overall_time} minutes"""
print(output)
def main() -> NoReturn:
"""
Parse MLPerf test results.
"""
args = parse_args()
logfiles = get_files(args.directory)
aggregate, nodes_tested, filesystem_test_path = read_files(logfiles)
print_averages(aggregate, args.directory, nodes_tested,
filesystem_test_path)
if __name__ == '__main__':
main()
|
Bobber-main
|
bobber/lib/analysis/parse-mlperf.py
|
# SPDX-License-Identifier: MIT
import re
def avg(stats: list) -> float:
"""
Find the average of a list.
Given a list of numbers, calculate the average of all values in the list.
If the list is empty, default to 0.0.
Parameters
----------
    stats : list
A ``list`` of ``floats`` to find an average of.
Returns
-------
float
Returns a ``float`` of the average value of the list.
"""
if len(stats) > 0:
return sum(stats) / len(stats)
else:
return 0.0
def pull_stats(summary: list) -> dict:
"""
Convert stats to a dictionary.
Each line in the summary table in the log file needs to be parsed by first
converting the table to a comma-separated list for easy parsing, then
taking the first column as the statistical category and placing the
remaining values into maximum, minimum, mean, and standard deviation.
Parameters
----------
summary : list
A ``list`` of ``strings`` representing each line in the summary table
of the metadata file.
Returns
-------
dict
Returns a ``dictionary`` of the converted table.
"""
results = {}
for stat in summary:
# Convert the table to a comma-separated list to make it easier to
# parse.
stat = stat.replace(':', '')
stat_csv = re.sub(' +', ',', stat.strip())
components = stat_csv.split(',')
key, max_val, min_val, mean, stdev = components
results[key] = {
'max': float(max_val),
'min': float(min_val),
'mean': float(mean),
'stdev': float(stdev)
}
return results
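# --- Editorial sketch (not part of the original module) ----------------------
# A hypothetical single-row summary in the whitespace-separated layout that
# pull_stats() expects; real metadata logs may differ, so this is only an
# assumed illustration of the max/min/mean/stdev conversion.
def _pull_stats_sketch() -> dict:
    summary = ['creation :     1200.5     1100.2     1150.3     40.1']
    # -> {'creation': {'max': 1200.5, 'min': 1100.2, 'mean': 1150.3,
    #                  'stdev': 40.1}}
    return pull_stats(summary)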
def parse_summary(log_contents: str) -> list:
"""
Pull the summary table from the metadata log.
The bottom of the metadata log contains a summary table with all of the
individual metadata operations and the results from the test. This table is
denoted by a line of '-' signs and is ended with '-- finished'. Since these
lines are used to make parsing easier, they should be dropped in the end.
Parameters
----------
log_contents : str
A ``string`` of the contents of the entire contents of a metadata log
file.
Returns
-------
list
Returns a ``list`` of ``strings`` representing each line in the summary
table.
"""
summary = re.findall('--------- .*-- finished',
log_contents, re.DOTALL)
if len(summary) == 0:
return None
# `summary` is a single-element list where the element is a list of all of
    # the metadata stats. The first and last lines are unnecessary as they are
# only used to parse the table and can be dropped.
summary = summary[0].split('\n')[1:-1]
return summary
def aggregate_results(combined_results: list) -> dict:
"""
Find the aggregate results for all categories.
Parse every result from the metadata log files and capture the min, max,
and mean for each operation for all iterations in a single object.
Parameters
----------
combined_results : list
A ``list`` of ``dictionaries`` containing the results from each summary
table in each log file.
Returns
-------
dict
Returns a ``dictionary`` of the final aggregate results for each
operation in the summary tables of all logs.
"""
final_aggregate = {}
if len(combined_results) == 0:
return final_aggregate
for key, stats in combined_results[0].items():
key_metrics = [stat[key] for stat in combined_results]
final_aggregate[key] = {
'max': max([result['max'] for result in key_metrics]),
'min': min([result['min'] for result in key_metrics]),
'mean': avg([result['mean'] for result in key_metrics])
}
return final_aggregate
def parse_meta_file(log_files: list, systems: int, results: dict) -> dict:
"""
Parse the metadata results from the metadata logs.
Search through each metadata log and extract the operations in the summary
table, saving the aggregate results in a dictionary.
Parameters
----------
log_files : list
A ``list`` of ``strings`` of the filename of each metadata log file in
the results directory.
systems : int
An ``integer`` of the number of systems used during the current test.
results : dict
A ``dictionary`` of the aggregate metadata results for each system
count.
Returns
-------
dict
Returns an updated ``dictionary`` including the aggregate metadata
results for N-systems.
"""
combined_results = []
for log in log_files:
with open(log, 'r') as f:
log_contents = f.read()
summary = parse_summary(log_contents)
if not summary:
print(f'Warning: Invalid results found in {log} log file.')
print('Skipping...')
continue
stats = pull_stats(summary)
combined_results.append(stats)
results[systems] = aggregate_results(combined_results)
return results
|
Bobber-main
|
bobber/lib/analysis/meta.py
|
# SPDX-License-Identifier: MIT
import os
from argparse import Namespace
from bobber.lib.constants import (
RUN_ALL,
RUN_DALI,
RUN_NCCL,
RUN_STG_BW,
RUN_STG_IOPS,
RUN_STG_125K,
RUN_STG_META
)
from bobber.lib.docker import manager
from time import sleep
from typing import NoReturn
def run_dali(args: Namespace, bobber_version: str, iteration: int,
hosts: str) -> NoReturn:
"""
Run single or multi-node DALI tests.
Run a single or multi-node DALI test which reads random image data in from
designated storage and loads it onto local resources after preprocessing
that is typically done for ResNet50 pipelines.
Parameters
----------
args : Namespace
A ``Namespace`` of all settings specified by the user for the test.
bobber_version : string
A ``string`` of the local version of Bobber, such as '5.0.0'.
iteration : int
An ``int`` of the local test number, starting at 1.
hosts : string
A comma-separated list of hostnames to test against, such as
'host1,host2,host3,host4'.
"""
dali_log = os.path.join(args.log_path,
f'dali_iteration_{iteration}_'
f'gpus_{args.gpus}_'
f'batch_size_lg_{args.batch_size_lg}_'
f'batch_size_sm_{args.batch_size_sm}_'
f'systems_{len(hosts.split(","))}_'
f'version_{bobber_version}.log')
environment = {
'BATCH_SIZE_LG': args.batch_size_lg,
'BATCH_SIZE_SM': args.batch_size_sm,
'GPUS': args.gpus,
'HOSTS': hosts,
'SSH_IFACE': args.ssh_iface
}
manager.execute('tests/dali_multi.sh',
environment=environment,
log_file=dali_log)
if args.pause > 0:
sleep(args.pause)
def run_stg_bw(args: Namespace, bobber_version: str, iteration: int,
hosts: str) -> NoReturn:
"""
Run single or multi-node storage bandwidth tests with FIO.
Run a single or multi-node storage bandwidth test with FIO which first
writes data to the filesystem with 1MB block size and 4GB file size,
followed by reading the data back.
Parameters
----------
args : Namespace
A ``Namespace`` of all settings specified by the user for the test.
bobber_version : string
A ``string`` of the local version of Bobber, such as '5.0.0'.
iteration : int
An ``int`` of the local test number, starting at 1.
hosts : string
A comma-separated list of hostnames to test against, such as
'host1,host2,host3,host4'.
"""
stg_bw_log = os.path.join(args.log_path,
f'stg_bw_iteration_{iteration}_'
f'threads_{args.bw_threads}_'
f'direct_{args.direct}_'
f'depth_{args.io_depth}_'
f'read_pattern_{args.read_pattern}_'
f'write_pattern_{args.write_pattern}_'
f'systems_{len(hosts.split(","))}_'
f'version_{bobber_version}.log')
environment = {
'EXTRA_FLAGS': args.stg_extra_flags,
'IO_DEPTH': args.io_depth,
'DIRECTIO': args.direct,
'THREADS': args.bw_threads,
'READ_PATTERN': args.read_pattern,
'WRITE_PATTERN': args.write_pattern,
'HOSTS': hosts
}
manager.execute('tests/fio_multi.sh',
environment=environment,
log_file=stg_bw_log)
if args.pause > 0:
sleep(args.pause)
def run_stg_125k(args: Namespace, bobber_version: str, iteration: int,
hosts: str) -> NoReturn:
"""
Run single or multi-node storage 125KB IO size tests with FIO.
Run a single or multi-node storage bandwidth test with FIO which first
writes data to the filesystem with 125KB block size and 4GB file size,
followed by reading the data back.
Parameters
----------
args : Namespace
A ``Namespace`` of all settings specified by the user for the test.
bobber_version : string
A ``string`` of the local version of Bobber, such as '5.0.0'.
iteration : int
An ``int`` of the local test number, starting at 1.
hosts : string
A comma-separated list of hostnames to test against, such as
'host1,host2,host3,host4'.
"""
stg_125k_log = os.path.join(args.log_path,
f'stg_125k_iteration_{iteration}_'
f'threads_{args.stg_125k_threads}_'
f'direct_{args.direct}_'
f'depth_{args.io_depth}_'
f'systems_{len(hosts.split(","))}_'
f'version_{bobber_version}.log')
environment = {
'EXTRA_FLAGS': args.stg_extra_flags,
'IO_DEPTH': args.io_depth,
'IOSIZE': 125,
'DIRECTIO': args.direct,
'THREADS': args.stg_125k_threads,
'READ_PATTERN': args.read_pattern,
'WRITE_PATTERN': args.write_pattern,
'HOSTS': hosts
}
manager.execute('tests/fio_multi.sh',
environment=environment,
log_file=stg_125k_log)
if args.pause > 0:
sleep(args.pause)
def run_stg_iops(args: Namespace, bobber_version: str, iteration: int,
hosts: str) -> NoReturn:
"""
Run single or multi-node storage IOPS tests with FIO.
Run a single or multi-node storage IOPS test with FIO which first writes
data to the filesystem with 4kB block size and 4GB file size, followed by
reading the data back.
Parameters
----------
args : Namespace
A ``Namespace`` of all settings specified by the user for the test.
bobber_version : string
A ``string`` of the local version of Bobber, such as '5.0.0'.
iteration : int
An ``int`` of the local test number, starting at 1.
hosts : string
A comma-separated list of hostnames to test against, such as
'host1,host2,host3,host4'.
"""
stg_iops_log = os.path.join(args.log_path,
f'stg_iops_iteration_{iteration}_'
f'threads_{args.iops_threads}_'
f'direct_{args.direct}_'
f'depth_{args.io_depth}_'
f'read_pattern_{args.read_pattern}_'
f'write_pattern_{args.write_pattern}_'
f'systems_{len(hosts.split(","))}_'
f'version_{bobber_version}.log')
environment = {
'EXTRA_FLAGS': args.stg_extra_flags,
'IO_DEPTH': args.io_depth,
'DIRECTIO': args.direct,
'THREADS': args.iops_threads,
'IOSIZE': 4,
'READ_PATTERN': args.read_pattern,
'WRITE_PATTERN': args.write_pattern,
'HOSTS': hosts
}
manager.execute('tests/fio_multi.sh',
environment=environment,
log_file=stg_iops_log)
if args.pause > 0:
sleep(args.pause)
def run_stg_meta(args: Namespace, bobber_version: str, iteration: int,
hosts: str) -> NoReturn:
"""
Run single or multi-node storage metadata test with FIO.
Run a single or multi-node storage metadata test with FIO which tests
various metadata operation performance for the filesystem.
Parameters
----------
args : Namespace
A ``Namespace`` of all settings specified by the user for the test.
bobber_version : string
A ``string`` of the local version of Bobber, such as '5.0.0'.
iteration : int
An ``int`` of the local test number, starting at 1.
hosts : string
A comma-separated list of hostnames to test against, such as
'host1,host2,host3,host4'.
"""
stg_meta_log = os.path.join(args.log_path,
f'stg_meta_iteration_{iteration}_'
f'systems_{len(hosts.split(","))}_'
f'version_{bobber_version}.log')
environment = {
'HOSTS': hosts,
'SSH_IFACE': args.ssh_iface,
'NCCL_IB_HCAS': args.nccl_ib_hcas
}
manager.execute('tests/mdtest_multi.sh',
environment=environment,
log_file=stg_meta_log)
if args.pause > 0:
sleep(args.pause)
def run_nccl(args: Namespace, bobber_version: str, iteration: int,
hosts: str) -> NoReturn:
"""
Run single or multi-node NCCL test.
Run a single or multi-node NCCL test which verifies network and GPU
performance and communication.
Parameters
----------
args : Namespace
A ``Namespace`` of all settings specified by the user for the test.
bobber_version : string
A ``string`` of the local version of Bobber, such as '5.0.0'.
iteration : int
An ``int`` of the local test number, starting at 1.
hosts : string
A comma-separated list of hostnames to test against, such as
'host1,host2,host3,host4'.
"""
nccl_log = os.path.join(args.log_path,
f'nccl_iteration_{iteration}_'
f'gpus_{args.gpus}_'
f'nccl_max_{args.nccl_max}_'
f'gid_{args.compute_gid}_'
f'nccl_tc_{args.nccl_tc}_'
f'systems_{len(hosts.split(","))}_'
f'version_{bobber_version}.log')
environment = {
'GPUS': args.gpus,
'NCCL_MAX': args.nccl_max,
'NCCL_TC': args.nccl_tc,
'COMPUTE_GID': args.compute_gid,
'HOSTS': hosts,
'SSH_IFACE': args.ssh_iface,
'NCCL_IB_HCAS': args.nccl_ib_hcas
}
manager.execute('tests/nccl_multi.sh',
environment=environment,
log_file=nccl_log)
if args.pause > 0:
sleep(args.pause)
def kickoff_test(args: Namespace, bobber_version: str, iteration: int,
hosts: str) -> NoReturn:
"""
Start a specified test.
Launch a test as requested from the CLI for the given iteration.
Parameters
----------
args : Namespace
A ``Namespace`` of all settings specified by the user for the test.
bobber_version : string
A ``string`` of the local version of Bobber, such as '5.0.0'.
iteration : int
An ``int`` of the local test number, starting at 1.
hosts : string
A comma-separated list of hostnames to test against, such as
'host1,host2,host3,host4'.
"""
if args.command == RUN_DALI:
run_dali(args, bobber_version, iteration, hosts)
elif args.command == RUN_NCCL:
run_nccl(args, bobber_version, iteration, hosts)
elif args.command == RUN_STG_BW:
run_stg_bw(args, bobber_version, iteration, hosts)
elif args.command == RUN_STG_IOPS:
run_stg_iops(args, bobber_version, iteration, hosts)
elif args.command == RUN_STG_125K:
run_stg_125k(args, bobber_version, iteration, hosts)
elif args.command == RUN_STG_META:
run_stg_meta(args, bobber_version, iteration, hosts)
elif args.command == RUN_ALL:
run_nccl(args, bobber_version, iteration, hosts)
run_stg_meta(args, bobber_version, iteration, hosts)
run_stg_bw(args, bobber_version, iteration, hosts)
run_dali(args, bobber_version, iteration, hosts)
run_stg_iops(args, bobber_version, iteration, hosts)
run_stg_125k(args, bobber_version, iteration, hosts)
def test_selector(args: Namespace, bobber_version: str) -> NoReturn:
"""
Start a test iteration.
If the user requested to run a sweep of the hosts, the tests will begin
with the first node in the hosts list for a single-node test, then
progressively add the next host in the list until all nodes are tested
together. During each iteration, one run of each requested test will be
executed before going to the next iteration.
Parameters
----------
args : Namespace
A ``Namespace`` of all settings specified by the user for the test.
bobber_version : string
A ``string`` of the local version of Bobber, such as '5.0.0'.
"""
if args.sweep:
hosts = []
for host in args.hosts.split(','):
hosts.append(host)
for iteration in range(1, args.iterations + 1):
host_string = ','.join(hosts)
kickoff_test(args, bobber_version, iteration, host_string)
else:
for iteration in range(1, args.iterations + 1):
kickoff_test(args, bobber_version, iteration, args.hosts)
|
Bobber-main
|
bobber/lib/tests/run_tests.py
|
# SPDX-License-Identifier: MIT
|
Bobber-main
|
bobber/lib/tests/__init__.py
|
# SPDX-License-Identifier: MIT
import os
import yaml
from typing import NoReturn
def create_directory(directory: str) -> NoReturn:
"""
Create a directory if it doesn't exist.
Parameters
----------
directory : string
A ``string`` of the full directory path to create if it doesn't exist.
"""
if not os.path.exists(directory):
os.makedirs(directory)
def update_log(logfile: str, contents: str) -> NoReturn:
"""
Append a log with new output from a test.
Parameters
----------
logfile : string
A ``string`` of the logfile to write data to.
contents : string
A ``string`` of the contents to append the log file with.
"""
with open(logfile, 'a') as log:
log.write(contents)
def write_file(filename: str, contents: str) -> NoReturn:
"""
Write data to a file.
Parameters
----------
filename : string
A ``string`` of the file to write data to.
contents : string
A ``string`` of the contents to write to the file.
"""
with open(filename, 'w') as fp:
fp.write(contents)
def read_yaml(filename: str) -> dict:
"""
Read a YAML file and return the contents.
Parameters
----------
filename : string
A ``string`` of the full file path to read.
Returns
-------
dict
Returns a ``dict`` representing the entire contents of the file.
"""
with open(filename, 'r') as handler:
return yaml.safe_load(handler)
|
Bobber-main
|
bobber/lib/system/file_handler.py
|
# SPDX-License-Identifier: MIT
|
Bobber-main
|
bobber/lib/system/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""configure script to get build parameters from user."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import errno
import os
import platform
import re
import subprocess
import sys
# pylint: disable=g-import-not-at-top
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top
_DEFAULT_CUDA_VERSION = '10'
_DEFAULT_CUDNN_VERSION = '7'
_DEFAULT_TENSORRT_VERSION = '5'
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
_TF_OPENCL_VERSION = '1.2'
_DEFAULT_COMPUTECPP_TOOLKIT_PATH = '/usr/local/computecpp'
_DEFAULT_TRISYCL_INCLUDE_DIR = '/usr/local/triSYCL/include'
_SUPPORTED_ANDROID_NDK_VERSIONS = [10, 11, 12, 13, 14, 15, 16, 17, 18]
_DEFAULT_PROMPT_ASK_ATTEMPTS = 10
_TF_BAZELRC_FILENAME = '.tf_configure.bazelrc'
_TF_WORKSPACE_ROOT = ''
_TF_BAZELRC = ''
_TF_CURRENT_BAZEL_VERSION = None
_TF_MIN_BAZEL_VERSION = '0.24.1'
_TF_MAX_BAZEL_VERSION = '0.26.1'
NCCL_LIB_PATHS = [
'lib64/', 'lib/powerpc64le-linux-gnu/', 'lib/x86_64-linux-gnu/', ''
]
# List of files to configure when building Bazel on Apple platforms.
APPLE_BAZEL_FILES = [
'tensorflow/lite/experimental/ios/BUILD',
'tensorflow/lite/experimental/objc/BUILD',
'tensorflow/lite/experimental/swift/BUILD'
]
# List of files to move when building for iOS.
IOS_FILES = []
class UserInputError(Exception):
pass
def is_windows():
return platform.system() == 'Windows'
def is_linux():
return platform.system() == 'Linux'
def is_macos():
return platform.system() == 'Darwin'
def is_ppc64le():
return platform.machine() == 'ppc64le'
def is_cygwin():
return platform.system().startswith('CYGWIN_NT')
def get_input(question):
try:
try:
answer = raw_input(question)
except NameError:
answer = input(question) # pylint: disable=bad-builtin
except EOFError:
answer = ''
return answer
def symlink_force(target, link_name):
"""Force symlink, equivalent of 'ln -sf'.
Args:
target: items to link to.
link_name: name of the link.
"""
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
def sed_in_place(filename, old, new):
"""Replace old string with new string in file.
Args:
filename: string for filename.
old: string to replace.
new: new string to replace to.
"""
with open(filename, 'r') as f:
filedata = f.read()
newdata = filedata.replace(old, new)
with open(filename, 'w') as f:
f.write(newdata)
def write_to_bazelrc(line):
with open(_TF_BAZELRC, 'a') as f:
f.write(line + '\n')
def write_action_env_to_bazelrc(var_name, var):
write_to_bazelrc('build --action_env %s="%s"' % (var_name, str(var)))
def run_shell(cmd, allow_non_zero=False):
if allow_non_zero:
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
output = e.output
else:
output = subprocess.check_output(cmd)
return output.decode('UTF-8').strip()
def cygpath(path):
"""Convert path from posix to windows."""
return os.path.abspath(path).replace('\\', '/')
def get_python_path(environ_cp, python_bin_path):
"""Get the python site package paths."""
python_paths = []
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
try:
library_paths = run_shell([
python_bin_path, '-c',
'import site; print("\\n".join(site.getsitepackages()))'
]).split('\n')
except subprocess.CalledProcessError:
library_paths = [
run_shell([
python_bin_path, '-c',
'from distutils.sysconfig import get_python_lib;'
'print(get_python_lib())'
])
]
all_paths = set(python_paths + library_paths)
paths = []
for path in all_paths:
if os.path.isdir(path):
paths.append(path)
return paths
def get_python_major_version(python_bin_path):
"""Get the python major version."""
return run_shell([python_bin_path, '-c', 'import sys; print(sys.version[0])'])
def setup_python(environ_cp):
"""Setup python related env variables."""
# Get PYTHON_BIN_PATH, default is the current running python.
default_python_bin_path = sys.executable
ask_python_bin_path = ('Please specify the location of python. [Default is '
'%s]: ') % default_python_bin_path
while True:
python_bin_path = get_from_env_or_user_or_default(environ_cp,
'PYTHON_BIN_PATH',
ask_python_bin_path,
default_python_bin_path)
# Check if the path is valid
if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK):
break
elif not os.path.exists(python_bin_path):
print('Invalid python path: %s cannot be found.' % python_bin_path)
else:
print('%s is not executable. Is it the python binary?' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = ''
# Convert python path to Windows style before checking lib and version
if is_windows() or is_cygwin():
python_bin_path = cygpath(python_bin_path)
# Get PYTHON_LIB_PATH
python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
if not python_lib_path:
python_lib_paths = get_python_path(environ_cp, python_bin_path)
if environ_cp.get('USE_DEFAULT_PYTHON_LIB_PATH') == '1':
python_lib_path = python_lib_paths[0]
else:
print('Found possible Python library paths:\n %s' %
'\n '.join(python_lib_paths))
default_python_lib_path = python_lib_paths[0]
python_lib_path = get_input(
'Please input the desired Python library path to use. '
'Default is [%s]\n' % python_lib_paths[0])
if not python_lib_path:
python_lib_path = default_python_lib_path
environ_cp['PYTHON_LIB_PATH'] = python_lib_path
python_major_version = get_python_major_version(python_bin_path)
if python_major_version == '2':
write_to_bazelrc('build --host_force_python=PY2')
# Convert python path to Windows style before writing into bazel.rc
if is_windows() or is_cygwin():
python_lib_path = cygpath(python_lib_path)
# Set-up env variables used by python_configure.bzl
write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
write_to_bazelrc('build --python_path=\"%s"' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = python_bin_path
  # If the chosen python_lib_path is from a path specified in the PYTHONPATH
  # variable, we need to tell bazel to include PYTHONPATH
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
if python_lib_path in python_paths:
write_action_env_to_bazelrc('PYTHONPATH', environ_cp.get('PYTHONPATH'))
# Write tools/python_bin_path.sh
with open(
os.path.join(_TF_WORKSPACE_ROOT, 'tools', 'python_bin_path.sh'),
'w') as f:
f.write('export PYTHON_BIN_PATH="%s"' % python_bin_path)
def reset_tf_configure_bazelrc():
"""Reset file that contains customized config settings."""
open(_TF_BAZELRC, 'w').close()
def cleanup_makefile():
"""Delete any leftover BUILD files from the Makefile build.
These files could interfere with Bazel parsing.
"""
makefile_download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow',
'contrib', 'makefile', 'downloads')
if os.path.isdir(makefile_download_dir):
for root, _, filenames in os.walk(makefile_download_dir):
for f in filenames:
if f.endswith('BUILD'):
os.remove(os.path.join(root, f))
def get_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None):
"""Get boolean input from user.
If var_name is not set in env, ask user to enable query_item or not. If the
response is empty, use the default.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
Returns:
boolean value of the variable.
Raises:
UserInputError: if an environment variable is set, but it cannot be
interpreted as a boolean indicator, assume that the user has made a
scripting error, and will continue to provide invalid input.
Raise the error to avoid infinitely looping.
"""
if not question:
question = 'Do you wish to build TensorFlow with %s support?' % query_item
if not yes_reply:
yes_reply = '%s support will be enabled for TensorFlow.' % query_item
if not no_reply:
no_reply = 'No %s' % yes_reply
yes_reply += '\n'
no_reply += '\n'
if enabled_by_default:
question += ' [Y/n]: '
else:
question += ' [y/N]: '
var = environ_cp.get(var_name)
if var is not None:
var_content = var.strip().lower()
true_strings = ('1', 't', 'true', 'y', 'yes')
false_strings = ('0', 'f', 'false', 'n', 'no')
if var_content in true_strings:
var = True
elif var_content in false_strings:
var = False
else:
raise UserInputError(
'Environment variable %s must be set as a boolean indicator.\n'
'The following are accepted as TRUE : %s.\n'
'The following are accepted as FALSE: %s.\n'
'Current value is %s.' %
(var_name, ', '.join(true_strings), ', '.join(false_strings), var))
while var is None:
user_input_origin = get_input(question)
user_input = user_input_origin.strip().lower()
if user_input == 'y':
print(yes_reply)
var = True
elif user_input == 'n':
print(no_reply)
var = False
elif not user_input:
if enabled_by_default:
print(yes_reply)
var = True
else:
print(no_reply)
var = False
else:
print('Invalid selection: %s' % user_input_origin)
return var
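# Illustrative note (not part of the original script): get_var() honors a
# preset environment variable and only prompts as a fallback. For example,
# with TF_NEED_CUDA=1 already exported, get_var(environ_cp, 'TF_NEED_CUDA',
# 'CUDA', False) returns True without asking; with TF_NEED_CUDA unset it asks
# "Do you wish to build TensorFlow with CUDA support? [y/N]: " and an empty
# answer selects the default (False in this example).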
def set_build_var(environ_cp,
var_name,
query_item,
option_name,
enabled_by_default,
bazel_config_name=None):
"""Set if query_item will be enabled for the build.
Ask user if query_item will be enabled. Default is used if no input is given.
Set subprocess environment variable and write to .bazelrc if enabled.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
option_name: string for option to define in .bazelrc.
enabled_by_default: boolean for default behavior.
bazel_config_name: Name for Bazel --config argument to enable build feature.
"""
var = str(int(get_var(environ_cp, var_name, query_item, enabled_by_default)))
environ_cp[var_name] = var
if var == '1':
write_to_bazelrc('build:%s --define %s=true' %
(bazel_config_name, option_name))
write_to_bazelrc('build --config=%s' % bazel_config_name)
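# Illustrative note (not part of the original script): when the feature is
# enabled, set_build_var() records both a feature define and a config alias.
# For example, answering yes to XLA via set_build_var(environ_cp,
# 'TF_ENABLE_XLA', 'XLA JIT', 'with_xla_support', True, 'xla') appends:
#   build:xla --define with_xla_support=true
#   build --config=xla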
def set_action_env_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None,
bazel_config_name=None):
"""Set boolean action_env variable.
Ask user if query_item will be enabled. Default is used if no input is given.
Set environment variable and write to .bazelrc.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
bazel_config_name: adding config to .bazelrc instead of action_env.
"""
var = int(
get_var(environ_cp, var_name, query_item, enabled_by_default, question,
yes_reply, no_reply))
if not bazel_config_name:
write_action_env_to_bazelrc(var_name, var)
elif var:
write_to_bazelrc('build --config=%s' % bazel_config_name)
environ_cp[var_name] = str(var)
def convert_version_to_int(version):
"""Convert a version number to a integer that can be used to compare.
Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The
'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.
Args:
version: a version to be converted
Returns:
An integer if converted successfully, otherwise return None.
"""
version = version.split('-')[0]
version_segments = version.split('.')
# Treat "0.24" as "0.24.0"
if len(version_segments) == 2:
version_segments.append('0')
for seg in version_segments:
if not seg.isdigit():
return None
version_str = ''.join(['%03d' % int(seg) for seg in version_segments])
return int(version_str)
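# Illustrative note (not part of the original script): convert_version_to_int
# zero-pads each numeric segment to three digits and concatenates them, so the
# resulting integers order the same way as the versions. For example:
#   convert_version_to_int('0.24.1')         -> 24001    ('000'+'024'+'001')
#   convert_version_to_int('0.25')           -> 25000    (two segments imply '.0')
#   convert_version_to_int('1.1.0-homebrew') -> 1001000  (suffix after '-' ignored)
#   convert_version_to_int('rolling')        -> None     (non-numeric segment)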
def check_bazel_version(min_version, max_version):
"""Check installed bazel version is between min_version and max_version.
Args:
min_version: string for minimum bazel version (must exist!).
max_version: string for maximum bazel version (must exist!).
Returns:
The bazel version detected.
"""
if which('bazel') is None:
print('Cannot find bazel. Please install bazel.')
sys.exit(0)
curr_version = run_shell(
['bazel', '--batch', '--bazelrc=/dev/null', 'version'])
for line in curr_version.split('\n'):
if 'Build label: ' in line:
curr_version = line.split('Build label: ')[1]
break
min_version_int = convert_version_to_int(min_version)
curr_version_int = convert_version_to_int(curr_version)
max_version_int = convert_version_to_int(max_version)
# Check if current bazel version can be detected properly.
if not curr_version_int:
print('WARNING: current bazel installation is not a release version.')
print('Make sure you are running at least bazel %s' % min_version)
return curr_version
print('You have bazel %s installed.' % curr_version)
if curr_version_int < min_version_int:
print('Please upgrade your bazel installation to version %s or higher to '
'build TensorFlow!' % min_version)
sys.exit(1)
if (curr_version_int > max_version_int and
'TF_IGNORE_MAX_BAZEL_VERSION' not in os.environ):
print('Please downgrade your bazel installation to version %s or lower to '
'build TensorFlow! To downgrade: download the installer for the old '
'version (from https://github.com/bazelbuild/bazel/releases) then '
'run the installer.' % max_version)
sys.exit(1)
return curr_version
def set_cc_opt_flags(environ_cp):
"""Set up architecture-dependent optimization flags.
  Also append CC optimization flags to bazel.rc.
Args:
environ_cp: copy of the os.environ.
"""
if is_ppc64le():
# gcc on ppc64le does not support -march, use mcpu instead
default_cc_opt_flags = '-mcpu=native'
elif is_windows():
default_cc_opt_flags = '/arch:AVX'
else:
default_cc_opt_flags = '-march=native -Wno-sign-compare'
question = ('Please specify optimization flags to use during compilation when'
' bazel option "--config=opt" is specified [Default is %s]: '
) % default_cc_opt_flags
cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS',
question, default_cc_opt_flags)
for opt in cc_opt_flags.split():
write_to_bazelrc('build --copt=%s' % opt)
# It should be safe on the same build host.
# if not is_ppc64le() and not is_windows():
# write_to_bazelrc('build:opt --host_copt=-march=native')
write_to_bazelrc('build:opt --define with_default_optimizations=true')
def set_tf_cuda_clang(environ_cp):
"""set TF_CUDA_CLANG action_env.
Args:
environ_cp: copy of the os.environ.
"""
question = 'Do you want to use clang as CUDA compiler?'
yes_reply = 'Clang will be used as CUDA compiler.'
no_reply = 'nvcc will be used as CUDA compiler.'
set_action_env_var(
environ_cp,
'TF_CUDA_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply,
bazel_config_name='cuda_clang')
def set_tf_download_clang(environ_cp):
"""Set TF_DOWNLOAD_CLANG action_env."""
question = 'Do you wish to download a fresh release of clang? (Experimental)'
yes_reply = 'Clang will be downloaded and used to compile tensorflow.'
no_reply = 'Clang will not be downloaded.'
set_action_env_var(
environ_cp,
'TF_DOWNLOAD_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply,
bazel_config_name='download_clang')
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
var_default):
"""Get var_name either from env, or user or default.
If var_name has been set as environment variable, use the preset value, else
ask for user input. If no input is provided, the default is used.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
ask_for_var: string for how to ask for user input.
var_default: default value string.
Returns:
string value for var_name
"""
var = environ_cp.get(var_name)
if not var:
var = get_input(ask_for_var)
print('\n')
if not var:
var = var_default
return var
def set_clang_cuda_compiler_path(environ_cp):
"""Set CLANG_CUDA_COMPILER_PATH."""
default_clang_path = which('clang') or ''
ask_clang_path = ('Please specify which clang should be used as device and '
'host compiler. [Default is %s]: ') % default_clang_path
while True:
clang_cuda_compiler_path = get_from_env_or_user_or_default(
environ_cp, 'CLANG_CUDA_COMPILER_PATH', ask_clang_path,
default_clang_path)
if os.path.exists(clang_cuda_compiler_path):
break
# Reset and retry
print('Invalid clang path: %s cannot be found.' % clang_cuda_compiler_path)
environ_cp['CLANG_CUDA_COMPILER_PATH'] = ''
# Set CLANG_CUDA_COMPILER_PATH
environ_cp['CLANG_CUDA_COMPILER_PATH'] = clang_cuda_compiler_path
write_action_env_to_bazelrc('CLANG_CUDA_COMPILER_PATH',
clang_cuda_compiler_path)
def prompt_loop_or_load_from_env(environ_cp,
var_name,
var_default,
ask_for_var,
check_success,
error_msg,
suppress_default_error=False,
n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS):
"""Loop over user prompts for an ENV param until receiving a valid response.
For the env param var_name, read from the environment or verify user input
until receiving valid input. When done, set var_name in the environ_cp to its
new value.
Args:
environ_cp: (Dict) copy of the os.environ.
var_name: (String) string for name of environment variable, e.g. "TF_MYVAR".
var_default: (String) default value string.
ask_for_var: (String) string for how to ask for user input.
check_success: (Function) function that takes one argument and returns a
boolean. Should return True if the value provided is considered valid. May
contain a complex error message if error_msg does not provide enough
information. In that case, set suppress_default_error to True.
error_msg: (String) String with one and only one '%s'. Formatted with each
invalid response upon check_success(input) failure.
suppress_default_error: (Bool) Suppress the above error message in favor of
one from the check_success function.
n_ask_attempts: (Integer) Number of times to query for valid input before
raising an error and quitting.
Returns:
[String] The value of var_name after querying for input.
Raises:
UserInputError: if a query has been attempted n_ask_attempts times without
success, assume that the user has made a scripting error, and will
continue to provide invalid input. Raise the error to avoid infinitely
looping.
"""
default = environ_cp.get(var_name) or var_default
full_query = '%s [Default is %s]: ' % (
ask_for_var,
default,
)
for _ in range(n_ask_attempts):
val = get_from_env_or_user_or_default(environ_cp, var_name, full_query,
default)
if check_success(val):
break
if not suppress_default_error:
print(error_msg % val)
environ_cp[var_name] = ''
else:
raise UserInputError('Invalid %s setting was provided %d times in a row. '
'Assuming to be a scripting mistake.' %
(var_name, n_ask_attempts))
environ_cp[var_name] = val
return val
def create_android_ndk_rule(environ_cp):
"""Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_ndk_path = cygpath('%s/Android/Sdk/ndk-bundle' %
environ_cp['APPDATA'])
elif is_macos():
default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
else:
default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME']
def valid_ndk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'source.properties')))
android_ndk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_HOME',
var_default=default_ndk_path,
ask_for_var='Please specify the home path of the Android NDK to use.',
check_success=valid_ndk_path,
error_msg=('The path %s or its child file "source.properties" '
'does not exist.'))
write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path)
write_action_env_to_bazelrc(
'ANDROID_NDK_API_LEVEL',
get_ndk_api_level(environ_cp, android_ndk_home_path))
def create_android_sdk_rule(environ_cp):
"""Set Android variables and write Android SDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_sdk_path = cygpath('%s/Android/Sdk' % environ_cp['APPDATA'])
elif is_macos():
default_sdk_path = '%s/library/Android/Sdk' % environ_cp['HOME']
else:
default_sdk_path = '%s/Android/Sdk' % environ_cp['HOME']
def valid_sdk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'platforms')) and
os.path.exists(os.path.join(path, 'build-tools')))
android_sdk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_SDK_HOME',
var_default=default_sdk_path,
ask_for_var='Please specify the home path of the Android SDK to use.',
check_success=valid_sdk_path,
error_msg=('Either %s does not exist, or it does not contain the '
'subdirectories "platforms" and "build-tools".'))
platforms = os.path.join(android_sdk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [x.replace('android-', '') for x in api_levels]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_sdk_home_path, 'platforms',
'android-' + api_level))
android_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_API_LEVEL',
var_default=api_levels[-1],
ask_for_var=('Please specify the Android SDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the SDK path.')
build_tools = os.path.join(android_sdk_home_path, 'build-tools')
versions = sorted(os.listdir(build_tools))
def valid_build_tools(version):
return os.path.exists(
os.path.join(android_sdk_home_path, 'build-tools', version))
android_build_tools_version = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_BUILD_TOOLS_VERSION',
var_default=versions[-1],
ask_for_var=('Please specify an Android build tools version to use. '
'[Available versions: %s]') % versions,
check_success=valid_build_tools,
error_msg=('The selected SDK does not have build-tools version %s '
'available.'))
write_action_env_to_bazelrc('ANDROID_BUILD_TOOLS_VERSION',
android_build_tools_version)
write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL', android_api_level)
write_action_env_to_bazelrc('ANDROID_SDK_HOME', android_sdk_home_path)
def get_ndk_api_level(environ_cp, android_ndk_home_path):
"""Gets the appropriate NDK API level to use for the provided Android NDK path."""
# First check to see if we're using a blessed version of the NDK.
properties_path = '%s/source.properties' % android_ndk_home_path
if is_windows() or is_cygwin():
properties_path = cygpath(properties_path)
with open(properties_path, 'r') as f:
filedata = f.read()
revision = re.search(r'Pkg.Revision = (\d+)', filedata)
if revision:
ndk_version = revision.group(1)
else:
raise Exception('Unable to parse NDK revision.')
if int(ndk_version) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
print('WARNING: The NDK version in %s is %s, which is not '
'supported by Bazel (officially supported versions: %s). Please use '
'another version. Compiling Android targets may result in confusing '
'errors.\n' %
(android_ndk_home_path, ndk_version, _SUPPORTED_ANDROID_NDK_VERSIONS))
# Now grab the NDK API level to use. Note that this is different from the
# SDK API level, as the NDK API level is effectively the *min* target SDK
# version.
platforms = os.path.join(android_ndk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [
x.replace('android-', '') for x in api_levels if 'android-' in x
]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_ndk_home_path, 'platforms',
'android-' + api_level))
android_ndk_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_API_LEVEL',
var_default='18', # 18 is required for GPU acceleration.
ask_for_var=('Please specify the (min) Android NDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the NDK path.')
return android_ndk_api_level
def set_gcc_host_compiler_path(environ_cp):
"""Set GCC_HOST_COMPILER_PATH."""
default_gcc_host_compiler_path = which('gcc') or ''
cuda_bin_symlink = '%s/bin/gcc' % environ_cp.get('CUDA_TOOLKIT_PATH')
if os.path.islink(cuda_bin_symlink):
# os.readlink is only available in linux
default_gcc_host_compiler_path = os.path.realpath(cuda_bin_symlink)
gcc_host_compiler_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='GCC_HOST_COMPILER_PATH',
var_default=default_gcc_host_compiler_path,
ask_for_var='Please specify which gcc should be used by nvcc as the host compiler.',
check_success=os.path.exists,
error_msg='Invalid gcc path. %s cannot be found.',
)
write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH', gcc_host_compiler_path)
def reformat_version_sequence(version_str, sequence_count):
"""Reformat the version string to have the given number of sequences.
For example:
Given (7, 2) -> 7.0
(7.0.1, 2) -> 7.0
(5, 1) -> 5
(5.0.3.2, 1) -> 5
Args:
version_str: String, the version string.
sequence_count: int, an integer.
Returns:
string, reformatted version string.
"""
v = version_str.split('.')
if len(v) < sequence_count:
v = v + (['0'] * (sequence_count - len(v)))
return '.'.join(v[:sequence_count])
def set_tf_cuda_paths(environ_cp):
"""Set TF_CUDA_PATHS."""
ask_cuda_paths = (
'Please specify the comma-separated list of base paths to look for CUDA '
'libraries and headers. [Leave empty to use the default]: ')
tf_cuda_paths = get_from_env_or_user_or_default(environ_cp, 'TF_CUDA_PATHS',
ask_cuda_paths, '')
if tf_cuda_paths:
environ_cp['TF_CUDA_PATHS'] = tf_cuda_paths
def set_tf_cuda_version(environ_cp):
"""Set TF_CUDA_VERSION."""
ask_cuda_version = (
'Please specify the CUDA SDK version you want to use. '
'[Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION
tf_cuda_version = get_from_env_or_user_or_default(environ_cp,
'TF_CUDA_VERSION',
ask_cuda_version,
_DEFAULT_CUDA_VERSION)
environ_cp['TF_CUDA_VERSION'] = tf_cuda_version
def set_tf_cudnn_version(environ_cp):
"""Set TF_CUDNN_VERSION."""
ask_cudnn_version = (
'Please specify the cuDNN version you want to use. '
'[Leave empty to default to cuDNN %s]: ') % _DEFAULT_CUDNN_VERSION
tf_cudnn_version = get_from_env_or_user_or_default(environ_cp,
'TF_CUDNN_VERSION',
ask_cudnn_version,
_DEFAULT_CUDNN_VERSION)
environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
"""Check compatibility between given library and cudnn/cudart libraries."""
ldd_bin = which('ldd') or '/usr/bin/ldd'
ldd_out = run_shell([ldd_bin, lib], True)
ldd_out = ldd_out.split(os.linesep)
cudnn_pattern = re.compile('.*libcudnn.so\\.?(.*) =>.*$')
cuda_pattern = re.compile('.*libcudart.so\\.?(.*) =>.*$')
cudnn = None
cudart = None
cudnn_ok = True # assume no cudnn dependency by default
cuda_ok = True # assume no cuda dependency by default
for line in ldd_out:
if 'libcudnn.so' in line:
cudnn = cudnn_pattern.search(line)
cudnn_ok = False
elif 'libcudart.so' in line:
cudart = cuda_pattern.search(line)
cuda_ok = False
if cudnn and len(cudnn.group(1)):
cudnn = convert_version_to_int(cudnn.group(1))
if cudart and len(cudart.group(1)):
cudart = convert_version_to_int(cudart.group(1))
if cudnn is not None:
cudnn_ok = (cudnn == cudnn_ver)
if cudart is not None:
cuda_ok = (cudart == cuda_ver)
return cudnn_ok and cuda_ok
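# Illustrative note (not part of the original script): is_cuda_compatible()
# parses 'ldd <lib>' output. A line such as
#   libcudart.so.10.0 => /usr/local/cuda/lib64/libcudart.so.10.0 (0x...)
# yields cudart = convert_version_to_int('10.0') (i.e. 10000000), which is
# then compared for equality against the cuda_ver argument; libraries with no
# libcudart/libcudnn dependency are accepted unconditionally.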
def set_tf_tensorrt_version(environ_cp):
"""Set TF_TENSORRT_VERSION."""
if not is_linux():
raise ValueError('Currently TensorRT is only supported on Linux platform.')
if not int(environ_cp.get('TF_NEED_TENSORRT', False)):
return
ask_tensorrt_version = (
'Please specify the TensorRT version you want to use. '
'[Leave empty to default to TensorRT %s]: ') % _DEFAULT_TENSORRT_VERSION
tf_tensorrt_version = get_from_env_or_user_or_default(
environ_cp, 'TF_TENSORRT_VERSION', ask_tensorrt_version,
_DEFAULT_TENSORRT_VERSION)
environ_cp['TF_TENSORRT_VERSION'] = tf_tensorrt_version
def set_tf_nccl_version(environ_cp):
"""Set TF_NCCL_VERSION."""
if not is_linux():
raise ValueError('Currently NCCL is only supported on Linux platform.')
if 'TF_NCCL_VERSION' in environ_cp:
return
ask_nccl_version = (
'Please specify the locally installed NCCL version you want to use. '
'[Leave empty to use http://github.com/nvidia/nccl]: ')
tf_nccl_version = get_from_env_or_user_or_default(environ_cp,
'TF_NCCL_VERSION',
ask_nccl_version, '')
environ_cp['TF_NCCL_VERSION'] = tf_nccl_version
def get_native_cuda_compute_capabilities(environ_cp):
"""Get native cuda compute capabilities.
Args:
environ_cp: copy of the os.environ.
Returns:
string of native cuda compute capabilities, separated by comma.
"""
device_query_bin = os.path.join(
environ_cp.get('CUDA_TOOLKIT_PATH'), 'extras/demo_suite/deviceQuery')
if os.path.isfile(device_query_bin) and os.access(device_query_bin, os.X_OK):
try:
output = run_shell(device_query_bin).split('\n')
pattern = re.compile('[0-9]*\\.[0-9]*')
output = [pattern.search(x) for x in output if 'Capability' in x]
output = ','.join(x.group() for x in output if x is not None)
except subprocess.CalledProcessError:
output = ''
else:
output = ''
return output
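# Illustrative note (not part of the original script): the helper above shells
# out to the CUDA sample binary extras/demo_suite/deviceQuery (when present)
# and extracts version numbers from lines containing 'Capability', e.g.
#   CUDA Capability Major/Minor version number:    7.0
# producing a comma-separated string such as '7.0' (or '7.0,6.1' on a
# multi-GPU machine) that seeds the default for TF_CUDA_COMPUTE_CAPABILITIES.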
def set_tf_cuda_compute_capabilities(environ_cp):
"""Set TF_CUDA_COMPUTE_CAPABILITIES."""
while True:
native_cuda_compute_capabilities = get_native_cuda_compute_capabilities(
environ_cp)
if not native_cuda_compute_capabilities:
default_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES
else:
default_cuda_compute_capabilities = native_cuda_compute_capabilities
ask_cuda_compute_capabilities = (
'Please specify a list of comma-separated '
'CUDA compute capabilities you want to '
'build with.\nYou can find the compute '
'capability of your device at: '
'https://developer.nvidia.com/cuda-gpus.\nPlease'
' note that each additional compute '
'capability significantly increases your '
'build time and binary size, and that '
'TensorFlow only supports compute '
'capabilities >= 3.5 [Default is: %s]: ' %
default_cuda_compute_capabilities)
tf_cuda_compute_capabilities = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDA_COMPUTE_CAPABILITIES',
ask_cuda_compute_capabilities, default_cuda_compute_capabilities)
# Check whether all capabilities from the input is valid
all_valid = True
    # Remove all whitespace characters that users may insert by accident
    # before splitting the string, as stray whitespace would cause an error
tf_cuda_compute_capabilities = ''.join(tf_cuda_compute_capabilities.split())
for compute_capability in tf_cuda_compute_capabilities.split(','):
m = re.match('[0-9]+.[0-9]+', compute_capability)
if not m:
print('Invalid compute capability: %s' % compute_capability)
all_valid = False
else:
ver = float(m.group(0))
if ver < 3.0:
print('ERROR: TensorFlow only supports CUDA compute capabilities 3.0 '
'and higher. Please re-specify the list of compute '
'capabilities excluding version %s.' % ver)
all_valid = False
if ver < 3.5:
print('WARNING: XLA does not support CUDA compute capabilities '
'lower than 3.5. Disable XLA when running on older GPUs.')
if all_valid:
break
# Reset and Retry
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = ''
# Set TF_CUDA_COMPUTE_CAPABILITIES
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = tf_cuda_compute_capabilities
write_action_env_to_bazelrc('TF_CUDA_COMPUTE_CAPABILITIES',
tf_cuda_compute_capabilities)
def set_other_cuda_vars(environ_cp):
"""Set other CUDA related variables."""
# If CUDA is enabled, always use GPU during build and test.
if environ_cp.get('TF_CUDA_CLANG') == '1':
write_to_bazelrc('build --config=cuda_clang')
else:
write_to_bazelrc('build --config=cuda')
def set_host_cxx_compiler(environ_cp):
"""Set HOST_CXX_COMPILER."""
default_cxx_host_compiler = which('g++') or ''
host_cxx_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_CXX_COMPILER',
var_default=default_cxx_host_compiler,
ask_for_var=('Please specify which C++ compiler should be used as the '
'host C++ compiler.'),
check_success=os.path.exists,
error_msg='Invalid C++ compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_CXX_COMPILER', host_cxx_compiler)
def set_host_c_compiler(environ_cp):
"""Set HOST_C_COMPILER."""
default_c_host_compiler = which('gcc') or ''
host_c_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_C_COMPILER',
var_default=default_c_host_compiler,
ask_for_var=('Please specify which C compiler should be used as the host '
'C compiler.'),
check_success=os.path.exists,
error_msg='Invalid C compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_C_COMPILER', host_c_compiler)
def set_computecpp_toolkit_path(environ_cp):
"""Set COMPUTECPP_TOOLKIT_PATH."""
def toolkit_exists(toolkit_path):
"""Check if a computecpp toolkit path is valid."""
if is_linux():
sycl_rt_lib_path = 'lib/libComputeCpp.so'
else:
sycl_rt_lib_path = ''
sycl_rt_lib_path_full = os.path.join(toolkit_path, sycl_rt_lib_path)
exists = os.path.exists(sycl_rt_lib_path_full)
if not exists:
print('Invalid SYCL %s library path. %s cannot be found' %
(_TF_OPENCL_VERSION, sycl_rt_lib_path_full))
return exists
computecpp_toolkit_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='COMPUTECPP_TOOLKIT_PATH',
var_default=_DEFAULT_COMPUTECPP_TOOLKIT_PATH,
ask_for_var=(
'Please specify the location where ComputeCpp for SYCL %s is '
'installed.' % _TF_OPENCL_VERSION),
check_success=toolkit_exists,
error_msg='Invalid SYCL compiler path. %s cannot be found.',
suppress_default_error=True)
write_action_env_to_bazelrc('COMPUTECPP_TOOLKIT_PATH',
computecpp_toolkit_path)
def set_trisycl_include_dir(environ_cp):
"""Set TRISYCL_INCLUDE_DIR."""
ask_trisycl_include_dir = ('Please specify the location of the triSYCL '
'include directory. (Use --config=sycl_trisycl '
'when building with Bazel) '
'[Default is %s]: ') % (
_DEFAULT_TRISYCL_INCLUDE_DIR)
while True:
trisycl_include_dir = get_from_env_or_user_or_default(
environ_cp, 'TRISYCL_INCLUDE_DIR', ask_trisycl_include_dir,
_DEFAULT_TRISYCL_INCLUDE_DIR)
if os.path.exists(trisycl_include_dir):
break
print('Invalid triSYCL include directory, %s cannot be found' %
(trisycl_include_dir))
# Set TRISYCL_INCLUDE_DIR
environ_cp['TRISYCL_INCLUDE_DIR'] = trisycl_include_dir
write_action_env_to_bazelrc('TRISYCL_INCLUDE_DIR', trisycl_include_dir)
def set_mpi_home(environ_cp):
"""Set MPI_HOME."""
default_mpi_home = which('mpirun') or which('mpiexec') or ''
default_mpi_home = os.path.dirname(os.path.dirname(default_mpi_home))
def valid_mpi_path(mpi_home):
exists = (
os.path.exists(os.path.join(mpi_home, 'include')) and
(os.path.exists(os.path.join(mpi_home, 'lib')) or
os.path.exists(os.path.join(mpi_home, 'lib64')) or
os.path.exists(os.path.join(mpi_home, 'lib32'))))
if not exists:
print(
'Invalid path to the MPI Toolkit. %s or %s or %s or %s cannot be found'
% (os.path.join(mpi_home, 'include'),
             os.path.join(mpi_home, 'lib'),
             os.path.join(mpi_home, 'lib64'),
             os.path.join(mpi_home, 'lib32')))
return exists
_ = prompt_loop_or_load_from_env(
environ_cp,
var_name='MPI_HOME',
var_default=default_mpi_home,
ask_for_var='Please specify the MPI toolkit folder.',
check_success=valid_mpi_path,
error_msg='',
suppress_default_error=True)
def set_other_mpi_vars(environ_cp):
"""Set other MPI related variables."""
# Link the MPI header files
mpi_home = environ_cp.get('MPI_HOME')
symlink_force('%s/include/mpi.h' % mpi_home, 'third_party/mpi/mpi.h')
  # Determine whether we use OpenMPI or MVAPICH; these require different
  # header files to be included here to keep the bazel dependency checker happy
if os.path.exists(os.path.join(mpi_home, 'include/mpi_portable_platform.h')):
symlink_force(
os.path.join(mpi_home, 'include/mpi_portable_platform.h'),
'third_party/mpi/mpi_portable_platform.h')
# TODO(gunan): avoid editing files in configure
sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI = False',
'MPI_LIB_IS_OPENMPI = True')
else:
# MVAPICH / MPICH
symlink_force(
os.path.join(mpi_home, 'include/mpio.h'), 'third_party/mpi/mpio.h')
symlink_force(
os.path.join(mpi_home, 'include/mpicxx.h'), 'third_party/mpi/mpicxx.h')
# TODO(gunan): avoid editing files in configure
sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI = True',
'MPI_LIB_IS_OPENMPI = False')
if os.path.exists(os.path.join(mpi_home, 'lib/libmpi.so')):
symlink_force(
os.path.join(mpi_home, 'lib/libmpi.so'), 'third_party/mpi/libmpi.so')
elif os.path.exists(os.path.join(mpi_home, 'lib64/libmpi.so')):
symlink_force(
os.path.join(mpi_home, 'lib64/libmpi.so'), 'third_party/mpi/libmpi.so')
elif os.path.exists(os.path.join(mpi_home, 'lib32/libmpi.so')):
symlink_force(
os.path.join(mpi_home, 'lib32/libmpi.so'), 'third_party/mpi/libmpi.so')
else:
raise ValueError(
'Cannot find the MPI library file in %s/lib or %s/lib64 or %s/lib32' %
(mpi_home, mpi_home, mpi_home))
def system_specific_test_config(env):
"""Add default build and test flags required for TF tests to bazelrc."""
write_to_bazelrc('test --flaky_test_attempts=3')
write_to_bazelrc('test --test_size_filters=small,medium')
write_to_bazelrc(
'test --test_tag_filters=-benchmark-test,-no_oss,-oss_serial')
write_to_bazelrc('test --build_tag_filters=-benchmark-test,-no_oss')
if is_windows():
if env.get('TF_NEED_CUDA', None) == '1':
write_to_bazelrc(
'test --test_tag_filters=-no_windows,-no_windows_gpu,-no_gpu')
write_to_bazelrc(
'test --build_tag_filters=-no_windows,-no_windows_gpu,-no_gpu')
else:
write_to_bazelrc('test --test_tag_filters=-no_windows,-gpu')
write_to_bazelrc('test --build_tag_filters=-no_windows,-gpu')
elif is_macos():
write_to_bazelrc('test --test_tag_filters=-gpu,-nomac,-no_mac')
write_to_bazelrc('test --build_tag_filters=-gpu,-nomac,-no_mac')
elif is_linux():
if env.get('TF_NEED_CUDA', None) == '1':
write_to_bazelrc('test --test_tag_filters=-no_gpu')
write_to_bazelrc('test --build_tag_filters=-no_gpu')
write_to_bazelrc('test --test_env=LD_LIBRARY_PATH')
else:
write_to_bazelrc('test --test_tag_filters=-gpu')
write_to_bazelrc('test --build_tag_filters=-gpu')
def set_system_libs_flag(environ_cp):
syslibs = environ_cp.get('TF_SYSTEM_LIBS', '')
if syslibs:
if ',' in syslibs:
syslibs = ','.join(sorted(syslibs.split(',')))
else:
syslibs = ','.join(sorted(syslibs.split()))
write_action_env_to_bazelrc('TF_SYSTEM_LIBS', syslibs)
if 'PREFIX' in environ_cp:
write_to_bazelrc('build --define=PREFIX=%s' % environ_cp['PREFIX'])
if 'LIBDIR' in environ_cp:
write_to_bazelrc('build --define=LIBDIR=%s' % environ_cp['LIBDIR'])
if 'INCLUDEDIR' in environ_cp:
write_to_bazelrc('build --define=INCLUDEDIR=%s' % environ_cp['INCLUDEDIR'])
def set_windows_build_flags(environ_cp):
"""Set Windows specific build options."""
# The non-monolithic build is not supported yet
write_to_bazelrc('build --config monolithic')
# Suppress warning messages
write_to_bazelrc('build --copt=-w --host_copt=-w')
# Fix winsock2.h conflicts
write_to_bazelrc(
'build --copt=-DWIN32_LEAN_AND_MEAN --host_copt=-DWIN32_LEAN_AND_MEAN '
'--copt=-DNOGDI --host_copt=-DNOGDI')
# Output more verbose information when something goes wrong
write_to_bazelrc('build --verbose_failures')
  # The host and target platforms are the same in the Windows build. So we
  # don't have to distinguish them. This avoids building the same targets twice.
write_to_bazelrc('build --distinct_host_configuration=false')
if get_var(
environ_cp, 'TF_OVERRIDE_EIGEN_STRONG_INLINE', 'Eigen strong inline',
True, ('Would you like to override eigen strong inline for some C++ '
'compilation to reduce the compilation time?'),
'Eigen strong inline overridden.', 'Not overriding eigen strong inline, '
'some compilations could take more than 20 mins.'):
# Due to a known MSVC compiler issue
# https://github.com/tensorflow/tensorflow/issues/10521
# Overriding eigen strong inline speeds up the compiling of
# conv_grad_ops_3d.cc and conv_ops_3d.cc by 20 minutes,
# but this also hurts the performance. Let users decide what they want.
write_to_bazelrc('build --define=override_eigen_strong_inline=true')
def config_info_line(name, help_text):
"""Helper function to print formatted help text for Bazel config options."""
print('\t--config=%-12s\t# %s' % (name, help_text))
def configure_ios():
"""Configures TensorFlow for iOS builds.
This function will only be executed if `is_macos()` is true.
"""
if not is_macos():
return
for filepath in APPLE_BAZEL_FILES:
existing_filepath = os.path.join(_TF_WORKSPACE_ROOT, filepath + '.apple')
renamed_filepath = os.path.join(_TF_WORKSPACE_ROOT, filepath)
symlink_force(existing_filepath, renamed_filepath)
for filepath in IOS_FILES:
filename = os.path.basename(filepath)
new_filepath = os.path.join(_TF_WORKSPACE_ROOT, filename)
symlink_force(filepath, new_filepath)
def validate_cuda_config(environ_cp):
"""Run find_cuda_config.py and return cuda_toolkit_path, or None."""
def maybe_encode_env(env):
"""Encodes unicode in env to str on Windows python 2.x."""
if not is_windows() or sys.version_info[0] != 2:
return env
for k, v in env.items():
if isinstance(k, unicode):
k = k.encode('ascii')
if isinstance(v, unicode):
v = v.encode('ascii')
env[k] = v
return env
cuda_libraries = ['cuda', 'cudnn']
if is_linux():
if int(environ_cp.get('TF_NEED_TENSORRT', False)):
cuda_libraries.append('tensorrt')
if environ_cp.get('TF_NCCL_VERSION', None):
cuda_libraries.append('nccl')
proc = subprocess.Popen(
[environ_cp['PYTHON_BIN_PATH'], 'third_party/gpus/find_cuda_config.py'] +
cuda_libraries,
stdout=subprocess.PIPE,
env=maybe_encode_env(environ_cp))
if proc.wait():
# Errors from find_cuda_config.py were sent to stderr.
print('Asking for detailed CUDA configuration...\n')
return False
config = dict(
tuple(line.decode('ascii').rstrip().split(': ')) for line in proc.stdout)
print('Found CUDA %s in:' % config['cuda_version'])
print(' %s' % config['cuda_library_dir'])
print(' %s' % config['cuda_include_dir'])
print('Found cuDNN %s in:' % config['cudnn_version'])
print(' %s' % config['cudnn_library_dir'])
print(' %s' % config['cudnn_include_dir'])
if 'tensorrt_version' in config:
print('Found TensorRT %s in:' % config['tensorrt_version'])
print(' %s' % config['tensorrt_library_dir'])
print(' %s' % config['tensorrt_include_dir'])
if config.get('nccl_version', None):
print('Found NCCL %s in:' % config['nccl_version'])
print(' %s' % config['nccl_library_dir'])
print(' %s' % config['nccl_include_dir'])
print('\n')
environ_cp['CUDA_TOOLKIT_PATH'] = config['cuda_toolkit_path']
return True
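# Illustrative note (not part of the original script): find_cuda_config.py
# prints one "key: value" pair per line (e.g. "cuda_version: 10.0"), which the
# dict(...) expression above turns into entries such as
# {'cuda_version': '10.0', 'cuda_library_dir': '/usr/local/cuda/lib64', ...};
# the example values here are placeholders, not output from an actual run. A
# non-zero exit makes this function return False, so configure falls back to
# asking for the CUDA settings in detail.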
def main():
global _TF_WORKSPACE_ROOT
global _TF_BAZELRC
global _TF_CURRENT_BAZEL_VERSION
parser = argparse.ArgumentParser()
parser.add_argument(
'--workspace',
type=str,
default=os.path.abspath(os.path.dirname(__file__)),
help='The absolute path to your active Bazel workspace.')
args = parser.parse_args()
_TF_WORKSPACE_ROOT = args.workspace
_TF_BAZELRC = os.path.join(_TF_WORKSPACE_ROOT, _TF_BAZELRC_FILENAME)
  # Make a copy of os.environ so it is clear when functions are getting and
  # setting environment variables.
environ_cp = dict(os.environ)
current_bazel_version = check_bazel_version(_TF_MIN_BAZEL_VERSION,
_TF_MAX_BAZEL_VERSION)
_TF_CURRENT_BAZEL_VERSION = convert_version_to_int(current_bazel_version)
reset_tf_configure_bazelrc()
cleanup_makefile()
setup_python(environ_cp)
if is_windows():
environ_cp['TF_NEED_OPENCL_SYCL'] = '0'
environ_cp['TF_NEED_COMPUTECPP'] = '0'
environ_cp['TF_NEED_OPENCL'] = '0'
environ_cp['TF_CUDA_CLANG'] = '0'
environ_cp['TF_NEED_TENSORRT'] = '0'
# TODO(ibiryukov): Investigate using clang as a cpu or cuda compiler on
# Windows.
environ_cp['TF_DOWNLOAD_CLANG'] = '0'
environ_cp['TF_NEED_MPI'] = '0'
environ_cp['TF_SET_ANDROID_WORKSPACE'] = '0'
if is_macos():
environ_cp['TF_NEED_TENSORRT'] = '0'
else:
environ_cp['TF_CONFIGURE_IOS'] = '0'
# The numpy package on ppc64le uses OpenBLAS which has multi-threading
# issues that lead to incorrect answers. Set OMP_NUM_THREADS=1 at
# runtime to allow the Tensorflow testcases which compare numpy
# results to Tensorflow results to succeed.
if is_ppc64le():
write_action_env_to_bazelrc('OMP_NUM_THREADS', 1)
xla_enabled_by_default = is_linux() or is_macos()
set_build_var(environ_cp, 'TF_ENABLE_XLA', 'XLA JIT', 'with_xla_support',
xla_enabled_by_default, 'xla')
set_action_env_var(
environ_cp,
'TF_NEED_OPENCL_SYCL',
'OpenCL SYCL',
False,
bazel_config_name='sycl')
if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
set_host_cxx_compiler(environ_cp)
set_host_c_compiler(environ_cp)
set_action_env_var(environ_cp, 'TF_NEED_COMPUTECPP', 'ComputeCPP', True)
if environ_cp.get('TF_NEED_COMPUTECPP') == '1':
set_computecpp_toolkit_path(environ_cp)
else:
set_trisycl_include_dir(environ_cp)
set_action_env_var(
environ_cp, 'TF_NEED_ROCM', 'ROCm', False, bazel_config_name='rocm')
if (environ_cp.get('TF_NEED_ROCM') == '1' and
'LD_LIBRARY_PATH' in environ_cp and
environ_cp.get('LD_LIBRARY_PATH') != '1'):
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
write_action_env_to_bazelrc("TF_USE_CCACHE",
environ_cp.get('TF_USE_CCACHE', '0'))
environ_cp['TF_NEED_CUDA'] = str(
int(get_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)))
if (environ_cp.get('TF_NEED_CUDA') == '1' and
'TF_CUDA_CONFIG_REPO' not in environ_cp):
set_action_env_var(
environ_cp,
'TF_NEED_TENSORRT',
'TensorRT',
False,
bazel_config_name='tensorrt')
environ_save = dict(environ_cp)
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
if validate_cuda_config(environ_cp):
cuda_env_names = [
'TF_CUDA_VERSION',
'TF_CUBLAS_VERSION',
'TF_CUDNN_VERSION',
'TF_TENSORRT_VERSION',
'TF_NCCL_VERSION',
'TF_CUDA_PATHS',
# Items below are for backwards compatibility when not using
# TF_CUDA_PATHS.
'CUDA_TOOLKIT_PATH',
'CUDNN_INSTALL_PATH',
'NCCL_INSTALL_PATH',
'NCCL_HDR_PATH',
'TENSORRT_INSTALL_PATH'
]
# Note: set_action_env_var above already writes to bazelrc.
for name in cuda_env_names:
if name in environ_cp:
write_action_env_to_bazelrc(name, environ_cp[name])
break
# Restore settings changed below if CUDA config could not be validated.
environ_cp = dict(environ_save)
set_tf_cuda_version(environ_cp)
set_tf_cudnn_version(environ_cp)
if is_linux():
set_tf_tensorrt_version(environ_cp)
set_tf_nccl_version(environ_cp)
set_tf_cuda_paths(environ_cp)
else:
raise UserInputError(
          'Invalid CUDA settings were provided %d '
'times in a row. Assuming to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
set_tf_cuda_compute_capabilities(environ_cp)
if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get(
'LD_LIBRARY_PATH') != '1':
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
set_tf_cuda_clang(environ_cp)
if environ_cp.get('TF_CUDA_CLANG') == '1':
# Ask whether we should download the clang toolchain.
set_tf_download_clang(environ_cp)
if environ_cp.get('TF_DOWNLOAD_CLANG') != '1':
# Set up which clang we should use as the cuda / host compiler.
set_clang_cuda_compiler_path(environ_cp)
else:
# Use downloaded LLD for linking.
write_to_bazelrc('build:cuda_clang --config=download_clang_use_lld')
else:
# Set up which gcc nvcc should use as the host compiler
# No need to set this on Windows
if not is_windows():
set_gcc_host_compiler_path(environ_cp)
set_other_cuda_vars(environ_cp)
else:
# CUDA not required. Ask whether we should download the clang toolchain and
# use it for the CPU build.
set_tf_download_clang(environ_cp)
# SYCL / ROCm / CUDA are mutually exclusive.
# At most 1 GPU platform can be configured.
gpu_platform_count = 0
if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
gpu_platform_count += 1
if environ_cp.get('TF_NEED_ROCM') == '1':
gpu_platform_count += 1
if environ_cp.get('TF_NEED_CUDA') == '1':
gpu_platform_count += 1
if gpu_platform_count >= 2:
    raise UserInputError('SYCL / CUDA / ROCm are mutually exclusive. '
'At most 1 GPU platform can be configured.')
set_build_var(environ_cp, 'TF_NEED_MPI', 'MPI', 'with_mpi_support', False)
if environ_cp.get('TF_NEED_MPI') == '1':
set_mpi_home(environ_cp)
set_other_mpi_vars(environ_cp)
set_cc_opt_flags(environ_cp)
set_system_libs_flag(environ_cp)
if is_windows():
set_windows_build_flags(environ_cp)
# Add a config option to build TensorFlow 2.0 API.
write_to_bazelrc('build:v2 --define=tf_api_version=2')
if get_var(environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace', False,
('Would you like to interactively configure ./WORKSPACE for '
'Android builds?'), 'Searching for NDK and SDK installations.',
'Not configuring the WORKSPACE for Android builds.'):
create_android_ndk_rule(environ_cp)
create_android_sdk_rule(environ_cp)
system_specific_test_config(os.environ)
set_action_env_var(environ_cp, 'TF_CONFIGURE_IOS', 'iOS', False)
if environ_cp.get('TF_CONFIGURE_IOS') == '1':
configure_ios()
print('Preconfigured Bazel build configs. You can use any of the below by '
'adding "--config=<>" to your build command. See .bazelrc for more '
'details.')
config_info_line('mkl', 'Build with MKL support.')
config_info_line('monolithic', 'Config for mostly static monolithic build.')
config_info_line('gdr', 'Build with GDR support.')
config_info_line('verbs', 'Build with libverbs support.')
config_info_line('ngraph', 'Build with Intel nGraph support.')
config_info_line('numa', 'Build with NUMA support.')
config_info_line(
'dynamic_kernels',
'(Experimental) Build kernels into separate shared objects.')
config_info_line('v2', 'Build TensorFlow 2.x instead of 1.x.')
print('Preconfigured Bazel build configs to DISABLE default on features:')
config_info_line('noaws', 'Disable AWS S3 filesystem support.')
config_info_line('nogcp', 'Disable GCP support.')
config_info_line('nohdfs', 'Disable HDFS support.')
config_info_line('noignite', 'Disable Apache Ignite support.')
config_info_line('nokafka', 'Disable Apache Kafka support.')
config_info_line('nonccl', 'Disable NVIDIA NCCL support.')
if __name__ == '__main__':
main()
|
tensorflow-r1.15.5-nv23.03
|
configure.py
|
tensorflow-r1.15.5-nv23.03
|
third_party/__init__.py
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Expands CMake variables in a text file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
_CMAKE_DEFINE_REGEX = re.compile(r"\s*#cmakedefine\s+([A-Za-z_0-9]*)(\s.*)?$")
_CMAKE_DEFINE01_REGEX = re.compile(r"\s*#cmakedefine01\s+([A-Za-z_0-9]*)")
_CMAKE_VAR_REGEX = re.compile(r"\${([A-Za-z_0-9]*)}")
def _parse_args(argv):
"""Parses arguments with the form KEY=VALUE into a dictionary."""
result = {}
for arg in argv:
k, v = arg.split("=")
result[k] = v
return result
def _expand_variables(input_str, cmake_vars):
"""Expands ${VARIABLE}s in 'input_str', using dictionary 'cmake_vars'.
Args:
input_str: the string containing ${VARIABLE} expressions to expand.
cmake_vars: a dictionary mapping variable names to their values.
Returns:
The expanded string.
"""
def replace(match):
if match.group(1) in cmake_vars:
return cmake_vars[match.group(1)]
return ""
return _CMAKE_VAR_REGEX.sub(replace, input_str)
def _expand_cmakedefines(line, cmake_vars):
"""Expands #cmakedefine declarations, using a dictionary 'cmake_vars'."""
# Handles #cmakedefine lines
match = _CMAKE_DEFINE_REGEX.match(line)
if match:
name = match.group(1)
suffix = match.group(2) or ""
if name in cmake_vars:
return "#define {}{}\n".format(name,
_expand_variables(suffix, cmake_vars))
else:
return "/* #undef {} */\n".format(name)
# Handles #cmakedefine01 lines
match = _CMAKE_DEFINE01_REGEX.match(line)
if match:
name = match.group(1)
value = cmake_vars.get(name, "0")
return "#define {} {}\n".format(name, value)
# Otherwise return the line unchanged.
return _expand_variables(line, cmake_vars)
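# Illustrative note (not part of the original script): with an example mapping
# cmake_vars = {"LLVM_ENABLE_ZLIB": "1"}, the function above rewrites
#   '#cmakedefine LLVM_ENABLE_ZLIB'   -> '#define LLVM_ENABLE_ZLIB'
#   '#cmakedefine SOME_OTHER_FLAG'    -> '/* #undef SOME_OTHER_FLAG */'
#   '#cmakedefine01 LLVM_ENABLE_ZLIB' -> '#define LLVM_ENABLE_ZLIB 1'
# and expands ${VAR} references in all remaining lines.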
def main():
cmake_vars = _parse_args(sys.argv[1:])
for line in sys.stdin:
sys.stdout.write(_expand_cmakedefines(line, cmake_vars))
if __name__ == "__main__":
main()
|
tensorflow-r1.15.5-nv23.03
|
third_party/llvm/expand_cmake_vars.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Prints CUDA library and header directories and versions found on the system.
The script searches for CUDA library and header files on the system, inspects
them to determine their version and prints the configuration to stdout.
The paths to inspect and the required versions are specified through environment
variables. If no valid configuration is found, the script prints to stderr and
returns an error code.
The list of libraries to find is specified as arguments. Supported libraries are
CUDA (includes cuBLAS), cuDNN, NCCL, and TensorRT.
The script takes a list of base directories specified by the TF_CUDA_PATHS
environment variable as a comma-separated list of globs. The script looks for
headers and library files in a hard-coded set of subdirectories from these base
paths. If TF_CUDA_PATHS is not specified, an OS-specific default is used:
Linux: /usr/local/cuda, /usr, and paths from 'ldconfig -p'.
Windows: CUDA_PATH environment variable, or
C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\*
For backwards compatibility, some libraries also use alternative base
directories from other environment variables if they are specified. List of
library-specific environment variables:
  Library   Version env variable   Additional base directories
  ----------------------------------------------------------------
  CUDA      TF_CUDA_VERSION        CUDA_TOOLKIT_PATH
  cuBLAS    TF_CUBLAS_VERSION      CUDA_TOOLKIT_PATH
  cuDNN     TF_CUDNN_VERSION       CUDNN_INSTALL_PATH
  NCCL      TF_NCCL_VERSION        NCCL_INSTALL_PATH, NCCL_HDR_PATH
  TensorRT  TF_TENSORRT_VERSION    TENSORRT_INSTALL_PATH
The version environment variables can be of the form 'x' or 'x.y' to request a
specific version, or left empty/unspecified to accept any version.
The output of a found library is of the form:
tf_<library>_version: x.y.z
tf_<library>_header_dir: ...
tf_<library>_library_dir: ...
"""
import io
import os
import glob
import platform
import re
import subprocess
import sys
# pylint: disable=g-import-not-at-top
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top
class ConfigError(Exception):
pass
def _is_aarch64():
return platform.machine() == "aarch64"
def _is_linux():
return platform.system() == "Linux"
def _is_windows():
return platform.system() == "Windows"
def _is_macos():
return platform.system() == "Darwin"
def _matches_version(actual_version, required_version):
"""Checks whether some version meets the requirements.
All elements of the required_version need to be present in the
actual_version.
  required_version  actual_version  result
  -----------------------------------------
  1                 1.1             True
  1.2               1               False
  1.2               1.3             False
                    1               True
Args:
required_version: The version specified by the user.
actual_version: The version detected from the CUDA installation.
Returns: Whether the actual version matches the required one.
"""
if actual_version is None:
return False
# Strip spaces from the versions.
actual_version = actual_version.strip()
required_version = required_version.strip()
return actual_version.startswith(required_version)
def _at_least_version(actual_version, required_version):
actual = [int(v) for v in actual_version.split(".")]
required = [int(v) for v in required_version.split(".")]
return actual >= required
def _get_header_version(path, name):
"""Returns preprocessor defines in C header file."""
for line in io.open(path, "r", encoding="utf-8").readlines():
match = re.match("\s*#\s*define %s\s+(\d+)" % name, line)
if match:
return match.group(1)
return ""
def _cartesian_product(first, second):
"""Returns all path combinations of first and second."""
return [os.path.join(f, s) for f in first for s in second]
def _get_ld_config_paths():
"""Returns all directories from 'ldconfig -p'."""
if not _is_linux():
return []
ldconfig_path = which("ldconfig") or "/sbin/ldconfig"
output = subprocess.check_output([ldconfig_path, "-p"])
pattern = re.compile(".* => (.*)")
result = set()
for line in output.splitlines():
try:
match = pattern.match(line.decode("ascii"))
except UnicodeDecodeError:
match = False
if match:
result.add(os.path.dirname(match.group(1)))
return sorted(list(result))
def _get_default_cuda_paths(cuda_version):
if not cuda_version:
cuda_version = "*"
elif not "." in cuda_version:
cuda_version = cuda_version + ".*"
if _is_windows():
return [
os.environ.get(
"CUDA_PATH",
"C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v%s\\" %
cuda_version)
]
return ["/usr/local/cuda-%s" % cuda_version, "/usr/local/cuda", "/usr"
] + _get_ld_config_paths()
def _header_paths():
"""Returns hard-coded set of relative paths to look for header files."""
return [
"",
"include",
"include/cuda",
"include/*-linux-gnu",
"extras/CUPTI/include",
"include/cuda/CUPTI",
]
def _library_paths():
"""Returns hard-coded set of relative paths to look for library files."""
return [
"",
"lib64",
"lib",
"lib/*-linux-gnu",
"lib/x64",
"extras/CUPTI/*",
]
def _not_found_error(base_paths, relative_paths, filepattern):
base_paths = "".join(["\n '%s'" % path for path in sorted(base_paths)])
relative_paths = "".join(["\n '%s'" % path for path in relative_paths])
return ConfigError(
"Could not find any %s in any subdirectory:%s\nof:%s\n" %
(filepattern, relative_paths, base_paths))
def _find_file(base_paths, relative_paths, filepattern):
for path in _cartesian_product(base_paths, relative_paths):
for file in glob.glob(os.path.join(path, filepattern)):
return file
raise _not_found_error(base_paths, relative_paths, filepattern)
def _find_library(base_paths, library_name, required_version):
"""Returns first valid path to the requested library."""
if _is_windows():
filepattern = library_name + ".lib"
elif _is_macos():
filepattern = "%s*.dylib" % (".".join(["lib" + library_name] +
required_version.split(".")[:1]))
else:
filepattern = ".".join(["lib" + library_name, "so"] +
required_version.split(".")[:1]) + "*"
return _find_file(base_paths, _library_paths(), filepattern)
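# Illustrative note (not part of the original script): on Linux the glob built
# above pins only the major version. For example, a hypothetical call
# _find_library(base_paths, "cudart", "10.0") searches the library
# subdirectories for files matching 'libcudart.so.10*'.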
def _find_versioned_file(base_paths, relative_paths, filepattern,
required_version, get_version):
"""Returns first valid path to a file that matches the requested version."""
if type(filepattern) not in [list, tuple]:
filepattern = [filepattern]
for path in _cartesian_product(base_paths, relative_paths):
for pattern in filepattern:
for file in glob.glob(os.path.join(path, pattern)):
actual_version = get_version(file)
if _matches_version(actual_version, required_version):
return file, actual_version
  # filepattern was normalized to a list above, so report the first pattern.
  raise _not_found_error(
      base_paths, relative_paths,
      filepattern[0] + " matching version '%s'" % required_version)
def _find_header(base_paths, header_name, required_version, get_version):
"""Returns first valid path to a header that matches the requested version."""
return _find_versioned_file(base_paths, _header_paths(), header_name,
required_version, get_version)
def _find_cuda_config(base_paths, required_version):
def get_header_version(path):
version = int(_get_header_version(path, "CUDA_VERSION"))
if not version:
return None
return "%d.%d" % (version // 1000, version % 1000 // 10)
cuda_header_path, header_version = _find_header(base_paths, "cuda.h",
required_version,
get_header_version)
cuda_version = header_version # x.y, see above.
cuda_library_path = _find_library(base_paths, "cudart", cuda_version)
def get_nvcc_version(path):
pattern = "Cuda compilation tools, release \d+\.\d+, V(\d+\.\d+\.\d+)"
for line in subprocess.check_output([path, "--version"]).splitlines():
match = re.match(pattern, line.decode("ascii"))
if match:
return match.group(1)
return None
nvcc_name = "nvcc.exe" if _is_windows() else "nvcc"
nvcc_path, nvcc_version = _find_versioned_file(base_paths, [
"",
"bin",
], nvcc_name, cuda_version, get_nvcc_version)
nvvm_path = _find_file(base_paths, [
"nvvm/libdevice",
"share/cuda",
"lib/nvidia-cuda-toolkit/libdevice",
], "libdevice*.10.bc")
cupti_header_path = _find_file(base_paths, _header_paths(), "cupti.h")
cupti_library_path = _find_library(base_paths, "cupti", required_version)
cuda_binary_dir = os.path.dirname(nvcc_path)
nvvm_library_dir = os.path.dirname(nvvm_path)
# XLA requires the toolkit path to find ptxas and libdevice.
# TODO(csigg): pass in both directories instead.
cuda_toolkit_paths = (
os.path.normpath(os.path.join(cuda_binary_dir, "..")),
os.path.normpath(os.path.join(nvvm_library_dir, "../..")),
)
if cuda_toolkit_paths[0] != cuda_toolkit_paths[1]:
raise ConfigError("Inconsistent CUDA toolkit path: %s vs %s" %
cuda_toolkit_paths)
return {
"cuda_version": cuda_version,
"cuda_include_dir": os.path.dirname(cuda_header_path),
"cuda_library_dir": os.path.dirname(cuda_library_path),
"cuda_binary_dir": cuda_binary_dir,
"nvvm_library_dir": nvvm_library_dir,
"cupti_include_dir": os.path.dirname(cupti_header_path),
"cupti_library_dir": os.path.dirname(cupti_library_path),
"cuda_toolkit_path": cuda_toolkit_paths[0],
}
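# Illustrative sketch (not part of the original script): for a default toolkit
# layout, _find_cuda_config() above returns a dictionary along the lines of
#   {"cuda_version": "10.1",
#    "cuda_include_dir": "/usr/local/cuda-10.1/include",
#    "cuda_library_dir": "/usr/local/cuda-10.1/lib64",
#    "cuda_binary_dir": "/usr/local/cuda-10.1/bin",
#    "nvvm_library_dir": "/usr/local/cuda-10.1/nvvm/libdevice", ...}
# The version and paths shown are hypothetical examples only.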
def _find_cublas_config(base_paths, required_version, cuda_version):
if _at_least_version(cuda_version, "10.1"):
def get_header_version(path):
version = (
_get_header_version(path, name)
for name in ("CUBLAS_VER_MAJOR", "CUBLAS_VER_MINOR",
"CUBLAS_VER_PATCH"))
return ".".join(version)
header_path, header_version = _find_header(base_paths, "cublas_api.h",
required_version,
get_header_version)
# cuBLAS uses the major version only.
cublas_version = header_version.split(".")[0]
else:
# There is no version info available before CUDA 10.1, just find the file.
header_version = cuda_version
header_path = _find_file(base_paths, _header_paths(), "cublas_api.h")
# cuBLAS version is the same as CUDA version (x.y).
cublas_version = required_version
library_path = _find_library(base_paths, "cublas", cublas_version)
return {
"cublas_version": header_version,
"cublas_include_dir": os.path.dirname(header_path),
"cublas_library_dir": os.path.dirname(library_path),
}
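# Illustrative note (not part of the original script): with a hypothetical
# CUDA 10.1+ install whose cublas_api.h defines CUBLAS_VER_MAJOR 10,
# CUBLAS_VER_MINOR 2 and CUBLAS_VER_PATCH 1, get_header_version() above yields
# "10.2.1", the library lookup uses only the major version "10", and the
# returned "cublas_version" entry is the full "10.2.1".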
def _find_cusolver_config(base_paths, required_version, cuda_version):
if (_at_least_version(cuda_version, "11.0") or (_is_aarch64 and _at_least_version(cuda_version, "10.2"))):
def get_header_version(path):
version = (
_get_header_version(path, name)
for name in ("CUSOLVER_VER_MAJOR", "CUSOLVER_VER_MINOR",
"CUSOLVER_VER_PATCH"))
return ".".join(version)
header_path, header_version = _find_header(base_paths, "cusolver_common.h",
required_version,
get_header_version)
cusolver_version = header_version.split(".")[0]
else:
header_version = cuda_version
header_path = _find_file(base_paths, _header_paths(), "cusolver_common.h")
cusolver_version = required_version
library_path = _find_library(base_paths, "cusolver", cusolver_version)
return {
"cusolver_version": header_version,
"cusolver_include_dir": os.path.dirname(header_path),
"cusolver_library_dir": os.path.dirname(library_path),
}
def _find_curand_config(base_paths, required_version, cuda_version):
if (_at_least_version(cuda_version, "11.0") or (_is_aarch64 and _at_least_version(cuda_version, "10.2"))):
def get_header_version(path):
version = (
_get_header_version(path, name)
for name in ("CURAND_VER_MAJOR", "CURAND_VER_MINOR",
"CURAND_VER_PATCH"))
return ".".join(version)
header_path, header_version = _find_header(base_paths, "curand.h",
required_version,
get_header_version)
curand_version = header_version.split(".")[0]
else:
header_version = cuda_version
header_path = _find_file(base_paths, _header_paths(), "curand.h")
curand_version = required_version
library_path = _find_library(base_paths, "curand", curand_version)
return {
"curand_version": header_version,
"curand_include_dir": os.path.dirname(header_path),
"curand_library_dir": os.path.dirname(library_path),
}
def _find_cufft_config(base_paths, required_version, cuda_version):
if (_at_least_version(cuda_version, "11.0") or (_is_aarch64 and _at_least_version(cuda_version, "10.2"))):
def get_header_version(path):
version = (
_get_header_version(path, name)
for name in ("CUFFT_VER_MAJOR", "CUFFT_VER_MINOR",
"CUFFT_VER_PATCH"))
return ".".join(version)
header_path, header_version = _find_header(base_paths, "cufft.h",
required_version,
get_header_version)
cufft_version = header_version.split(".")[0]
else:
header_version = cuda_version
header_path = _find_file(base_paths, _header_paths(), "cufft.h")
cufft_version = required_version
library_path = _find_library(base_paths, "cufft", cufft_version)
return {
"cufft_version": header_version,
"cufft_include_dir": os.path.dirname(header_path),
"cufft_library_dir": os.path.dirname(library_path),
}
def _find_cudnn_config(base_paths, required_version):
def get_header_version(path):
version = [
_get_header_version(path, name)
for name in ("CUDNN_MAJOR", "CUDNN_MINOR", "CUDNN_PATCHLEVEL")]
return ".".join(version) if version[0] else None
header_path, header_version = _find_header(base_paths,
("cudnn.h", "cudnn_version.h"),
required_version,
get_header_version)
cudnn_version = header_version.split(".")[0]
library_path = _find_library(base_paths, "cudnn", cudnn_version)
return {
"cudnn_version": cudnn_version,
"cudnn_include_dir": os.path.dirname(header_path),
"cudnn_library_dir": os.path.dirname(library_path),
}
def _find_cusparse_config(base_paths, required_version, cuda_version):
if (_at_least_version(cuda_version, "11.0") or (_is_aarch64 and _at_least_version(cuda_version, "10.2"))):
def get_header_version(path):
version = (
_get_header_version(path, name)
for name in ("CUSPARSE_VER_MAJOR", "CUSPARSE_VER_MINOR",
"CUSPARSE_VER_PATCH"))
return ".".join(version)
header_path, header_version = _find_header(base_paths, "cusparse.h",
required_version,
get_header_version)
cusparse_version = header_version.split(".")[0]
else:
header_version = cuda_version
header_path = _find_file(base_paths, _header_paths(), "cusparse.h")
cusparse_version = required_version
library_path = _find_library(base_paths, "cusparse", cusparse_version)
return {
"cusparse_version": header_version,
"cusparse_include_dir": os.path.dirname(header_path),
"cusparse_library_dir": os.path.dirname(library_path),
}
def _find_nccl_config(base_paths, required_version):
def get_header_version(path):
version = (
_get_header_version(path, name)
for name in ("NCCL_MAJOR", "NCCL_MINOR", "NCCL_PATCH"))
return ".".join(version)
header_path, header_version = _find_header(base_paths, "nccl.h",
required_version,
get_header_version)
nccl_version = header_version.split(".")[0]
library_path = _find_library(base_paths, "nccl", nccl_version)
return {
"nccl_version": nccl_version,
"nccl_include_dir": os.path.dirname(header_path),
"nccl_library_dir": os.path.dirname(library_path),
}
def _find_tensorrt_config(base_paths, required_version):
def get_header_version(path):
version = (
_get_header_version(path, name)
for name in ("NV_TENSORRT_MAJOR", "NV_TENSORRT_MINOR",
"NV_TENSORRT_PATCH"))
# `version` is a generator object, so we convert it to a list before using
# it (multiple times below).
version = list(version)
if not all(version):
return None  # Versions not found; make _matches_version return False.
return ".".join(version)
try:
header_path, header_version = _find_header(base_paths, "NvInfer.h",
required_version,
get_header_version)
except ConfigError:
# TensorRT 6 moved the version information to NvInferVersion.h.
header_path, header_version = _find_header(base_paths, "NvInferVersion.h",
required_version,
get_header_version)
tensorrt_version = header_version.split(".")[0]
library_path = _find_library(base_paths, "nvinfer", tensorrt_version)
return {
"tensorrt_version": tensorrt_version,
"tensorrt_include_dir": os.path.dirname(header_path),
"tensorrt_library_dir": os.path.dirname(library_path),
}
def _list_from_env(env_name, default=[]):
"""Returns comma-separated list from environment variable."""
if env_name in os.environ:
return os.environ[env_name].split(",")
return default
def _get_legacy_path(env_name, default=[]):
"""Returns a path specified by a legacy environment variable.
CUDNN_INSTALL_PATH, NCCL_INSTALL_PATH, TENSORRT_INSTALL_PATH set to
'/usr/lib/x86_64-linux-gnu' would previously find both library and header
paths. Detect those and return '/usr', otherwise forward to _list_from_env().
"""
if env_name in os.environ:
match = re.match(r"^(/[^/ ]*)+/lib/\w+-linux-gnu/?$", os.environ[env_name])
if match:
return [match.group(1)]
return _list_from_env(env_name, default)
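# Illustrative note (not part of the original script): with the hypothetical
# setting CUDNN_INSTALL_PATH=/usr/lib/x86_64-linux-gnu, _get_legacy_path()
# above returns ['/usr']; a value such as /opt/cudnn does not match the legacy
# pattern and falls through to _list_from_env(), returning ['/opt/cudnn'].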
def _normalize_path(path):
"""Returns normalized path, with forward slashes on Windows."""
path = os.path.normpath(path)
if _is_windows():
path = path.replace("\\", "/")
return path
def find_cuda_config():
"""Returns a dictionary of CUDA library and header file paths."""
libraries = [argv.lower() for argv in sys.argv[1:]]
cuda_version = os.environ.get("TF_CUDA_VERSION", "")
base_paths = _list_from_env("TF_CUDA_PATHS",
_get_default_cuda_paths(cuda_version))
base_paths = [path for path in base_paths if os.path.exists(path)]
result = {}
if "cuda" in libraries:
cuda_paths = _list_from_env("CUDA_TOOLKIT_PATH", base_paths)
result.update(_find_cuda_config(cuda_paths, cuda_version))
cuda_version = result["cuda_version"]
cublas_paths = base_paths
if tuple(int(v) for v in cuda_version.split(".")) < (10, 1):
# Before CUDA 10.1, cuBLAS was in the same directory as the toolkit.
cublas_paths = cuda_paths
cublas_version = os.environ.get("TF_CUBLAS_VERSION", "")
result.update(
_find_cublas_config(cublas_paths, cublas_version, cuda_version))
cusolver_paths = base_paths
if tuple(int(v) for v in cuda_version.split(".")) < (11, 0):
cusolver_paths = cuda_paths
cusolver_version = os.environ.get("TF_CUSOLVER_VERSION", "")
result.update(
_find_cusolver_config(cusolver_paths, cusolver_version, cuda_version))
curand_paths = base_paths
if tuple(int(v) for v in cuda_version.split(".")) < (11, 0):
curand_paths = cuda_paths
curand_version = os.environ.get("TF_CURAND_VERSION", "")
result.update(
_find_curand_config(curand_paths, curand_version, cuda_version))
cufft_paths = base_paths
if tuple(int(v) for v in cuda_version.split(".")) < (11, 0):
cufft_paths = cuda_paths
cufft_version = os.environ.get("TF_CUFFT_VERSION", "")
result.update(
_find_cufft_config(cufft_paths, cufft_version, cuda_version))
cusparse_paths = base_paths
if tuple(int(v) for v in cuda_version.split(".")) < (11, 0):
cusparse_paths = cuda_paths
cusparse_version = os.environ.get("TF_CUSPARSE_VERSION", "")
result.update(
_find_cusparse_config(cusparse_paths, cusparse_version, cuda_version))
if "cudnn" in libraries:
cudnn_paths = _get_legacy_path("CUDNN_INSTALL_PATH", base_paths)
cudnn_version = os.environ.get("TF_CUDNN_VERSION", "")
result.update(_find_cudnn_config(cudnn_paths, cudnn_version))
if "nccl" in libraries:
nccl_paths = _get_legacy_path("NCCL_INSTALL_PATH", base_paths)
nccl_version = os.environ.get("TF_NCCL_VERSION", "")
result.update(_find_nccl_config(nccl_paths, nccl_version))
if "tensorrt" in libraries:
tensorrt_paths = _get_legacy_path("TENSORRT_INSTALL_PATH", base_paths)
tensorrt_version = os.environ.get("TF_TENSORRT_VERSION", "")
result.update(_find_tensorrt_config(tensorrt_paths, tensorrt_version))
for k, v in result.items():
if k.endswith("_dir") or k.endswith("_path"):
result[k] = _normalize_path(v)
return result
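# Illustrative usage (not part of the original script): the Bazel CUDA
# configuration typically runs this module as, e.g.,
#   TF_CUDA_VERSION=10.1 python find_cuda_config.py cuda cudnn
# and parses the "key: value" lines printed by main() below. The exact command
# line shown here is a hypothetical example.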
def main():
try:
for key, value in sorted(find_cuda_config().items()):
print("%s: %s" % (key, value))
except ConfigError as e:
sys.stderr.write(str(e))
sys.exit(1)
if __name__ == "__main__":
main()
|
tensorflow-r1.15.5-nv23.03
|
third_party/gpus/find_cuda_config.py
|
r"""Implementation of SPINN in TensorFlow eager execution.
SPINN: Stack-Augmented Parser-Interpreter Neural Network.
This file contains the model definition and code for training the model.
The model definition is based on PyTorch implementation at:
https://github.com/jekbradbury/examples/tree/spinn/snli
which was released under a BSD 3-Clause License at:
https://github.com/jekbradbury/examples/blob/spinn/LICENSE:
Copyright (c) 2017,
All rights reserved.
See ./LICENSE for more details.
Instructions for use:
* See `README.md` for details on how to prepare the SNLI and GloVe data.
* Suppose you have prepared the data at "/tmp/spinn-data", use the following
command to train the model:
```bash
python spinn.py --data_root /tmp/spinn-data --logdir /tmp/spinn-logs
```
Checkpoints and TensorBoard summaries will be written to "/tmp/spinn-logs".
References:
* Bowman, S.R., Gauthier, J., Rastogi A., Gupta, R., Manning, C.D., & Potts, C.
(2016). A Fast Unified Model for Parsing and Sentence Understanding.
https://arxiv.org/abs/1603.06021
* Bradbury, J. (2017). Recursive Neural Networks with PyTorch.
https://devblogs.nvidia.com/parallelforall/recursive-neural-networks-pytorch/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import itertools
import os
import sys
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.spinn import data
layers = tf.keras.layers
def _bundle(lstm_iter):
"""Concatenate a list of Tensors along 1st axis and split result into two.
Args:
lstm_iter: A `list` of `N` dense `Tensor`s, each of which has the shape
(R, 2 * M).
Returns:
A `list` of two dense `Tensor`s, each of which has the shape (N * R, M).
"""
return tf.split(tf.concat(lstm_iter, 0), 2, axis=1)
def _unbundle(state):
"""Concatenate a list of Tensors along 2nd axis and split result.
This is the inverse of `_bundle`.
Args:
state: A `list` of two dense `Tensor`s, each of which has the shape (R, M).
Returns:
A `list` of `R` dense `Tensors`, each of which has the shape (1, 2 * M).
"""
return tf.split(tf.concat(state, 1), state[0].shape[0], axis=0)
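# Illustrative note (not part of the original file): for N = 2 input Tensors
# of shape (R, 2 * M) with R = 1 and M = 3, _bundle() above returns two
# Tensors of shape (2, 3), and _unbundle() applied to that pair returns two
# Tensors of shape (1, 6), recovering the original layout. The sizes are
# hypothetical examples.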
# pylint: disable=not-callable
class Reducer(tf.keras.Model):
"""A module that applies reduce operation on left and right vectors."""
def __init__(self, size, tracker_size=None):
super(Reducer, self).__init__()
self.left = layers.Dense(5 * size, activation=None)
self.right = layers.Dense(5 * size, activation=None, use_bias=False)
if tracker_size is not None:
self.track = layers.Dense(5 * size, activation=None, use_bias=False)
else:
self.track = None
def call(self, left_in, right_in, tracking=None):
"""Invoke forward pass of the Reduce module.
This method feeds a linear combination of `left_in`, `right_in` and
`tracking` into a Tree LSTM and returns the output of the Tree LSTM.
Args:
left_in: A list of length L. Each item is a dense `Tensor` with
the shape (1, n_dims). n_dims is the size of the embedding vector.
right_in: A list of the same length as `left_in`. Each item should have
the same shape as the items of `left_in`.
tracking: Optional list of the same length as `left_in`. Each item is a
dense `Tensor` with shape (1, tracker_size * 2). tracker_size is the
size of the Tracker's state vector.
Returns:
Output: A list of length batch_size. Each item has the shape (1, n_dims).
"""
left, right = _bundle(left_in), _bundle(right_in)
lstm_in = self.left(left[0]) + self.right(right[0])
if self.track and tracking:
lstm_in += self.track(_bundle(tracking)[0])
return _unbundle(self._tree_lstm(left[1], right[1], lstm_in))
def _tree_lstm(self, c1, c2, lstm_in):
a, i, f1, f2, o = tf.split(lstm_in, 5, axis=1)
c = tf.tanh(a) * tf.sigmoid(i) + tf.sigmoid(f1) * c1 + tf.sigmoid(f2) * c2
h = tf.sigmoid(o) * tf.tanh(c)
return h, c
class Tracker(tf.keras.Model):
"""A module that tracks the history of the sentence with an LSTM."""
def __init__(self, tracker_size, predict):
"""Constructor of Tracker.
Args:
tracker_size: Number of dimensions of the underlying `LSTMCell`.
predict: (`bool`) Whether prediction mode is enabled.
"""
super(Tracker, self).__init__()
self._rnn = tf.nn.rnn_cell.LSTMCell(tracker_size)
self._state_size = tracker_size
if predict:
self._transition = layers.Dense(4)
else:
self._transition = None
def reset_state(self):
self.state = None
def call(self, bufs, stacks):
"""Invoke the forward pass of the Tracker module.
This method feeds the concatenation of the top two elements of the stacks
into an LSTM cell and returns the resultant state of the LSTM cell.
Args:
bufs: A `list` of length batch_size. Each item is a `list` of
max_sequence_len (maximum sequence length of the batch). Each item
of the nested list is a dense `Tensor` of shape (1, d_proj), where
d_proj is the size of the word embedding vector or the size of the
vector space that the word embedding vector is projected to.
stacks: A `list` of size batch_size. Each item is a `list` of
variable length corresponding to the current height of the stack.
Each item of the nested list is a dense `Tensor` of shape (1, d_proj).
Returns:
1. A list of length batch_size. Each item is a dense `Tensor` of shape
(1, d_tracker * 2).
2. If under predict mode, result of applying a Dense layer on the
first state vector of the RNN. Else, `None`.
"""
buf = _bundle([buf[-1] for buf in bufs])[0]
stack1 = _bundle([stack[-1] for stack in stacks])[0]
stack2 = _bundle([stack[-2] for stack in stacks])[0]
x = tf.concat([buf, stack1, stack2], 1)
if self.state is None:
batch_size = int(x.shape[0])
zeros = tf.zeros((batch_size, self._state_size), dtype=tf.float32)
self.state = [zeros, zeros]
_, self.state = self._rnn(x, self.state)
unbundled = _unbundle(self.state)
if self._transition:
return unbundled, self._transition(self.state[0])
else:
return unbundled, None
class SPINN(tf.keras.Model):
"""Stack-augmented Parser-Interpreter Neural Network.
See https://arxiv.org/abs/1603.06021 for more details.
"""
def __init__(self, config):
"""Constructor of SPINN.
Args:
config: A `namedtuple` with the following attributes.
d_proj - (`int`) number of dimensions of the vector space to project the
word embeddings to.
d_tracker - (`int`) number of dimensions of the Tracker's state vector.
d_hidden - (`int`) number of the dimensions of the hidden state, for the
Reducer module.
n_mlp_layers - (`int`) number of multi-layer perceptron layers to use to
convert the output of the `Feature` module to logits.
predict - (`bool`) Whether the Tracker will make transition predictions.
"""
super(SPINN, self).__init__()
self.config = config
self.reducer = Reducer(config.d_hidden, config.d_tracker)
if config.d_tracker is not None:
self.tracker = Tracker(config.d_tracker, config.predict)
else:
self.tracker = None
def call(self, buffers, transitions, training=False):
"""Invoke the forward pass of the SPINN model.
Args:
buffers: Dense `Tensor` of shape
(max_sequence_len, batch_size, config.d_proj).
transitions: Dense `Tensor` with integer values that represent the parse
trees of the sentences. A value of 2 indicates "reduce"; a value of 3
indicates "shift". Shape: (max_sequence_len * 2 - 3, batch_size).
training: Whether the invocation is under training mode.
Returns:
Output `Tensor` of shape (batch_size, config.d_embed).
"""
max_sequence_len, batch_size, d_proj = (int(x) for x in buffers.shape)
# Split the buffers into left and right word items and put the initial
# items in a stack.
splitted = tf.split(
tf.reshape(tf.transpose(buffers, [1, 0, 2]), [-1, d_proj]),
max_sequence_len * batch_size, axis=0)
buffers = [splitted[k:k + max_sequence_len]
for k in xrange(0, len(splitted), max_sequence_len)]
stacks = [[buf[0], buf[0]] for buf in buffers]
if self.tracker:
# Reset tracker state for new batch.
self.tracker.reset_state()
num_transitions = transitions.shape[0]
# Iterate through transitions and perform the appropriate stack-pop, reduce
# and stack-push operations.
transitions = transitions.numpy()
for i in xrange(num_transitions):
trans = transitions[i]
if self.tracker:
# Invoke tracker to obtain the current tracker states for the sentences.
tracker_states, trans_hypothesis = self.tracker(buffers, stacks=stacks)
if trans_hypothesis:
trans = tf.argmax(trans_hypothesis, axis=-1)
else:
tracker_states = itertools.repeat(None)
lefts, rights, trackings = [], [], []
for transition, buf, stack, tracking in zip(
trans, buffers, stacks, tracker_states):
if int(transition) == 3: # Shift.
stack.append(buf.pop())
elif int(transition) == 2: # Reduce.
rights.append(stack.pop())
lefts.append(stack.pop())
trackings.append(tracking)
if rights:
reducer_output = self.reducer(lefts, rights, trackings)
reduced = iter(reducer_output)
for transition, stack in zip(trans, stacks):
if int(transition) == 2: # Reduce.
stack.append(next(reduced))
return _bundle([stack.pop() for stack in stacks])[0]
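# Illustrative note (not part of the original file): in the `transitions`
# Tensor consumed by SPINN.call() above, a value of 3 means "shift" (push the
# next buffer item onto the stack) and 2 means "reduce" (pop two stack items
# and combine them through the Reducer). A hypothetical single-sentence batch
# with max_sequence_len = 3 could use
#   transitions = tf.constant([[3], [3], [2]])  # shape (2 * 3 - 3, 1)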
class Perceptron(tf.keras.Model):
"""One layer of the SNLIClassifier multi-layer perceptron."""
def __init__(self, dimension, dropout_rate, previous_layer):
"""Configure the Perceptron."""
super(Perceptron, self).__init__()
self.dense = tf.keras.layers.Dense(dimension, activation=tf.nn.elu)
self.batchnorm = layers.BatchNormalization()
self.dropout = layers.Dropout(rate=dropout_rate)
self.previous_layer = previous_layer
def call(self, x, training):
"""Run previous Perceptron layers, then this one."""
x = self.previous_layer(x, training=training)
x = self.dense(x)
x = self.batchnorm(x, training=training)
x = self.dropout(x, training=training)
return x
class SNLIClassifier(tf.keras.Model):
"""SNLI Classifier Model.
A model aimed at solving the SNLI (Stanford Natural Language Inference)
task, using the SPINN model from above. For details of the task, see:
https://nlp.stanford.edu/projects/snli/
"""
def __init__(self, config, embed):
"""Constructor of SNLICLassifier.
Args:
config: A namedtuple containing required configurations for the model. It
needs to have the following attributes.
projection - (`bool`) whether the word vectors are to be projected onto
another vector space (of `d_proj` dimensions).
d_proj - (`int`) number of dimensions of the vector space to project the
word embeddings to.
embed_dropout - (`float`) dropout rate for the word embedding vectors.
n_mlp_layers - (`int`) number of multi-layer perceptron (MLP) layers to
use to convert the output of the `Feature` module to logits.
mlp_dropout - (`float`) dropout rate of the MLP layers.
d_out - (`int`) number of dimensions of the final output of the MLP
layers.
lr - (`float`) learning rate.
embed: An embedding matrix of shape (vocab_size, d_embed).
"""
super(SNLIClassifier, self).__init__()
self.config = config
self.embed = tf.constant(embed)
self.projection = layers.Dense(config.d_proj)
self.embed_bn = layers.BatchNormalization()
self.embed_dropout = layers.Dropout(rate=config.embed_dropout)
self.encoder = SPINN(config)
self.feature_bn = layers.BatchNormalization()
self.feature_dropout = layers.Dropout(rate=config.mlp_dropout)
current_mlp = lambda result, training: result
for _ in range(config.n_mlp_layers):
current_mlp = Perceptron(dimension=config.d_mlp,
dropout_rate=config.mlp_dropout,
previous_layer=current_mlp)
self.mlp = current_mlp
self.mlp_output = layers.Dense(
config.d_out,
kernel_initializer=tf.random_uniform_initializer(minval=-5e-3,
maxval=5e-3))
def call(self,
premise,
premise_transition,
hypothesis,
hypothesis_transition,
training=False):
"""Invoke the forward pass the SNLIClassifier model.
Args:
premise: The word indices of the premise sentences, with shape
(max_prem_seq_len, batch_size).
premise_transition: The transitions for the premise sentences, with shape
(max_prem_seq_len * 2 - 3, batch_size).
hypothesis: The word indices of the hypothesis sentences, with shape
(max_hypo_seq_len, batch_size).
hypothesis_transition: The transitions for the hypothesis sentences, with
shape (max_hypo_seq_len * 2 - 3, batch_size).
training: Whether the invocation is under training mode.
Returns:
The logits, as a dense `Tensor` of shape (batch_size, d_out), where d_out
is the size of the output vector.
"""
# Perform embedding lookup on the premise and hypothesis inputs, which have
# the word-index format.
premise_embed = tf.nn.embedding_lookup(self.embed, premise)
hypothesis_embed = tf.nn.embedding_lookup(self.embed, hypothesis)
if self.config.projection:
# Project the embedding vectors to another vector space.
premise_embed = self.projection(premise_embed)
hypothesis_embed = self.projection(hypothesis_embed)
# Perform batch normalization and dropout on the possibly projected word
# vectors.
premise_embed = self.embed_bn(premise_embed, training=training)
hypothesis_embed = self.embed_bn(hypothesis_embed, training=training)
premise_embed = self.embed_dropout(premise_embed, training=training)
hypothesis_embed = self.embed_dropout(hypothesis_embed, training=training)
# Run the batch-normalized and dropout-processed word vectors through the
# SPINN encoder.
premise = self.encoder(premise_embed, premise_transition,
training=training)
hypothesis = self.encoder(hypothesis_embed, hypothesis_transition,
training=training)
# Combine encoder outputs for premises and hypotheses into logits.
# Then apply batch normalization and dropout on the logits.
logits = tf.concat(
[premise, hypothesis, premise - hypothesis, premise * hypothesis], 1)
logits = self.feature_dropout(
self.feature_bn(logits, training=training), training=training)
# Apply the multi-layer perceptron on the logits.
logits = self.mlp(logits, training=training)
logits = self.mlp_output(logits)
return logits
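# Illustrative note (not part of the original file): the feature vector fed to
# the MLP above concatenates
#   [premise; hypothesis; premise - hypothesis; premise * hypothesis]
# along the last axis, so it is four times as wide as a single encoder output
# before batch norm, dropout and the perceptron stack map it to d_out logits.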
class SNLIClassifierTrainer(tfe.Checkpointable):
"""A class that coordinates the training of an SNLIClassifier."""
def __init__(self, snli_classifier, lr):
"""Constructor of SNLIClassifierTrainer.
Args:
snli_classifier: An instance of `SNLIClassifier`.
lr: Learning rate.
"""
self._model = snli_classifier
# Create a custom learning rate Variable for the RMSProp optimizer, because
# the learning rate needs to be manually decayed later (see
# decay_learning_rate()).
self._learning_rate = tf.Variable(lr, name="learning_rate")
self._optimizer = tf.train.RMSPropOptimizer(self._learning_rate,
epsilon=1e-6)
def loss(self, labels, logits):
"""Calculate the loss given a batch of data.
Args:
labels: The truth labels, with shape (batch_size,).
logits: The logits output from the forward pass of the SNLIClassifier
model, with shape (batch_size, d_out), where d_out is the output
dimension size of the SNLIClassifier.
Returns:
The loss value, as a scalar `Tensor`.
"""
return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits))
def train_batch(self,
labels,
premise,
premise_transition,
hypothesis,
hypothesis_transition):
"""Train model on batch of data.
Args:
labels: The truth labels, with shape (batch_size,).
premise: The word indices of the premise sentences, with shape
(max_prem_seq_len, batch_size).
premise_transition: The transitions for the premise sentences, with shape
(max_prem_seq_len * 2 - 3, batch_size).
hypothesis: The word indices of the hypothesis sentences, with shape
(max_hypo_seq_len, batch_size).
hypothesis_transition: The transitions for the hypothesis sentences, with
shape (max_hypo_seq_len * 2 - 3, batch_size).
Returns:
1. loss value as a scalar `Tensor`.
2. logits as a dense `Tensor` of shape (batch_size, d_out), where d_out is
the output dimension size of the SNLIClassifier.
"""
with tf.GradientTape() as tape:
tape.watch(self._model.variables)
logits = self._model(premise,
premise_transition,
hypothesis,
hypothesis_transition,
training=True)
loss = self.loss(labels, logits)
gradients = tape.gradient(loss, self._model.variables)
self._optimizer.apply_gradients(zip(gradients, self._model.variables),
global_step=tf.train.get_global_step())
return loss, logits
def decay_learning_rate(self, decay_by):
"""Decay learning rate of the optimizer by factor decay_by."""
self._learning_rate.assign(self._learning_rate * decay_by)
print("Decayed learning rate of optimizer to: %s" %
self._learning_rate.numpy())
@property
def learning_rate(self):
return self._learning_rate
@property
def model(self):
return self._model
@property
def variables(self):
return (self._model.variables + [self.learning_rate] +
self._optimizer.variables())
def _batch_n_correct(logits, label):
"""Calculate number of correct predictions in a batch.
Args:
logits: A logits Tensor of shape `(batch_size, num_categories)` and dtype
`float32`.
label: A labels Tensor of shape `(batch_size,)` and dtype `int64`
Returns:
Number of correct predictions.
"""
return tf.reduce_sum(
tf.cast((tf.equal(
tf.argmax(logits, axis=1), label)), tf.float32)).numpy()
def _evaluate_on_dataset(snli_data, batch_size, trainer, use_gpu):
"""Run evaluation on a dataset.
Args:
snli_data: The `data.SnliData` to use in this evaluation.
batch_size: The batch size to use during this evaluation.
trainer: An instance of `SNLIClassifierTrainer` to use for this
evaluation.
use_gpu: Whether GPU is being used.
Returns:
1. Average loss across all examples of the dataset.
2. Average accuracy rate across all examples of the dataset.
"""
mean_loss = tfe.metrics.Mean()
accuracy = tfe.metrics.Accuracy()
for label, prem, prem_trans, hypo, hypo_trans in _get_dataset_iterator(
snli_data, batch_size):
if use_gpu:
label, prem, hypo = label.gpu(), prem.gpu(), hypo.gpu()
logits = trainer.model(prem, prem_trans, hypo, hypo_trans, training=False)
loss_val = trainer.loss(label, logits)
batch_size = tf.shape(label)[0]
mean_loss(loss_val, weights=batch_size.gpu() if use_gpu else batch_size)
accuracy(tf.argmax(logits, axis=1), label)
return mean_loss.result().numpy(), accuracy.result().numpy()
def _get_dataset_iterator(snli_data, batch_size):
"""Get a data iterator for a split of SNLI data.
Args:
snli_data: A `data.SnliData` object.
batch_size: The desired batch size.
Returns:
A dataset iterator.
"""
with tf.device("/device:CPU:0"):
# Some tf.data ops, such as ShuffleDataset, are available only on CPU.
dataset = tf.data.Dataset.from_generator(
snli_data.get_generator(batch_size),
(tf.int64, tf.int64, tf.int64, tf.int64, tf.int64))
dataset = dataset.shuffle(snli_data.num_batches(batch_size))
return tfe.Iterator(dataset)
def train_or_infer_spinn(embed,
word2index,
train_data,
dev_data,
test_data,
config):
"""Perform Training or Inference on a SPINN model.
Args:
embed: The embedding matrix as a float32 numpy array with shape
[vocabulary_size, word_vector_len]. word_vector_len is the length of a
word embedding vector.
word2index: A `dict` mapping word to word index.
train_data: An instance of `data.SnliData`, for the train split.
dev_data: Same as above, for the dev split.
test_data: Same as above, for the test split.
config: A configuration object. See the argument to this Python binary for
details.
Returns:
If `config.inference_premise` and `config.inference_hypothesis` are not
`None`, i.e., inference mode: the logits for the possible labels of the
SNLI data set, as a `Tensor` of three floats.
else:
The trainer object.
Raises:
ValueError: if only one of config.inference_premise and
config.inference_hypothesis is specified.
"""
# TODO(cais): Refactor this function into separate ones for training and
# inference.
use_gpu = tfe.num_gpus() > 0 and not config.force_cpu
device = "gpu:0" if use_gpu else "cpu:0"
print("Using device: %s" % device)
if ((config.inference_premise and not config.inference_hypothesis) or
(not config.inference_premise and config.inference_hypothesis)):
raise ValueError(
"--inference_premise and --inference_hypothesis must be both "
"specified or both unspecified, but only one is specified.")
if config.inference_premise:
# Inference mode.
inference_sentence_pair = [
data.encode_sentence(config.inference_premise, word2index),
data.encode_sentence(config.inference_hypothesis, word2index)]
else:
inference_sentence_pair = None
log_header = (
" Time Epoch Iteration Progress (%Epoch) Loss Dev/Loss"
" Accuracy Dev/Accuracy")
log_template = (
"{:>6.0f} {:>5.0f} {:>9.0f} {:>5.0f}/{:<5.0f} {:>7.0f}% {:>8.6f} {} "
"{:12.4f} {}")
dev_log_template = (
"{:>6.0f} {:>5.0f} {:>9.0f} {:>5.0f}/{:<5.0f} {:>7.0f}% {:>8.6f} "
"{:8.6f} {:12.4f} {:12.4f}")
summary_writer = tf.contrib.summary.create_file_writer(
config.logdir, flush_millis=10000)
with tf.device(device), \
summary_writer.as_default(), \
tf.contrib.summary.always_record_summaries():
model = SNLIClassifier(config, embed)
global_step = tf.train.get_or_create_global_step()
trainer = SNLIClassifierTrainer(model, config.lr)
checkpoint = tf.train.Checkpoint(trainer=trainer, global_step=global_step)
checkpoint.restore(tf.train.latest_checkpoint(config.logdir))
if inference_sentence_pair:
# Inference mode.
prem, prem_trans = inference_sentence_pair[0]
hypo, hypo_trans = inference_sentence_pair[1]
inference_logits = model(
tf.constant(prem), tf.constant(prem_trans),
tf.constant(hypo), tf.constant(hypo_trans), training=False)
inference_logits = inference_logits[0][1:]
max_index = tf.argmax(inference_logits)
print("\nInference logits:")
for i, (label, logit) in enumerate(
zip(data.POSSIBLE_LABELS, inference_logits)):
winner_tag = " (winner)" if max_index == i else ""
print(" {0:<16}{1:.6f}{2}".format(label + ":", logit, winner_tag))
return inference_logits
train_len = train_data.num_batches(config.batch_size)
start = time.time()
iterations = 0
mean_loss = tfe.metrics.Mean()
accuracy = tfe.metrics.Accuracy()
print(log_header)
for epoch in xrange(config.epochs):
batch_idx = 0
for label, prem, prem_trans, hypo, hypo_trans in _get_dataset_iterator(
train_data, config.batch_size):
if use_gpu:
label, prem, hypo = label.gpu(), prem.gpu(), hypo.gpu()
# prem_trans and hypo_trans are used for dynamic control flow and can
# remain on CPU. Same in _evaluate_on_dataset().
iterations += 1
batch_train_loss, batch_train_logits = trainer.train_batch(
label, prem, prem_trans, hypo, hypo_trans)
batch_size = tf.shape(label)[0]
mean_loss(batch_train_loss.numpy(),
weights=batch_size.gpu() if use_gpu else batch_size)
accuracy(tf.argmax(batch_train_logits, axis=1), label)
if iterations % config.save_every == 0:
checkpoint.save(os.path.join(config.logdir, "ckpt"))
if iterations % config.dev_every == 0:
dev_loss, dev_frac_correct = _evaluate_on_dataset(
dev_data, config.batch_size, trainer, use_gpu)
print(dev_log_template.format(
time.time() - start,
epoch, iterations, 1 + batch_idx, train_len,
100.0 * (1 + batch_idx) / train_len,
mean_loss.result(), dev_loss,
accuracy.result() * 100.0, dev_frac_correct * 100.0))
tf.contrib.summary.scalar("dev/loss", dev_loss)
tf.contrib.summary.scalar("dev/accuracy", dev_frac_correct)
elif iterations % config.log_every == 0:
mean_loss_val = mean_loss.result()
accuracy_val = accuracy.result()
print(log_template.format(
time.time() - start,
epoch, iterations, 1 + batch_idx, train_len,
100.0 * (1 + batch_idx) / train_len,
mean_loss_val, " " * 8, accuracy_val * 100.0, " " * 12))
tf.contrib.summary.scalar("train/loss", mean_loss_val)
tf.contrib.summary.scalar("train/accuracy", accuracy_val)
# Reset metrics.
mean_loss = tfe.metrics.Mean()
accuracy = tfe.metrics.Accuracy()
batch_idx += 1
if (epoch + 1) % config.lr_decay_every == 0:
trainer.decay_learning_rate(config.lr_decay_by)
test_loss, test_frac_correct = _evaluate_on_dataset(
test_data, config.batch_size, trainer, use_gpu)
print("Final test loss: %g; accuracy: %g%%" %
(test_loss, test_frac_correct * 100.0))
return trainer
def main(_):
config = FLAGS
# Load embedding vectors.
vocab = data.load_vocabulary(FLAGS.data_root)
word2index, embed = data.load_word_vectors(FLAGS.data_root, vocab)
if not (config.inference_premise or config.inference_hypothesis):
print("Loading train, dev and test data...")
train_data = data.SnliData(
os.path.join(FLAGS.data_root, "snli/snli_1.0/snli_1.0_train.txt"),
word2index, sentence_len_limit=FLAGS.sentence_len_limit)
dev_data = data.SnliData(
os.path.join(FLAGS.data_root, "snli/snli_1.0/snli_1.0_dev.txt"),
word2index, sentence_len_limit=FLAGS.sentence_len_limit)
test_data = data.SnliData(
os.path.join(FLAGS.data_root, "snli/snli_1.0/snli_1.0_test.txt"),
word2index, sentence_len_limit=FLAGS.sentence_len_limit)
else:
train_data = None
dev_data = None
test_data = None
train_or_infer_spinn(
embed, word2index, train_data, dev_data, test_data, config)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=
"TensorFlow eager implementation of the SPINN SNLI classifier.")
parser.add_argument("--data_root", type=str, default="/tmp/spinn-data",
help="Root directory in which the training data and "
"embedding matrix are found. See README.md for how to "
"generate such a directory.")
parser.add_argument("--sentence_len_limit", type=int, default=-1,
help="Maximum allowed sentence length (# of words). "
"The default of -1 means unlimited.")
parser.add_argument("--logdir", type=str, default="/tmp/spinn-logs",
help="Directory in which summaries will be written for "
"TensorBoard.")
parser.add_argument("--inference_premise", type=str, default=None,
help="Premise sentence for inference. Must be "
"accompanied by --inference_hypothesis. If specified, "
"will override all training parameters and perform "
"inference.")
parser.add_argument("--inference_hypothesis", type=str, default=None,
help="Hypothesis sentence for inference. Must be "
"accompanied by --inference_premise. If specified, will "
"override all training parameters and perform inference.")
parser.add_argument("--epochs", type=int, default=50,
help="Number of epochs to train.")
parser.add_argument("--batch_size", type=int, default=128,
help="Batch size to use during training.")
parser.add_argument("--d_proj", type=int, default=600,
help="Dimensions to project the word embedding vectors "
"to.")
parser.add_argument("--d_hidden", type=int, default=300,
help="Size of the hidden layer of the Tracker.")
parser.add_argument("--d_out", type=int, default=4,
help="Output dimensions of the SNLIClassifier.")
parser.add_argument("--d_mlp", type=int, default=1024,
help="Size of each layer of the multi-layer perceptron "
"of the SNLICLassifier.")
parser.add_argument("--n_mlp_layers", type=int, default=2,
help="Number of layers in the multi-layer perceptron "
"of the SNLICLassifier.")
parser.add_argument("--d_tracker", type=int, default=64,
help="Size of the tracker LSTM.")
parser.add_argument("--log_every", type=int, default=50,
help="Print log and write TensorBoard summary every _ "
"training batches.")
parser.add_argument("--lr", type=float, default=2e-3,
help="Initial learning rate.")
parser.add_argument("--lr_decay_by", type=float, default=0.75,
help="The ratio to multiply the learning rate by every "
"time the learning rate is decayed.")
parser.add_argument("--lr_decay_every", type=float, default=1,
help="Decay the learning rate every _ epoch(s).")
parser.add_argument("--dev_every", type=int, default=1000,
help="Run evaluation on the dev split every _ training "
"batches.")
parser.add_argument("--save_every", type=int, default=1000,
help="Save checkpoint every _ training batches.")
parser.add_argument("--embed_dropout", type=float, default=0.08,
help="Word embedding dropout rate.")
parser.add_argument("--mlp_dropout", type=float, default=0.07,
help="SNLIClassifier multi-layer perceptron dropout "
"rate.")
parser.add_argument("--no-projection", action="store_false",
dest="projection",
help="Whether word embedding vectors are projected to "
"another set of vectors (see d_proj).")
parser.add_argument("--predict_transitions", action="store_true",
dest="predict",
help="Whether the Tracker will perform prediction.")
parser.add_argument("--force_cpu", action="store_true", dest="force_cpu",
help="Force use CPU-only regardless of whether a GPU is "
"available.")
FLAGS, unparsed = parser.parse_known_args()
tfe.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-r1.15.5-nv23.03
|
third_party/examples/eager/spinn/spinn.py
|
#!/usr/bin/env python
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Crosstool wrapper for compiling CUDA programs with nvcc on Windows.
DESCRIPTION:
This script is the Windows version of //third_party/gpus/crosstool/crosstool_wrapper_is_not_gcc
"""
from __future__ import print_function
from argparse import ArgumentParser
import os
import subprocess
import re
import sys
import pipes
# Template values set by cuda_autoconf.
CPU_COMPILER = ('/dt7/usr/bin/gcc')
GCC_HOST_COMPILER_PATH = ('/dt7/usr/bin/gcc')
NVCC_PATH = '/usr/local/cuda-10.0/bin/nvcc'
NVCC_VERSION = '10.0'
NVCC_TEMP_DIR = "C:\\Windows\\Temp\\nvcc_inter_files_tmp_dir"
DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,6.0'
# Taken from environment variable for supported TF CUDA Compute Capabilities
# eg. export TF_CUDA_COMPUTE_CAPABILITIES=3.5,3.7,5.2,6.0,6.1,7.0
supported_cuda_compute_capabilities = os.environ.get(
'TF_CUDA_COMPUTE_CAPABILITIES',
DEFAULT_CUDA_COMPUTE_CAPABILITIES).split(',')
def Log(s):
print('gpus/crosstool: {0}'.format(s))
def GetOptionValue(argv, option):
"""Extract the list of values for option from options.
Args:
option: The option whose value to extract, without the leading '/'.
Returns:
1. A list of values, either directly following the option,
(eg., /opt val1 val2) or values collected from multiple occurrences of
the option (eg., /opt val1 /opt val2).
2. The leftover options.
"""
parser = ArgumentParser(prefix_chars='/')
parser.add_argument('/' + option, nargs='*', action='append')
args, leftover = parser.parse_known_args(argv)
if args and vars(args)[option]:
return (sum(vars(args)[option], []), leftover)
return ([], leftover)
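# Illustrative note (not part of the original wrapper): for the hypothetical
# argument list ['/I', 'foo', '/I', 'bar', '/Fobuild/out.obj'],
# GetOptionValue(argv, 'I') above returns (['foo', 'bar'],
# ['/Fobuild/out.obj']).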
def _update_options(nvcc_options):
if NVCC_VERSION in ("7.0",):
return nvcc_options
update_options = { "relaxed-constexpr" : "expt-relaxed-constexpr" }
return [ update_options[opt] if opt in update_options else opt
for opt in nvcc_options ]
def GetNvccOptions(argv):
"""Collect the -nvcc_options values from argv.
Args:
argv: A list of strings, possibly the argv passed to main().
Returns:
1. The list of options that can be passed directly to nvcc.
2. The leftover options.
"""
parser = ArgumentParser()
parser.add_argument('-nvcc_options', nargs='*', action='append')
args, leftover = parser.parse_known_args(argv)
if args.nvcc_options:
options = _update_options(sum(args.nvcc_options, []))
return (['--' + a for a in options], leftover)
return ([], leftover)
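# Illustrative note (not part of the original wrapper): a hypothetical
# invocation containing "-nvcc_options relaxed-constexpr" is rewritten by
# GetNvccOptions() and _update_options() above into the single nvcc flag
# "--expt-relaxed-constexpr"; all other arguments are returned as leftovers.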
def InvokeNvcc(argv, log=False):
"""Call nvcc with arguments assembled from argv.
Args:
argv: A list of strings, possibly the argv passed to main().
log: True if logging is requested.
Returns:
The return code from invoking nvcc via subprocess.
"""
src_files = [f for f in argv if
re.search(r'\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
if len(src_files) == 0:
raise RuntimeError('No source files found for cuda compilation.')
out_file = [ f for f in argv if f.startswith('/Fo') ]
if len(out_file) != 1:
raise RuntimeError('Please specify exactly one output file for cuda compilation.')
out = ['-o', out_file[0][len('/Fo'):]]
nvcc_compiler_options, argv = GetNvccOptions(argv)
opt_option, argv = GetOptionValue(argv, 'O')
opt = ['-g', '-G']
if (len(opt_option) > 0 and opt_option[0] != 'd'):
opt = ['-O2']
include_options, argv = GetOptionValue(argv, 'I')
includes = ["-I " + include for include in include_options]
defines, argv = GetOptionValue(argv, 'D')
defines = ['-D' + define for define in defines]
undefines, argv = GetOptionValue(argv, 'U')
undefines = ['-U' + define for define in undefines]
# The rest of the unrecognized options should be passed to the host compiler
host_compiler_options = [option for option in argv if option not in (src_files + out_file)]
m_options = ["-m64"]
nvccopts = ['-D_FORCE_INLINES']
for capability in supported_cuda_compute_capabilities:
capability = capability.replace('.', '')
nvccopts += [r'-gencode=arch=compute_%s,"code=sm_%s,compute_%s"' % (
capability, capability, capability)]
nvccopts += nvcc_compiler_options
nvccopts += undefines
nvccopts += defines
nvccopts += m_options
nvccopts += ['--compiler-options="' + " ".join(host_compiler_options) + '"']
nvccopts += ['-x', 'cu'] + opt + includes + out + ['-c'] + src_files
# If we don't specify --keep-dir, nvcc will generate intermediate files under TEMP
# Put them under NVCC_TEMP_DIR instead, then Bazel can ignore files under NVCC_TEMP_DIR during dependency check
# http://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-guiding-compiler-driver
# Different actions are sharing NVCC_TEMP_DIR, so we cannot remove it if the directory already exists.
if os.path.isfile(NVCC_TEMP_DIR):
os.remove(NVCC_TEMP_DIR)
if not os.path.exists(NVCC_TEMP_DIR):
os.makedirs(NVCC_TEMP_DIR)
nvccopts += ['--keep', '--keep-dir', NVCC_TEMP_DIR]
cmd = [NVCC_PATH] + nvccopts
if log:
Log(cmd)
proc = subprocess.Popen(cmd,
stdout=sys.stdout,
stderr=sys.stderr,
env=os.environ.copy(),
shell=True)
proc.wait()
return proc.returncode
def main():
parser = ArgumentParser()
parser.add_argument('-x', nargs=1)
parser.add_argument('--cuda_log', action='store_true')
args, leftover = parser.parse_known_args(sys.argv[1:])
if args.x and args.x[0] == 'cuda':
if args.cuda_log: Log('-x cuda')
leftover = [pipes.quote(s) for s in leftover]
if args.cuda_log: Log('using nvcc')
return InvokeNvcc(leftover, log=args.cuda_log)
# Strip our flags before passing through to the CPU compiler for files which
# are not -x cuda. We can't just pass 'leftover' because it also strips -x.
# We not only want to pass -x to the CPU compiler, but also keep it in its
# relative location in the argv list (the compiler is actually sensitive to
# this).
cpu_compiler_flags = [flag for flag in sys.argv[1:]
if not flag.startswith(('--cuda_log'))
and not flag.startswith(('-nvcc_options'))]
return subprocess.call([CPU_COMPILER] + cpu_compiler_flags)
if __name__ == '__main__':
sys.exit(main())
|
tensorflow-r1.15.5-nv23.03
|
third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py
|
#!/usr/bin/env python
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Crosstool wrapper for compiling CUDA programs with nvcc on Windows.
DESCRIPTION:
This script is the Windows version of //third_party/gpus/crosstool/crosstool_wrapper_is_not_gcc
"""
from __future__ import print_function
from argparse import ArgumentParser
import os
import subprocess
import re
import sys
import pipes
# Template values set by cuda_autoconf.
CPU_COMPILER = ('/usr/bin/gcc')
GCC_HOST_COMPILER_PATH = ('/usr/bin/gcc')
NVCC_PATH = '/usr/local/cuda-10.0/bin/nvcc'
NVCC_VERSION = '10.0'
NVCC_TEMP_DIR = "C:\\Windows\\Temp\\nvcc_inter_files_tmp_dir"
supported_cuda_compute_capabilities = [ "3.0", "6.0" ]
def Log(s):
print('gpus/crosstool: {0}'.format(s))
def GetOptionValue(argv, option):
"""Extract the list of values for option from options.
Args:
option: The option whose value to extract, without the leading '/'.
Returns:
1. A list of values, either directly following the option,
(eg., /opt val1 val2) or values collected from multiple occurrences of
the option (eg., /opt val1 /opt val2).
2. The leftover options.
"""
parser = ArgumentParser(prefix_chars='/')
parser.add_argument('/' + option, nargs='*', action='append')
args, leftover = parser.parse_known_args(argv)
if args and vars(args)[option]:
return (sum(vars(args)[option], []), leftover)
return ([], leftover)
def _update_options(nvcc_options):
if NVCC_VERSION in ("7.0",):
return nvcc_options
update_options = { "relaxed-constexpr" : "expt-relaxed-constexpr" }
return [ update_options[opt] if opt in update_options else opt
for opt in nvcc_options ]
def GetNvccOptions(argv):
"""Collect the -nvcc_options values from argv.
Args:
argv: A list of strings, possibly the argv passed to main().
Returns:
1. The list of options that can be passed directly to nvcc.
2. The leftover options.
"""
parser = ArgumentParser()
parser.add_argument('-nvcc_options', nargs='*', action='append')
args, leftover = parser.parse_known_args(argv)
if args.nvcc_options:
options = _update_options(sum(args.nvcc_options, []))
return (['--' + a for a in options], leftover)
return ([], leftover)
def InvokeNvcc(argv, log=False):
"""Call nvcc with arguments assembled from argv.
Args:
argv: A list of strings, possibly the argv passed to main().
log: True if logging is requested.
Returns:
The return code from invoking nvcc via subprocess.
"""
src_files = [f for f in argv if
re.search(r'\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
if len(src_files) == 0:
raise RuntimeError('No source files found for cuda compilation.')
out_file = [ f for f in argv if f.startswith('/Fo') ]
if len(out_file) != 1:
raise RuntimeError('Please specify exactly one output file for cuda compilation.')
out = ['-o', out_file[0][len('/Fo'):]]
nvcc_compiler_options, argv = GetNvccOptions(argv)
opt_option, argv = GetOptionValue(argv, 'O')
opt = ['-g', '-G']
if (len(opt_option) > 0 and opt_option[0] != 'd'):
opt = ['-O2']
include_options, argv = GetOptionValue(argv, 'I')
includes = ["-I " + include for include in include_options]
defines, argv = GetOptionValue(argv, 'D')
defines = ['-D' + define for define in defines]
undefines, argv = GetOptionValue(argv, 'U')
undefines = ['-U' + define for define in undefines]
# The rest of the unrecognized options should be passed to the host compiler
host_compiler_options = [option for option in argv if option not in (src_files + out_file)]
m_options = ["-m64"]
nvccopts = ['-D_FORCE_INLINES']
for capability in supported_cuda_compute_capabilities:
capability = capability.replace('.', '')
nvccopts += [r'-gencode=arch=compute_%s,"code=sm_%s,compute_%s"' % (
capability, capability, capability)]
nvccopts += nvcc_compiler_options
nvccopts += undefines
nvccopts += defines
nvccopts += m_options
nvccopts += ['--compiler-options="' + " ".join(host_compiler_options) + '"']
nvccopts += ['-x', 'cu'] + opt + includes + out + ['-c'] + src_files
# If we don't specify --keep-dir, nvcc will generate intermediate files under TEMP
# Put them under NVCC_TEMP_DIR instead, then Bazel can ignore files under NVCC_TEMP_DIR during dependency check
# http://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-guiding-compiler-driver
# Different actions are sharing NVCC_TEMP_DIR, so we cannot remove it if the directory already exists.
if os.path.isfile(NVCC_TEMP_DIR):
os.remove(NVCC_TEMP_DIR)
if not os.path.exists(NVCC_TEMP_DIR):
os.makedirs(NVCC_TEMP_DIR)
nvccopts += ['--keep', '--keep-dir', NVCC_TEMP_DIR]
cmd = [NVCC_PATH] + nvccopts
if log:
Log(cmd)
proc = subprocess.Popen(cmd,
stdout=sys.stdout,
stderr=sys.stderr,
env=os.environ.copy(),
shell=True)
proc.wait()
return proc.returncode
def main():
parser = ArgumentParser()
parser.add_argument('-x', nargs=1)
parser.add_argument('--cuda_log', action='store_true')
args, leftover = parser.parse_known_args(sys.argv[1:])
if args.x and args.x[0] == 'cuda':
if args.cuda_log: Log('-x cuda')
leftover = [pipes.quote(s) for s in leftover]
if args.cuda_log: Log('using nvcc')
return InvokeNvcc(leftover, log=args.cuda_log)
# Strip our flags before passing through to the CPU compiler for files which
# are not -x cuda. We can't just pass 'leftover' because it also strips -x.
# We not only want to pass -x to the CPU compiler, but also keep it in its
# relative location in the argv list (the compiler is actually sensitive to
# this).
cpu_compiler_flags = [flag for flag in sys.argv[1:]
if not flag.startswith(('--cuda_log'))
and not flag.startswith(('-nvcc_options'))]
return subprocess.call([CPU_COMPILER] + cpu_compiler_flags)
if __name__ == '__main__':
sys.exit(main())
|
tensorflow-r1.15.5-nv23.03
|
third_party/toolchains/preconfig/ubuntu14.04/gcc-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py
|
#!/usr/bin/env python
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Crosstool wrapper for compiling CUDA programs with nvcc on Windows.
DESCRIPTION:
This script is the Windows version of
//third_party/gpus/crosstool/crosstool_wrapper_is_not_gcc
"""
from __future__ import print_function
from argparse import ArgumentParser
import os
import subprocess
import re
import sys
import pipes
# Template values set by cuda_autoconf.
CPU_COMPILER = ('/opt/rh/devtoolset-7/root/usr/bin/gcc')
GCC_HOST_COMPILER_PATH = ('/opt/rh/devtoolset-7/root/usr/bin/gcc')
NVCC_PATH = '/usr/local/cuda-10.1/bin/nvcc'
NVCC_VERSION = '10.1'
NVCC_TEMP_DIR = 'C:\\Windows\\Temp\\nvcc_inter_files_tmp_dir'
supported_cuda_compute_capabilities = ['3.0', '6.0']
def Log(s):
print('gpus/crosstool: {0}'.format(s))
def GetOptionValue(argv, option):
"""Extract the list of values for option from options.
Args:
option: The option whose value to extract, without the leading '/'.
Returns:
1. A list of values, either directly following the option,
(eg., /opt val1 val2) or values collected from multiple occurrences of
the option (eg., /opt val1 /opt val2).
2. The leftover options.
"""
parser = ArgumentParser(prefix_chars='/')
parser.add_argument('/' + option, nargs='*', action='append')
args, leftover = parser.parse_known_args(argv)
if args and vars(args)[option]:
return (sum(vars(args)[option], []), leftover)
return ([], leftover)
def _update_options(nvcc_options):
if NVCC_VERSION in ('7.0',):
return nvcc_options
update_options = {'relaxed-constexpr': 'expt-relaxed-constexpr'}
return [
update_options[opt] if opt in update_options else opt
for opt in nvcc_options
]
def GetNvccOptions(argv):
"""Collect the -nvcc_options values from argv.
Args:
argv: A list of strings, possibly the argv passed to main().
Returns:
1. The list of options that can be passed directly to nvcc.
2. The leftover options.
"""
parser = ArgumentParser()
parser.add_argument('-nvcc_options', nargs='*', action='append')
args, leftover = parser.parse_known_args(argv)
if args.nvcc_options:
options = _update_options(sum(args.nvcc_options, []))
return (['--' + a for a in options], leftover)
return ([], leftover)
def InvokeNvcc(argv, log=False):
"""Call nvcc with arguments assembled from argv.
Args:
argv: A list of strings, possibly the argv passed to main().
log: True if logging is requested.
Returns:
The return code from invoking nvcc via subprocess.
"""
src_files = [f for f in argv if re.search(r'\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
if len(src_files) == 0:
raise RuntimeError('No source files found for cuda compilation.')
out_file = [f for f in argv if f.startswith('/Fo')]
if len(out_file) != 1:
raise RuntimeError('Please specify exactly one output file for cuda compilation.')
out = ['-o', out_file[0][len('/Fo'):]]
nvcc_compiler_options, argv = GetNvccOptions(argv)
opt_option, argv = GetOptionValue(argv, 'O')
opt = ['-g', '-G']
if (len(opt_option) > 0 and opt_option[0] != 'd'):
opt = ['-O2']
include_options, argv = GetOptionValue(argv, 'I')
includes = ['-I ' + include for include in include_options]
defines, argv = GetOptionValue(argv, 'D')
defines = ['-D' + define for define in defines]
undefines, argv = GetOptionValue(argv, 'U')
undefines = ['-U' + define for define in undefines]
# The rest of the unrecognized options should be passed to the host compiler
host_compiler_options = [
option for option in argv if option not in (src_files + out_file)
]
m_options = ['-m64']
nvccopts = ['-D_FORCE_INLINES']
for capability in supported_cuda_compute_capabilities:
capability = capability.replace('.', '')
nvccopts += [
r'-gencode=arch=compute_%s,"code=sm_%s,compute_%s"' %
(capability, capability, capability)
]
nvccopts += nvcc_compiler_options
nvccopts += undefines
nvccopts += defines
nvccopts += m_options
nvccopts += ['--compiler-options="' + ' '.join(host_compiler_options) + '"']
nvccopts += ['-x', 'cu'] + opt + includes + out + ['-c'] + src_files
# If we don't specify --keep-dir, nvcc will generate intermediate files under TEMP
# Put them under NVCC_TEMP_DIR instead, then Bazel can ignore files under NVCC_TEMP_DIR during dependency check
# http://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-guiding-compiler-driver
# Different actions are sharing NVCC_TEMP_DIR, so we cannot remove it if the directory already exists.
if os.path.isfile(NVCC_TEMP_DIR):
os.remove(NVCC_TEMP_DIR)
if not os.path.exists(NVCC_TEMP_DIR):
os.makedirs(NVCC_TEMP_DIR)
nvccopts += ['--keep', '--keep-dir', NVCC_TEMP_DIR]
cmd = [NVCC_PATH] + nvccopts
if log:
Log(cmd)
proc = subprocess.Popen(
cmd,
stdout=sys.stdout,
stderr=sys.stderr,
env=os.environ.copy(),
shell=True)
proc.wait()
return proc.returncode
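# --- Hedged recap added for illustration (derived from InvokeNvcc above, not
# --- part of the original wrapper).  The MSVC-style action is translated to
# --- an nvcc command line roughly as:
#   /Fo<obj>          -> -o <obj>
#   /I <dir>          -> -I <dir>
#   /D <macro>        -> -D<macro>
#   /U <macro>        -> -U<macro>
#   /O <level>        -> -O2 (or the default -g -G when the level is 'd')
#   -nvcc_options x   -> --x (after _update_options renaming)
#   everything else   -> forwarded to the host compiler via --compiler-options="..."
# plus '-x cu', one -gencode flag per supported compute capability, and
# --keep/--keep-dir so intermediate files land under NVCC_TEMP_DIR.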
def main():
parser = ArgumentParser()
parser.add_argument('-x', nargs=1)
parser.add_argument('--cuda_log', action='store_true')
args, leftover = parser.parse_known_args(sys.argv[1:])
if args.x and args.x[0] == 'cuda':
if args.cuda_log:
Log('-x cuda')
leftover = [pipes.quote(s) for s in leftover]
if args.cuda_log:
Log('using nvcc')
return InvokeNvcc(leftover, log=args.cuda_log)
# Strip our flags before passing through to the CPU compiler for files which
# are not -x cuda. We can't just pass 'leftover' because it also strips -x.
# We not only want to pass -x to the CPU compiler, but also keep it in its
# relative location in the argv list (the compiler is actually sensitive to
# this).
cpu_compiler_flags = [
flag for flag in sys.argv[1:] if not flag.startswith(('--cuda_log')) and
not flag.startswith(('-nvcc_options'))
]
return subprocess.call([CPU_COMPILER] + cpu_compiler_flags)
if __name__ == '__main__':
sys.exit(main())
|
tensorflow-r1.15.5-nv23.03
|
third_party/toolchains/preconfig/centos6/gcc7-nvcc-cuda10.1/windows/msvc_wrapper_for_nvcc.py
|
#!/usr/bin/env python
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Crosstool wrapper for compiling CUDA programs with nvcc on Windows.
DESCRIPTION:
This script is the Windows version of //third_party/gpus/crosstool/crosstool_wrapper_is_not_gcc
"""
from __future__ import print_function
from argparse import ArgumentParser
import os
import subprocess
import re
import sys
import pipes
# Template values set by cuda_autoconf.
CPU_COMPILER = ('/opt/rh/devtoolset-7/root/usr/bin/gcc')
GCC_HOST_COMPILER_PATH = ('/opt/rh/devtoolset-7/root/usr/bin/gcc')
NVCC_PATH = '/usr/local/cuda-10.0/bin/nvcc'
NVCC_VERSION = '10.0'
NVCC_TEMP_DIR = "C:\\Windows\\Temp\\nvcc_inter_files_tmp_dir"
supported_cuda_compute_capabilities = [ "3.0", "6.0" ]
def Log(s):
print('gpus/crosstool: {0}'.format(s))
def GetOptionValue(argv, option):
"""Extract the list of values for option from options.
Args:
option: The option whose value to extract, without the leading '/'.
Returns:
1. A list of values, either directly following the option,
(eg., /opt val1 val2) or values collected from multiple occurrences of
the option (eg., /opt val1 /opt val2).
2. The leftover options.
"""
parser = ArgumentParser(prefix_chars='/')
parser.add_argument('/' + option, nargs='*', action='append')
args, leftover = parser.parse_known_args(argv)
if args and vars(args)[option]:
return (sum(vars(args)[option], []), leftover)
return ([], leftover)
def _update_options(nvcc_options):
if NVCC_VERSION in ("7.0",):
return nvcc_options
update_options = { "relaxed-constexpr" : "expt-relaxed-constexpr" }
return [ update_options[opt] if opt in update_options else opt
for opt in nvcc_options ]
def GetNvccOptions(argv):
"""Collect the -nvcc_options values from argv.
Args:
argv: A list of strings, possibly the argv passed to main().
Returns:
1. A list of flags that can be passed directly to nvcc.
2. The leftover options.
"""
parser = ArgumentParser()
parser.add_argument('-nvcc_options', nargs='*', action='append')
args, leftover = parser.parse_known_args(argv)
if args.nvcc_options:
options = _update_options(sum(args.nvcc_options, []))
return (['--' + a for a in options], leftover)
return ([], leftover)
def InvokeNvcc(argv, log=False):
"""Call nvcc with arguments assembled from argv.
Args:
argv: A list of strings, possibly the argv passed to main().
log: True if logging is requested.
Returns:
The return code of the nvcc subprocess launched with the assembled arguments.
"""
src_files = [f for f in argv if
re.search(r'\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
if len(src_files) == 0:
raise RuntimeError('No source files found for cuda compilation.')
out_file = [ f for f in argv if f.startswith('/Fo') ]
if len(out_file) != 1:
raise RuntimeError('Please specify exactly one output file for cuda compilation.')
out = ['-o', out_file[0][len('/Fo'):]]
nvcc_compiler_options, argv = GetNvccOptions(argv)
opt_option, argv = GetOptionValue(argv, 'O')
opt = ['-g', '-G']
if (len(opt_option) > 0 and opt_option[0] != 'd'):
opt = ['-O2']
include_options, argv = GetOptionValue(argv, 'I')
includes = ["-I " + include for include in include_options]
defines, argv = GetOptionValue(argv, 'D')
defines = ['-D' + define for define in defines]
undefines, argv = GetOptionValue(argv, 'U')
undefines = ['-U' + define for define in undefines]
# The rest of the unrecognized options should be passed to the host compiler
host_compiler_options = [option for option in argv if option not in (src_files + out_file)]
m_options = ["-m64"]
nvccopts = ['-D_FORCE_INLINES']
for capability in supported_cuda_compute_capabilities:
capability = capability.replace('.', '')
nvccopts += [r'-gencode=arch=compute_%s,"code=sm_%s,compute_%s"' % (
capability, capability, capability)]
nvccopts += nvcc_compiler_options
nvccopts += undefines
nvccopts += defines
nvccopts += m_options
nvccopts += ['--compiler-options="' + " ".join(host_compiler_options) + '"']
nvccopts += ['-x', 'cu'] + opt + includes + out + ['-c'] + src_files
# If we don't specify --keep-dir, nvcc will generate intermediate files under TEMP
# Put them under NVCC_TEMP_DIR instead, then Bazel can ignore files under NVCC_TEMP_DIR during dependency check
# http://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-guiding-compiler-driver
# Different actions are sharing NVCC_TEMP_DIR, so we cannot remove it if the directory already exists.
if os.path.isfile(NVCC_TEMP_DIR):
os.remove(NVCC_TEMP_DIR)
if not os.path.exists(NVCC_TEMP_DIR):
os.makedirs(NVCC_TEMP_DIR)
nvccopts += ['--keep', '--keep-dir', NVCC_TEMP_DIR]
cmd = [NVCC_PATH] + nvccopts
if log:
Log(cmd)
proc = subprocess.Popen(cmd,
stdout=sys.stdout,
stderr=sys.stderr,
env=os.environ.copy(),
shell=True)
proc.wait()
return proc.returncode
def main():
parser = ArgumentParser()
parser.add_argument('-x', nargs=1)
parser.add_argument('--cuda_log', action='store_true')
args, leftover = parser.parse_known_args(sys.argv[1:])
if args.x and args.x[0] == 'cuda':
if args.cuda_log: Log('-x cuda')
leftover = [pipes.quote(s) for s in leftover]
if args.cuda_log: Log('using nvcc')
return InvokeNvcc(leftover, log=args.cuda_log)
# Strip our flags before passing through to the CPU compiler for files which
# are not -x cuda. We can't just pass 'leftover' because it also strips -x.
# We not only want to pass -x to the CPU compiler, but also keep it in its
# relative location in the argv list (the compiler is actually sensitive to
# this).
cpu_compiler_flags = [flag for flag in sys.argv[1:]
if not flag.startswith(('--cuda_log'))
and not flag.startswith(('-nvcc_options'))]
return subprocess.call([CPU_COMPILER] + cpu_compiler_flags)
if __name__ == '__main__':
sys.exit(main())
|
tensorflow-r1.15.5-nv23.03
|
third_party/toolchains/preconfig/centos6/gcc7-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bring in all of the public TensorFlow interface into this module."""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import os as _os
import sys as _sys
from tensorflow.python.tools import module_util as _module_util
# pylint: disable=g-bad-import-order
# API IMPORTS PLACEHOLDER
# WRAPPER_PLACEHOLDER
# Hook external TensorFlow modules.
_current_module = _sys.modules[__name__]
try:
from tensorflow_estimator.python.estimator.api._v1 import estimator
_current_module.__path__ = (
[_module_util.get_parent_dir(estimator)] + _current_module.__path__)
setattr(_current_module, "estimator", estimator)
except ImportError:
pass
try:
from tensorflow.python.keras.api._v1 import keras
_current_module.__path__ = (
[_module_util.get_parent_dir(keras)] + _current_module.__path__)
setattr(_current_module, "keras", keras)
except ImportError:
pass
from tensorflow.python.platform import flags # pylint: disable=g-import-not-at-top
_current_module.app.flags = flags # pylint: disable=undefined-variable
setattr(_current_module, "flags", flags)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compat_template_v1.__init__.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Top-level module of TensorFlow. By convention, we refer to this module as
`tf` instead of `tensorflow`, following the common practice of importing
TensorFlow via the command `import tensorflow as tf`.
The primary function of this module is to import all of the public TensorFlow
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
Note that the file `__init__.py` in the TensorFlow source code tree is actually
only a placeholder to enable test cases to run. The TensorFlow build replaces
this file with a file generated from [`api_template.__init__.py`](https://www.github.com/tensorflow/tensorflow/blob/master/tensorflow/api_template.__init__.py)
"""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import distutils as _distutils
import inspect as _inspect
import logging as _logging
import os as _os
import site as _site
import sys as _sys
from tensorflow.python.tools import module_util as _module_util
# API IMPORTS PLACEHOLDER
# WRAPPER_PLACEHOLDER
# Make sure directory containing top level submodules is in
# the __path__ so that "from tensorflow.foo import bar" works.
# We use the generated 'bitwise' API module only as a convenient anchor for
# locating that directory; there is nothing special about bitwise itself.
_API_MODULE = _sys.modules[__name__].bitwise
_tf_api_dir = _os.path.dirname(_os.path.dirname(_API_MODULE.__file__))
_current_module = _sys.modules[__name__]
if not hasattr(_current_module, '__path__'):
__path__ = [_tf_api_dir]
elif _tf_api_dir not in __path__:
__path__.append(_tf_api_dir)
# Hook external TensorFlow modules.
# Import compat before trying to import summary from tensorboard, so that
# reexport_tf_summary can get compat from sys.modules
_current_module.compat.v2.compat.v1 = _current_module.compat.v1
try:
from tensorboard.summary._tf import summary
_current_module.__path__ = (
[_module_util.get_parent_dir(summary)] + _current_module.__path__)
setattr(_current_module, "summary", summary)
except ImportError:
_logging.warning(
"Limited tf.summary API due to missing TensorBoard installation.")
try:
from tensorflow_estimator.python.estimator.api._v2 import estimator
_current_module.__path__ = (
[_module_util.get_parent_dir(estimator)] + _current_module.__path__)
setattr(_current_module, "estimator", estimator)
except ImportError:
pass
try:
from tensorflow.python.keras.api._v2 import keras
_current_module.__path__ = (
[_module_util.get_parent_dir(keras)] + _current_module.__path__)
setattr(_current_module, "keras", keras)
except ImportError:
pass
# Enable TF2 behaviors
from tensorflow.python.compat import v2_compat as _compat # pylint: disable=g-import-not-at-top
_compat.enable_v2_behavior()
# Load all plugin libraries from site-packages/tensorflow-plugins if we are
# running under pip.
# TODO(gunan): Enable setting an environment variable to define arbitrary plugin
# directories.
# TODO(gunan): Find a better location for this code snippet.
from tensorflow.python.framework import load_library as _ll
from tensorflow.python.lib.io import file_io as _fi
# Get sitepackages directories for the python installation.
_site_packages_dirs = []
_site_packages_dirs += [_site.USER_SITE]
_site_packages_dirs += [_p for _p in _sys.path if 'site-packages' in _p]
if 'getsitepackages' in dir(_site):
_site_packages_dirs += _site.getsitepackages()
if 'sysconfig' in dir(_distutils):
_site_packages_dirs += [_distutils.sysconfig.get_python_lib()]
_site_packages_dirs = list(set(_site_packages_dirs))
# Find the location of this exact file.
_current_file_location = _inspect.getfile(_inspect.currentframe())
def _running_from_pip_package():
return any(
_current_file_location.startswith(dir_) for dir_ in _site_packages_dirs)
if _running_from_pip_package():
for s in _site_packages_dirs:
# TODO(gunan): Add sanity checks to loaded modules here.
plugin_dir = _os.path.join(s, 'tensorflow-plugins')
if _fi.file_exists(plugin_dir):
_ll.load_library(plugin_dir)
# These symbols appear because we import the python package which
# in turn imports from tensorflow.core and tensorflow.python. They
# must come from this module. So python adds these symbols for the
# resolution to succeed.
# pylint: disable=undefined-variable
try:
del python
except NameError:
pass
try:
del core
except NameError:
pass
try:
del compiler
except NameError:
pass
# pylint: enable=undefined-variable
# Add module aliases
if hasattr(_current_module, 'keras'):
losses = keras.losses
metrics = keras.metrics
optimizers = keras.optimizers
initializers = keras.initializers
setattr(_current_module, "losses", losses)
setattr(_current_module, "metrics", metrics)
setattr(_current_module, "optimizers", optimizers)
setattr(_current_module, "initializers", initializers)
# pylint: enable=undefined-variable
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/api_template.__init__.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bring in all of the public TensorFlow interface into this module."""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import logging as _logging
import os as _os
import sys as _sys
from tensorflow.python.tools import module_util as _module_util
# pylint: disable=g-bad-import-order
# API IMPORTS PLACEHOLDER
# WRAPPER_PLACEHOLDER
# Hook external TensorFlow modules.
_current_module = _sys.modules[__name__]
try:
from tensorboard.summary._tf import summary
_current_module.__path__ = (
[_module_util.get_parent_dir(summary)] + _current_module.__path__)
# Make sure we get the correct summary module with lazy loading
setattr(_current_module, "summary", summary)
except ImportError:
_logging.warning(
"Limited tf.compat.v2.summary API due to missing TensorBoard "
"installation.")
try:
from tensorflow_estimator.python.estimator.api._v2 import estimator
_current_module.__path__ = (
[_module_util.get_parent_dir(estimator)] + _current_module.__path__)
setattr(_current_module, "estimator", estimator)
except ImportError:
pass
try:
from tensorflow.python.keras.api._v2 import keras
_current_module.__path__ = (
[_module_util.get_parent_dir(keras)] + _current_module.__path__)
setattr(_current_module, "keras", keras)
except ImportError:
pass
# We would like the following to work for fully enabling 2.0 in a 1.0 install:
#
# import tensorflow.compat.v2 as tf
# tf.enable_v2_behavior()
#
# This makes this one symbol available directly.
from tensorflow.python.compat.v2_compat import enable_v2_behavior # pylint: disable=g-import-not-at-top
setattr(_current_module, "enable_v2_behavior", enable_v2_behavior)
# Add module aliases
if hasattr(_current_module, 'keras'):
losses = keras.losses
metrics = keras.metrics
optimizers = keras.optimizers
initializers = keras.initializers
setattr(_current_module, "losses", losses)
setattr(_current_module, "metrics", metrics)
setattr(_current_module, "optimizers", optimizers)
setattr(_current_module, "initializers", initializers)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/compat_template.__init__.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Bring in all of the public TensorFlow interface into this
# module.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-bad-import-order
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
from tensorflow.python.util.lazy_loader import LazyLoader
contrib = LazyLoader('contrib', globals(), 'tensorflow.contrib')
del LazyLoader
from tensorflow.python.platform import flags # pylint: disable=g-import-not-at-top
from tensorflow.python.platform import app # pylint: disable=g-import-not-at-top
app.flags = flags
del absolute_import
del division
del print_function
# These symbols appear because we import the python package which
# in turn imports from tensorflow.core and tensorflow.python. They
# must come from this module. So python adds these symbols for the
# resolution to succeed.
# pylint: disable=undefined-variable
del python
del core
# pylint: enable=undefined-variable
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/__init__.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# LINT.IfChange
"""TensorFlow root package"""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import sys as _sys
import importlib as _importlib
import types as _types
# Since TensorFlow Python code now resides in tensorflow_core but TensorFlow
# ecosystem code (e.g. estimator, but also even tensorflow) imports tensorflow,
# we need to do forwarding between the two. To do so, we use a lazy loader to
# load and forward the top level modules. We cannot use the LazyLoader defined
# by tensorflow at tensorflow/python/util/lazy_loader.py as to use that we would
# already need to import tensorflow. Hence, we define it inline.
class _LazyLoader(_types.ModuleType):
"""Lazily import a module so that we can forward it."""
# The lint error here is incorrect.
def __init__(self, local_name, parent_module_globals, name): # pylint: disable=super-on-old-class
self._local_name = local_name
self._parent_module_globals = parent_module_globals
super(_LazyLoader, self).__init__(name)
def _load(self):
"""Import the target module and insert it into the parent's namespace."""
module = _importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
def __dir__(self):
module = self._load()
return dir(module)
def __reduce__(self):
return __import__, (self.__name__,)
# Forwarding a module is as simple as lazy loading the module from the new path
# and then registering it to sys.modules using the old path
def _forward_module(old_name):
parts = old_name.split(".")
parts[0] = parts[0] + "_core"
local_name = parts[-1]
existing_name = ".".join(parts)
_module = _LazyLoader(local_name, globals(), existing_name)
return _sys.modules.setdefault(old_name, _module)
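# --- Hedged illustration added by the editor (not part of the original
# --- template); defined but never called, so it has no import-time effect.
def _example_forward_python_module():
  # Registers (or returns the already-registered) proxy module for the old
  # path "tensorflow.python"; the first attribute access on the proxy imports
  # "tensorflow_core.python" and caches it in this module's globals.
  return _forward_module("tensorflow.python")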
# This list should contain all modules _immediately_ under tensorflow
_top_level_modules = [
"tensorflow._api",
"tensorflow.python",
"tensorflow.tools",
"tensorflow.core",
"tensorflow.compiler",
"tensorflow.keras",
"tensorflow.compat",
"tensorflow.summary", # tensorboard
"tensorflow.examples",
]
# Estimator needs to be handled separately so that both
# `import tensorflow_estimator` and `import tensorflow.estimator` keep working.
# Only in the second case do we actually need to do forwarding; the first case
# already defines most of the hierarchy, and eagerly forwarding would result in
# an import loop.
if "tensorflow_estimator" not in _sys.modules:
_root_estimator = False
_top_level_modules.append("tensorflow.estimator")
else:
_root_estimator = True
# Lazy-load all of the _top_level_modules; we don't need their names anymore.
for _m in _top_level_modules:
_forward_module(_m)
# We still need all the names that are toplevel on tensorflow_core
from tensorflow_core import *
# These should not be visible in the main tf module.
try:
del core
except NameError:
pass
try:
del python
except NameError:
pass
try:
del compiler
except NameError:
pass
try:
del tools
except NameError:
pass
try:
del examples
except NameError:
pass
# LINT.ThenChange(//tensorflow/virtual_root_template_v1.__init__.py.oss)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/virtual_root_template_v2.__init__.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bring in all of the public TensorFlow interface into this module."""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import distutils as _distutils
import inspect as _inspect
import os as _os
import site as _site
import sys as _sys
# pylint: disable=g-bad-import-order
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
from tensorflow.python.tools import module_util as _module_util
from tensorflow.python.platform import tf_logging as _logging
# API IMPORTS PLACEHOLDER
# WRAPPER_PLACEHOLDER
if "dev" in __version__: # pylint: disable=undefined-variable
_logging.warning("""
TensorFlow's `tf-nightly` package will soon be updated to TensorFlow 2.0.
Please upgrade your code to TensorFlow 2.0:
* https://www.tensorflow.org/beta/guide/migration_guide
Or install the latest stable TensorFlow 1.X release:
* `pip install -U "tensorflow==1.*"`
Otherwise your code may be broken by the change.
""")
# Make sure directory containing top level submodules is in
# the __path__ so that "from tensorflow.foo import bar" works.
# We use the generated 'bitwise' API module only as a convenient anchor for
# locating that directory; there is nothing special about bitwise itself.
_API_MODULE = _sys.modules[__name__].bitwise # pylint: disable=undefined-variable
_current_module = _sys.modules[__name__]
_tf_api_dir = _os.path.dirname(_os.path.dirname(_API_MODULE.__file__))
if not hasattr(_current_module, '__path__'):
__path__ = [_tf_api_dir]
elif _tf_api_dir not in __path__:
__path__.append(_tf_api_dir)
# Hook external TensorFlow modules.
try:
from tensorflow_estimator.python.estimator.api._v1 import estimator
_current_module.__path__ = (
[_module_util.get_parent_dir(estimator)] + _current_module.__path__)
setattr(_current_module, "estimator", estimator)
except ImportError:
pass
try:
from tensorflow.python.keras.api._v1 import keras
_current_module.__path__ = (
[_module_util.get_parent_dir(keras)] + _current_module.__path__)
setattr(_current_module, "keras", keras)
except ImportError:
pass
from tensorflow.python.util.lazy_loader import LazyLoader # pylint: disable=g-import-not-at-top
_CONTRIB_WARNING = """
The TensorFlow contrib module will not be included in TensorFlow 2.0.
For more information, please see:
* https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md
* https://github.com/tensorflow/addons
* https://github.com/tensorflow/io (for I/O related ops)
If you depend on functionality not listed there, please file an issue.
"""
contrib = LazyLoader('contrib', globals(), 'tensorflow.contrib',
_CONTRIB_WARNING)
del LazyLoader
# The templated code that replaces the placeholder above sometimes
# sets the __all__ variable. If it does, we have to be sure to add
# "contrib".
if '__all__' in vars():
vars()['__all__'].append('contrib')
from tensorflow.python.platform import flags # pylint: disable=g-import-not-at-top
# The 'app' module will be imported as part of the placeholder section above.
_current_module.app.flags = flags # pylint: disable=undefined-variable
setattr(_current_module, "flags", flags)
# Load all plugin libraries from site-packages/tensorflow-plugins if we are
# running under pip.
# TODO(gunan): Enable setting an environment variable to define arbitrary plugin
# directories.
# TODO(gunan): Find a better location for this code snippet.
from tensorflow.python.framework import load_library as _ll
from tensorflow.python.lib.io import file_io as _fi
# Get sitepackages directories for the python installation.
_site_packages_dirs = []
_site_packages_dirs += [_site.USER_SITE]
_site_packages_dirs += [_p for _p in _sys.path if 'site-packages' in _p]
if 'getsitepackages' in dir(_site):
_site_packages_dirs += _site.getsitepackages()
if 'sysconfig' in dir(_distutils):
_site_packages_dirs += [_distutils.sysconfig.get_python_lib()]
_site_packages_dirs = list(set(_site_packages_dirs))
# Find the location of this exact file.
_current_file_location = _inspect.getfile(_inspect.currentframe())
def _running_from_pip_package():
return any(
_current_file_location.startswith(dir_) for dir_ in _site_packages_dirs)
if _running_from_pip_package():
for s in _site_packages_dirs:
# TODO(gunan): Add sanity checks to loaded modules here.
plugin_dir = _os.path.join(s, 'tensorflow-plugins')
if _fi.file_exists(plugin_dir):
_ll.load_library(plugin_dir)
# These symbols appear because we import the python package which
# in turn imports from tensorflow.core and tensorflow.python. They
# must come from this module. So python adds these symbols for the
# resolution to succeed.
# pylint: disable=undefined-variable
try:
del python
except NameError:
pass
try:
del core
except NameError:
pass
try:
del compiler
except NameError:
pass
_current_module.compat.v2.compat.v1 = _current_module.compat.v1
# pylint: enable=undefined-variable
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/api_template_v1.__init__.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# LINT.IfChange
"""TensorFlow root package"""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import sys as _sys
import importlib as _importlib
import types as _types
# Since TensorFlow Python code now resides in tensorflow_core but TensorFlow
# ecosystem code (e.g. estimator, but also even tensorflow) imports tensorflow,
# we need to do forwarding between the two. To do so, we use a lazy loader to
# load and forward the top level modules. We cannot use the LazyLoader defined
# by tensorflow at tensorflow/python/util/lazy_loader.py as to use that we would
# already need to import tensorflow. Hence, we define it inline.
class _LazyLoader(_types.ModuleType):
"""Lazily import a module so that we can forward it."""
# The lint error here is incorrect.
def __init__(self, local_name, parent_module_globals, name): # pylint: disable=super-on-old-class
self._local_name = local_name
self._parent_module_globals = parent_module_globals
super(_LazyLoader, self).__init__(name)
def _load(self):
"""Import the target module and insert it into the parent's namespace."""
module = _importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
def __dir__(self):
module = self._load()
return dir(module)
def __reduce__(self):
return __import__, (self.__name__,)
# Forwarding a module is as simple as lazy loading the module from the new path
# and then registering it to sys.modules using the old path
def _forward_module(old_name):
parts = old_name.split(".")
parts[0] = parts[0] + "_core"
local_name = parts[-1]
existing_name = ".".join(parts)
_module = _LazyLoader(local_name, globals(), existing_name)
return _sys.modules.setdefault(old_name, _module)
# This list should contain all modules _immediately_ under tensorflow
_top_level_modules = [
"tensorflow._api",
"tensorflow.python",
"tensorflow.tools",
"tensorflow.core",
"tensorflow.compiler",
"tensorflow.keras",
"tensorflow.contrib",
"tensorflow.compat",
"tensorflow.summary", # tensorboard
"tensorflow.examples",
]
# Estimator needs to be handled separately so that both
# `import tensorflow_estimator` and `import tensorflow.estimator` keep working.
# Only in the second case do we actually need to do forwarding; the first case
# already defines most of the hierarchy, and eagerly forwarding would result in
# an import loop.
if "tensorflow_estimator" not in _sys.modules:
_root_estimator = False
_top_level_modules.append("tensorflow.estimator")
else:
_root_estimator = True
# Lazy-load all of the _top_level_modules; we don't need their names anymore.
for _m in _top_level_modules:
_forward_module(_m)
# We still need all the names that are toplevel on tensorflow_core
from tensorflow_core import *
# In V1 API we need to print deprecation messages
from tensorflow.python.util import deprecation_wrapper as _deprecation
if not isinstance(_sys.modules[__name__], _deprecation.DeprecationWrapper):
_sys.modules[__name__] = _deprecation.DeprecationWrapper(
_sys.modules[__name__], "")
# These should not be visible in the main tf module.
try:
del core
except NameError:
pass
try:
del python
except NameError:
pass
try:
del compiler
except NameError:
pass
try:
del tools
except NameError:
pass
try:
del examples
except NameError:
pass
# LINT.ThenChange(//tensorflow/virtual_root_template_v2.__init__.py.oss)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/virtual_root_template_v1.__init__.py
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/__init__.py
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from 1.x TensorFlow to 2.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import ipynb
from tensorflow.tools.compatibility import tf_upgrade_v2
from tensorflow.tools.compatibility import tf_upgrade_v2_safety
# Make straightforward changes to convert to 2.0. In harder cases,
# use compat.v1.
_DEFAULT_MODE = "DEFAULT"
# Convert to use compat.v1.
_SAFETY_MODE = "SAFETY"
def process_file(in_filename, out_filename, upgrader):
"""Process a file of type `.py` or `.ipynb`."""
if in_filename.endswith(".py"):
files_processed, report_text, errors = \
upgrader.process_file(in_filename, out_filename)
elif in_filename.endswith(".ipynb"):
files_processed, report_text, errors = \
ipynb.process_file(in_filename, out_filename, upgrader)
else:
raise NotImplementedError(
"Currently converter only supports python or ipynb")
return files_processed, report_text, errors
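# --- Hedged usage sketch added for illustration (not part of the original
# --- script); the file names below are hypothetical.
def _example_upgrade_single_file():
  # Mirrors what main() does for --infile/--outfile in the default mode.
  upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
  return process_file("old_model.py", "upgraded_model.py", upgrader)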
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Convert a TensorFlow Python file from 1.x to 2.0
Simple usage:
tf_upgrade_v2.py --infile foo.py --outfile bar.py
tf_upgrade_v2.py --infile foo.ipynb --outfile bar.ipynb
tf_upgrade_v2.py --intree ~/code/old --outtree ~/code/new
""")
parser.add_argument(
"--infile",
dest="input_file",
help="If converting a single file, the name of the file "
"to convert")
parser.add_argument(
"--outfile",
dest="output_file",
help="If converting a single file, the output filename.")
parser.add_argument(
"--intree",
dest="input_tree",
help="If converting a whole tree of files, the directory "
"to read from (relative or absolute).")
parser.add_argument(
"--outtree",
dest="output_tree",
help="If converting a whole tree of files, the output "
"directory (relative or absolute).")
parser.add_argument(
"--copyotherfiles",
dest="copy_other_files",
help=("If converting a whole tree of files, whether to "
"copy the other files."),
type=bool,
default=True)
parser.add_argument(
"--inplace",
dest="in_place",
help=("If converting a set of files, whether to "
"allow the conversion to be performed on the "
"input files."),
action="store_true")
parser.add_argument(
"--reportfile",
dest="report_filename",
help=("The name of the file where the report log is "
"stored."
"(default: %(default)s)"),
default="report.txt")
parser.add_argument(
"--mode",
dest="mode",
choices=[_DEFAULT_MODE, _SAFETY_MODE],
help=("Upgrade script mode. Supported modes:\n"
"%s: Perform only straightforward conversions to upgrade to "
"2.0. In more difficult cases, switch to use compat.v1.\n"
"%s: Keep 1.* code intact and import compat.v1 "
"module. Also disable 2.0 behavior to ensure code "
"that requires 1.X behavior continues to work." %
(_DEFAULT_MODE, _SAFETY_MODE)),
default=_DEFAULT_MODE)
parser.add_argument(
"--print_all",
dest="print_all",
help="Print full log to stdout instead of just printing errors",
action="store_true")
args = parser.parse_args()
if args.mode == _SAFETY_MODE:
change_spec = tf_upgrade_v2_safety.TFAPIChangeSpec()
else:
change_spec = tf_upgrade_v2.TFAPIChangeSpec()
upgrade = ast_edits.ASTCodeUpgrader(change_spec)
report_text = None
report_filename = args.report_filename
files_processed = 0
if args.input_file:
if not args.in_place and not args.output_file:
raise ValueError(
"--outfile=<output file> argument is required when converting a "
"single file.")
if args.in_place and args.output_file:
raise ValueError(
"--outfile argument is invalid when when converting in place")
output_file = args.input_file if args.in_place else args.output_file
files_processed, report_text, errors = process_file(
args.input_file, output_file, upgrade)
errors = {args.input_file: errors}
files_processed = 1
elif args.input_tree:
if not args.in_place and not args.output_tree:
raise ValueError(
"--outtree=<output directory> argument is required when converting a "
"file tree.")
if args.in_place and args.output_tree:
raise ValueError(
"--outtree argument is invalid when when converting in place")
output_tree = args.input_tree if args.in_place else args.output_tree
files_processed, report_text, errors = upgrade.process_tree(
args.input_tree, output_tree, args.copy_other_files)
else:
parser.print_help()
if report_text:
num_errors = 0
report = []
for f in errors:
if errors[f]:
num_errors += len(errors[f])
report.append("-" * 80 + "\n")
report.append("File: %s\n" % f)
report.append("-" * 80 + "\n")
report.append("\n".join(errors[f]) + "\n")
report = ("TensorFlow 2.0 Upgrade Script\n"
"-----------------------------\n"
"Converted %d files\n" % files_processed +
"Detected %d issues that require attention" % num_errors + "\n" +
"-" * 80 + "\n") + "".join(report)
detailed_report_header = "=" * 80 + "\n"
detailed_report_header += "Detailed log follows:\n\n"
detailed_report_header += "=" * 80 + "\n"
with open(report_filename, "w") as report_file:
report_file.write(report)
report_file.write(detailed_report_header)
report_file.write(report_text)
if args.print_all:
print(report)
print(detailed_report_header)
print(report_text)
else:
print(report)
print("\nMake sure to read the detailed log %r\n" % report_filename)
if __name__ == "__main__":
main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/compatibility/tf_upgrade_v2_main.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module deprecation warnings for TensorFlow 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.tools.compatibility import ast_edits
_CONTRIB_WARNING = (
ast_edits.ERROR,
"<function name> cannot be converted automatically. tf.contrib will not"
" be distributed with TensorFlow 2.0, please consider an alternative in"
" non-contrib TensorFlow, a community-maintained repository such as "
"tensorflow/addons, or fork the required code.")
_FLAGS_WARNING = (
ast_edits.ERROR,
"tf.flags has been removed, please use the argparse or absl"
" modules if you need command line parsing."
)
_CONTRIB_CUDNN_RNN_WARNING = (
ast_edits.WARNING,
"(Manual edit required) tf.contrib.cudnn_rnn.* has been deprecated, "
"and the CuDNN kernel has been integrated with "
"tf.keras.layers.LSTM/GRU in TensorFlow 2.0. Please check the new API "
"and use that instead."
)
_CONTRIB_RNN_WARNING = (
ast_edits.WARNING,
"(Manual edit required) tf.contrib.rnn.* has been deprecated, and "
"widely used cells/functions will be moved to tensorflow/addons "
"repository. Please check it there and file Github issues if necessary."
)
_CONTRIB_DIST_STRAT_WARNING = (
ast_edits.WARNING,
"(Manual edit required) tf.contrib.distribute.* have been migrated to "
"tf.distribute.*. Please check out the new module for updated APIs.")
_CONTRIB_SEQ2SEQ_WARNING = (
ast_edits.WARNING,
"(Manual edit required) tf.contrib.seq2seq.* have been migrated to "
"`tfa.seq2seq.*` in TensorFlow Addons. Please see "
"https://github.com/tensorflow/addons for more info.")
MODULE_DEPRECATIONS = {
"tf.contrib": _CONTRIB_WARNING,
"tf.contrib.cudnn_rnn": _CONTRIB_CUDNN_RNN_WARNING,
"tf.contrib.rnn": _CONTRIB_RNN_WARNING,
"tf.flags": _FLAGS_WARNING,
"tf.contrib.distribute": _CONTRIB_DIST_STRAT_WARNING,
"tf.contrib.seq2seq": _CONTRIB_SEQ2SEQ_WARNING
}
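# --- Hedged illustration added by the editor (not part of the original module);
# --- defined but never called, so it has no import-time effect.
def _example_flags_deprecation_lookup():
  # Each entry maps a deprecated module path to a (severity, message) pair;
  # the upgrade tooling is expected to consult this table when it encounters
  # one of these modules in user code.
  severity, message = MODULE_DEPRECATIONS["tf.flags"]
  return severity == ast_edits.ERROR and "argparse" in message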
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/compatibility/module_deprecations_v2.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides a list of renames between TensorFlow 1.* and 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.tools.compatibility import renames_v2
# pylint: disable=line-too-long
# Add additional renames not in renames_v2.py here.
# IMPORTANT: For the renames in here, if you also need to add to
# function_reorders or function_keyword_renames in tf_upgrade_v2.py,
# use the OLD function name.
# These renames happen after the arguments have been processed.
manual_symbol_renames = {
"tf.batch_to_space_nd":
"tf.batch_to_space",
"tf.batch_gather":
"tf.compat.v1.batch_gather",
"tf.space_to_batch_nd":
"tf.space_to_batch",
"tf.nn.space_to_batch":
"tf.space_to_batch",
"tf.estimator.inputs":
"tf.compat.v1.estimator.inputs",
"tf.extract_image_patches":
"tf.image.extract_patches",
"tf.image.extract_image_patches":
"tf.image.extract_patches",
"tf.gfile.Copy":
"tf.io.gfile.copy",
"tf.gfile.DeleteRecursively":
"tf.io.gfile.rmtree",
"tf.gfile.Exists":
"tf.io.gfile.exists",
"tf.gfile.Glob":
"tf.io.gfile.glob",
"tf.gfile.GFile":
"tf.io.gfile.GFile",
"tf.gfile.IsDirectory":
"tf.io.gfile.isdir",
"tf.gfile.ListDirectory":
"tf.io.gfile.listdir",
"tf.gfile.MakeDirs":
"tf.io.gfile.makedirs",
"tf.gfile.MkDir":
"tf.io.gfile.mkdir",
"tf.gfile.Open":
"tf.io.gfile.GFile",
"tf.gfile.Remove":
"tf.io.gfile.remove",
"tf.gfile.Rename":
"tf.io.gfile.rename",
"tf.gfile.Stat":
"tf.io.gfile.stat",
"tf.gfile.Walk":
"tf.io.gfile.walk",
"tf.contrib.cluster_resolver.ClusterResolver":
"tf.distribute.cluster_resolver.ClusterResolver",
"tf.contrib.cluster_resolver.GceClusterResolver":
"tf.distribute.cluster_resolver.GCEClusterResolver",
"tf.contrib.cluster_resolver.KubernetesClusterResolver":
"tf.distribute.cluster_resolver.KubernetesClusterResolver",
"tf.contrib.cluster_resolver.SimpleClusterResolver":
"tf.distribute.cluster_resolver.SimpleClusterResolver",
"tf.contrib.cluster_resolver.SlurmClusterResolver":
"tf.distribute.cluster_resolver.SlurmClusterResolver",
"tf.contrib.cluster_resolver.TFConfigClusterResolver":
"tf.distribute.cluster_resolver.TFConfigClusterResolver",
"tf.contrib.cluster_resolver.TPUClusterResolver":
"tf.distribute.cluster_resolver.TPUClusterResolver",
"tf.contrib.cluster_resolver.UnionClusterResolver":
"tf.distribute.cluster_resolver.UnionClusterResolver",
"tf.contrib.data.AUTOTUNE":
"tf.data.experimental.AUTOTUNE",
"tf.contrib.data.Counter":
"tf.data.experimental.Counter",
"tf.contrib.data.CheckpointInputPipelineHook":
"tf.data.experimental.CheckpointInputPipelineHook",
"tf.contrib.data.CsvDataset":
"tf.data.experimental.CsvDataset",
"tf.contrib.data.Optional":
"tf.data.experimental.Optional",
"tf.contrib.data.RandomDataset":
"tf.data.experimental.RandomDataset",
"tf.contrib.data.Reducer":
"tf.data.experimental.Reducer",
"tf.contrib.data.SqlDataset":
"tf.data.experimental.SqlDataset",
"tf.contrib.data.StatsAggregator":
"tf.data.experimental.StatsAggregator",
"tf.contrib.data.TFRecordWriter":
"tf.data.experimental.TFRecordWriter",
"tf.contrib.data.assert_element_shape":
"tf.data.experimental.assert_element_shape",
"tf.contrib.data.bucket_by_sequence_length":
"tf.data.experimental.bucket_by_sequence_length",
"tf.contrib.data.choose_from_datasets":
"tf.data.experimental.choose_from_datasets",
"tf.contrib.data.copy_to_device":
"tf.data.experimental.copy_to_device",
"tf.contrib.data.dense_to_sparse_batch":
"tf.data.experimental.dense_to_sparse_batch",
"tf.contrib.data.enumerate_dataset":
"tf.data.experimental.enumerate_dataset",
"tf.contrib.data.get_next_as_optional":
"tf.data.experimental.get_next_as_optional",
"tf.contrib.data.get_single_element":
"tf.data.experimental.get_single_element",
"tf.contrib.data.group_by_reducer":
"tf.data.experimental.group_by_reducer",
"tf.contrib.data.group_by_window":
"tf.data.experimental.group_by_window",
"tf.contrib.data.ignore_errors":
"tf.data.experimental.ignore_errors",
"tf.contrib.data.latency_stats":
"tf.data.experimental.latency_stats",
"tf.contrib.data.make_batched_features_dataset":
"tf.data.experimental.make_batched_features_dataset",
"tf.contrib.data.make_csv_dataset":
"tf.data.experimental.make_csv_dataset",
"tf.contrib.data.make_saveable_from_iterator":
"tf.data.experimental.make_saveable_from_iterator",
"tf.contrib.data.map_and_batch":
"tf.data.experimental.map_and_batch",
"tf.contrib.data.parallel_interleave":
"tf.data.experimental.parallel_interleave",
"tf.contrib.data.parse_example_dataset":
"tf.data.experimental.parse_example_dataset",
"tf.contrib.data.prefetch_to_device":
"tf.data.experimental.prefetch_to_device",
"tf.contrib.data.rejection_resample":
"tf.data.experimental.rejection_resample",
"tf.contrib.data.sample_from_datasets":
"tf.data.experimental.sample_from_datasets",
"tf.contrib.data.scan":
"tf.data.experimental.scan",
"tf.contrib.data.set_stats_aggregator":
"tf.data.experimental.set_stats_aggregator",
"tf.contrib.data.shuffle_and_repeat":
"tf.data.experimental.shuffle_and_repeat",
"tf.contrib.data.unbatch":
"tf.data.experimental.unbatch",
"tf.contrib.data.unique":
"tf.data.experimental.unique",
"tf.contrib.distribute.CrossDeviceOps":
"tf.distribute.CrossDeviceOps",
"tf.contrib.distribute.ReductionToOneDeviceCrossDeviceOps":
"tf.distribute.ReductionToOneDevice",
"tf.contrib.estimator.make_early_stopping_hook":
"tf.estimator.experimental.make_early_stopping_hook",
"tf.contrib.estimator.stop_if_higher_hook":
"tf.estimator.experimental.stop_if_higher_hook",
"tf.contrib.estimator.stop_if_lower_hook":
"tf.estimator.experimental.stop_if_lower_hook",
"tf.contrib.estimator.stop_if_no_decrease_hook":
"tf.estimator.experimental.stop_if_no_decrease_hook",
"tf.contrib.estimator.stop_if_no_increase_hook":
"tf.estimator.experimental.stop_if_no_increase_hook",
"tf.contrib.framework.CriticalSection":
"tf.CriticalSection",
"tf.contrib.framework.is_tensor":
"tf.is_tensor",
"tf.contrib.framework.load_variable":
"tf.train.load_variable",
"tf.contrib.framework.nest.assert_same_structure":
"tf.nest.assert_same_structure",
"tf.contrib.framework.nest.flatten":
"tf.nest.flatten",
"tf.contrib.framework.nest.is_sequence":
"tf.nest.is_nested",
"tf.contrib.framework.nest.map_structure":
"tf.nest.map_structure",
"tf.contrib.framework.nest.pack_sequence_as":
"tf.nest.pack_sequence_as",
"tf.contrib.batching.batch_function":
"tf.nondifferentiable_batch_function",
"tf.contrib.util.constant_value":
"tf.get_static_value",
"tf.contrib.saved_model.load_keras_model":
"tf.keras.experimental.load_from_saved_model",
"tf.contrib.saved_model.save_keras_model":
"tf.keras.experimental.export_saved_model",
"tf.contrib.rnn.RNNCell":
"tf.compat.v1.nn.rnn_cell.RNNCell",
"tf.contrib.rnn.LSTMStateTuple":
"tf.nn.rnn_cell.LSTMStateTuple",
"tf.contrib.rnn.BasicLSTMCell":
"tf.compat.v1.nn.rnn_cell.BasicLSTMCell",
"tf.contrib.rnn.BasicRNNCell":
"tf.compat.v1.nn.rnn_cell.BasicRNNCell",
"tf.contrib.rnn.GRUCell":
"tf.compat.v1.nn.rnn_cell.GRUCell",
"tf.contrib.rnn.LSTMCell":
"tf.compat.v1.nn.rnn_cell.LSTMCell",
"tf.contrib.rnn.MultiRNNCell":
"tf.compat.v1.nn.rnn_cell.MultiRNNCell",
"tf.contrib.rnn.static_rnn":
"tf.compat.v1.nn.static_rnn",
"tf.contrib.rnn.static_state_saving_rnn":
"tf.compat.v1.nn.static_state_saving_rnn",
"tf.contrib.rnn.static_bidirectional_rnn":
"tf.compat.v1.nn.static_bidirectional_rnn",
"tf.contrib.framework.sort":
"tf.sort",
"tf.contrib.framework.argsort":
"tf.argsort",
"tf.contrib.summary.all_summary_ops":
"tf.compat.v1.summary.all_v2_summary_ops",
"tf.contrib.summary.always_record_summaries":
"tf.compat.v2.summary.record_if",
"tf.contrib.summary.audio":
"tf.compat.v2.summary.audio",
"tf.contrib.summary.create_file_writer":
"tf.compat.v2.summary.create_file_writer",
"tf.contrib.summary.flush":
"tf.compat.v2.summary.flush",
"tf.contrib.summary.generic":
"tf.compat.v2.summary.write",
"tf.contrib.summary.histogram":
"tf.compat.v2.summary.histogram",
"tf.contrib.summary.image":
"tf.compat.v2.summary.image",
"tf.contrib.summary.initialize":
"tf.compat.v1.summary.initialize",
"tf.contrib.summary.never_record_summaries":
"tf.compat.v2.summary.record_if",
"tf.contrib.summary.scalar":
"tf.compat.v2.summary.scalar",
"tf.contrib.tpu.CrossShardOptimizer":
"tf.compat.v1.tpu.CrossShardOptimizer",
"tf.contrib.tpu.InputPipelineConfig":
"tf.compat.v1.estimator.tpu.InputPipelineConfig",
"tf.contrib.tpu.RunConfig":
"tf.compat.v1.estimator.tpu.RunConfig",
"tf.contrib.tpu.TPUConfig":
"tf.compat.v1.estimator.tpu.TPUConfig",
"tf.contrib.tpu.TPUEstimator":
"tf.compat.v1.estimator.tpu.TPUEstimator",
"tf.contrib.tpu.TPUEstimatorSpec":
"tf.compat.v1.estimator.tpu.TPUEstimatorSpec",
"tf.contrib.tpu.batch_parallel":
"tf.compat.v1.tpu.batch_parallel",
"tf.contrib.tpu.bfloat16_scope":
"tf.compat.v1.tpu.bfloat16_scope",
"tf.contrib.tpu.core":
"tf.compat.v1.tpu.core",
"tf.contrib.tpu.cross_replica_sum":
"tf.compat.v1.tpu.cross_replica_sum",
"tf.contrib.tpu.initialize_system":
"tf.compat.v1.tpu.initialize_system",
"tf.contrib.tpu.outside_compilation":
"tf.compat.v1.tpu.outside_compilation",
"tf.contrib.tpu.replicate":
"tf.compat.v1.tpu.replicate",
"tf.contrib.tpu.rewrite":
"tf.compat.v1.tpu.rewrite",
"tf.contrib.tpu.shard":
"tf.compat.v1.tpu.shard",
"tf.contrib.tpu.shutdown_system":
"tf.compat.v1.tpu.shutdown_system",
"tf.contrib.training.checkpoints_iterator":
"tf.train.checkpoints_iterator",
"tf.contrib.layers.recompute_grad":
"tf.recompute_grad",
"tf.count_nonzero":
"tf.math.count_nonzero",
"tf.manip.batch_to_space_nd":
"tf.batch_to_space",
"tf.quantize_v2":
"tf.quantization.quantize",
"tf.sparse_add":
"tf.sparse.add",
"tf.sparse_concat":
"tf.sparse.concat",
"tf.sparse_split":
"tf.sparse.split",
"tf.sparse_matmul":
"tf.linalg.matmul",
"tf.sparse_reduce_sum":
"tf.sparse.reduce_sum",
"tf.sparse_reduce_max":
"tf.sparse.reduce_max",
"tf.random.stateless_multinomial":
"tf.random.stateless_categorical",
"tf.substr":
"tf.strings.substr",
# TODO(b/129398290)
"tf.string_split":
"tf.compat.v1.string_split",
"tf.string_to_hash_bucket":
"tf.strings.to_hash_bucket",
"tf.string_to_number":
"tf.strings.to_number",
"tf.multinomial":
"tf.random.categorical",
"tf.random.multinomial":
"tf.random.categorical",
"tf.reduce_join":
"tf.strings.reduce_join",
"tf.load_file_system_library":
"tf.load_library",
"tf.bincount":
"tf.math.bincount",
"tf.confusion_matrix":
"tf.math.confusion_matrix",
"tf.train.confusion_matrix":
"tf.math.confusion_matrix",
"tf.train.sdca_fprint":
"tf.raw_ops.SdcaFprint",
"tf.train.sdca_optimizer":
"tf.raw_ops.SdcaOptimizer",
"tf.train.sdca_shrink_l1":
"tf.raw_ops.SdcaShrinkL1",
"tf.decode_csv":
"tf.io.decode_csv",
"tf.data.Iterator":
"tf.compat.v1.data.Iterator",
"tf.data.experimental.DatasetStructure":
"tf.data.DatasetSpec",
"tf.data.experimental.OptionalStructure":
"tf.OptionalSpec",
"tf.data.experimental.RaggedTensorStructure":
"tf.RaggedTensorSpec",
"tf.data.experimental.SparseTensorStructure":
"tf.SparseTensorSpec",
"tf.data.experimental.Structure":
"tf.TypeSpec",
"tf.data.experimental.TensorArrayStructure":
"tf.TensorArraySpec",
"tf.data.experimental.TensorStructure":
"tf.TensorSpec",
"tf.parse_example":
"tf.io.parse_example",
"tf.parse_single_example":
"tf.io.parse_single_example",
"tf.nn.fused_batch_norm":
"tf.compat.v1.nn.fused_batch_norm",
"tf.nn.softmax_cross_entropy_with_logits_v2":
"tf.nn.softmax_cross_entropy_with_logits",
"tf.losses.Reduction.MEAN":
"tf.compat.v1.losses.Reduction.MEAN",
"tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS":
"tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS",
"tf.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS":
"tf.compat.v1.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS",
"tf.lite.constants.FLOAT":
"tf.float32",
"tf.lite.constants.INT32":
"tf.int32",
"tf.lite.constants.INT64":
"tf.int64",
"tf.lite.constants.STRING":
"tf.string",
"tf.lite.constants.QUANTIZED_UINT8":
"tf.uint8",
"tf.arg_max":
"tf.argmax",
"tf.arg_min":
"tf.argmin",
# tf.nn.ctc_loss is still available in 2.0 but behavior
# changed significantly.
"tf.nn.ctc_loss":
"tf.compat.v1.nn.ctc_loss",
# tf.saved_model.load in 1.x has no equivalent in 2.x, but there is a
# symbol with the same name.
"tf.saved_model.load":
"tf.compat.v1.saved_model.load",
"tf.saved_model.load_v2":
"tf.compat.v2.saved_model.load",
"tf.image.resize_images":
"tf.image.resize",
"tf.random_poisson":
"tf.random.poisson",
"tf.debugging.assert_greater":
"tf.compat.v1.debugging.assert_greater",
"tf.debugging.assert_greater_equal":
"tf.compat.v1.debugging.assert_greater_equal",
"tf.debugging.assert_integer":
"tf.compat.v1.debugging.assert_integer",
"tf.debugging.assert_less":
"tf.compat.v1.debugging.assert_less",
"tf.debugging.assert_less_equal":
"tf.compat.v1.debugging.assert_less_equal",
"tf.debugging.assert_near":
"tf.compat.v1.debugging.assert_near",
"tf.debugging.assert_negative":
"tf.compat.v1.debugging.assert_negative",
"tf.debugging.assert_non_negative":
"tf.compat.v1.debugging.assert_non_negative",
"tf.debugging.assert_non_positive":
"tf.compat.v1.debugging.assert_non_positive",
"tf.debugging.assert_none_equal":
"tf.compat.v1.debugging.assert_none_equal",
"tf.debugging.assert_type":
"tf.compat.v1.debugging.assert_type",
"tf.debugging.assert_positive":
"tf.compat.v1.debugging.assert_positive",
"tf.debugging.assert_equal":
"tf.compat.v1.debugging.assert_equal",
"tf.debugging.assert_scalar":
"tf.compat.v1.debugging.assert_scalar",
"tf.assert_equal":
"tf.compat.v1.assert_equal",
"tf.assert_less":
"tf.compat.v1.assert_less",
"tf.assert_greater":
"tf.compat.v1.assert_greater",
"tf.debugging.assert_rank":
"tf.compat.v1.debugging.assert_rank",
"tf.debugging.assert_rank_at_least":
"tf.compat.v1.debugging.assert_rank_at_least",
"tf.debugging.assert_rank_in":
"tf.compat.v1.debugging.assert_rank_in",
"tf.errors.exception_type_from_error_code":
"tf.compat.v1.errors.exception_type_from_error_code",
"tf.errors.error_code_from_exception_type":
"tf.compat.v1.errors.error_code_from_exception_type",
"tf.errors.raise_exception_on_not_ok_status":
"tf.compat.v1.errors.raise_exception_on_not_ok_status",
"tf.assert_rank":
"tf.compat.v1.assert_rank",
"tf.nn.max_pool":
"tf.nn.max_pool2d",
"tf.nn.avg_pool":
"tf.nn.avg_pool2d",
"tf.keras.initializers.zeros":
"tf.compat.v1.keras.initializers.zeros",
"tf.keras.initializers.Zeros":
"tf.compat.v1.keras.initializers.Zeros",
"tf.keras.initializers.ones":
"tf.compat.v1.keras.initializers.ones",
"tf.keras.initializers.Ones":
"tf.compat.v1.keras.initializers.Ones",
"tf.keras.initializers.constant":
"tf.compat.v1.keras.initializers.constant",
"tf.keras.initializers.Constant":
"tf.compat.v1.keras.initializers.Constant",
"tf.keras.initializers.VarianceScaling":
"tf.compat.v1.keras.initializers.VarianceScaling",
"tf.keras.initializers.Orthogonal":
"tf.compat.v1.keras.initializers.Orthogonal",
"tf.keras.initializers.orthogonal":
"tf.compat.v1.keras.initializers.orthogonal",
"tf.keras.initializers.Identity":
"tf.compat.v1.keras.initializers.Identity",
"tf.keras.initializers.identity":
"tf.compat.v1.keras.initializers.identity",
"tf.keras.initializers.glorot_uniform":
"tf.compat.v1.keras.initializers.glorot_uniform",
"tf.keras.initializers.glorot_normal":
"tf.compat.v1.keras.initializers.glorot_normal",
"tf.keras.initializers.lecun_normal":
"tf.compat.v1.keras.initializers.lecun_normal",
"tf.keras.initializers.lecun_uniform":
"tf.compat.v1.keras.initializers.lecun_uniform",
"tf.keras.initializers.he_normal":
"tf.compat.v1.keras.initializers.he_normal",
"tf.keras.initializers.he_uniform":
"tf.compat.v1.keras.initializers.he_uniform",
"tf.keras.initializers.TruncatedNormal":
"tf.compat.v1.keras.initializers.TruncatedNormal",
"tf.keras.initializers.truncated_normal":
"tf.compat.v1.keras.initializers.truncated_normal",
"tf.keras.initializers.RandomUniform":
"tf.compat.v1.keras.initializers.RandomUniform",
"tf.keras.initializers.uniform":
"tf.compat.v1.keras.initializers.uniform",
"tf.keras.initializers.random_uniform":
"tf.compat.v1.keras.initializers.random_uniform",
"tf.keras.initializers.RandomNormal":
"tf.compat.v1.keras.initializers.RandomNormal",
"tf.keras.initializers.normal":
"tf.compat.v1.keras.initializers.normal",
"tf.keras.initializers.random_normal":
"tf.compat.v1.keras.initializers.random_normal",
"tf.zeros_initializer":
"tf.compat.v1.zeros_initializer",
"tf.initializers.zeros":
"tf.compat.v1.initializers.zeros",
"tf.ones_initializer":
"tf.compat.v1.ones_initializer",
"tf.initializers.ones":
"tf.compat.v1.initializers.ones",
"tf.constant_initializer":
"tf.compat.v1.constant_initializer",
"tf.initializers.constant":
"tf.compat.v1.initializers.constant",
"tf.random_uniform_initializer":
"tf.compat.v1.random_uniform_initializer",
"tf.initializers.random_uniform":
"tf.compat.v1.initializers.random_uniform",
"tf.random_normal_initializer":
"tf.compat.v1.random_normal_initializer",
"tf.initializers.random_normal":
"tf.compat.v1.initializers.random_normal",
"tf.truncated_normal_initializer":
"tf.compat.v1.truncated_normal_initializer",
"tf.initializers.truncated_normal":
"tf.compat.v1.initializers.truncated_normal",
"tf.variance_scaling_initializer":
"tf.compat.v1.variance_scaling_initializer",
"tf.initializers.variance_scaling":
"tf.compat.v1.initializers.variance_scaling",
"tf.orthogonal_initializer":
"tf.compat.v1.orthogonal_initializer",
"tf.initializers.orthogonal":
"tf.compat.v1.initializers.orthogonal",
"tf.glorot_uniform_initializer":
"tf.compat.v1.glorot_uniform_initializer",
"tf.initializers.glorot_uniform":
"tf.compat.v1.initializers.glorot_uniform",
"tf.glorot_normal_initializer":
"tf.compat.v1.glorot_normal_initializer",
"tf.initializers.glorot_normal":
"tf.compat.v1.initializers.glorot_normal",
"tf.initializers.identity":
"tf.compat.v1.initializers.identity",
"tf.initializers.lecun_normal":
"tf.compat.v1.initializers.lecun_normal",
"tf.initializers.lecun_uniform":
"tf.compat.v1.initializers.lecun_uniform",
"tf.initializers.he_normal":
"tf.compat.v1.initializers.he_normal",
"tf.initializers.he_uniform":
"tf.compat.v1.initializers.he_uniform",
"tf.data.experimental.map_and_batch_with_legacy_function":
"tf.compat.v1.data.experimental.map_and_batch_with_legacy_function",
"tf.nn.conv2d_backprop_input":
"tf.nn.conv2d_transpose",
"tf.test.compute_gradient":
"tf.compat.v1.test.compute_gradient",
"tf.floor_div":
"tf.math.floordiv",
"tf.where":
"tf.compat.v1.where",
"tf.where_v2":
"tf.compat.v2.where",
}
# pylint: enable=line-too-long
symbol_renames = renames_v2.renames
symbol_renames.update(manual_symbol_renames)
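# A minimal lookup sketch, assuming a converter consults the merged map as a
# plain dict of dotted-name strings. Because the update above is applied last,
# the manual entries win over the autogenerated ones: renames_v2.renames maps
# "tf.floor_div" to "tf.compat.v1.floor_div", but after the update,
#
#   symbol_renames["tf.floor_div"]   # -> "tf.math.floordiv"
#   symbol_renames["tf.where"]       # -> "tf.compat.v1.where"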
addons_symbol_mappings = {
"tf.contrib.layers.poincare_normalize":
"tfa.layers.PoincareNormalize",
"tf.contrib.layers.maxout":
"tfa.layers.Maxout",
"tf.contrib.layers.group_norm":
"tfa.layers.GroupNormalization",
"tf.contrib.layers.instance_norm":
"tfa.layers.InstanceNormalization",
"tf.contrib.sparsemax.sparsemax":
"tfa.activations.sparsemax",
"tf.contrib.losses.metric_learning.contrastive_loss":
"tfa.losses.ContrastiveLoss",
"tf.contrib.losses.metric_learning.lifted_struct_loss":
"tfa.losses.LiftedStructLoss",
"tf.contrib.sparsemax.sparsemax_loss":
"tfa.losses.SparsemaxLoss",
"tf.contrib.losses.metric_learning.triplet_semihard_loss":
"tfa.losses.TripletSemiHardLoss",
"tf.contrib.opt.LazyAdamOptimizer":
"tfa.optimizers.LazyAdam",
"tf.contrib.opt.MovingAverageOptimizer":
"tfa.optimizers.MovingAverage",
"tf.contrib.opt.MomentumWOptimizer":
"tfa.optimizers.SGDW",
"tf.contrib.opt.AdamWOptimizer":
"tfa.optimizers.AdamW",
"tf.contrib.opt.extend_with_decoupled_weight_decay":
"tfa.optimizers.extend_with_decoupled_weight_decay",
"tf.contrib.text.skip_gram_sample":
"tfa.text.skip_gram_sample",
"tf.contrib.text.skip_gram_sample_with_text_vocab":
"tfa.text.skip_gram_sample_with_text_vocab",
"tf.contrib.image.dense_image_warp":
"tfa.image.dense_image_warp",
"tf.contrib.image.adjust_hsv_in_yiq":
"tfa.image.adjust_hsv_in_yiq",
"tf.contrib.image.compose_transforms":
"tfa.image.compose_transforms",
"tf.contrib.image.random_hsv_in_yiq":
"tfa.image.random_hsv_in_yiq",
"tf.contrib.image.angles_to_projective_transforms":
"tfa.image.angles_to_projective_transforms",
"tf.contrib.image.matrices_to_flat_transforms":
"tfa.image.matricies_to_flat_transforms",
"tf.contrib.image.rotate":
"tfa.image.rotate",
"tf.contrib.image.transform":
"tfa.image.transform",
"tf.contrib.rnn.NASCell":
"tfa.rnn.NASCell",
"tf.contrib.rnn.LayerNormBasicLSTMCell":
"tfa.rnn.LayerNormLSTMCell"
}
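# A minimal sketch of how the addons mappings could be used, assuming they are
# only surfaced as suggestions (tensorflow_addons is a separate pip package,
# so the rename cannot simply be applied in place):
#
#   old = "tf.contrib.opt.AdamWOptimizer"
#   hint = addons_symbol_mappings.get(old)    # -> "tfa.optimizers.AdamW"
#   if hint:
#     print("%s has moved to %s (pip install tensorflow-addons)" % (old, hint))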
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/compatibility/all_renames_v2.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""List of renames to apply when converting from TF 1.0 to TF 2.0.
THIS FILE IS AUTOGENERATED: To update, please run:
bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
pyformat --in_place third_party/tensorflow/tools/compatibility/renames_v2.py
This file should be updated whenever endpoints are deprecated.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
renames = {
'tf.AUTO_REUSE':
'tf.compat.v1.AUTO_REUSE',
'tf.AttrValue':
'tf.compat.v1.AttrValue',
'tf.COMPILER_VERSION':
'tf.version.COMPILER_VERSION',
'tf.CXX11_ABI_FLAG':
'tf.sysconfig.CXX11_ABI_FLAG',
'tf.ConditionalAccumulator':
'tf.compat.v1.ConditionalAccumulator',
'tf.ConditionalAccumulatorBase':
'tf.compat.v1.ConditionalAccumulatorBase',
'tf.ConfigProto':
'tf.compat.v1.ConfigProto',
'tf.Dimension':
'tf.compat.v1.Dimension',
'tf.Event':
'tf.compat.v1.Event',
'tf.FIFOQueue':
'tf.queue.FIFOQueue',
'tf.FixedLenFeature':
'tf.io.FixedLenFeature',
'tf.FixedLenSequenceFeature':
'tf.io.FixedLenSequenceFeature',
'tf.FixedLengthRecordReader':
'tf.compat.v1.FixedLengthRecordReader',
'tf.GIT_VERSION':
'tf.version.GIT_VERSION',
'tf.GPUOptions':
'tf.compat.v1.GPUOptions',
'tf.GRAPH_DEF_VERSION':
'tf.version.GRAPH_DEF_VERSION',
'tf.GRAPH_DEF_VERSION_MIN_CONSUMER':
'tf.version.GRAPH_DEF_VERSION_MIN_CONSUMER',
'tf.GRAPH_DEF_VERSION_MIN_PRODUCER':
'tf.version.GRAPH_DEF_VERSION_MIN_PRODUCER',
'tf.GraphDef':
'tf.compat.v1.GraphDef',
'tf.GraphKeys':
'tf.compat.v1.GraphKeys',
'tf.GraphOptions':
'tf.compat.v1.GraphOptions',
'tf.HistogramProto':
'tf.compat.v1.HistogramProto',
'tf.IdentityReader':
'tf.compat.v1.IdentityReader',
'tf.InteractiveSession':
'tf.compat.v1.InteractiveSession',
'tf.LMDBReader':
'tf.compat.v1.LMDBReader',
'tf.LogMessage':
'tf.compat.v1.LogMessage',
'tf.MONOLITHIC_BUILD':
'tf.sysconfig.MONOLITHIC_BUILD',
'tf.MetaGraphDef':
'tf.compat.v1.MetaGraphDef',
'tf.NameAttrList':
'tf.compat.v1.NameAttrList',
'tf.NoGradient':
'tf.no_gradient',
'tf.NodeDef':
'tf.compat.v1.NodeDef',
'tf.NotDifferentiable':
'tf.no_gradient',
'tf.OpError':
'tf.errors.OpError',
'tf.OptimizerOptions':
'tf.compat.v1.OptimizerOptions',
'tf.PaddingFIFOQueue':
'tf.queue.PaddingFIFOQueue',
'tf.Print':
'tf.compat.v1.Print',
'tf.PriorityQueue':
'tf.queue.PriorityQueue',
'tf.QUANTIZED_DTYPES':
'tf.dtypes.QUANTIZED_DTYPES',
'tf.QueueBase':
'tf.queue.QueueBase',
'tf.RandomShuffleQueue':
'tf.queue.RandomShuffleQueue',
'tf.ReaderBase':
'tf.compat.v1.ReaderBase',
'tf.RunMetadata':
'tf.compat.v1.RunMetadata',
'tf.RunOptions':
'tf.compat.v1.RunOptions',
'tf.Session':
'tf.compat.v1.Session',
'tf.SessionLog':
'tf.compat.v1.SessionLog',
'tf.SparseConditionalAccumulator':
'tf.compat.v1.SparseConditionalAccumulator',
'tf.SparseFeature':
'tf.io.SparseFeature',
'tf.SparseTensorValue':
'tf.compat.v1.SparseTensorValue',
'tf.Summary':
'tf.compat.v1.Summary',
'tf.SummaryMetadata':
'tf.compat.v1.SummaryMetadata',
'tf.TFRecordReader':
'tf.compat.v1.TFRecordReader',
'tf.TensorInfo':
'tf.compat.v1.TensorInfo',
'tf.TextLineReader':
'tf.compat.v1.TextLineReader',
'tf.VERSION':
'tf.version.VERSION',
'tf.VarLenFeature':
'tf.io.VarLenFeature',
'tf.VariableScope':
'tf.compat.v1.VariableScope',
'tf.WholeFileReader':
'tf.compat.v1.WholeFileReader',
'tf.accumulate_n':
'tf.math.accumulate_n',
'tf.add_check_numerics_ops':
'tf.compat.v1.add_check_numerics_ops',
'tf.add_to_collection':
'tf.compat.v1.add_to_collection',
'tf.add_to_collections':
'tf.compat.v1.add_to_collections',
'tf.all_variables':
'tf.compat.v1.all_variables',
'tf.angle':
'tf.math.angle',
'tf.app.run':
'tf.compat.v1.app.run',
'tf.assert_greater_equal':
'tf.compat.v1.assert_greater_equal',
'tf.assert_integer':
'tf.compat.v1.assert_integer',
'tf.assert_less_equal':
'tf.compat.v1.assert_less_equal',
'tf.assert_near':
'tf.compat.v1.assert_near',
'tf.assert_negative':
'tf.compat.v1.assert_negative',
'tf.assert_non_negative':
'tf.compat.v1.assert_non_negative',
'tf.assert_non_positive':
'tf.compat.v1.assert_non_positive',
'tf.assert_none_equal':
'tf.compat.v1.assert_none_equal',
'tf.assert_positive':
'tf.compat.v1.assert_positive',
'tf.assert_proper_iterable':
'tf.debugging.assert_proper_iterable',
'tf.assert_rank_at_least':
'tf.compat.v1.assert_rank_at_least',
'tf.assert_rank_in':
'tf.compat.v1.assert_rank_in',
'tf.assert_same_float_dtype':
'tf.debugging.assert_same_float_dtype',
'tf.assert_scalar':
'tf.compat.v1.assert_scalar',
'tf.assert_type':
'tf.compat.v1.assert_type',
'tf.assert_variables_initialized':
'tf.compat.v1.assert_variables_initialized',
'tf.assign':
'tf.compat.v1.assign',
'tf.assign_add':
'tf.compat.v1.assign_add',
'tf.assign_sub':
'tf.compat.v1.assign_sub',
'tf.batch_scatter_update':
'tf.compat.v1.batch_scatter_update',
'tf.betainc':
'tf.math.betainc',
'tf.ceil':
'tf.math.ceil',
'tf.check_numerics':
'tf.debugging.check_numerics',
'tf.cholesky':
'tf.linalg.cholesky',
'tf.cholesky_solve':
'tf.linalg.cholesky_solve',
'tf.clip_by_average_norm':
'tf.compat.v1.clip_by_average_norm',
'tf.colocate_with':
'tf.compat.v1.colocate_with',
'tf.conj':
'tf.math.conj',
'tf.container':
'tf.compat.v1.container',
'tf.control_flow_v2_enabled':
'tf.compat.v1.control_flow_v2_enabled',
'tf.convert_to_tensor_or_indexed_slices':
'tf.compat.v1.convert_to_tensor_or_indexed_slices',
'tf.convert_to_tensor_or_sparse_tensor':
'tf.compat.v1.convert_to_tensor_or_sparse_tensor',
'tf.count_up_to':
'tf.compat.v1.count_up_to',
'tf.create_partitioned_variables':
'tf.compat.v1.create_partitioned_variables',
'tf.cross':
'tf.linalg.cross',
'tf.cumprod':
'tf.math.cumprod',
'tf.data.get_output_classes':
'tf.compat.v1.data.get_output_classes',
'tf.data.get_output_shapes':
'tf.compat.v1.data.get_output_shapes',
'tf.data.get_output_types':
'tf.compat.v1.data.get_output_types',
'tf.data.make_initializable_iterator':
'tf.compat.v1.data.make_initializable_iterator',
'tf.data.make_one_shot_iterator':
'tf.compat.v1.data.make_one_shot_iterator',
'tf.debugging.is_finite':
'tf.math.is_finite',
'tf.debugging.is_inf':
'tf.math.is_inf',
'tf.debugging.is_nan':
'tf.math.is_nan',
'tf.debugging.is_non_decreasing':
'tf.math.is_non_decreasing',
'tf.debugging.is_strictly_increasing':
'tf.math.is_strictly_increasing',
'tf.decode_base64':
'tf.io.decode_base64',
'tf.decode_compressed':
'tf.io.decode_compressed',
'tf.decode_json_example':
'tf.io.decode_json_example',
'tf.decode_raw':
'tf.io.decode_raw',
'tf.delete_session_tensor':
'tf.compat.v1.delete_session_tensor',
'tf.depth_to_space':
'tf.compat.v1.depth_to_space',
'tf.dequantize':
'tf.quantization.dequantize',
'tf.deserialize_many_sparse':
'tf.io.deserialize_many_sparse',
'tf.diag':
'tf.linalg.tensor_diag',
'tf.diag_part':
'tf.linalg.tensor_diag_part',
'tf.digamma':
'tf.math.digamma',
'tf.dimension_at_index':
'tf.compat.dimension_at_index',
'tf.dimension_value':
'tf.compat.dimension_value',
'tf.disable_control_flow_v2':
'tf.compat.v1.disable_control_flow_v2',
'tf.disable_eager_execution':
'tf.compat.v1.disable_eager_execution',
'tf.disable_resource_variables':
'tf.compat.v1.disable_resource_variables',
'tf.disable_tensor_equality':
'tf.compat.v1.disable_tensor_equality',
'tf.disable_v2_behavior':
'tf.compat.v1.disable_v2_behavior',
'tf.disable_v2_tensorshape':
'tf.compat.v1.disable_v2_tensorshape',
'tf.distribute.get_loss_reduction':
'tf.compat.v1.distribute.get_loss_reduction',
'tf.distributions.Bernoulli':
'tf.compat.v1.distributions.Bernoulli',
'tf.distributions.Beta':
'tf.compat.v1.distributions.Beta',
'tf.distributions.Categorical':
'tf.compat.v1.distributions.Categorical',
'tf.distributions.Dirichlet':
'tf.compat.v1.distributions.Dirichlet',
'tf.distributions.DirichletMultinomial':
'tf.compat.v1.distributions.DirichletMultinomial',
'tf.distributions.Distribution':
'tf.compat.v1.distributions.Distribution',
'tf.distributions.Exponential':
'tf.compat.v1.distributions.Exponential',
'tf.distributions.FULLY_REPARAMETERIZED':
'tf.compat.v1.distributions.FULLY_REPARAMETERIZED',
'tf.distributions.Gamma':
'tf.compat.v1.distributions.Gamma',
'tf.distributions.Laplace':
'tf.compat.v1.distributions.Laplace',
'tf.distributions.Multinomial':
'tf.compat.v1.distributions.Multinomial',
'tf.distributions.NOT_REPARAMETERIZED':
'tf.compat.v1.distributions.NOT_REPARAMETERIZED',
'tf.distributions.Normal':
'tf.compat.v1.distributions.Normal',
'tf.distributions.RegisterKL':
'tf.compat.v1.distributions.RegisterKL',
'tf.distributions.ReparameterizationType':
'tf.compat.v1.distributions.ReparameterizationType',
'tf.distributions.StudentT':
'tf.compat.v1.distributions.StudentT',
'tf.distributions.Uniform':
'tf.compat.v1.distributions.Uniform',
'tf.distributions.kl_divergence':
'tf.compat.v1.distributions.kl_divergence',
'tf.div':
'tf.compat.v1.div',
'tf.div_no_nan':
'tf.math.divide_no_nan',
'tf.dtypes.as_string':
'tf.strings.as_string',
'tf.enable_control_flow_v2':
'tf.compat.v1.enable_control_flow_v2',
'tf.enable_eager_execution':
'tf.compat.v1.enable_eager_execution',
'tf.enable_resource_variables':
'tf.compat.v1.enable_resource_variables',
'tf.enable_tensor_equality':
'tf.compat.v1.enable_tensor_equality',
'tf.enable_v2_behavior':
'tf.compat.v1.enable_v2_behavior',
'tf.enable_v2_tensorshape':
'tf.compat.v1.enable_v2_tensorshape',
'tf.encode_base64':
'tf.io.encode_base64',
'tf.erf':
'tf.math.erf',
'tf.erfc':
'tf.math.erfc',
'tf.estimator.experimental.KMeans':
'tf.compat.v1.estimator.experimental.KMeans',
'tf.estimator.experimental.dnn_logit_fn_builder':
'tf.compat.v1.estimator.experimental.dnn_logit_fn_builder',
'tf.estimator.experimental.linear_logit_fn_builder':
'tf.compat.v1.estimator.experimental.linear_logit_fn_builder',
'tf.estimator.inputs.numpy_input_fn':
'tf.compat.v1.estimator.inputs.numpy_input_fn',
'tf.estimator.inputs.pandas_input_fn':
'tf.compat.v1.estimator.inputs.pandas_input_fn',
'tf.estimator.tpu.InputPipelineConfig':
'tf.compat.v1.estimator.tpu.InputPipelineConfig',
'tf.estimator.tpu.RunConfig':
'tf.compat.v1.estimator.tpu.RunConfig',
'tf.estimator.tpu.TPUConfig':
'tf.compat.v1.estimator.tpu.TPUConfig',
'tf.estimator.tpu.TPUEstimator':
'tf.compat.v1.estimator.tpu.TPUEstimator',
'tf.estimator.tpu.TPUEstimatorSpec':
'tf.compat.v1.estimator.tpu.TPUEstimatorSpec',
'tf.estimator.tpu.experimental.EmbeddingSpec':
'tf.compat.v1.estimator.tpu.experimental.EmbeddingSpec',
'tf.experimental.output_all_intermediates':
'tf.compat.v1.experimental.output_all_intermediates',
'tf.expm1':
'tf.math.expm1',
'tf.fake_quant_with_min_max_args':
'tf.quantization.fake_quant_with_min_max_args',
'tf.fake_quant_with_min_max_args_gradient':
'tf.quantization.fake_quant_with_min_max_args_gradient',
'tf.fake_quant_with_min_max_vars':
'tf.quantization.fake_quant_with_min_max_vars',
'tf.fake_quant_with_min_max_vars_gradient':
'tf.quantization.fake_quant_with_min_max_vars_gradient',
'tf.fake_quant_with_min_max_vars_per_channel':
'tf.quantization.fake_quant_with_min_max_vars_per_channel',
'tf.fake_quant_with_min_max_vars_per_channel_gradient':
'tf.quantization.fake_quant_with_min_max_vars_per_channel_gradient',
'tf.feature_column.input_layer':
'tf.compat.v1.feature_column.input_layer',
'tf.feature_column.linear_model':
'tf.compat.v1.feature_column.linear_model',
'tf.feature_column.shared_embedding_columns':
'tf.compat.v1.feature_column.shared_embedding_columns',
'tf.fft':
'tf.signal.fft',
'tf.fft2d':
'tf.signal.fft2d',
'tf.fft3d':
'tf.signal.fft3d',
'tf.fixed_size_partitioner':
'tf.compat.v1.fixed_size_partitioner',
'tf.floor_div':
'tf.compat.v1.floor_div',
'tf.floordiv':
'tf.math.floordiv',
'tf.floormod':
'tf.math.floormod',
'tf.get_collection':
'tf.compat.v1.get_collection',
'tf.get_collection_ref':
'tf.compat.v1.get_collection_ref',
'tf.get_default_graph':
'tf.compat.v1.get_default_graph',
'tf.get_default_session':
'tf.compat.v1.get_default_session',
'tf.get_local_variable':
'tf.compat.v1.get_local_variable',
'tf.get_seed':
'tf.compat.v1.get_seed',
'tf.get_session_handle':
'tf.compat.v1.get_session_handle',
'tf.get_session_tensor':
'tf.compat.v1.get_session_tensor',
'tf.get_variable':
'tf.compat.v1.get_variable',
'tf.get_variable_scope':
'tf.compat.v1.get_variable_scope',
'tf.gfile.FastGFile':
'tf.compat.v1.gfile.FastGFile',
'tf.global_norm':
'tf.linalg.global_norm',
'tf.global_variables':
'tf.compat.v1.global_variables',
'tf.global_variables_initializer':
'tf.compat.v1.global_variables_initializer',
'tf.graph_util.convert_variables_to_constants':
'tf.compat.v1.graph_util.convert_variables_to_constants',
'tf.graph_util.extract_sub_graph':
'tf.compat.v1.graph_util.extract_sub_graph',
'tf.graph_util.must_run_on_cpu':
'tf.compat.v1.graph_util.must_run_on_cpu',
'tf.graph_util.remove_training_nodes':
'tf.compat.v1.graph_util.remove_training_nodes',
'tf.graph_util.tensor_shape_from_node_def_name':
'tf.compat.v1.graph_util.tensor_shape_from_node_def_name',
'tf.ifft':
'tf.signal.ifft',
'tf.ifft2d':
'tf.signal.ifft2d',
'tf.ifft3d':
'tf.signal.ifft3d',
'tf.igamma':
'tf.math.igamma',
'tf.igammac':
'tf.math.igammac',
'tf.imag':
'tf.math.imag',
'tf.image.resize_area':
'tf.compat.v1.image.resize_area',
'tf.image.resize_bicubic':
'tf.compat.v1.image.resize_bicubic',
'tf.image.resize_bilinear':
'tf.compat.v1.image.resize_bilinear',
'tf.image.resize_image_with_crop_or_pad':
'tf.image.resize_with_crop_or_pad',
'tf.image.resize_image_with_pad':
'tf.compat.v1.image.resize_image_with_pad',
'tf.image.resize_nearest_neighbor':
'tf.compat.v1.image.resize_nearest_neighbor',
'tf.image.transpose_image':
'tf.image.transpose',
'tf.initialize_all_tables':
'tf.compat.v1.initialize_all_tables',
'tf.initialize_all_variables':
'tf.compat.v1.initialize_all_variables',
'tf.initialize_local_variables':
'tf.compat.v1.initialize_local_variables',
'tf.initialize_variables':
'tf.compat.v1.initialize_variables',
'tf.initializers.global_variables':
'tf.compat.v1.initializers.global_variables',
'tf.initializers.local_variables':
'tf.compat.v1.initializers.local_variables',
'tf.initializers.tables_initializer':
'tf.compat.v1.initializers.tables_initializer',
'tf.initializers.uniform_unit_scaling':
'tf.compat.v1.initializers.uniform_unit_scaling',
'tf.initializers.variables':
'tf.compat.v1.initializers.variables',
'tf.invert_permutation':
'tf.math.invert_permutation',
'tf.io.PaddingFIFOQueue':
'tf.queue.PaddingFIFOQueue',
'tf.io.PriorityQueue':
'tf.queue.PriorityQueue',
'tf.io.QueueBase':
'tf.queue.QueueBase',
'tf.io.RandomShuffleQueue':
'tf.queue.RandomShuffleQueue',
'tf.io.TFRecordCompressionType':
'tf.compat.v1.io.TFRecordCompressionType',
'tf.io.tf_record_iterator':
'tf.compat.v1.io.tf_record_iterator',
'tf.is_finite':
'tf.math.is_finite',
'tf.is_inf':
'tf.math.is_inf',
'tf.is_nan':
'tf.math.is_nan',
'tf.is_non_decreasing':
'tf.math.is_non_decreasing',
'tf.is_numeric_tensor':
'tf.debugging.is_numeric_tensor',
'tf.is_strictly_increasing':
'tf.math.is_strictly_increasing',
'tf.is_variable_initialized':
'tf.compat.v1.is_variable_initialized',
'tf.keras.backend.get_session':
'tf.compat.v1.keras.backend.get_session',
'tf.keras.backend.set_session':
'tf.compat.v1.keras.backend.set_session',
'tf.keras.layers.CuDNNGRU':
'tf.compat.v1.keras.layers.CuDNNGRU',
'tf.keras.layers.CuDNNLSTM':
'tf.compat.v1.keras.layers.CuDNNLSTM',
'tf.keras.losses.cosine':
'tf.keras.losses.cosine_similarity',
'tf.keras.losses.cosine_proximity':
'tf.keras.losses.cosine_similarity',
'tf.keras.metrics.cosine':
'tf.keras.losses.cosine_similarity',
'tf.keras.metrics.cosine_proximity':
'tf.keras.losses.cosine_similarity',
'tf.layers.AveragePooling1D':
'tf.compat.v1.layers.AveragePooling1D',
'tf.layers.AveragePooling2D':
'tf.compat.v1.layers.AveragePooling2D',
'tf.layers.AveragePooling3D':
'tf.compat.v1.layers.AveragePooling3D',
'tf.layers.BatchNormalization':
'tf.compat.v1.layers.BatchNormalization',
'tf.layers.Conv1D':
'tf.compat.v1.layers.Conv1D',
'tf.layers.Conv2D':
'tf.compat.v1.layers.Conv2D',
'tf.layers.Conv2DTranspose':
'tf.compat.v1.layers.Conv2DTranspose',
'tf.layers.Conv3D':
'tf.compat.v1.layers.Conv3D',
'tf.layers.Conv3DTranspose':
'tf.compat.v1.layers.Conv3DTranspose',
'tf.layers.Dense':
'tf.compat.v1.layers.Dense',
'tf.layers.Dropout':
'tf.compat.v1.layers.Dropout',
'tf.layers.Flatten':
'tf.compat.v1.layers.Flatten',
'tf.layers.InputSpec':
'tf.keras.layers.InputSpec',
'tf.layers.Layer':
'tf.compat.v1.layers.Layer',
'tf.layers.MaxPooling1D':
'tf.compat.v1.layers.MaxPooling1D',
'tf.layers.MaxPooling2D':
'tf.compat.v1.layers.MaxPooling2D',
'tf.layers.MaxPooling3D':
'tf.compat.v1.layers.MaxPooling3D',
'tf.layers.SeparableConv1D':
'tf.compat.v1.layers.SeparableConv1D',
'tf.layers.SeparableConv2D':
'tf.compat.v1.layers.SeparableConv2D',
'tf.layers.average_pooling1d':
'tf.compat.v1.layers.average_pooling1d',
'tf.layers.average_pooling2d':
'tf.compat.v1.layers.average_pooling2d',
'tf.layers.average_pooling3d':
'tf.compat.v1.layers.average_pooling3d',
'tf.layers.batch_normalization':
'tf.compat.v1.layers.batch_normalization',
'tf.layers.conv1d':
'tf.compat.v1.layers.conv1d',
'tf.layers.conv2d':
'tf.compat.v1.layers.conv2d',
'tf.layers.conv2d_transpose':
'tf.compat.v1.layers.conv2d_transpose',
'tf.layers.conv3d':
'tf.compat.v1.layers.conv3d',
'tf.layers.conv3d_transpose':
'tf.compat.v1.layers.conv3d_transpose',
'tf.layers.dense':
'tf.compat.v1.layers.dense',
'tf.layers.dropout':
'tf.compat.v1.layers.dropout',
'tf.layers.experimental.keras_style_scope':
'tf.compat.v1.layers.experimental.keras_style_scope',
'tf.layers.experimental.set_keras_style':
'tf.compat.v1.layers.experimental.set_keras_style',
'tf.layers.flatten':
'tf.compat.v1.layers.flatten',
'tf.layers.max_pooling1d':
'tf.compat.v1.layers.max_pooling1d',
'tf.layers.max_pooling2d':
'tf.compat.v1.layers.max_pooling2d',
'tf.layers.max_pooling3d':
'tf.compat.v1.layers.max_pooling3d',
'tf.layers.separable_conv1d':
'tf.compat.v1.layers.separable_conv1d',
'tf.layers.separable_conv2d':
'tf.compat.v1.layers.separable_conv2d',
'tf.lbeta':
'tf.math.lbeta',
'tf.lgamma':
'tf.math.lgamma',
'tf.lin_space':
'tf.linspace',
'tf.linalg.transpose':
'tf.linalg.matrix_transpose',
'tf.lite.OpHint':
'tf.compat.v1.lite.OpHint',
'tf.lite.TocoConverter':
'tf.compat.v1.lite.TocoConverter',
'tf.lite.constants.GRAPHVIZ_DOT':
'tf.compat.v1.lite.constants.GRAPHVIZ_DOT',
'tf.lite.constants.INT8':
'tf.compat.v1.lite.constants.INT8',
'tf.lite.constants.TFLITE':
'tf.compat.v1.lite.constants.TFLITE',
'tf.lite.experimental.convert_op_hints_to_stubs':
'tf.compat.v1.lite.experimental.convert_op_hints_to_stubs',
'tf.lite.experimental.get_potentially_supported_ops':
'tf.compat.v1.lite.experimental.get_potentially_supported_ops',
'tf.lite.experimental.nn.TFLiteLSTMCell':
'tf.compat.v1.lite.experimental.nn.TFLiteLSTMCell',
'tf.lite.experimental.nn.TfLiteRNNCell':
'tf.compat.v1.lite.experimental.nn.TfLiteRNNCell',
'tf.lite.experimental.nn.dynamic_rnn':
'tf.compat.v1.lite.experimental.nn.dynamic_rnn',
'tf.lite.toco_convert':
'tf.compat.v1.lite.toco_convert',
'tf.local_variables':
'tf.compat.v1.local_variables',
'tf.local_variables_initializer':
'tf.compat.v1.local_variables_initializer',
'tf.log':
'tf.math.log',
'tf.log1p':
'tf.math.log1p',
'tf.log_sigmoid':
'tf.math.log_sigmoid',
'tf.logging.DEBUG':
'tf.compat.v1.logging.DEBUG',
'tf.logging.ERROR':
'tf.compat.v1.logging.ERROR',
'tf.logging.FATAL':
'tf.compat.v1.logging.FATAL',
'tf.logging.INFO':
'tf.compat.v1.logging.INFO',
'tf.logging.TaskLevelStatusMessage':
'tf.compat.v1.logging.TaskLevelStatusMessage',
'tf.logging.WARN':
'tf.compat.v1.logging.WARN',
'tf.logging.debug':
'tf.compat.v1.logging.debug',
'tf.logging.error':
'tf.compat.v1.logging.error',
'tf.logging.fatal':
'tf.compat.v1.logging.fatal',
'tf.logging.flush':
'tf.compat.v1.logging.flush',
'tf.logging.get_verbosity':
'tf.compat.v1.logging.get_verbosity',
'tf.logging.info':
'tf.compat.v1.logging.info',
'tf.logging.log':
'tf.compat.v1.logging.log',
'tf.logging.log_every_n':
'tf.compat.v1.logging.log_every_n',
'tf.logging.log_first_n':
'tf.compat.v1.logging.log_first_n',
'tf.logging.log_if':
'tf.compat.v1.logging.log_if',
'tf.logging.set_verbosity':
'tf.compat.v1.logging.set_verbosity',
'tf.logging.vlog':
'tf.compat.v1.logging.vlog',
'tf.logging.warn':
'tf.compat.v1.logging.warn',
'tf.logging.warning':
'tf.compat.v1.logging.warning',
'tf.logical_xor':
'tf.math.logical_xor',
'tf.losses.Reduction':
'tf.compat.v1.losses.Reduction',
'tf.losses.absolute_difference':
'tf.compat.v1.losses.absolute_difference',
'tf.losses.add_loss':
'tf.compat.v1.losses.add_loss',
'tf.losses.compute_weighted_loss':
'tf.compat.v1.losses.compute_weighted_loss',
'tf.losses.cosine_distance':
'tf.compat.v1.losses.cosine_distance',
'tf.losses.get_losses':
'tf.compat.v1.losses.get_losses',
'tf.losses.get_regularization_loss':
'tf.compat.v1.losses.get_regularization_loss',
'tf.losses.get_regularization_losses':
'tf.compat.v1.losses.get_regularization_losses',
'tf.losses.get_total_loss':
'tf.compat.v1.losses.get_total_loss',
'tf.losses.hinge_loss':
'tf.compat.v1.losses.hinge_loss',
'tf.losses.huber_loss':
'tf.compat.v1.losses.huber_loss',
'tf.losses.log_loss':
'tf.compat.v1.losses.log_loss',
'tf.losses.mean_pairwise_squared_error':
'tf.compat.v1.losses.mean_pairwise_squared_error',
'tf.losses.mean_squared_error':
'tf.compat.v1.losses.mean_squared_error',
'tf.losses.sigmoid_cross_entropy':
'tf.compat.v1.losses.sigmoid_cross_entropy',
'tf.losses.softmax_cross_entropy':
'tf.compat.v1.losses.softmax_cross_entropy',
'tf.losses.sparse_softmax_cross_entropy':
'tf.compat.v1.losses.sparse_softmax_cross_entropy',
'tf.make_template':
'tf.compat.v1.make_template',
'tf.make_tensor_proto':
'tf.compat.v1.make_tensor_proto',
'tf.manip.gather_nd':
'tf.compat.v1.manip.gather_nd',
'tf.manip.reshape':
'tf.reshape',
'tf.manip.reverse':
'tf.reverse',
'tf.manip.roll':
'tf.roll',
'tf.manip.scatter_nd':
'tf.scatter_nd',
'tf.manip.space_to_batch_nd':
'tf.space_to_batch_nd',
'tf.manip.tile':
'tf.tile',
'tf.matching_files':
'tf.io.matching_files',
'tf.matrix_band_part':
'tf.linalg.band_part',
'tf.matrix_determinant':
'tf.linalg.det',
'tf.matrix_diag':
'tf.linalg.diag',
'tf.matrix_diag_part':
'tf.linalg.diag_part',
'tf.matrix_inverse':
'tf.linalg.inv',
'tf.matrix_set_diag':
'tf.linalg.set_diag',
'tf.matrix_solve':
'tf.linalg.solve',
'tf.matrix_solve_ls':
'tf.linalg.lstsq',
'tf.matrix_transpose':
'tf.linalg.matrix_transpose',
'tf.matrix_triangular_solve':
'tf.linalg.triangular_solve',
'tf.metrics.accuracy':
'tf.compat.v1.metrics.accuracy',
'tf.metrics.auc':
'tf.compat.v1.metrics.auc',
'tf.metrics.average_precision_at_k':
'tf.compat.v1.metrics.average_precision_at_k',
'tf.metrics.false_negatives':
'tf.compat.v1.metrics.false_negatives',
'tf.metrics.false_negatives_at_thresholds':
'tf.compat.v1.metrics.false_negatives_at_thresholds',
'tf.metrics.false_positives':
'tf.compat.v1.metrics.false_positives',
'tf.metrics.false_positives_at_thresholds':
'tf.compat.v1.metrics.false_positives_at_thresholds',
'tf.metrics.mean':
'tf.compat.v1.metrics.mean',
'tf.metrics.mean_absolute_error':
'tf.compat.v1.metrics.mean_absolute_error',
'tf.metrics.mean_cosine_distance':
'tf.compat.v1.metrics.mean_cosine_distance',
'tf.metrics.mean_iou':
'tf.compat.v1.metrics.mean_iou',
'tf.metrics.mean_per_class_accuracy':
'tf.compat.v1.metrics.mean_per_class_accuracy',
'tf.metrics.mean_relative_error':
'tf.compat.v1.metrics.mean_relative_error',
'tf.metrics.mean_squared_error':
'tf.compat.v1.metrics.mean_squared_error',
'tf.metrics.mean_tensor':
'tf.compat.v1.metrics.mean_tensor',
'tf.metrics.percentage_below':
'tf.compat.v1.metrics.percentage_below',
'tf.metrics.precision':
'tf.compat.v1.metrics.precision',
'tf.metrics.precision_at_k':
'tf.compat.v1.metrics.precision_at_k',
'tf.metrics.precision_at_thresholds':
'tf.compat.v1.metrics.precision_at_thresholds',
'tf.metrics.precision_at_top_k':
'tf.compat.v1.metrics.precision_at_top_k',
'tf.metrics.recall':
'tf.compat.v1.metrics.recall',
'tf.metrics.recall_at_k':
'tf.compat.v1.metrics.recall_at_k',
'tf.metrics.recall_at_thresholds':
'tf.compat.v1.metrics.recall_at_thresholds',
'tf.metrics.recall_at_top_k':
'tf.compat.v1.metrics.recall_at_top_k',
'tf.metrics.root_mean_squared_error':
'tf.compat.v1.metrics.root_mean_squared_error',
'tf.metrics.sensitivity_at_specificity':
'tf.compat.v1.metrics.sensitivity_at_specificity',
'tf.metrics.sparse_average_precision_at_k':
'tf.compat.v1.metrics.sparse_average_precision_at_k',
'tf.metrics.sparse_precision_at_k':
'tf.compat.v1.metrics.sparse_precision_at_k',
'tf.metrics.specificity_at_sensitivity':
'tf.compat.v1.metrics.specificity_at_sensitivity',
'tf.metrics.true_negatives':
'tf.compat.v1.metrics.true_negatives',
'tf.metrics.true_negatives_at_thresholds':
'tf.compat.v1.metrics.true_negatives_at_thresholds',
'tf.metrics.true_positives':
'tf.compat.v1.metrics.true_positives',
'tf.metrics.true_positives_at_thresholds':
'tf.compat.v1.metrics.true_positives_at_thresholds',
'tf.min_max_variable_partitioner':
'tf.compat.v1.min_max_variable_partitioner',
'tf.mod':
'tf.math.mod',
'tf.model_variables':
'tf.compat.v1.model_variables',
'tf.moving_average_variables':
'tf.compat.v1.moving_average_variables',
'tf.nn.avg_pool_v2':
'tf.nn.avg_pool',
'tf.nn.bidirectional_dynamic_rnn':
'tf.compat.v1.nn.bidirectional_dynamic_rnn',
'tf.nn.conv2d_backprop_filter':
'tf.compat.v1.nn.conv2d_backprop_filter',
'tf.nn.conv3d_backprop_filter':
'tf.compat.v1.nn.conv3d_backprop_filter',
'tf.nn.conv3d_backprop_filter_v2':
'tf.compat.v1.nn.conv3d_backprop_filter_v2',
'tf.nn.ctc_beam_search_decoder_v2':
'tf.nn.ctc_beam_search_decoder',
'tf.nn.ctc_loss_v2':
'tf.nn.ctc_loss',
'tf.nn.depthwise_conv2d_native':
'tf.compat.v1.nn.depthwise_conv2d_native',
'tf.nn.depthwise_conv2d_native_backprop_filter':
'tf.nn.depthwise_conv2d_backprop_filter',
'tf.nn.depthwise_conv2d_native_backprop_input':
'tf.nn.depthwise_conv2d_backprop_input',
'tf.nn.dynamic_rnn':
'tf.compat.v1.nn.dynamic_rnn',
'tf.nn.log_uniform_candidate_sampler':
'tf.random.log_uniform_candidate_sampler',
'tf.nn.max_pool_v2':
'tf.nn.max_pool',
'tf.nn.quantized_avg_pool':
'tf.compat.v1.nn.quantized_avg_pool',
'tf.nn.quantized_conv2d':
'tf.compat.v1.nn.quantized_conv2d',
'tf.nn.quantized_max_pool':
'tf.compat.v1.nn.quantized_max_pool',
'tf.nn.quantized_relu_x':
'tf.compat.v1.nn.quantized_relu_x',
'tf.nn.raw_rnn':
'tf.compat.v1.nn.raw_rnn',
'tf.nn.relu_layer':
'tf.compat.v1.nn.relu_layer',
'tf.nn.rnn_cell.BasicLSTMCell':
'tf.compat.v1.nn.rnn_cell.BasicLSTMCell',
'tf.nn.rnn_cell.BasicRNNCell':
'tf.compat.v1.nn.rnn_cell.BasicRNNCell',
'tf.nn.rnn_cell.DeviceWrapper':
'tf.compat.v1.nn.rnn_cell.DeviceWrapper',
'tf.nn.rnn_cell.DropoutWrapper':
'tf.compat.v1.nn.rnn_cell.DropoutWrapper',
'tf.nn.rnn_cell.GRUCell':
'tf.compat.v1.nn.rnn_cell.GRUCell',
'tf.nn.rnn_cell.LSTMCell':
'tf.compat.v1.nn.rnn_cell.LSTMCell',
'tf.nn.rnn_cell.LSTMStateTuple':
'tf.compat.v1.nn.rnn_cell.LSTMStateTuple',
'tf.nn.rnn_cell.MultiRNNCell':
'tf.compat.v1.nn.rnn_cell.MultiRNNCell',
'tf.nn.rnn_cell.RNNCell':
'tf.compat.v1.nn.rnn_cell.RNNCell',
'tf.nn.rnn_cell.ResidualWrapper':
'tf.compat.v1.nn.rnn_cell.ResidualWrapper',
'tf.nn.static_bidirectional_rnn':
'tf.compat.v1.nn.static_bidirectional_rnn',
'tf.nn.static_rnn':
'tf.compat.v1.nn.static_rnn',
'tf.nn.static_state_saving_rnn':
'tf.compat.v1.nn.static_state_saving_rnn',
'tf.nn.uniform_candidate_sampler':
'tf.random.uniform_candidate_sampler',
'tf.nn.xw_plus_b':
'tf.compat.v1.nn.xw_plus_b',
'tf.no_regularizer':
'tf.compat.v1.no_regularizer',
'tf.op_scope':
'tf.compat.v1.op_scope',
'tf.parse_single_sequence_example':
'tf.io.parse_single_sequence_example',
'tf.parse_tensor':
'tf.io.parse_tensor',
'tf.placeholder':
'tf.compat.v1.placeholder',
'tf.placeholder_with_default':
'tf.compat.v1.placeholder_with_default',
'tf.polygamma':
'tf.math.polygamma',
'tf.profiler.AdviceProto':
'tf.compat.v1.profiler.AdviceProto',
'tf.profiler.GraphNodeProto':
'tf.compat.v1.profiler.GraphNodeProto',
'tf.profiler.MultiGraphNodeProto':
'tf.compat.v1.profiler.MultiGraphNodeProto',
'tf.profiler.OpLogProto':
'tf.compat.v1.profiler.OpLogProto',
'tf.profiler.ProfileOptionBuilder':
'tf.compat.v1.profiler.ProfileOptionBuilder',
'tf.profiler.Profiler':
'tf.compat.v1.profiler.Profiler',
'tf.profiler.advise':
'tf.compat.v1.profiler.advise',
'tf.profiler.profile':
'tf.compat.v1.profiler.profile',
'tf.profiler.write_op_log':
'tf.compat.v1.profiler.write_op_log',
'tf.py_func':
'tf.compat.v1.py_func',
'tf.python_io.TFRecordCompressionType':
'tf.compat.v1.python_io.TFRecordCompressionType',
'tf.python_io.TFRecordOptions':
'tf.io.TFRecordOptions',
'tf.python_io.TFRecordWriter':
'tf.io.TFRecordWriter',
'tf.python_io.tf_record_iterator':
'tf.compat.v1.python_io.tf_record_iterator',
'tf.qr':
'tf.linalg.qr',
'tf.quantize':
'tf.quantization.quantize',
'tf.quantized_concat':
'tf.quantization.quantized_concat',
'tf.ragged.RaggedTensorValue':
'tf.compat.v1.ragged.RaggedTensorValue',
'tf.ragged.constant_value':
'tf.compat.v1.ragged.constant_value',
'tf.ragged.placeholder':
'tf.compat.v1.ragged.placeholder',
'tf.random.get_seed':
'tf.compat.v1.random.get_seed',
'tf.random.set_random_seed':
'tf.compat.v1.random.set_random_seed',
'tf.random_crop':
'tf.image.random_crop',
'tf.random_gamma':
'tf.random.gamma',
'tf.random_normal':
'tf.random.normal',
'tf.random_shuffle':
'tf.random.shuffle',
'tf.random_uniform':
'tf.random.uniform',
'tf.read_file':
'tf.io.read_file',
'tf.real':
'tf.math.real',
'tf.reciprocal':
'tf.math.reciprocal',
'tf.regex_replace':
'tf.strings.regex_replace',
'tf.report_uninitialized_variables':
'tf.compat.v1.report_uninitialized_variables',
'tf.reset_default_graph':
'tf.compat.v1.reset_default_graph',
'tf.resource_loader.get_data_files_path':
'tf.compat.v1.resource_loader.get_data_files_path',
'tf.resource_loader.get_path_to_datafile':
'tf.compat.v1.resource_loader.get_path_to_datafile',
'tf.resource_loader.get_root_dir_with_all_resources':
'tf.compat.v1.resource_loader.get_root_dir_with_all_resources',
'tf.resource_loader.load_resource':
'tf.compat.v1.resource_loader.load_resource',
'tf.resource_loader.readahead_file_path':
'tf.compat.v1.resource_loader.readahead_file_path',
'tf.resource_variables_enabled':
'tf.compat.v1.resource_variables_enabled',
'tf.reverse_v2':
'tf.reverse',
'tf.rint':
'tf.math.rint',
'tf.rsqrt':
'tf.math.rsqrt',
'tf.saved_model.Builder':
'tf.compat.v1.saved_model.Builder',
'tf.saved_model.LEGACY_INIT_OP_KEY':
'tf.compat.v1.saved_model.LEGACY_INIT_OP_KEY',
'tf.saved_model.MAIN_OP_KEY':
'tf.compat.v1.saved_model.MAIN_OP_KEY',
'tf.saved_model.build_signature_def':
'tf.compat.v1.saved_model.build_signature_def',
'tf.saved_model.build_tensor_info':
'tf.compat.v1.saved_model.build_tensor_info',
'tf.saved_model.builder.SavedModelBuilder':
'tf.compat.v1.saved_model.builder.SavedModelBuilder',
'tf.saved_model.classification_signature_def':
'tf.compat.v1.saved_model.classification_signature_def',
'tf.saved_model.constants.ASSETS_DIRECTORY':
'tf.saved_model.ASSETS_DIRECTORY',
'tf.saved_model.constants.ASSETS_KEY':
'tf.saved_model.ASSETS_KEY',
'tf.saved_model.constants.LEGACY_INIT_OP_KEY':
'tf.compat.v1.saved_model.constants.LEGACY_INIT_OP_KEY',
'tf.saved_model.constants.MAIN_OP_KEY':
'tf.compat.v1.saved_model.constants.MAIN_OP_KEY',
'tf.saved_model.constants.SAVED_MODEL_FILENAME_PB':
'tf.saved_model.SAVED_MODEL_FILENAME_PB',
'tf.saved_model.constants.SAVED_MODEL_FILENAME_PBTXT':
'tf.saved_model.SAVED_MODEL_FILENAME_PBTXT',
'tf.saved_model.constants.SAVED_MODEL_SCHEMA_VERSION':
'tf.saved_model.SAVED_MODEL_SCHEMA_VERSION',
'tf.saved_model.constants.VARIABLES_DIRECTORY':
'tf.saved_model.VARIABLES_DIRECTORY',
'tf.saved_model.constants.VARIABLES_FILENAME':
'tf.saved_model.VARIABLES_FILENAME',
'tf.saved_model.experimental.save':
'tf.saved_model.save',
'tf.saved_model.get_tensor_from_tensor_info':
'tf.compat.v1.saved_model.get_tensor_from_tensor_info',
'tf.saved_model.is_valid_signature':
'tf.compat.v1.saved_model.is_valid_signature',
'tf.saved_model.loader.load':
'tf.compat.v1.saved_model.loader.load',
'tf.saved_model.loader.maybe_saved_model_directory':
'tf.compat.v1.saved_model.loader.maybe_saved_model_directory',
'tf.saved_model.main_op.main_op':
'tf.compat.v1.saved_model.main_op.main_op',
'tf.saved_model.main_op.main_op_with_restore':
'tf.compat.v1.saved_model.main_op.main_op_with_restore',
'tf.saved_model.main_op_with_restore':
'tf.compat.v1.saved_model.main_op_with_restore',
'tf.saved_model.maybe_saved_model_directory':
'tf.compat.v1.saved_model.maybe_saved_model_directory',
'tf.saved_model.predict_signature_def':
'tf.compat.v1.saved_model.predict_signature_def',
'tf.saved_model.regression_signature_def':
'tf.compat.v1.saved_model.regression_signature_def',
'tf.saved_model.signature_constants.CLASSIFY_INPUTS':
'tf.saved_model.CLASSIFY_INPUTS',
'tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME':
'tf.saved_model.CLASSIFY_METHOD_NAME',
'tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES':
'tf.saved_model.CLASSIFY_OUTPUT_CLASSES',
'tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES':
'tf.saved_model.CLASSIFY_OUTPUT_SCORES',
'tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY':
'tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY',
'tf.saved_model.signature_constants.PREDICT_INPUTS':
'tf.saved_model.PREDICT_INPUTS',
'tf.saved_model.signature_constants.PREDICT_METHOD_NAME':
'tf.saved_model.PREDICT_METHOD_NAME',
'tf.saved_model.signature_constants.PREDICT_OUTPUTS':
'tf.saved_model.PREDICT_OUTPUTS',
'tf.saved_model.signature_constants.REGRESS_INPUTS':
'tf.saved_model.REGRESS_INPUTS',
'tf.saved_model.signature_constants.REGRESS_METHOD_NAME':
'tf.saved_model.REGRESS_METHOD_NAME',
'tf.saved_model.signature_constants.REGRESS_OUTPUTS':
'tf.saved_model.REGRESS_OUTPUTS',
'tf.saved_model.signature_def_utils.build_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.build_signature_def',
'tf.saved_model.signature_def_utils.classification_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.classification_signature_def',
'tf.saved_model.signature_def_utils.is_valid_signature':
'tf.compat.v1.saved_model.signature_def_utils.is_valid_signature',
'tf.saved_model.signature_def_utils.predict_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.predict_signature_def',
'tf.saved_model.signature_def_utils.regression_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.regression_signature_def',
'tf.saved_model.simple_save':
'tf.compat.v1.saved_model.simple_save',
'tf.saved_model.tag_constants.GPU':
'tf.saved_model.GPU',
'tf.saved_model.tag_constants.SERVING':
'tf.saved_model.SERVING',
'tf.saved_model.tag_constants.TPU':
'tf.saved_model.TPU',
'tf.saved_model.tag_constants.TRAINING':
'tf.saved_model.TRAINING',
'tf.saved_model.utils.build_tensor_info':
'tf.compat.v1.saved_model.utils.build_tensor_info',
'tf.saved_model.utils.get_tensor_from_tensor_info':
'tf.compat.v1.saved_model.utils.get_tensor_from_tensor_info',
'tf.scatter_add':
'tf.compat.v1.scatter_add',
'tf.scatter_div':
'tf.compat.v1.scatter_div',
'tf.scatter_max':
'tf.compat.v1.scatter_max',
'tf.scatter_min':
'tf.compat.v1.scatter_min',
'tf.scatter_mul':
'tf.compat.v1.scatter_mul',
'tf.scatter_nd_add':
'tf.compat.v1.scatter_nd_add',
'tf.scatter_nd_sub':
'tf.compat.v1.scatter_nd_sub',
'tf.scatter_nd_update':
'tf.compat.v1.scatter_nd_update',
'tf.scatter_sub':
'tf.compat.v1.scatter_sub',
'tf.scatter_update':
'tf.compat.v1.scatter_update',
'tf.segment_max':
'tf.math.segment_max',
'tf.segment_mean':
'tf.math.segment_mean',
'tf.segment_min':
'tf.math.segment_min',
'tf.segment_prod':
'tf.math.segment_prod',
'tf.segment_sum':
'tf.math.segment_sum',
'tf.self_adjoint_eig':
'tf.linalg.eigh',
'tf.self_adjoint_eigvals':
'tf.linalg.eigvalsh',
'tf.serialize_many_sparse':
'tf.compat.v1.serialize_many_sparse',
'tf.serialize_sparse':
'tf.compat.v1.serialize_sparse',
'tf.serialize_tensor':
'tf.io.serialize_tensor',
'tf.set_random_seed':
'tf.compat.v1.set_random_seed',
'tf.setdiff1d':
'tf.compat.v1.setdiff1d',
'tf.sets.set_difference':
'tf.sets.difference',
'tf.sets.set_intersection':
'tf.sets.intersection',
'tf.sets.set_size':
'tf.sets.size',
'tf.sets.set_union':
'tf.sets.union',
'tf.space_to_depth':
'tf.compat.v1.space_to_depth',
'tf.sparse.SparseConditionalAccumulator':
'tf.compat.v1.sparse.SparseConditionalAccumulator',
'tf.sparse.matmul':
'tf.sparse.sparse_dense_matmul',
'tf.sparse.merge':
'tf.compat.v1.sparse.merge',
'tf.sparse.placeholder':
'tf.compat.v1.sparse.placeholder',
'tf.sparse.reduce_max_sparse':
'tf.compat.v1.sparse.reduce_max_sparse',
'tf.sparse.reduce_sum_sparse':
'tf.compat.v1.sparse.reduce_sum_sparse',
'tf.sparse_fill_empty_rows':
'tf.sparse.fill_empty_rows',
'tf.sparse_mask':
'tf.sparse.mask',
'tf.sparse_maximum':
'tf.sparse.maximum',
'tf.sparse_merge':
'tf.compat.v1.sparse_merge',
'tf.sparse_minimum':
'tf.sparse.minimum',
'tf.sparse_placeholder':
'tf.compat.v1.sparse_placeholder',
'tf.sparse_reduce_max_sparse':
'tf.compat.v1.sparse_reduce_max_sparse',
'tf.sparse_reduce_sum_sparse':
'tf.compat.v1.sparse_reduce_sum_sparse',
'tf.sparse_reorder':
'tf.sparse.reorder',
'tf.sparse_reset_shape':
'tf.sparse.reset_shape',
'tf.sparse_reshape':
'tf.sparse.reshape',
'tf.sparse_retain':
'tf.sparse.retain',
'tf.sparse_segment_mean':
'tf.compat.v1.sparse_segment_mean',
'tf.sparse_segment_sqrt_n':
'tf.compat.v1.sparse_segment_sqrt_n',
'tf.sparse_segment_sum':
'tf.compat.v1.sparse_segment_sum',
'tf.sparse_slice':
'tf.sparse.slice',
'tf.sparse_softmax':
'tf.sparse.softmax',
'tf.sparse_tensor_dense_matmul':
'tf.sparse.sparse_dense_matmul',
'tf.sparse_tensor_to_dense':
'tf.sparse.to_dense',
'tf.sparse_to_dense':
'tf.compat.v1.sparse_to_dense',
'tf.sparse_to_indicator':
'tf.sparse.to_indicator',
'tf.sparse_transpose':
'tf.sparse.transpose',
'tf.spectral.dct':
'tf.signal.dct',
'tf.spectral.fft':
'tf.signal.fft',
'tf.spectral.fft2d':
'tf.signal.fft2d',
'tf.spectral.fft3d':
'tf.signal.fft3d',
'tf.spectral.idct':
'tf.signal.idct',
'tf.spectral.ifft':
'tf.signal.ifft',
'tf.spectral.ifft2d':
'tf.signal.ifft2d',
'tf.spectral.ifft3d':
'tf.signal.ifft3d',
'tf.spectral.irfft':
'tf.signal.irfft',
'tf.spectral.irfft2d':
'tf.signal.irfft2d',
'tf.spectral.irfft3d':
'tf.signal.irfft3d',
'tf.spectral.rfft':
'tf.signal.rfft',
'tf.spectral.rfft2d':
'tf.signal.rfft2d',
'tf.spectral.rfft3d':
'tf.signal.rfft3d',
'tf.squared_difference':
'tf.math.squared_difference',
'tf.string_join':
'tf.strings.join',
'tf.string_strip':
'tf.strings.strip',
'tf.string_to_hash_bucket_fast':
'tf.strings.to_hash_bucket_fast',
'tf.string_to_hash_bucket_strong':
'tf.strings.to_hash_bucket_strong',
'tf.summary.Event':
'tf.compat.v1.summary.Event',
'tf.summary.FileWriter':
'tf.compat.v1.summary.FileWriter',
'tf.summary.FileWriterCache':
'tf.compat.v1.summary.FileWriterCache',
'tf.summary.SessionLog':
'tf.compat.v1.summary.SessionLog',
'tf.summary.Summary':
'tf.compat.v1.summary.Summary',
'tf.summary.SummaryDescription':
'tf.compat.v1.summary.SummaryDescription',
'tf.summary.TaggedRunMetadata':
'tf.compat.v1.summary.TaggedRunMetadata',
'tf.summary.all_v2_summary_ops':
'tf.compat.v1.summary.all_v2_summary_ops',
'tf.summary.audio':
'tf.compat.v1.summary.audio',
'tf.summary.get_summary_description':
'tf.compat.v1.summary.get_summary_description',
'tf.summary.histogram':
'tf.compat.v1.summary.histogram',
'tf.summary.image':
'tf.compat.v1.summary.image',
'tf.summary.initialize':
'tf.compat.v1.summary.initialize',
'tf.summary.merge':
'tf.compat.v1.summary.merge',
'tf.summary.merge_all':
'tf.compat.v1.summary.merge_all',
'tf.summary.scalar':
'tf.compat.v1.summary.scalar',
'tf.summary.tensor_summary':
'tf.compat.v1.summary.tensor_summary',
'tf.summary.text':
'tf.compat.v1.summary.text',
'tf.svd':
'tf.linalg.svd',
'tf.tables_initializer':
'tf.compat.v1.tables_initializer',
'tf.tensor_scatter_add':
'tf.tensor_scatter_nd_add',
'tf.tensor_scatter_sub':
'tf.tensor_scatter_nd_sub',
'tf.tensor_scatter_update':
'tf.tensor_scatter_nd_update',
'tf.test.StubOutForTesting':
'tf.compat.v1.test.StubOutForTesting',
'tf.test.compute_gradient_error':
'tf.compat.v1.test.compute_gradient_error',
'tf.test.get_temp_dir':
'tf.compat.v1.test.get_temp_dir',
'tf.test.mock':
'tf.compat.v1.test.mock',
'tf.test.test_src_dir_path':
'tf.compat.v1.test.test_src_dir_path',
'tf.to_bfloat16':
'tf.compat.v1.to_bfloat16',
'tf.to_complex128':
'tf.compat.v1.to_complex128',
'tf.to_complex64':
'tf.compat.v1.to_complex64',
'tf.to_double':
'tf.compat.v1.to_double',
'tf.to_float':
'tf.compat.v1.to_float',
'tf.to_int32':
'tf.compat.v1.to_int32',
'tf.to_int64':
'tf.compat.v1.to_int64',
'tf.tpu.CrossShardOptimizer':
'tf.compat.v1.tpu.CrossShardOptimizer',
'tf.tpu.batch_parallel':
'tf.compat.v1.tpu.batch_parallel',
'tf.tpu.bfloat16_scope':
'tf.compat.v1.tpu.bfloat16_scope',
'tf.tpu.core':
'tf.compat.v1.tpu.core',
'tf.tpu.cross_replica_sum':
'tf.compat.v1.tpu.cross_replica_sum',
'tf.tpu.experimental.AdagradParameters':
'tf.compat.v1.tpu.experimental.AdagradParameters',
'tf.tpu.experimental.AdamParameters':
'tf.compat.v1.tpu.experimental.AdamParameters',
'tf.tpu.experimental.StochasticGradientDescentParameters':
'tf.compat.v1.tpu.experimental.StochasticGradientDescentParameters',
'tf.tpu.experimental.embedding_column':
'tf.compat.v1.tpu.experimental.embedding_column',
'tf.tpu.experimental.shared_embedding_columns':
'tf.compat.v1.tpu.experimental.shared_embedding_columns',
'tf.tpu.initialize_system':
'tf.compat.v1.tpu.initialize_system',
'tf.tpu.outside_compilation':
'tf.compat.v1.tpu.outside_compilation',
'tf.tpu.replicate':
'tf.compat.v1.tpu.replicate',
'tf.tpu.rewrite':
'tf.compat.v1.tpu.rewrite',
'tf.tpu.shard':
'tf.compat.v1.tpu.shard',
'tf.tpu.shutdown_system':
'tf.compat.v1.tpu.shutdown_system',
'tf.trace':
'tf.linalg.trace',
'tf.train.AdadeltaOptimizer':
'tf.compat.v1.train.AdadeltaOptimizer',
'tf.train.AdagradDAOptimizer':
'tf.compat.v1.train.AdagradDAOptimizer',
'tf.train.AdagradOptimizer':
'tf.compat.v1.train.AdagradOptimizer',
'tf.train.AdamOptimizer':
'tf.compat.v1.train.AdamOptimizer',
'tf.train.CheckpointSaverHook':
'tf.estimator.CheckpointSaverHook',
'tf.train.CheckpointSaverListener':
'tf.estimator.CheckpointSaverListener',
'tf.train.ChiefSessionCreator':
'tf.compat.v1.train.ChiefSessionCreator',
'tf.train.FeedFnHook':
'tf.estimator.FeedFnHook',
'tf.train.FinalOpsHook':
'tf.estimator.FinalOpsHook',
'tf.train.FtrlOptimizer':
'tf.compat.v1.train.FtrlOptimizer',
'tf.train.GlobalStepWaiterHook':
'tf.estimator.GlobalStepWaiterHook',
'tf.train.GradientDescentOptimizer':
'tf.compat.v1.train.GradientDescentOptimizer',
'tf.train.LoggingTensorHook':
'tf.estimator.LoggingTensorHook',
'tf.train.LooperThread':
'tf.compat.v1.train.LooperThread',
'tf.train.MomentumOptimizer':
'tf.compat.v1.train.MomentumOptimizer',
'tf.train.MonitoredSession':
'tf.compat.v1.train.MonitoredSession',
'tf.train.MonitoredTrainingSession':
'tf.compat.v1.train.MonitoredTrainingSession',
'tf.train.NanLossDuringTrainingError':
'tf.estimator.NanLossDuringTrainingError',
'tf.train.NanTensorHook':
'tf.estimator.NanTensorHook',
'tf.train.NewCheckpointReader':
'tf.compat.v1.train.NewCheckpointReader',
'tf.train.Optimizer':
'tf.compat.v1.train.Optimizer',
'tf.train.ProfilerHook':
'tf.estimator.ProfilerHook',
'tf.train.ProximalAdagradOptimizer':
'tf.compat.v1.train.ProximalAdagradOptimizer',
'tf.train.ProximalGradientDescentOptimizer':
'tf.compat.v1.train.ProximalGradientDescentOptimizer',
'tf.train.QueueRunner':
'tf.compat.v1.train.QueueRunner',
'tf.train.RMSPropOptimizer':
'tf.compat.v1.train.RMSPropOptimizer',
'tf.train.Saver':
'tf.compat.v1.train.Saver',
'tf.train.SaverDef':
'tf.compat.v1.train.SaverDef',
'tf.train.Scaffold':
'tf.compat.v1.train.Scaffold',
'tf.train.SecondOrStepTimer':
'tf.estimator.SecondOrStepTimer',
'tf.train.Server':
'tf.distribute.Server',
'tf.train.SessionCreator':
'tf.compat.v1.train.SessionCreator',
'tf.train.SessionManager':
'tf.compat.v1.train.SessionManager',
'tf.train.SessionRunArgs':
'tf.estimator.SessionRunArgs',
'tf.train.SessionRunContext':
'tf.estimator.SessionRunContext',
'tf.train.SessionRunHook':
'tf.estimator.SessionRunHook',
'tf.train.SessionRunValues':
'tf.estimator.SessionRunValues',
'tf.train.SingularMonitoredSession':
'tf.compat.v1.train.SingularMonitoredSession',
'tf.train.StepCounterHook':
'tf.estimator.StepCounterHook',
'tf.train.StopAtStepHook':
'tf.estimator.StopAtStepHook',
'tf.train.SummarySaverHook':
'tf.estimator.SummarySaverHook',
'tf.train.Supervisor':
'tf.compat.v1.train.Supervisor',
'tf.train.SyncReplicasOptimizer':
'tf.compat.v1.train.SyncReplicasOptimizer',
'tf.train.VocabInfo':
'tf.estimator.VocabInfo',
'tf.train.WorkerSessionCreator':
'tf.compat.v1.train.WorkerSessionCreator',
'tf.train.add_queue_runner':
'tf.compat.v1.train.add_queue_runner',
'tf.train.assert_global_step':
'tf.compat.v1.train.assert_global_step',
'tf.train.basic_train_loop':
'tf.compat.v1.train.basic_train_loop',
'tf.train.batch':
'tf.compat.v1.train.batch',
'tf.train.batch_join':
'tf.compat.v1.train.batch_join',
'tf.train.checkpoint_exists':
'tf.compat.v1.train.checkpoint_exists',
'tf.train.cosine_decay':
'tf.compat.v1.train.cosine_decay',
'tf.train.cosine_decay_restarts':
'tf.compat.v1.train.cosine_decay_restarts',
'tf.train.create_global_step':
'tf.compat.v1.train.create_global_step',
'tf.train.do_quantize_training_on_graphdef':
'tf.compat.v1.train.do_quantize_training_on_graphdef',
'tf.train.experimental.MixedPrecisionLossScaleOptimizer':
'tf.compat.v1.train.experimental.MixedPrecisionLossScaleOptimizer',
'tf.train.exponential_decay':
'tf.compat.v1.train.exponential_decay',
'tf.train.export_meta_graph':
'tf.compat.v1.train.export_meta_graph',
'tf.train.generate_checkpoint_state_proto':
'tf.compat.v1.train.generate_checkpoint_state_proto',
'tf.train.get_checkpoint_mtimes':
'tf.compat.v1.train.get_checkpoint_mtimes',
'tf.train.get_global_step':
'tf.compat.v1.train.get_global_step',
'tf.train.get_or_create_global_step':
'tf.compat.v1.train.get_or_create_global_step',
'tf.train.global_step':
'tf.compat.v1.train.global_step',
'tf.train.import_meta_graph':
'tf.compat.v1.train.import_meta_graph',
'tf.train.init_from_checkpoint':
'tf.compat.v1.train.init_from_checkpoint',
'tf.train.input_producer':
'tf.compat.v1.train.input_producer',
'tf.train.inverse_time_decay':
'tf.compat.v1.train.inverse_time_decay',
'tf.train.limit_epochs':
'tf.compat.v1.train.limit_epochs',
'tf.train.linear_cosine_decay':
'tf.compat.v1.train.linear_cosine_decay',
'tf.train.match_filenames_once':
'tf.io.match_filenames_once',
'tf.train.maybe_batch':
'tf.compat.v1.train.maybe_batch',
'tf.train.maybe_batch_join':
'tf.compat.v1.train.maybe_batch_join',
'tf.train.maybe_shuffle_batch':
'tf.compat.v1.train.maybe_shuffle_batch',
'tf.train.maybe_shuffle_batch_join':
'tf.compat.v1.train.maybe_shuffle_batch_join',
'tf.train.natural_exp_decay':
'tf.compat.v1.train.natural_exp_decay',
'tf.train.noisy_linear_cosine_decay':
'tf.compat.v1.train.noisy_linear_cosine_decay',
'tf.train.piecewise_constant':
'tf.compat.v1.train.piecewise_constant',
'tf.train.piecewise_constant_decay':
'tf.compat.v1.train.piecewise_constant_decay',
'tf.train.polynomial_decay':
'tf.compat.v1.train.polynomial_decay',
'tf.train.queue_runner.QueueRunner':
'tf.compat.v1.train.queue_runner.QueueRunner',
'tf.train.queue_runner.add_queue_runner':
'tf.compat.v1.train.queue_runner.add_queue_runner',
'tf.train.queue_runner.start_queue_runners':
'tf.compat.v1.train.queue_runner.start_queue_runners',
'tf.train.range_input_producer':
'tf.compat.v1.train.range_input_producer',
'tf.train.remove_checkpoint':
'tf.compat.v1.train.remove_checkpoint',
'tf.train.replica_device_setter':
'tf.compat.v1.train.replica_device_setter',
'tf.train.shuffle_batch':
'tf.compat.v1.train.shuffle_batch',
'tf.train.shuffle_batch_join':
'tf.compat.v1.train.shuffle_batch_join',
'tf.train.slice_input_producer':
'tf.compat.v1.train.slice_input_producer',
'tf.train.start_queue_runners':
'tf.compat.v1.train.start_queue_runners',
'tf.train.string_input_producer':
'tf.compat.v1.train.string_input_producer',
'tf.train.summary_iterator':
'tf.compat.v1.train.summary_iterator',
'tf.train.update_checkpoint_state':
'tf.compat.v1.train.update_checkpoint_state',
'tf.train.warm_start':
'tf.compat.v1.train.warm_start',
'tf.train.write_graph':
'tf.io.write_graph',
'tf.trainable_variables':
'tf.compat.v1.trainable_variables',
'tf.truncated_normal':
'tf.random.truncated_normal',
'tf.uniform_unit_scaling_initializer':
'tf.compat.v1.uniform_unit_scaling_initializer',
'tf.unsorted_segment_max':
'tf.math.unsorted_segment_max',
'tf.unsorted_segment_mean':
'tf.math.unsorted_segment_mean',
'tf.unsorted_segment_min':
'tf.math.unsorted_segment_min',
'tf.unsorted_segment_prod':
'tf.math.unsorted_segment_prod',
'tf.unsorted_segment_sqrt_n':
'tf.math.unsorted_segment_sqrt_n',
'tf.unsorted_segment_sum':
'tf.math.unsorted_segment_sum',
'tf.variable_axis_size_partitioner':
'tf.compat.v1.variable_axis_size_partitioner',
'tf.variable_op_scope':
'tf.compat.v1.variable_op_scope',
'tf.variable_scope':
'tf.compat.v1.variable_scope',
'tf.variables_initializer':
'tf.compat.v1.variables_initializer',
'tf.verify_tensor_all_finite':
'tf.compat.v1.verify_tensor_all_finite',
'tf.where_v2':
'tf.where',
'tf.wrap_function':
'tf.compat.v1.wrap_function',
'tf.write_file':
'tf.io.write_file',
'tf.zeta':
'tf.math.zeta'
}
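# A minimal lookup sketch over the autogenerated table, using a hypothetical
# helper that is not defined in this module:
#
#   def upgraded_name(old_name, table=renames):
#     """Return the TF 2.0 name for `old_name`, or `old_name` if unchanged."""
#     return table.get(old_name, old_name)
#
#   upgraded_name("tf.log")     # -> "tf.math.log"
#   upgraded_name("tf.matmul")  # -> "tf.matmul" (not renamed)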
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/compatibility/renames_v2.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from pre-1.0 TensorFlow to 1.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from tensorflow.tools.compatibility import ast_edits
class TFAPIChangeSpec(ast_edits.APIChangeSpec):
"""List of maps that describe what changed in the API."""
def __init__(self):
# Maps from a function name to a dictionary that describes how to
# map from an old argument keyword to the new argument keyword.
self.function_keyword_renames = {
"tf.batch_matmul": {
"adj_x": "adjoint_a",
"adj_y": "adjoint_b",
},
"tf.count_nonzero": {
"reduction_indices": "axis"
},
"tf.reduce_all": {
"reduction_indices": "axis"
},
"tf.reduce_any": {
"reduction_indices": "axis"
},
"tf.reduce_max": {
"reduction_indices": "axis"
},
"tf.reduce_mean": {
"reduction_indices": "axis"
},
"tf.reduce_min": {
"reduction_indices": "axis"
},
"tf.reduce_prod": {
"reduction_indices": "axis"
},
"tf.reduce_sum": {
"reduction_indices": "axis"
},
"tf.reduce_logsumexp": {
"reduction_indices": "axis"
},
"tf.expand_dims": {
"dim": "axis"
},
"tf.argmax": {
"dimension": "axis"
},
"tf.argmin": {
"dimension": "axis"
},
"tf.reduce_join": {
"reduction_indices": "axis"
},
"tf.sparse_concat": {
"concat_dim": "axis"
},
"tf.sparse_split": {
"split_dim": "axis"
},
"tf.sparse_reduce_sum": {
"reduction_axes": "axis"
},
"tf.reverse_sequence": {
"seq_dim": "seq_axis",
"batch_dim": "batch_axis"
},
"tf.sparse_reduce_sum_sparse": {
"reduction_axes": "axis"
},
"tf.squeeze": {
"squeeze_dims": "axis"
},
"tf.split": {
"split_dim": "axis",
"num_split": "num_or_size_splits"
},
"tf.concat": {
"concat_dim": "axis"
},
}
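# Editorial note: an entry above such as "tf.expand_dims": {"dim": "axis"} means
# a call written against the old API as
#   tf.expand_dims(t, dim=0)
# is rewritten by the upgrader to
#   tf.expand_dims(t, axis=0)
# Only the keyword spelling changes; positional usage is left untouched.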
# Mapping from function to the new name of the function
self.symbol_renames = {
"tf.inv": "tf.reciprocal",
"tf.contrib.deprecated.scalar_summary": "tf.summary.scalar",
"tf.contrib.deprecated.histogram_summary": "tf.summary.histogram",
"tf.listdiff": "tf.setdiff1d",
"tf.list_diff": "tf.setdiff1d",
"tf.mul": "tf.multiply",
"tf.neg": "tf.negative",
"tf.sub": "tf.subtract",
"tf.train.SummaryWriter": "tf.summary.FileWriter",
"tf.scalar_summary": "tf.summary.scalar",
"tf.histogram_summary": "tf.summary.histogram",
"tf.audio_summary": "tf.summary.audio",
"tf.image_summary": "tf.summary.image",
"tf.merge_summary": "tf.summary.merge",
"tf.merge_all_summaries": "tf.summary.merge_all",
"tf.image.per_image_whitening": "tf.image.per_image_standardization",
"tf.all_variables": "tf.global_variables",
"tf.VARIABLES": "tf.GLOBAL_VARIABLES",
"tf.initialize_all_variables": "tf.global_variables_initializer",
"tf.initialize_variables": "tf.variables_initializer",
"tf.initialize_local_variables": "tf.local_variables_initializer",
"tf.batch_matrix_diag": "tf.matrix_diag",
"tf.batch_band_part": "tf.band_part",
"tf.batch_set_diag": "tf.set_diag",
"tf.batch_matrix_transpose": "tf.matrix_transpose",
"tf.batch_matrix_determinant": "tf.matrix_determinant",
"tf.batch_matrix_inverse": "tf.matrix_inverse",
"tf.batch_cholesky": "tf.cholesky",
"tf.batch_cholesky_solve": "tf.cholesky_solve",
"tf.batch_matrix_solve": "tf.matrix_solve",
"tf.batch_matrix_triangular_solve": "tf.matrix_triangular_solve",
"tf.batch_matrix_solve_ls": "tf.matrix_solve_ls",
"tf.batch_self_adjoint_eig": "tf.self_adjoint_eig",
"tf.batch_self_adjoint_eigvals": "tf.self_adjoint_eigvals",
"tf.batch_svd": "tf.svd",
"tf.batch_fft": "tf.fft",
"tf.batch_ifft": "tf.ifft",
"tf.batch_fft2d": "tf.fft2d",
"tf.batch_ifft2d": "tf.ifft2d",
"tf.batch_fft3d": "tf.fft3d",
"tf.batch_ifft3d": "tf.ifft3d",
"tf.select": "tf.where",
"tf.complex_abs": "tf.abs",
"tf.batch_matmul": "tf.matmul",
"tf.pack": "tf.stack",
"tf.unpack": "tf.unstack",
"tf.op_scope": "tf.name_scope",
}
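# Editorial note: symbol_renames rewrites only the callee name and leaves the
# arguments alone, e.g. tf.mul(a, b) becomes tf.multiply(a, b) and
# tf.pack(values) becomes tf.stack(values), per the entries above.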
self.change_to_function = {
"tf.ones_initializer",
"tf.zeros_initializer",
}
# Functions that were reordered should be changed to the new keyword args
# for safety, if positional arguments are used. If you have reversed the
# positional arguments yourself, this could do the wrong thing.
self.function_reorders = {
"tf.split": ["axis", "num_or_size_splits", "value", "name"],
"tf.sparse_split": ["axis", "num_or_size_splits", "value", "name"],
"tf.concat": ["concat_dim", "values", "name"],
"tf.svd": ["tensor", "compute_uv", "full_matrices", "name"],
"tf.nn.softmax_cross_entropy_with_logits": [
"logits", "labels", "dim", "name"
],
"tf.nn.sparse_softmax_cross_entropy_with_logits": [
"logits", "labels", "name"
],
"tf.nn.sigmoid_cross_entropy_with_logits": ["logits", "labels", "name"],
"tf.op_scope": ["values", "name", "default_name"],
}
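# Editorial note: each list above records the *old* positional order. For
# "tf.concat": ["concat_dim", "values", "name"], a positional call such as
#   tf.concat(1, tensors)
# is rewritten with explicit keywords, e.g.
#   tf.concat(concat_dim=1, values=tensors)
# so the binding of each argument no longer depends on the changed order.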
# Warnings that should be printed if corresponding functions are used.
self.function_warnings = {
"tf.reverse": (
ast_edits.ERROR,
"tf.reverse has had its argument semantics changed "
"significantly. The converter cannot detect this reliably, so "
"you need to inspect this usage manually.\n"),
}
self.module_deprecations = {}
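# Editorial sketch (not upstream code): the spec above can also be driven
# programmatically instead of through the CLI below. The helper name and the
# use of six.StringIO are illustrative assumptions, not part of this module.
def _example_upgrade_string(source_text):
  """Upgrade one source string; returns (report, errors, new_text)."""
  import six  # local import keeps the sketch self-contained
  in_file = six.StringIO(source_text)
  out_file = six.StringIO()
  upgrader = ast_edits.ASTCodeUpgrader(TFAPIChangeSpec())
  _, report, errors = upgrader.process_opened_file(
      "example.py", in_file, "example_out.py", out_file)
  return report, errors, out_file.getvalue()
# For instance, _example_upgrade_string("tf.mul(a, b)\n") should yield
# "tf.multiply(a, b)\n" given the symbol_renames entry above.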
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Convert a TensorFlow Python file to 1.0
Simple usage:
tf_convert.py --infile foo.py --outfile bar.py
tf_convert.py --intree ~/code/old --outtree ~/code/new
""")
parser.add_argument(
"--infile",
dest="input_file",
help="If converting a single file, the name of the file "
"to convert")
parser.add_argument(
"--outfile",
dest="output_file",
help="If converting a single file, the output filename.")
parser.add_argument(
"--intree",
dest="input_tree",
help="If converting a whole tree of files, the directory "
"to read from (relative or absolute).")
parser.add_argument(
"--outtree",
dest="output_tree",
help="If converting a whole tree of files, the output "
"directory (relative or absolute).")
parser.add_argument(
"--copyotherfiles",
dest="copy_other_files",
help=("If converting a whole tree of files, whether to "
"copy the other files."),
type=bool,
default=False)
parser.add_argument(
"--reportfile",
dest="report_filename",
help=("The name of the file where the report log is "
"stored."
"(default: %(default)s)"),
default="report.txt")
args = parser.parse_args()
upgrade = ast_edits.ASTCodeUpgrader(TFAPIChangeSpec())
report_text = None
errors = []  # ensure `errors` is defined even when no input is given
report_filename = args.report_filename
files_processed = 0
if args.input_file:
files_processed, report_text, errors = upgrade.process_file(
args.input_file, args.output_file)
files_processed = 1
elif args.input_tree:
files_processed, report_text, errors = upgrade.process_tree(
args.input_tree, args.output_tree, args.copy_other_files)
else:
parser.print_help()
if report_text:
with open(report_filename, "w") as report_file:
report_file.write(report_text)
print("TensorFlow 1.0 Upgrade Script")
print("-----------------------------")
print("Converted %d files\n" % files_processed)
print("Detected %d errors that require attention" % len(errors))
print("-" * 80)
print("\n".join(errors))
print("\nMake sure to read the detailed log %r\n" % report_filename)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/compatibility/tf_upgrade.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.0 upgrader in safety mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import six
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade_v2_safety
class TfUpgradeV2SafetyTest(test_util.TensorFlowTestCase):
def _upgrade(self, old_file_text):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2_safety.TFAPIChangeSpec())
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return count, report, errors, out_file.getvalue()
def testContribWarning(self):
text = "tf.contrib.foo()"
_, report, _, _ = self._upgrade(text)
expected_info = "tf.contrib will not be distributed"
self.assertIn(expected_info, report)
def testTensorFlowImport(self):
text = "import tensorflow as tf"
expected_text = ("import tensorflow.compat.v1 as tf" + os.linesep +
"tf.disable_v2_behavior()" + os.linesep)
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "import tensorflow as tf, other_import as y"
expected_text = ("import tensorflow.compat.v1 as tf, other_import as y" +
os.linesep + "tf.disable_v2_behavior()" + os.linesep)
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "import tensorflow"
expected_text = ("import tensorflow.compat.v1 as tensorflow" + os.linesep +
"tensorflow.disable_v2_behavior()" + os.linesep)
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "import tensorflow.foo"
expected_text = "import tensorflow.compat.v1.foo"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "import tensorflow.foo as bar"
expected_text = "import tensorflow.compat.v1.foo as bar"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testTensorFlowImportInIndent(self):
text = """
try:
import tensorflow as tf # import line
tf.ones([4, 5])
except AttributeError:
pass
"""
expected_text = """
try:
import tensorflow.compat.v1 as tf # import line
tf.disable_v2_behavior()
tf.ones([4, 5])
except AttributeError:
pass
"""
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testTensorFlowFromImport(self):
text = "from tensorflow import foo"
expected_text = "from tensorflow.compat.v1 import foo"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "from tensorflow.foo import bar"
expected_text = "from tensorflow.compat.v1.foo import bar"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "from tensorflow import *"
expected_text = "from tensorflow.compat.v1 import *"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testTensorFlowImportAlreadyHasCompat(self):
text = "import tensorflow.compat.v1 as tf"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
text = "import tensorflow.compat.v2 as tf"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
text = "from tensorflow.compat import v2 as tf"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
def testTensorFlowDontChangeContrib(self):
text = "import tensorflow.contrib as foo"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
text = "from tensorflow import contrib"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
def test_contrib_to_addons_move(self):
small_mapping = {
"tf.contrib.layers.poincare_normalize":
"tfa.layers.PoincareNormalize",
"tf.contrib.layers.maxout":
"tfa.layers.Maxout",
"tf.contrib.layers.group_norm":
"tfa.layers.GroupNormalization",
"tf.contrib.layers.instance_norm":
"tfa.layers.InstanceNormalization",
}
for symbol, replacement in small_mapping.items():
text = "{}('stuff', *args, **kwargs)".format(symbol)
_, report, _, _ = self._upgrade(text)
self.assertIn(replacement, report)
if __name__ == "__main__":
test_lib.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/compatibility/tf_upgrade_v2_safety_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ast_edits which is used in tf upgraders.
All of the tests assume that we want to change from an API containing
import foo as f
def f(a, b, kw1, kw2): ...
def g(a, b, kw1, c, kw1_alias): ...
def g2(a, b, kw1, c, d, kw1_alias): ...
def h(a, kw1, kw2, kw1_alias, kw2_alias): ...
and the changes to the API consist of renaming, reordering, and/or removing
arguments. Thus, we want to be able to generate changes to produce each of the
following new APIs:
import bar as f
def f(a, b, kw1, kw3): ...
def f(a, b, kw2, kw1): ...
def f(a, b, kw3, kw1): ...
def g(a, b, kw1, c): ...
def g(a, b, c, kw1): ...
def g2(a, b, kw1, c, d): ...
def g2(a, b, c, d, kw1): ...
def h(a, kw1, kw2): ...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import os
import six
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.tools.compatibility import ast_edits
class ModuleDeprecationSpec(ast_edits.NoUpdateSpec):
"""A specification which deprecates 'a.b'."""
def __init__(self):
ast_edits.NoUpdateSpec.__init__(self)
self.module_deprecations.update({"a.b": (ast_edits.ERROR, "a.b is evil.")})
class RenameKeywordSpec(ast_edits.NoUpdateSpec):
"""A specification where kw2 gets renamed to kw3.
The new API is
def f(a, b, kw1, kw3): ...
"""
def __init__(self):
ast_edits.NoUpdateSpec.__init__(self)
self.update_renames()
def update_renames(self):
self.function_keyword_renames["f"] = {"kw2": "kw3"}
class ReorderKeywordSpec(ast_edits.NoUpdateSpec):
"""A specification where kw2 gets moved in front of kw1.
The new API is
def f(a, b, kw2, kw1): ...
"""
def __init__(self):
ast_edits.NoUpdateSpec.__init__(self)
self.update_reorders()
def update_reorders(self):
# Note that these should be in the old order.
self.function_reorders["f"] = ["a", "b", "kw1", "kw2"]
class ReorderAndRenameKeywordSpec(ReorderKeywordSpec, RenameKeywordSpec):
"""A specification where kw2 gets moved in front of kw1 and is changed to kw3.
The new API is
def f(a, b, kw3, kw1): ...
"""
def __init__(self):
ReorderKeywordSpec.__init__(self)
RenameKeywordSpec.__init__(self)
self.update_renames()
self.update_reorders()
class RemoveDeprecatedAliasKeyword(ast_edits.NoUpdateSpec):
"""A specification where kw1_alias is removed in g.
The new API is
def g(a, b, kw1, c): ...
def g2(a, b, kw1, c, d): ...
"""
def __init__(self):
ast_edits.NoUpdateSpec.__init__(self)
self.function_keyword_renames["g"] = {"kw1_alias": "kw1"}
self.function_keyword_renames["g2"] = {"kw1_alias": "kw1"}
class RemoveDeprecatedAliasAndReorderRest(RemoveDeprecatedAliasKeyword):
"""A specification where kw1_alias is removed in g.
The new API is
def g(a, b, c, kw1): ...
def g2(a, b, c, d, kw1): ...
"""
def __init__(self):
RemoveDeprecatedAliasKeyword.__init__(self)
# Note that these should be in the old order.
self.function_reorders["g"] = ["a", "b", "kw1", "c"]
self.function_reorders["g2"] = ["a", "b", "kw1", "c", "d"]
class RemoveMultipleKeywordArguments(ast_edits.NoUpdateSpec):
"""A specification where both keyword aliases are removed from h.
The new API is
def h(a, kw1, kw2): ...
"""
def __init__(self):
ast_edits.NoUpdateSpec.__init__(self)
self.function_keyword_renames["h"] = {
"kw1_alias": "kw1",
"kw2_alias": "kw2",
}
class RenameImports(ast_edits.NoUpdateSpec):
"""Specification for renaming imports."""
def __init__(self):
ast_edits.NoUpdateSpec.__init__(self)
self.import_renames = {
"foo": ast_edits.ImportRename(
"bar",
excluded_prefixes=["foo.baz"])
}
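# Editorial note: RenameImports above maps the module root "foo" to "bar" while
# leaving anything under the excluded prefix "foo.baz" untouched, so
# "import foo as f" becomes "import bar as f" but "import foo.baz" stays as-is
# (exercised by testImport and testExcludedImport below).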
class TestAstEdits(test_util.TensorFlowTestCase):
def _upgrade(self, spec, old_file_text):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = ast_edits.ASTCodeUpgrader(spec)
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return (count, report, errors), out_file.getvalue()
def testModuleDeprecation(self):
text = "a.b.c(a.b.x)"
(_, _, errors), new_text = self._upgrade(ModuleDeprecationSpec(), text)
self.assertEqual(text, new_text)
self.assertIn("Using member a.b.c", errors[0])
self.assertIn("1:0", errors[0])
self.assertIn("Using member a.b.c", errors[0])
self.assertIn("1:6", errors[1])
def testNoTransformIfNothingIsSupplied(self):
text = "f(a, b, kw1=c, kw2=d)\n"
_, new_text = self._upgrade(ast_edits.NoUpdateSpec(), text)
self.assertEqual(new_text, text)
text = "f(a, b, c, d)\n"
_, new_text = self._upgrade(ast_edits.NoUpdateSpec(), text)
self.assertEqual(new_text, text)
def testKeywordRename(self):
"""Test that we get the expected result if renaming kw2 to kw3."""
text = "f(a, b, kw1=c, kw2=d)\n"
expected = "f(a, b, kw1=c, kw3=d)\n"
(_, report, _), new_text = self._upgrade(RenameKeywordSpec(), text)
self.assertEqual(new_text, expected)
self.assertNotIn("Manual check required", report)
# No keywords specified, no reordering, so we should get input as output
text = "f(a, b, c, d)\n"
(_, report, _), new_text = self._upgrade(RenameKeywordSpec(), text)
self.assertEqual(new_text, text)
self.assertNotIn("Manual check required", report)
# Positional *args we cannot inspect; a pure keyword rename leaves positional
# arguments alone, so no warning is expected here
text = "f(a, *args)\n"
(_, report, _), _ = self._upgrade(RenameKeywordSpec(), text)
self.assertNotIn("Manual check required", report)
# **kwargs passed in that we cannot inspect, should warn
text = "f(a, b, kw1=c, **kwargs)\n"
(_, report, _), _ = self._upgrade(RenameKeywordSpec(), text)
self.assertIn("Manual check required", report)
def testKeywordReorderWithParens(self):
"""Test that we get the expected result if there are parens around args."""
text = "f((a), ( ( b ) ))\n"
acceptable_outputs = [
# No change is a valid output
text,
# Also cases where all arguments are fully specified are allowed
"f(a=(a), b=( ( b ) ))\n",
# Making the parens canonical is ok
"f(a=(a), b=((b)))\n",
]
_, new_text = self._upgrade(ReorderKeywordSpec(), text)
self.assertIn(new_text, acceptable_outputs)
def testKeywordReorder(self):
"""Test that we get the expected result if kw2 is now before kw1."""
text = "f(a, b, kw1=c, kw2=d)\n"
acceptable_outputs = [
# No change is a valid output
text,
# Just reordering the kw.. args is also ok
"f(a, b, kw2=d, kw1=c)\n",
# Also cases where all arguments are fully specified are allowed
"f(a=a, b=b, kw1=c, kw2=d)\n",
"f(a=a, b=b, kw2=d, kw1=c)\n",
]
(_, report, _), new_text = self._upgrade(ReorderKeywordSpec(), text)
self.assertIn(new_text, acceptable_outputs)
self.assertNotIn("Manual check required", report)
# Keywords are reordered, so we should reorder arguments too
text = "f(a, b, c, d)\n"
acceptable_outputs = [
"f(a, b, d, c)\n",
"f(a=a, b=b, kw1=c, kw2=d)\n",
"f(a=a, b=b, kw2=d, kw1=c)\n",
]
(_, report, _), new_text = self._upgrade(ReorderKeywordSpec(), text)
self.assertIn(new_text, acceptable_outputs)
self.assertNotIn("Manual check required", report)
# Positional *args passed in that we cannot inspect, should warn
text = "f(a, b, *args)\n"
(_, report, _), _ = self._upgrade(ReorderKeywordSpec(), text)
self.assertIn("Manual check required", report)
# **kwargs we cannot inspect; reordering only affects positional arguments,
# so no warning is expected here
text = "f(a, b, kw1=c, **kwargs)\n"
(_, report, _), _ = self._upgrade(ReorderKeywordSpec(), text)
self.assertNotIn("Manual check required", report)
def testKeywordReorderAndRename(self):
"""Test that we get the expected result if kw2 is renamed and moved."""
text = "f(a, b, kw1=c, kw2=d)\n"
acceptable_outputs = [
"f(a, b, kw3=d, kw1=c)\n",
"f(a=a, b=b, kw1=c, kw3=d)\n",
"f(a=a, b=b, kw3=d, kw1=c)\n",
]
(_, report, _), new_text = self._upgrade(
ReorderAndRenameKeywordSpec(), text)
self.assertIn(new_text, acceptable_outputs)
self.assertNotIn("Manual check required", report)
# Keywords are reordered, so we should reorder arguments too
text = "f(a, b, c, d)\n"
acceptable_outputs = [
"f(a, b, d, c)\n",
"f(a=a, b=b, kw1=c, kw3=d)\n",
"f(a=a, b=b, kw3=d, kw1=c)\n",
]
(_, report, _), new_text = self._upgrade(
ReorderAndRenameKeywordSpec(), text)
self.assertIn(new_text, acceptable_outputs)
self.assertNotIn("Manual check required", report)
# Positional *args passed in that we cannot inspect, should warn
text = "f(a, *args, kw1=c)\n"
(_, report, _), _ = self._upgrade(ReorderAndRenameKeywordSpec(), text)
self.assertIn("Manual check required", report)
# **kwargs passed in that we cannot inspect, should warn
text = "f(a, b, kw1=c, **kwargs)\n"
(_, report, _), _ = self._upgrade(ReorderAndRenameKeywordSpec(), text)
self.assertIn("Manual check required", report)
def testRemoveDeprecatedKeywordAlias(self):
"""Test that we get the expected result if a keyword alias is removed."""
text = "g(a, b, kw1=x, c=c)\n"
acceptable_outputs = [
# Not using deprecated alias, so original is ok
text,
"g(a=a, b=b, kw1=x, c=c)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
# No keyword used, should be no change
text = "g(a, b, x, c)\n"
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertEqual(new_text, text)
# If we used the alias, it should get renamed
text = "g(a, b, kw1_alias=x, c=c)\n"
acceptable_outputs = [
"g(a, b, kw1=x, c=c)\n",
"g(a, b, c=c, kw1=x)\n",
"g(a=a, b=b, kw1=x, c=c)\n",
"g(a=a, b=b, c=c, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
# It should get renamed even if it's last
text = "g(a, b, c=c, kw1_alias=x)\n"
acceptable_outputs = [
"g(a, b, kw1=x, c=c)\n",
"g(a, b, c=c, kw1=x)\n",
"g(a=a, b=b, kw1=x, c=c)\n",
"g(a=a, b=b, c=c, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
def testRemoveDeprecatedKeywordAndReorder(self):
"""Test for when a keyword alias is removed and args are reordered."""
text = "g(a, b, kw1=x, c=c)\n"
acceptable_outputs = [
"g(a, b, c=c, kw1=x)\n",
"g(a=a, b=b, kw1=x, c=c)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasAndReorderRest(), text)
self.assertIn(new_text, acceptable_outputs)
# Keywords are reordered, so we should reorder arguments too
text = "g(a, b, x, c)\n"
# Don't accept an output which doesn't reorder c and d
acceptable_outputs = [
"g(a, b, c, x)\n",
"g(a=a, b=b, kw1=x, c=c)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasAndReorderRest(), text)
self.assertIn(new_text, acceptable_outputs)
# If we used the alias, it should get renamed
text = "g(a, b, kw1_alias=x, c=c)\n"
acceptable_outputs = [
"g(a, b, kw1=x, c=c)\n",
"g(a, b, c=c, kw1=x)\n",
"g(a=a, b=b, kw1=x, c=c)\n",
"g(a=a, b=b, c=c, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
# It should get renamed and reordered even if it's last
text = "g(a, b, c=c, kw1_alias=x)\n"
acceptable_outputs = [
"g(a, b, kw1=x, c=c)\n",
"g(a, b, c=c, kw1=x)\n",
"g(a=a, b=b, kw1=x, c=c)\n",
"g(a=a, b=b, c=c, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
def testRemoveDeprecatedKeywordAndReorder2(self):
"""Same as testRemoveDeprecatedKeywordAndReorder but on g2 (more args)."""
text = "g2(a, b, kw1=x, c=c, d=d)\n"
acceptable_outputs = [
"g2(a, b, c=c, d=d, kw1=x)\n",
"g2(a=a, b=b, kw1=x, c=c, d=d)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasAndReorderRest(), text)
self.assertIn(new_text, acceptable_outputs)
# Keywords are reordered, so we should reorder arguments too
text = "g2(a, b, x, c, d)\n"
# Don't accept an output which doesn't reorder c and d
acceptable_outputs = [
"g2(a, b, c, d, x)\n",
"g2(a=a, b=b, kw1=x, c=c, d=d)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasAndReorderRest(), text)
self.assertIn(new_text, acceptable_outputs)
# If we used the alias, it should get renamed
text = "g2(a, b, kw1_alias=x, c=c, d=d)\n"
acceptable_outputs = [
"g2(a, b, kw1=x, c=c, d=d)\n",
"g2(a, b, c=c, d=d, kw1=x)\n",
"g2(a=a, b=b, kw1=x, c=c, d=d)\n",
"g2(a=a, b=b, c=c, d=d, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
# It should get renamed and reordered even if it's not in order
text = "g2(a, b, d=d, c=c, kw1_alias=x)\n"
acceptable_outputs = [
"g2(a, b, kw1=x, c=c, d=d)\n",
"g2(a, b, c=c, d=d, kw1=x)\n",
"g2(a, b, d=d, c=c, kw1=x)\n",
"g2(a=a, b=b, kw1=x, c=c, d=d)\n",
"g2(a=a, b=b, c=c, d=d, kw1=x)\n",
"g2(a=a, b=b, d=d, c=c, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveDeprecatedAliasKeyword(), text)
self.assertIn(new_text, acceptable_outputs)
def testRemoveMultipleKeywords(self):
"""Remove multiple keywords at once."""
# Not using deprecated keywords -> no rename
text = "h(a, kw1=x, kw2=y)\n"
_, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
self.assertEqual(new_text, text)
# Using positional arguments (in proper order) -> no change
text = "h(a, x, y)\n"
_, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
self.assertEqual(new_text, text)
# Use only the old names, in order
text = "h(a, kw1_alias=x, kw2_alias=y)\n"
acceptable_outputs = [
"h(a, x, y)\n",
"h(a, kw1=x, kw2=y)\n",
"h(a=a, kw1=x, kw2=y)\n",
"h(a, kw2=y, kw1=x)\n",
"h(a=a, kw2=y, kw1=x)\n",
]
_, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
self.assertIn(new_text, acceptable_outputs)
# Use only the old names, in reverse order, should give one of same outputs
text = "h(a, kw2_alias=y, kw1_alias=x)\n"
_, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
self.assertIn(new_text, acceptable_outputs)
# Mix old and new names
text = "h(a, kw1=x, kw2_alias=y)\n"
_, new_text = self._upgrade(RemoveMultipleKeywordArguments(), text)
self.assertIn(new_text, acceptable_outputs)
def testUnrestrictedFunctionWarnings(self):
class FooWarningSpec(ast_edits.NoUpdateSpec):
"""Usages of function attribute foo() prints out a warning."""
def __init__(self):
ast_edits.NoUpdateSpec.__init__(self)
self.function_warnings = {"*.foo": (ast_edits.WARNING, "not good")}
texts = ["object.foo()", "get_object().foo()",
"get_object().foo()", "object.foo().bar()"]
for text in texts:
(_, report, _), _ = self._upgrade(FooWarningSpec(), text)
self.assertIn("not good", report)
# Note that foo() won't result in a warning, because in this case foo is
# not an attribute, but a name.
false_alarms = ["foo", "foo()", "foo.bar()", "obj.run_foo()", "obj.foo"]
for text in false_alarms:
(_, report, _), _ = self._upgrade(FooWarningSpec(), text)
self.assertNotIn("not good", report)
def testFullNameNode(self):
t = ast_edits.full_name_node("a.b.c")
self.assertEqual(
ast.dump(t),
"Attribute(value=Attribute(value=Name(id='a', ctx=Load()), attr='b', "
"ctx=Load()), attr='c', ctx=Load())"
)
def testImport(self):
# foo should be renamed to bar.
text = "import foo as f"
expected_text = "import bar as f"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
text = "import foo"
expected_text = "import bar as foo"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
text = "import foo.test"
expected_text = "import bar.test"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
text = "import foo.test as t"
expected_text = "import bar.test as t"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
text = "import foo as f, a as b"
expected_text = "import bar as f, a as b"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
def testFromImport(self):
# foo should be renamed to bar.
text = "from foo import a"
expected_text = "from bar import a"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
text = "from foo.a import b"
expected_text = "from bar.a import b"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
text = "from foo import *"
expected_text = "from bar import *"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
text = "from foo import a, b"
expected_text = "from bar import a, b"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
def testImport_NoChangeNeeded(self):
text = "import bar as b"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(text, new_text)
def testFromImport_NoChangeNeeded(self):
text = "from bar import a as b"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(text, new_text)
def testExcludedImport(self):
# foo.baz module is excluded from changes.
text = "import foo.baz"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(text, new_text)
text = "import foo.baz as a"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(text, new_text)
text = "from foo import baz as a"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(text, new_text)
text = "from foo.baz import a"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(text, new_text)
def testMultipleImports(self):
text = "import foo.bar as a, foo.baz as b, foo.baz.c, foo.d"
expected_text = "import bar.bar as a, foo.baz as b, foo.baz.c, bar.d"
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
text = "from foo import baz, a, c"
expected_text = """from foo import baz
from bar import a, c"""
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
def testImportInsideFunction(self):
text = """
def t():
from c import d
from foo import baz, a
from e import y
"""
expected_text = """
def t():
from c import d
from foo import baz
from bar import a
from e import y
"""
_, new_text = self._upgrade(RenameImports(), text)
self.assertEqual(expected_text, new_text)
def testUpgradeInplaceWithSymlink(self):
upgrade_dir = os.path.join(self.get_temp_dir(), "foo")
os.mkdir(upgrade_dir)
file_a = os.path.join(upgrade_dir, "a.py")
file_b = os.path.join(upgrade_dir, "b.py")
with open(file_a, "a") as f:
f.write("import foo as f")
os.symlink(file_a, file_b)
upgrader = ast_edits.ASTCodeUpgrader(RenameImports())
upgrader.process_tree_inplace(upgrade_dir)
self.assertTrue(os.path.islink(file_b))
self.assertEqual(file_a, os.readlink(file_b))
with open(file_a, "r") as f:
self.assertEqual("import bar as f", f.read())
def testUpgradeInPlaceWithSymlinkInDifferentDir(self):
upgrade_dir = os.path.join(self.get_temp_dir(), "foo")
other_dir = os.path.join(self.get_temp_dir(), "bar")
os.mkdir(upgrade_dir)
os.mkdir(other_dir)
file_c = os.path.join(other_dir, "c.py")
file_d = os.path.join(upgrade_dir, "d.py")
with open(file_c, "a") as f:
f.write("import foo as f")
os.symlink(file_c, file_d)
upgrader = ast_edits.ASTCodeUpgrader(RenameImports())
upgrader.process_tree_inplace(upgrade_dir)
self.assertTrue(os.path.islink(file_d))
self.assertEqual(file_c, os.readlink(file_d))
# File pointed to by symlink is in a different directory.
# Therefore, it should not be upgraded.
with open(file_c, "r") as f:
self.assertEqual("import foo as f", f.read())
def testUpgradeCopyWithSymlink(self):
upgrade_dir = os.path.join(self.get_temp_dir(), "foo")
output_dir = os.path.join(self.get_temp_dir(), "bar")
os.mkdir(upgrade_dir)
file_a = os.path.join(upgrade_dir, "a.py")
file_b = os.path.join(upgrade_dir, "b.py")
with open(file_a, "a") as f:
f.write("import foo as f")
os.symlink(file_a, file_b)
upgrader = ast_edits.ASTCodeUpgrader(RenameImports())
upgrader.process_tree(upgrade_dir, output_dir, copy_other_files=True)
new_file_a = os.path.join(output_dir, "a.py")
new_file_b = os.path.join(output_dir, "b.py")
self.assertTrue(os.path.islink(new_file_b))
self.assertEqual(new_file_a, os.readlink(new_file_b))
with open(new_file_a, "r") as f:
self.assertEqual("import bar as f", f.read())
def testUpgradeCopyWithSymlinkInDifferentDir(self):
upgrade_dir = os.path.join(self.get_temp_dir(), "foo")
other_dir = os.path.join(self.get_temp_dir(), "bar")
output_dir = os.path.join(self.get_temp_dir(), "baz")
os.mkdir(upgrade_dir)
os.mkdir(other_dir)
file_a = os.path.join(other_dir, "a.py")
file_b = os.path.join(upgrade_dir, "b.py")
with open(file_a, "a") as f:
f.write("import foo as f")
os.symlink(file_a, file_b)
upgrader = ast_edits.ASTCodeUpgrader(RenameImports())
upgrader.process_tree(upgrade_dir, output_dir, copy_other_files=True)
new_file_b = os.path.join(output_dir, "b.py")
self.assertTrue(os.path.islink(new_file_b))
self.assertEqual(file_a, os.readlink(new_file_b))
with open(file_a, "r") as f:
self.assertEqual("import foo as f", f.read())
if __name__ == "__main__":
test_lib.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/compatibility/ast_edits_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from 1.* to 2.0 TensorFlow using SAFETY mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.tools.compatibility import all_renames_v2
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import module_deprecations_v2
class TFAPIChangeSpec(ast_edits.APIChangeSpec):
"""List of maps that describe what changed in the API."""
def __init__(self):
self.function_keyword_renames = {}
self.symbol_renames = {}
self.change_to_function = {}
self.function_reorders = {}
self.function_warnings = {}
self.function_transformers = {}
self.module_deprecations = module_deprecations_v2.MODULE_DEPRECATIONS
## Inform about the addons mappings
for symbol, replacement in all_renames_v2.addons_symbol_mappings.items():
warning = (
ast_edits.WARNING, (
"(Manual edit required) `{}` has been migrated to `{}` in "
"TensorFlow Addons. The API spec may have changed during the "
"migration. Please see https://github.com/tensorflow/addons "
"for more info.").format(symbol, replacement))
self.function_warnings[symbol] = warning
# List module renames. Right now, we only support renaming modules whose
# names don't contain '.'.
self.import_renames = {
"tensorflow": ast_edits.ImportRename(
"tensorflow.compat.v1",
excluded_prefixes=["tensorflow.contrib",
"tensorflow.flags",
"tensorflow.compat.v1",
"tensorflow.compat.v2"])
}
self.inserts_after_imports = {
("tensorflow", None): ["tensorflow.disable_v2_behavior()"],
("tensorflow", "tf"): ["tf.disable_v2_behavior()"],
}
# TODO(kaftan,annarev): specify replacement from TensorFlow import to
# compat.v1 import.
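# Editorial note on the net effect (see tf_upgrade_v2_safety_test.py in this
# collection): under this SAFETY-mode spec an import such as
#   import tensorflow as tf
# is rewritten to
#   import tensorflow.compat.v1 as tf
#   tf.disable_v2_behavior()
# while imports already under tensorflow.compat.* or tensorflow.contrib are
# left unchanged.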
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/compatibility/tf_upgrade_v2_safety.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for all_renames_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.tools.compatibility import all_renames_v2
class AllRenamesV2Test(test_util.TensorFlowTestCase):
def test_no_identity_renames(self):
identity_renames = [
old_name
for old_name, new_name in six.iteritems(all_renames_v2.symbol_renames)
if old_name == new_name
]
self.assertEmpty(identity_renames)
if __name__ == "__main__":
test_lib.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/compatibility/all_renames_v2_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.0 upgrader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import os
import tempfile
from absl.testing import parameterized
import six
import tensorflow as tf
# OSS TF V2 import placeholder.
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade_v2
def get_symbol_for_name(root, name):
name_parts = name.split(".")
symbol = root
# Iterate starting with second item since 1st item is "tf.".
for part in name_parts[1:]:
symbol = getattr(symbol, part)
return symbol
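# Editorial note: get_symbol_for_name walks attributes from the given root and
# skips the leading "tf" segment, so get_symbol_for_name(tf, "tf.math.add")
# resolves via getattr(tf, "math") followed by getattr(tf.math, "add").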
def get_args(symbol):
if hasattr(inspect, "signature"):
signature = inspect.signature(symbol)
# Ignore *args and **kwargs for now.
return [param.name for param in signature.parameters.values()
if param.kind == param.POSITIONAL_OR_KEYWORD]
return tf_inspect.getargspec(symbol)[0]
def get_func_and_args_from_str(call_str):
"""Parse call string to get function and argument names.
Args:
call_str: Call string must be in the form:
`tf.foo(arg1=val1, arg2=val2, ...)`.
Returns:
(function_name, list of arg names) tuple.
"""
open_paren_index = call_str.find("(")
close_paren_index = call_str.rfind(")")
function_name = call_str[:open_paren_index]
args = call_str[open_paren_index+1:close_paren_index].split(",")
args = [arg.split("=")[0].strip() for arg in args]
args = [arg for arg in args if arg] # filter out empty strings
return function_name, args
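# Editorial note: the parser above splits on the outermost parentheses and
# keeps only the argument names, e.g.
#   get_func_and_args_from_str("tf.foo(arg1=val1, arg2=val2)")
# returns ("tf.foo", ["arg1", "arg2"]).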
class TestUpgrade(test_util.TensorFlowTestCase, parameterized.TestCase):
"""Test various APIs that have been changed in 2.0.
We also test whether a converted file is executable. test_file_v1_10.py
aims to exhaustively test that API changes are convertible and actually
work when run with current TensorFlow.
"""
@classmethod
def setUpClass(cls):
super(TestUpgrade, cls).setUpClass()
cls.v2_symbols = {}
cls.v1_symbols = {}
if hasattr(tf.compat, "v2"):
def symbol_collector(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v2 = tf_export.get_v2_names(attr)
for name in api_names_v2:
cls.v2_symbols["tf." + name] = attr
visitor = public_api.PublicAPIVisitor(symbol_collector)
visitor.private_map["tf.compat"] = ["v1"]
traverse.traverse(tf.compat.v2, visitor)
if hasattr(tf.compat, "v1"):
def symbol_collector_v1(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v1 = tf_export.get_v1_names(attr)
for name in api_names_v1:
cls.v1_symbols["tf." + name] = attr
visitor = public_api.PublicAPIVisitor(symbol_collector_v1)
traverse.traverse(tf.compat.v1, visitor)
def _upgrade(self, old_file_text):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return count, report, errors, out_file.getvalue()
def _upgrade_multiple(self, old_file_texts):
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
results = []
for old_file_text in old_file_texts:
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
results.append([count, report, errors, out_file.getvalue()])
return results
def testParseError(self):
_, report, unused_errors, unused_new_text = self._upgrade(
"import tensorflow as tf\na + \n")
self.assertTrue(report.find("Failed to parse") != -1)
def testReport(self):
text = "tf.angle(a)\n"
_, report, unused_errors, unused_new_text = self._upgrade(text)
# This is not a complete test, but it is a sanity test that a report
# is generating information.
self.assertTrue(report.find("Renamed function `tf.angle` to "
"`tf.math.angle`"))
def testRename(self):
text = "tf.conj(a)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.conj(a)\n")
text = "tf.rsqrt(tf.log_sigmoid(3.8))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.rsqrt(tf.math.log_sigmoid(3.8))\n")
def testAllAPI(self):
if not hasattr(tf.compat, "v2"):
return
# Converts all symbols in the v1 namespace to the v2 namespace, raising
# an error if the target of the conversion is not in the v2 namespace.
# Please regenerate the renames file or edit any manual renames if this
# test fails.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names = tf_export.get_v1_names(attr)
for name in api_names:
_, _, _, text = self._upgrade("tf." + name)
if (text and
not text.startswith("tf.compat.v1") and
not text.startswith("tf.compat.v2") and
text not in self.v2_symbols and
# Builds currently install old version of estimator that doesn't
# have some 2.0 symbols.
not text.startswith("tf.estimator")):
self.fail(
"Symbol %s generated from %s not in v2 API" % (text, name))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testAllAPIV1(self):
collect = True
v1_symbols = set([])
# Converts all symbols in the v1 namespace to the v2 namespace, raising
# an error if the target of the conversion is not in the v1 namespace.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names = tf_export.get_v1_names(attr)
for name in api_names:
if collect:
v1_symbols.add("tf." + name)
else:
_, _, _, text = self._upgrade("tf." + name)
if (text and
not text.startswith("tf.compat.v1") and
not text.startswith("tf.compat.v2") and
not text.startswith("tf.estimator") and
text not in v1_symbols):
self.fail(
"Symbol %s generated from %s not in v1 API" % (text, name))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
collect = False
traverse.traverse(tf.compat.v1, visitor)
def testV1KeywordArgNames(self):
all_keyword_renames = (
tf_upgrade_v2.TFAPIChangeSpec().function_keyword_renames)
# Visitor that verifies V1 argument names.
def arg_test_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
names_v1 = tf_export.get_v1_names(attr)
for name in names_v1:
name = "tf.%s" % name
if name not in all_keyword_renames:
continue
arg_names_v1 = tf_inspect.getargspec(attr)[0]
keyword_renames = all_keyword_renames[name]
self.assertEqual(type(keyword_renames), dict)
# Assert that v1 function has valid v1 argument names.
for from_name, _ in keyword_renames.items():
self.assertIn(
from_name, arg_names_v1,
"%s not found in %s arguments: %s" %
(from_name, name, str(arg_names_v1)))
visitor = public_api.PublicAPIVisitor(arg_test_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testV2KeywordArgNames(self):
# This test converts a call of the form:
# tf.foo(arg1=0, arg2=1, ...)
# to 2.0. Then, checks that converted function has valid argument names.
if not hasattr(tf.compat, "v2"):
return
v2_arg_exceptions = {
"verify_shape_is_now_always_true",
# These arguments should not be used, they just specify
# that a function takes named arguments.
"keyword_required",
"_sentinel",
}
v1_name_exceptions = {
"tf.print", # requires print_function import
}
function_warnings = (
tf_upgrade_v2.TFAPIChangeSpec().function_warnings)
function_transformers = (
tf_upgrade_v2.TFAPIChangeSpec().function_transformers)
keyword_renames = (
tf_upgrade_v2.TFAPIChangeSpec().function_keyword_renames)
# Visitor that converts to V2 and checks V2 argument names.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
if not tf_inspect.isfunction(attr):
continue
names_v1 = tf_export.get_v1_names(attr)
arg_names_v1 = get_args(attr)
for name in names_v1:
tf_name = "tf.%s" % name
if tf_name in function_warnings or tf_name in function_transformers:
continue # These require manual change
if tf_name in v1_name_exceptions:
continue
# Assert that arg names after converting to v2 are present in
# v2 function.
# 1. First, create an input of the form:
# tf.foo(arg1=val1, arg2=val2, ...)
args = ",".join(
["%s=%d" % (from_name, from_index)
for from_index, from_name in enumerate(arg_names_v1)])
text_input = "%s(%s)" % (tf_name, args)
# 2. Convert the input to V2.
_, _, _, text = self._upgrade(text_input)
new_function_name, new_args = get_func_and_args_from_str(text)
if new_function_name == "tf.compat.v1.%s" % name:
if tf_name in keyword_renames:
# If we rename arguments, new function must be available in 2.0.
# We should not be using compat.v1 in this case.
self.fail(
"Function '%s' is not in 2.0 when converting\n%s\nto\n%s" %
(new_function_name, text_input, text))
continue
if new_function_name.startswith("tf.compat.v2"):
self.assertIn(new_function_name.replace("tf.compat.v2.", "tf."),
self.v2_symbols)
continue
# 3. Verify V2 function and arguments.
args_v2 = get_args(self.v2_symbols[new_function_name])
args_v2.extend(v2_arg_exceptions)
for new_arg in new_args:
self.assertIn(
new_arg, args_v2,
"Invalid argument '%s' in 2.0 when converting\n%s\nto\n%s.\n"
"Supported arguments: %s" % (
new_arg, text_input, text, str(args_v2)))
# 4. Verify that the argument exists in v1 as well.
if new_function_name in set(["tf.nn.ctc_loss",
"tf.saved_model.save"]):
continue
args_v1 = get_args(self.v1_symbols[new_function_name])
args_v1.extend(v2_arg_exceptions)
for new_arg in new_args:
self.assertIn(
new_arg, args_v1,
"Invalid argument '%s' in 1.0 when converting\n%s\nto\n%s.\n"
"Supported arguments: %s" % (
new_arg, text_input, text, str(args_v1)))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testPositionsMatchArgGiven(self):
full_dict = tf_upgrade_v2.TFAPIChangeSpec().function_arg_warnings
method_names = full_dict.keys()
for method_name in method_names:
args = full_dict[method_name].keys()
# special case for optimizer methods
if method_name.startswith("*."):
method = method_name.replace("*", "tf.train.Optimizer")
else:
method = method_name
method = get_symbol_for_name(tf, method)
arg_spec = tf_inspect.getfullargspec(method)
for (arg, pos) in args:
# to deal with the self argument on methods on objects
if method_name.startswith("*."):
pos += 1
self.assertEqual(arg_spec[0][pos], arg)
def testReorderFileNeedsUpdate(self):
reordered_function_names = (
tf_upgrade_v2.TFAPIChangeSpec().reordered_function_names)
function_reorders = (
tf_upgrade_v2.TFAPIChangeSpec().function_reorders)
manual_function_reorders = (
tf_upgrade_v2.TFAPIChangeSpec().manual_function_reorders)
added_names_message = """Some function names in
self.reordered_function_names are not in reorders_v2.py.
Please run the following commands to update reorders_v2.py:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
"""
removed_names_message = """%s in self.reorders_v2 does not match
any name in self.reordered_function_names.
Please run the following commands to update reorders_v2.py:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
"""
self.assertTrue(
reordered_function_names.issubset(function_reorders),
added_names_message)
# function_reorders should contain reordered_function_names
# and their TensorFlow V1 aliases.
for name in function_reorders:
if name in manual_function_reorders:
continue
# get other names for this function
attr = get_symbol_for_name(tf.compat.v1, name)
_, attr = tf_decorator.unwrap(attr)
v1_names = tf_export.get_v1_names(attr)
self.assertTrue(v1_names)
v1_names = ["tf.%s" % n for n in v1_names]
# check if any other name is in
self.assertTrue(
any(n in reordered_function_names for n in v1_names),
removed_names_message % name)
def testRenameConstant(self):
text = "tf.MONOLITHIC_BUILD\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.sysconfig.MONOLITHIC_BUILD\n")
text = "some_call(tf.MONOLITHIC_BUILD)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "some_call(tf.sysconfig.MONOLITHIC_BUILD)\n")
def testRenameArgs(self):
text = ("tf.nn.pool(input_a, window_shape_a, pooling_type_a, padding_a, "
"dilation_rate_a, strides_a, name_a, data_format_a)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
("tf.nn.pool(input=input_a, window_shape=window_shape_a,"
" pooling_type=pooling_type_a, padding=padding_a, "
"dilations=dilation_rate_a, strides=strides_a, "
"name=name_a, data_format=data_format_a)\n"))
def testReorder(self):
text = "tf.boolean_mask(a, b, c, d)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
"tf.boolean_mask(tensor=a, mask=b, name=c, axis=d)\n")
def testLearningRateDecay(self):
for decay in ["tf.train.exponential_decay",
"tf.train.polynomial_decay", "tf.train.natural_exp_decay",
"tf.train.inverse_time_decay", "tf.train.cosine_decay",
"tf.train.cosine_decay_restarts",
"tf.train.linear_cosine_decay",
"tf.train.noisy_linear_cosine_decay",
"tf.train.piecewise_constant_decay",
]:
text = "%s(a, b)\n" % decay
_, report, unused_errors, _ = self._upgrade(text)
self.assertIn("switch to the schedules in "
"`tf.keras.optimizers.schedules`", report)
def verify_compat_v1_rename_correctness(self, values, ns_prefix=""):
if ns_prefix:
ns_prefix += "."
for v in values:
text = "tf." + ns_prefix + v + "(a, b)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual("tf.compat.v1." + ns_prefix + v + "(a, b)", new_text)
def testIntializers(self):
initializers = [
"zeros",
"ones",
"constant",
"random_uniform",
"random_normal",
"truncated_normal",
"variance_scaling",
"orthogonal",
"glorot_uniform",
"glorot_normal",
"identity",
"lecun_normal",
"lecun_uniform",
"he_normal",
"he_uniform",
]
self.verify_compat_v1_rename_correctness(
initializers, ns_prefix="initializers")
initializers = [
"zeros_initializer",
"ones_initializer",
"constant_initializer",
"random_uniform_initializer",
"random_normal_initializer",
"truncated_normal_initializer",
"variance_scaling_initializer",
"orthogonal_initializer",
"glorot_uniform_initializer",
"glorot_normal_initializer",
]
self.verify_compat_v1_rename_correctness(initializers)
initializers = [
"zeros",
"ones",
"Ones",
"Zeros",
"constant",
"Constant",
"VarianceScaling",
"Orthogonal",
"orthogonal",
"Identity",
"identity",
"glorot_uniform",
"glorot_normal",
"lecun_normal",
"lecun_uniform",
"he_normal",
"he_uniform",
"TruncatedNormal",
"truncated_normal",
"RandomUniform",
"uniform",
"random_uniform",
"RandomNormal",
"normal",
"random_normal",
]
self.verify_compat_v1_rename_correctness(
initializers, ns_prefix="keras.initializers")
def testContribXavierInitializer(self):
text = "tf.contrib.layers.xavier_initializer()\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=\"uniform\")\n",
)
text = "slim.xavier_initializer(True or False)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if True or False else "
"\"truncated_normal\"))\n",
)
text = "slim.xavier_initializer(uniform=(True or False))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if True or False else "
"\"truncated_normal\"))\n",
)
text = "tf.contrib.layers.xavier_initializer_conv2d(False, 12)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if False else \"truncated_normal\"), "
"seed=12)\n",
)
text = ("tf.contrib.layers.xavier_initializer_conv2d("
"False, 12, tf.float32)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if False else \"truncated_normal\"), "
"seed=12, "
"dtype=tf.float32)\n",
)
text = ("tf.contrib.layers.xavier_initializer("
"False, 12, dtypes=tf.float32)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if False else \"truncated_normal\"), "
"seed=12, "
"dtypes=tf.float32)\n",
)
def testVarianceScalingInitializer(self):
text = ("tf.contrib.layers.variance_scaling_initializer("
"mode=(\"FAN\" + \"_AVG\"))\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=2.0, "
"mode=(\"FAN\" + \"_AVG\").lower())\n",
)
text = ("slim.variance_scaling_initializer("
"uniform=(True or False), mode=(\"FAN\" + \"_AVG\"))\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=2.0, "
"distribution=(\"uniform\" if True or False else \"truncated_normal\"),"
" mode=(\"FAN\" + \"_AVG\").lower())\n",
)
text = "tf.contrib.layers.variance_scaling_initializer(factor=1.0)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0)\n",
)
text = ("tf.contrib.layers.variance_scaling_initializer("
"12.0, \"FAN_AVG\", True, dtypes=tf.float32)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(12.0, "
"(\"FAN_AVG\").lower(), "
"(\"uniform\" if True else \"truncated_normal\"), "
"dtypes=tf.float32)\n",
)
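  # tf.metrics.* symbols move under tf.compat.v1.metrics.*, and the report
  # points users at the object-oriented replacements.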
def testMetrics(self):
metrics = [
"accuracy",
"auc",
"average_precision_at_k",
"false_negatives",
"false_negatives_at_thresholds",
"false_positives",
"false_positives_at_thresholds",
"mean",
"mean_absolute_error",
"mean_cosine_distance",
"mean_iou",
"mean_per_class_accuracy",
"mean_relative_error",
"mean_squared_error",
"mean_tensor",
"percentage_below",
"precision",
"precision_at_k",
"precision_at_thresholds",
"precision_at_top_k",
"recall",
"recall_at_k",
"recall_at_thresholds",
"recall_at_top_k",
"root_mean_squared_error",
"sensitivity_at_specificity",
"sparse_average_precision_at_k",
"sparse_precision_at_k",
"specificity_at_sensitivity",
"true_negatives",
"true_negatives_at_thresholds",
"true_positives",
"true_positives_at_thresholds",
]
for m in metrics:
text = "tf.metrics." + m + "(a, b)"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("tf.compat.v1.metrics." + m + "(a, b)", new_text)
self.assertIn(
"tf.metrics have been replaced with object oriented versions", report)
def testLosses(self):
losses = [
"absolute_difference",
"add_loss",
"compute_weighted_loss",
"cosine_distance",
"get_losses",
"get_regularization_loss",
"get_regularization_losses",
"get_total_loss",
"hinge_loss",
"huber_loss",
"log_loss",
"mean_pairwise_squared_error",
"mean_squared_error",
"sigmoid_cross_entropy",
"softmax_cross_entropy",
"sparse_softmax_cross_entropy",
]
for l in losses:
text = "tf.losses." + l + "(a, b)"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("tf.compat.v1.losses." + l + "(a, b)", new_text)
self.assertIn(
"tf.losses have been replaced with object oriented versions", report)
def testEstimatorLossReductionChange(self):
classes = [
"LinearClassifier", "LinearRegressor", "DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor", "DNNRegressor", "DNNClassifier",
"BaselineClassifier", "BaselineRegressor"
]
for c in classes:
ns = "tf.estimator." + c
text = ns + "()"
expected_text = ns + "(loss_reduction=tf.keras.losses.Reduction.SUM)"
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = ns + "(loss_reduction=TEST)"
expected_text = ns + "(loss_reduction=TEST)"
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
text = "tf.estimator.BaselineClassifier(m, c, w, v, o, c, lr)"
expected_text = (
"tf.compat.v1.estimator.BaselineClassifier("
"model_dir=m, n_classes=c, weight_column=w, label_vocabulary=v, "
"optimizer=o, config=c, loss_reduction=lr)")
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.estimator.BaselineClassifier(model_dir=model_dir)"
expected_text = ("tf.estimator.BaselineClassifier(" +
"model_dir=model_dir, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testBaseEstimatorPartitioner(self):
classes = ["LinearEstimator", "DNNLinearCombinedEstimator", "DNNEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testCannedEstimatorPartitioner(self):
classes = [
"LinearClassifier", "LinearRegressor", "DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor", "DNNRegressor", "DNNClassifier"
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST)"
text = ns + suffix
suffix = ("(input_layer_partitioner=TEST, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testBaseEstimatorOptimizer(self):
classes = ["BaselineEstimator", "LinearEstimator", "DNNEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(optimizer=TEST)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedEstimatorOptimizer(self):
classes = ["DNNLinearCombinedEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(dnn_optimizer=TEST, linear_optimizer=Test)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testCannedEstimatorOptimizer(self):
classes = [
"BaselineClassifier", "BaselineRegressor", "LinearClassifier",
"LinearRegressor", "DNNRegressor", "DNNClassifier"
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(optimizer=TEST)"
text = ns + suffix
suffix = ("(optimizer=TEST, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedOptimizer(self):
classes = [
"DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor",
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(dnn_optimizer=TEST, linear_optimizer=Test)"
text = ns + suffix
suffix = ("(dnn_optimizer=TEST, linear_optimizer=Test, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testBaseEstimatorPartitionerAndOptimizer(self):
classes = ["LinearEstimator", "DNNEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST, optimizer=TEST)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedEstimatorPartitionerAndOptimizer(self):
classes = ["DNNLinearCombinedEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = ("(input_layer_partitioner=TEST, dnn_optimizer=TEST, "
"linear_optimizer=TEST)")
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testCannedEstimatorPartitionerAndOptimizer(self):
classes = [
"LinearClassifier", "LinearRegressor", "DNNRegressor", "DNNClassifier"
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST, optimizer=TEST)"
text = ns + suffix
suffix = ("(input_layer_partitioner=TEST, optimizer=TEST, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedPartitionerAndOptimizer(self):
classes = [
"DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor",
]
for c in classes:
ns = "tf.estimator." + c
suffix = ("(input_layer_partitioner=TEST, dnn_optimizer=TEST, "
"linear_optimizer=TEST)")
text = ns + suffix
suffix = ("(input_layer_partitioner=TEST, dnn_optimizer=TEST, "
"linear_optimizer=TEST, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
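  # extract_glimpse replaces the boolean uniform_noise argument with a string
  # `noise` argument, wrapping existing expressions in a conditional that picks
  # 'uniform' or 'gaussian'.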
def testExtractGlimpse(self):
text = ("tf.image.extract_glimpse(x, size, off, False, "
"False, False, name=\"foo\")\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.image.extract_glimpse(x, size, off, False, "
"False, 'uniform' if (False) else 'gaussian', name=\"foo\")\n",
)
text = ("tf.image.extract_glimpse(x, size, off, centered=False, "
"normalized=False, uniform_noise=True if uniform_noise else "
"False, name=\"foo\")\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.image.extract_glimpse(x, size, off, centered=False, "
"normalized=False, noise='uniform' if (True if uniform_noise else "
"False) else 'gaussian', name=\"foo\")\n",
)
text = ("tf.image.extract_glimpse(x,\n"
" size,\n"
" off,\n"
" centered=True,\n"
" normalized=True, # Stuff before\n"
" uniform_noise=False,\n"
" name=\"foo\")# Stuff after\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text, "tf.image.extract_glimpse(x,\n"
" size,\n"
" off,\n"
" centered=True,\n"
" normalized=True, # Stuff before\n"
" noise='uniform' if (False) else 'gaussian',\n"
" name=\"foo\")# Stuff after\n")
text = "tf.image.extract_glimpse(x)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, text)
self.assertEqual(errors, [])
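  # tf.nn.dropout switches from keep_prob to rate, so values are rewritten as
  # rate=1 - (keep_prob); calls without arguments are reported as errors.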
def testDropout(self):
text = "tf.nn.dropout(x, keep_prob, name=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, 1 - (keep_prob), name=\"foo\")\n",
)
text = "tf.nn.dropout(x, keep_prob=.4, name=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, rate=1 - (.4), name=\"foo\")\n",
)
text = (
"tf.nn.dropout(x, # Stuff before\n"
" keep_prob=.4, # Stuff after\n"
" name=\"foo\")\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, # Stuff before\n"
" rate=1 - (.4), # Stuff after\n"
" name=\"foo\")\n",
)
text = "tf.nn.dropout(x)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, text)
self.assertIn("tf.nn.dropout called without arguments", errors[0])
def testDropoutExpr(self):
text = "tf.nn.dropout(x, 1 - func(3 + 4.), name=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, 1 - (1 - func(3 + 4.)), name=\"foo\")\n",
)
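  # tf.contrib.layers/slim regularizers become tf.keras.regularizers; the
  # scope argument is dropped (and reported), and l2 scales are rewritten as
  # 0.5 * (scale).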
def testContribL1(self):
text = "tf.contrib.layers.l1_regularizer(scale)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l1(scale)\n",
)
self.assertNotIn("Dropping scope", unused_report)
text = "tf.contrib.layers.l1_regularizer(scale, scope)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l1(scale)\n",
)
self.assertIn("Dropping scope", unused_report)
text = (
"slim.l1_regularizer( # Stuff before\n"
" scale=.4,"
" scope=\"foo\")\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l1( # Stuff before\n"
" l=.4)\n",
)
self.assertIn("Dropping scope", unused_report)
def testContribL2(self):
text = "tf.contrib.layers.l2_regularizer(scale)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2(0.5 * (scale))\n",
)
self.assertNotIn("Dropping scope", unused_report)
text = "tf.contrib.layers.l2_regularizer(scale, scope)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2(0.5 * (scale))\n",
)
self.assertIn("Dropping scope", unused_report)
text = (
"slim.l2_regularizer( # Stuff before\n"
" scale=.4,"
" scope=\"foo\")\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2( # Stuff before\n"
" l=0.5 * (.4))\n",
)
self.assertIn("Dropping scope", unused_report)
def testContribL2Expr(self):
text = "tf.contrib.layers.l2_regularizer(1 - func(3 + 4.), scope=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2(0.5 * (1 - func(3 + 4.)))\n",
)
def testMathCountNonZeroChanges(self):
text = (
"tf.math.count_nonzero(input_tensor=input, dtype=dtype, name=name, "
"reduction_indices=axis, keep_dims=keepdims)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.math.count_nonzero(input=input, dtype=dtype, name=name, "
"axis=axis, keepdims=keepdims)\n"
)
self.assertEqual(new_text, expected_text)
def testCountNonZeroChanges(self):
text = (
"tf.count_nonzero(input_tensor=input, dtype=dtype, name=name, "
"reduction_indices=axis, keep_dims=keepdims)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.math.count_nonzero(input=input, dtype=dtype, name=name, "
"axis=axis, keepdims=keepdims)\n"
)
self.assertEqual(new_text, expected_text)
def testRandomMultinomialToRandomCategorical(self):
text = (
"tf.random.multinomial(logits, samples, seed, name, output_dtype)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.random.categorical(logits=logits, num_samples=samples, seed=seed, "
"name=name, dtype=output_dtype)\n"
)
self.assertEqual(new_text, expected_text)
text = (
"tf.multinomial(logits, samples, seed, name, output_dtype)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.random.categorical(logits=logits, num_samples=samples, seed=seed, "
"name=name, dtype=output_dtype)\n"
)
self.assertEqual(new_text, expected_text)
def testRandomPoissonConversion(self):
text1 = "tf.random_poisson(lam, shape, dtype)"
text2 = "tf.random.poisson(lam, shape, dtype)"
expected_text = "tf.random.poisson(lam=lam, shape=shape, dtype=dtype)"
_, unused_report, unused_errors, new_text1 = self._upgrade(text1)
self.assertEqual(new_text1, expected_text)
_, unused_report, unused_errors, new_text2 = self._upgrade(text2)
self.assertEqual(new_text2, expected_text)
def testConvolutionOpUpdate(self):
text = (
"tf.nn.convolution(input, filter, padding, strides, dilation_rate, "
"name, data_format)"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.nn.convolution(input=input, filters=filter, padding=padding, "
"strides=strides, dilations=dilation_rate, name=name, "
"data_format=data_format)"
)
self.assertEqual(new_text, expected_text)
def test_substr(self):
text = "tf.substr(input, pos, len, name, unit)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual("tf.strings.substr(input=input, pos=pos, len=len, "
"name=name, unit=unit)\n", new_text)
self.assertEqual(errors, [])
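  # colocate_gradients_with_ops is dropped from tf.gradients and from
  # Optimizer.minimize/compute_gradients, with a note in the report.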
def testColocateGradientsWithOps(self):
text = "tf.gradients(yx=a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "tf.gradients(yx=a, colocate_gradients_with_ops=False)\n"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("tf.gradients(yx=a)\n", new_text)
self.assertIn("tf.gradients no longer takes", report)
text = "tf.gradients(y, x, grad_ys, name, colocate, gate)\n"
expected = ("tf.gradients(ys=y, xs=x, grad_ys=grad_ys, name=name, "
"gate_gradients=gate)\n")
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def testColocateGradientsWithOpsMinimize(self):
text = "optimizer.minimize(a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "optimizer.minimize(a, colocate_gradients_with_ops=False)\n"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("optimizer.minimize(a)\n", new_text)
self.assertIn("Optimizer.minimize no longer takes", report)
def testColocateGradientsWithOpsComputeGradients(self):
text = "optimizer.compute_gradients(a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "optimizer.compute_gradients(a, colocate_gradients_with_ops=False)\n"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("optimizer.compute_gradients(a)\n", new_text)
self.assertIn("Optimizer.compute_gradients no longer takes", report)
def testExportSavedModelRename(self):
text = "self.est.export_savedmodel(path)"
_, report, unused_errors, unused_new_text = self._upgrade(text)
self.assertIn(
"rename the method export_savedmodel() to export_saved_model()",
report)
def testArgmin(self):
text = "tf.argmin(input, name=n, dimension=1, output_type=type)"
expected_text = "tf.argmin(input=input, name=n, axis=1, output_type=type)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.argmin(input, 0)"
expected_text = "tf.argmin(input=input, axis=0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.arg_min(input, 0)"
expected_text = "tf.argmin(input, 0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testArgmax(self):
text = "tf.argmax(input, name=n, dimension=1, output_type=type)"
expected_text = "tf.argmax(input=input, name=n, axis=1, output_type=type)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.argmax(input, 0)"
expected_text = "tf.argmax(input=input, axis=0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.arg_max(input, 0)"
expected_text = "tf.argmax(input, 0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testAutograph(self):
text = "tf.autograph.to_graph(f, True, arg_values=None, arg_types=None)"
expected_text = "tf.autograph.to_graph(f, True)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = ("tf.autograph.to_code"
"(f, False, arg_values=None, arg_types=None, indentation=' ')")
expected_text = "tf.autograph.to_code(f, False)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEstimatorInputs(self):
text = "tf.estimator.inputs.numpy_input_fn(0)"
expected_text = "tf.compat.v1.estimator.inputs.numpy_input_fn(0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.estimator.inputs.pandas_input_fn(0)"
expected_text = "tf.compat.v1.estimator.inputs.pandas_input_fn(0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testBatchToSpace(self):
text = "tf.batch_to_space_nd(input, block_shape, crops, name)"
expected_text = "tf.batch_to_space(input, block_shape, crops, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.batch_to_space(input, crops, block_size, name)"
expected_text = (
"tf.batch_to_space(input=input, crops=crops, block_shape=block_size, "
"name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.manip.batch_to_space_nd(input, block_shape, crops, name)"
expected_text = "tf.batch_to_space(input, block_shape, crops, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testExtractImagePatches(self):
text = (
"tf.extract_image_patches(images, ksizes=ksizes, strides=strides,"
"rates=rates, padding=padding, name=name)")
expected_text = (
"tf.image.extract_patches(images, sizes=ksizes, strides=strides,"
"rates=rates, padding=padding, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testKerasSavedModel(self):
text = (
"tf.contrib.saved_model.save_keras_model(model, './saved_models')\n"
"tf.contrib.saved_model.load_keras_model(saved_model_path)\n")
expected_text = (
"tf.keras.experimental.export_saved_model(model, './saved_models')\n"
"tf.keras.experimental.load_from_saved_model(saved_model_path)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testStatelessMultinomial(self):
text = (
"tf.random.stateless_multinomial(logits, num_samples, seed, "
"output_dtype=dtype, name=name)")
expected_text = (
"tf.random.stateless_categorical(logits, num_samples, seed, "
"dtype=dtype, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSoftMaxCrossEntropyWithLogitsV2(self):
text = (
"tf.nn.softmax_cross_entropy_with_logits_v2("
"labels=labels, logits=logits, dim=2)")
expected_text = (
"tf.nn.softmax_cross_entropy_with_logits("
"labels=labels, logits=logits, axis=2)")
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertFalse(errors)
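  # The 2.0 softmax_cross_entropy_with_logits can backpropagate into labels,
  # so the converter wraps the labels argument in tf.stop_gradient (unless it
  # already is) and renames dim to axis.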
def testSoftMaxCrossEntropyWithLogits(self):
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=labels, logits=logits, dim=2)")
expected_text = (
"tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(labels), logits=logits, axis=2)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=foo(bar))")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo(bar)))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testSoftMaxCrossEntropyWithLogitsDoesntNest(self):
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(labels), logits=logits, dim=2)")
expected_text = (
"tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(labels), logits=logits, axis=2)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo(bar)))")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo(bar)))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=foo())")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo()))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=foo().zz())")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo().zz()))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testSparseMatmul(self):
text = ("tf.sparse_matmul(a, b, c, d, e, f, g)\n")
expected_text = ("tf.linalg.matmul(a=a, b=b, transpose_a=c, transpose_b=d, "
"a_is_sparse=e, b_is_sparse=f, name=g)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testWeightedMoments(self):
text = "tf.nn.weighted_moments(x, axes, freq, name, kd)"
expected_text = (
"tf.nn.weighted_moments(x=x, axes=axes, frequency_weights=freq, "
"name=name, keepdims=kd)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSparseAdd(self):
text = "tf.sparse.add(a, b, t)"
expected_text = "tf.sparse.add(a=a, b=b, threshold=t)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSparseConcat(self):
text = "tf.sparse.concat(ax, inp, name, exp, concat)"
expected_text = (
"tf.sparse.concat(axis=ax, sp_inputs=inp, name=name, "
"expand_nonconcat_dims=exp, axis=concat)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSeparableConv2D(self):
text = "tf.nn.separable_conv2d(inp, d, pt, strides, pad, rate, name, fmt)"
expected_text = (
"tf.nn.separable_conv2d(input=inp, depthwise_filter=d, "
"pointwise_filter=pt, strides=strides, padding=pad, "
"dilations=rate, name=name, data_format=fmt)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testConv2D(self):
text = (
"tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu, "
"data_format)")
expected_text = (
"tf.nn.conv2d(input=input, filters=filter, strides=strides, "
"padding=padding, data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = (
"tf.nn.conv2d(input, filter=filter, strides=strides, padding=padding, "
"use_cudnn_on_gpu=use_cudnn_on_gpu)")
expected_text = ("tf.nn.conv2d(input=input, filters=filter, "
"strides=strides, padding=padding)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testConv2DBackpropFilter(self):
text = (
"tf.nn.conv2d_backprop_filter(input, filter_sizes, out_backprop, "
"strides, padding, use_cudnn_on_gpu, data_format)")
expected_text = (
"tf.compat.v1.nn.conv2d_backprop_filter(input, filter_sizes, "
"out_backprop, strides, padding, use_cudnn_on_gpu, data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testConv2DBackpropInput(self):
text = (
"tf.nn.conv2d_backprop_input(input_sizes, filter, out_backprop, "
"strides, padding, use_cudnn_on_gpu, data_format)")
expected_text = (
"tf.nn.conv2d_transpose(output_shape=input_sizes, filters=filter, "
"input=out_backprop, strides=strides, padding=padding, "
"data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSpacetoBatch(self):
text = "tf.space_to_batch_nd(input, shape, paddings, name)"
expected_text = "tf.space_to_batch(input, shape, paddings, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.nn.space_to_batch(input, paddings, block_size, name)"
expected_text = (
"tf.space_to_batch(input=input, paddings=paddings, "
"block_shape=block_size, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testInTopK(self):
text = "tf.math.in_top_k(a, b, c, n)"
expected_text = (
"tf.math.in_top_k(predictions=a, targets=b, k=c, name=n)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDepthToSpace(self):
text = "tf.nn.depth_to_space(input, block_size, name, data_format)"
expected_text = (
"tf.nn.depth_to_space(input=input, block_size=block_size, "
"name=name, data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEmbeddingLookup(self):
text = ("tf.nn.embedding_lookup(params, ids, partition_strategy, name, "
"validate_indices, max_norm)")
expected_text = ("tf.nn.embedding_lookup(params=params, ids=ids, "
"partition_strategy=partition_strategy, name=name, "
"max_norm=max_norm)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEmbeddingLookupSparse(self):
text = ("tf.nn.embedding_lookup_sparse(params, sp_ids, sp_weights, "
"partition_strategy, name, combiner, max_norm)")
expected_text = ("tf.nn.embedding_lookup_sparse(params=params, "
"sp_ids=sp_ids, sp_weights=sp_weights, "
"partition_strategy=partition_strategy, name=name, "
"combiner=combiner, max_norm=max_norm)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testNnInTopK(self):
text = "tf.nn.in_top_k(predictions, targets, k, name)"
expected_text = ("tf.nn.in_top_k(predictions=predictions, "
"targets=targets, k=k, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSpaceToDepth(self):
text = "tf.nn.space_to_depth(input, block_size, name, data_format)"
expected_text = ("tf.nn.space_to_depth(input=input, block_size=block_size, "
"name=name, data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testPrint(self):
# tf.print() cannot be parsed unless we import print_function
text = """from __future__ import print_function
tf.print()
tf.print('abc')
"""
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, text) # Text should stay the same
def testSparseSplit(self):
text = (
"tf.sparse_split(sp_input=sp_input, num_split=num_split, axis=axis, "
"name=name)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, axis=axis, "
"name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = (
"tf.sparse_split(sp_input=sp_input, num_split=num_split, "
"name=name, split_dim=axis)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, axis=axis)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, split_dim=axis)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, axis=axis)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
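  # Dataset.make_one_shot_iterator()/make_initializable_iterator() method
  # calls are rewritten as tf.compat.v1.data.make_*_iterator(dataset, ...)
  # function calls.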
def testIterators(self):
for (text, expected) in [
("(expr + yielding(data)).make_one_shot_iterator()",
"tf.compat.v1.data.make_one_shot_iterator((expr + yielding(data)))"),
("dataset.make_one_shot_iterator()",
"tf.compat.v1.data.make_one_shot_iterator(dataset)"),
("dataset.make_one_shot_iterator(shared_name=foo)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),
("dataset.make_one_shot_iterator(x, y, z)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),
("dataset.make_initializable_iterator()",
"tf.compat.v1.data.make_initializable_iterator(dataset)"),
("ds.make_initializable_iterator(shared_name=foo)",
"tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),
("dataset.make_initializable_iterator(x, y, z)",
"tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)"),
("tf.data.make_one_shot_iterator(dataset)",
"tf.compat.v1.data.make_one_shot_iterator(dataset)"),
("tf.data.make_one_shot_iterator(dataset, shared_name=foo)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),
("tf.data.make_one_shot_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),
("tf.data.make_initializable_iterator(dataset)",
"tf.compat.v1.data.make_initializable_iterator(dataset)"),
("tf.data.make_initializable_iterator(ds, shared_name=foo)",
"tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),
("tf.data.make_initializable_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)"),
("tf.compat.v1.data.make_one_shot_iterator(dataset)",
"tf.compat.v1.data.make_one_shot_iterator(dataset)"),
("tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),
("tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),
("tf.compat.v1.data.make_initializable_iterator(dataset)",
"tf.compat.v1.data.make_initializable_iterator(dataset)"),
("tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)",
"tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),
("tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)")]:
_, unused_report, unused_errors, actual = self._upgrade(text)
self.assertEqual(actual, expected)
def testStructure(self):
for (text, expected) in [
("tf.data.experimental.DatasetStructure", "tf.data.DatasetSpec"),
("tf.data.experimental.OptionalStructure", "tf.OptionalSpec"),
("tf.data.experimental.RaggedTensorStructure", "tf.RaggedTensorSpec"),
("tf.data.experimental.SparseTensorStructure", "tf.SparseTensorSpec"),
("tf.data.experimental.Structure", "tf.TypeSpec"),
("tf.data.experimental.TensorArrayStructure", "tf.TensorArraySpec"),
("tf.data.experimental.TensorStructure", "tf.TensorSpec"),
]:
_, unused_report, unused_errors, actual = self._upgrade(text)
self.assertEqual(actual, expected)
def testMapAndBatch(self):
suffix = ".data.experimental.map_and_batch_with_legacy_function(args)"
text = "tf" + suffix
expected = "tf.compat.v1" + suffix
_, unused_report, unused_errors, actual = self._upgrade(text)
self.assertEqual(actual, expected)
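  # tf.to_int32, tf.to_float, tf.to_double, etc. are rewritten as tf.cast with
  # an explicit dtype, whether `name` is passed by keyword or positionally.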
def testCast(self):
for (name, dtype) in [("int32", "int32"),
("int64", "int64"),
("float", "float32"),
("double", "float64"),
("complex64", "complex64"),
("complex128", "complex128"),
("bfloat16", "bfloat16")]:
text = "tf.to_%s(x, name='test')" % name
expected_text = "tf.cast(x, name='test', dtype=tf.%s)" % dtype
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testCastPositionalSecondArgument(self):
for (name, dtype) in [("int32", "int32"),
("int64", "int64"),
("float", "float32"),
("double", "float64"),
("complex64", "complex64"),
("complex128", "complex128"),
("bfloat16", "bfloat16")]:
text = "tf.to_%s(x, 'test')" % name
expected_text = "tf.cast(x, name='test', dtype=tf.%s)" % dtype
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
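  # tf.image.resize_<method> helpers collapse into tf.image.resize with the
  # matching tf.image.ResizeMethod enum value.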
def testImageResize(self):
for method in ["bilinear", "area", "bicubic", "nearest_neighbor"]:
text = "tf.image.resize_%s(i, s)" % method
expected_text = ("tf.image.resize(i, s, "
"method=tf.image.ResizeMethod.%s)" % method.upper())
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testImageResizeExtraPositionalArgs(self):
for method in ["bilinear", "area", "bicubic", "nearest_neighbor"]:
text = "tf.image.resize_%s(i, s, a, p)" % method
expected_text = [
"tf.image.resize(i, s, ", "preserve_aspect_ratio=p, ",
"method=tf.image.ResizeMethod.%s)" % method.upper()
]
_, unused_report, unused_errors, new_text = self._upgrade(text)
for s in expected_text:
self.assertIn(s, new_text)
def testCond(self):
text = "tf.cond(a, b, c, True)"
expected_text = "tf.cond(pred=a, true_fn=b, false_fn=c)"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("tf.cond", errors[0])
self.assertIn("requires manual check", errors[0])
def testParens(self):
text = """
def _log_prob(self, x):
return tf.reduce_logsumexp(
(self.mixture_distribution.logits + self.distribution.log_prob(
x[..., tf.newaxis])),
axis=-1)"""
expected_text = """
def _log_prob(self, x):
return tf.reduce_logsumexp(
input_tensor=(self.mixture_distribution.logits + self.distribution.log_prob(
x[..., tf.newaxis])),
axis=-1)"""
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testAssertStatements(self):
for name in ["assert_greater", "assert_equal", "assert_none_equal",
"assert_less", "assert_negative", "assert_positive",
"assert_non_negative", "assert_non_positive", "assert_near",
"assert_less", "assert_less_equal", "assert_greater",
"assert_greater_equal", "assert_integer", "assert_type",
"assert_scalar"]:
text = "tf.%s(a)" % name
expected_text = "tf.compat.v1.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
text = "tf.debugging.%s(a)" % name
expected_text = "tf.compat.v1.debugging.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
def testAssertRankStatements(self):
for name in ["assert_rank", "assert_rank_at_least", "assert_rank_in"]:
text = "tf.%s(a)" % name
expected_text = "tf.compat.v1.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
text = "tf.debugging.%s(a)" % name
expected_text = "tf.compat.v1.debugging.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
def test_assert_equal_graph_def(self):
text = ("tf.test.assert_equal_graph_def(a, b, checkpoint_v2=x, "
"hash_table_shared_name=y)")
expected = "tf.test.assert_equal_graph_def(actual=a, expected=b)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_is_tensor_upgrade(self):
text = "tf.contrib.framework.is_tensor(x)"
expected = "tf.is_tensor(x)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_CriticalSection_upgrade(self):
text = "tf.contrib.framework.CriticalSection(shared_name='blah')"
expected = "tf.CriticalSection(shared_name='blah')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_sample_distorted_bounding_box(self):
# pylint: disable=line-too-long
text = "tf.image.sample_distorted_bounding_box(a, b, c, d, e, f, g, h, i, j)"
expected = "tf.image.sample_distorted_bounding_box(image_size=a, bounding_boxes=b, seed=c, min_object_covered=e, aspect_ratio_range=f, area_range=g, max_attempts=h, use_image_if_no_bounding_boxes=i, name=j)"
# pylint: enable=line-too-long
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_initialize(self):
text = "tf.contrib.summary.initialize"
expected = "tf.compat.v1.summary.initialize"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_framework_argsort(self):
text = "tf.contrib.framework.argsort"
expected = "tf.argsort"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_flags_bare(self):
_, _, errors, _ = self._upgrade("tf.flags")
self.assertIn("tf.flags has been removed", errors[0])
def test_flags_flags(self):
_, _, errors, _ = self._upgrade("tf.flags.FLAGS")
self.assertIn("tf.flags has been removed", errors[0])
def test_contrib_estimator_head_deprecation(self):
api_symbols = ["binary_classification_head", "logistic_regression_head",
"multi_class_head", "multi_head", "multi_label_head",
"poisson_regression_head", "regression_head"]
for symbol in api_symbols:
text = "tf.contrib.estimator." + symbol
_, report, _, _ = self._upgrade(text)
self.assertIn("`tf.contrib.estimator.*_head` has been deprecated", report)
def test_contrib_layers_layer_norm_deprecation(self):
_, report, _, _ = self._upgrade("tf.contrib.layers.layer_norm")
self.assertIn("`tf.contrib.layers.layer_norm` has been deprecated", report)
def test_contrib_rnn_deprecation(self):
_, report, _, _ = self._upgrade("tf.contrib.rnn")
self.assertIn("tf.contrib.rnn.* has been deprecated", report)
def test_contrib_cudnn_rnn_deprecation(self):
_, report, _, _ = self._upgrade("tf.contrib.cudnn_rnn")
self.assertIn("tf.contrib.cudnn_rnn.* has been deprecated", report)
def test_max_pool_2d(self):
text = "tf.nn.max_pool(value=4)"
expected_text = "tf.nn.max_pool2d(input=4)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_estimator_early_stopping(self):
api_symbols = [
"make_early_stopping_hook", "stop_if_higher_hook", "stop_if_lower_hook",
"stop_if_no_decrease_hook", "stop_if_no_increase_hook"
]
for symbol in api_symbols:
text = "tf.contrib.estimator." + symbol
expected_text = "tf.estimator.experimental." + symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_rnn_cell(self):
api_symbols = ["RNNCell", "BasicLSTMCell", "BasicRNNCell", "GRUCell",
"LSTMCell", "MultiRNNCell"]
for symbol in api_symbols:
text = "tf.contrib.rnn." + symbol
expected_text = "tf.compat.v1.nn.rnn_cell." + symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_rnn_function(self):
api_symbols = ["static_rnn", "static_state_saving_rnn",
"static_bidirectional_rnn"]
for symbol in api_symbols:
text = "tf.contrib.rnn." + symbol
expected_text = "tf.compat.v1.nn." + symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
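  # tf.contrib.summary ops map to tf.compat.v2.summary; dropped arguments such
  # as family, name, or bad_color are reported as errors, and calls without an
  # explicit step get step=tf.compat.v1.train.get_or_create_global_step().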
def test_contrib_summary_generic(self):
text = "tf.contrib.summary.generic('foo', myval, meta, 'fam', 42)"
expected = ("tf.compat.v2.summary.write(tag='foo', data=myval, "
"metadata=meta, step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
# Arg errors come in alphabetical order of arguments, not appearance order.
self.assertIn("'family' argument", errors[0])
self.assertIn("'name' argument", errors[1])
self.assertIn("tf.compat.v2.summary.*", errors[2])
def test_contrib_summary_audio(self):
text = "tf.contrib.summary.audio('foo', myval, 44100, 3, 'fam', 42)"
expected = ("tf.compat.v2.summary.audio(name='foo', data=myval, "
"sample_rate=44100, max_outputs=3, step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'family' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_histogram(self):
text = "tf.contrib.summary.histogram('foo', myval, 'fam', 42)"
expected = ("tf.compat.v2.summary.histogram(name='foo', data=myval, "
"step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'family' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_image(self):
text = "tf.contrib.summary.image('foo', myval, red, 3, 'fam', 42)"
expected = ("tf.compat.v2.summary.image(name='foo', data=myval, "
"max_outputs=3, step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'bad_color' argument", errors[0])
self.assertIn("'family' argument", errors[1])
self.assertIn("tf.compat.v2.summary.*", errors[2])
def test_contrib_summary_scalar(self):
text = "tf.contrib.summary.scalar('foo', myval, 'fam', 42)"
expected = ("tf.compat.v2.summary.scalar(name='foo', data=myval, "
"step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'family' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_generic_nostep(self):
text = "tf.contrib.summary.generic('foo', myval)"
expected = ("tf.compat.v2.summary.write(tag='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'name' argument", errors[0])
self.assertIn("'step' argument", errors[1])
self.assertIn("tf.compat.v2.summary.*", errors[2])
def test_contrib_summary_audio_nostep(self):
text = "tf.contrib.summary.audio('foo', myval, 44100)"
expected = ("tf.compat.v2.summary.audio(name='foo', data=myval, "
"sample_rate=44100, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_histogram_nostep(self):
text = "tf.contrib.summary.histogram('foo', myval)"
expected = ("tf.compat.v2.summary.histogram(name='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_image_nostep(self):
text = "tf.contrib.summary.image('foo', myval)"
expected = ("tf.compat.v2.summary.image(name='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_scalar_nostep(self):
text = "tf.contrib.summary.scalar('foo', myval)"
expected = ("tf.compat.v2.summary.scalar(name='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_graph(self):
text = "tf.contrib.summary.graph(my_graph)"
_, _, errors, _ = self._upgrade(text)
expected_error = "tf.compat.v2.summary.trace"
self.assertIn(expected_error, errors[0])
def test_contrib_summary_import_event(self):
text = "tf.contrib.summary.import_event(my_event)"
_, _, errors, _ = self._upgrade(text)
expected_error = "tf.compat.v2.summary.experimental.write_raw_pb"
self.assertIn(expected_error, errors[0])
def test_contrib_summary_flush(self):
text = "tf.contrib.summary.flush(writer=foo)"
expected = "tf.compat.v2.summary.flush(writer=foo)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_create_file_writer(self):
text = ("tf.contrib.summary.create_file_writer('my_logdir', 0, 1000, "
"'.foo', 'shared-name')")
expected = ("tf.compat.v2.summary.create_file_writer(logdir='my_logdir', "
"max_queue=0, flush_millis=1000, filename_suffix='.foo')")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'name' argument", errors[0])
self.assertIn("no longer re-uses existing event files", errors[1])
def test_contrib_summary_always_record_summaries(self):
text = "tf.contrib.summary.always_record_summaries()"
expected = "tf.compat.v2.summary.record_if(True)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_never_record_summaries(self):
text = "tf.contrib.summary.never_record_summaries()"
expected = "tf.compat.v2.summary.record_if(False)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_record_summaries_every_n_global_steps(self):
text = "tf.contrib.summary.record_summaries_every_n_global_steps(10)"
_, _, errors, _ = self._upgrade(text)
expected_error = "replaced by a call to tf.compat.v2.summary.record_if()"
self.assertIn(expected_error, errors[0])
def test_contrib_summary_all_summary_ops(self):
text = "tf.contrib.summary.all_summary_ops()"
expected = "tf.compat.v1.summary.all_v2_summary_ops()"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_full_example(self):
deindent = lambda n, s: "\n".join(line[n:] for line in s.split("\n"))
text = deindent(4, """
import tensorflow as tf
tf.enable_eager_execution()
writer = tf.contrib.summary.create_file_writer(
"/tmp/migration_test", flush_millis=1000)
with writer.as_default(), tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("loss", 0.42)
tf.contrib.summary.histogram("weights", [1.0, 2.0], step=7)
tf.contrib.summary.flush()
""")
expected = deindent(4, """
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
writer = tf.compat.v2.summary.create_file_writer(
logdir="/tmp/migration_test", flush_millis=1000)
with writer.as_default(), tf.compat.v2.summary.record_if(True):
tf.compat.v2.summary.scalar(name="loss", data=0.42, step=tf.compat.v1.train.get_or_create_global_step())
tf.compat.v2.summary.histogram(name="weights", data=[1.0, 2.0], step=7)
tf.compat.v2.summary.flush()
""")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_summary_api_warning(self):
text = "tf.summary.scalar('foo', 42)"
_, report, _, _ = self._upgrade(text)
expected_info = "TF 1.x summary API cannot be automatically migrated"
self.assertIn(expected_info, report)
def test_avg_pool_2d(self):
text = "tf.nn.avg_pool(value=4)"
expected_text = "tf.nn.avg_pool2d(input=4)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_saved_model_load(self):
text = "tf.saved_model.load(sess, ['foo_graph'])"
expected = "tf.compat.v1.saved_model.load(sess, ['foo_graph'])"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_saved_model_load_v2(self):
text = "tf.saved_model.load_v2('/tmp/blah')"
expected = "tf.compat.v2.saved_model.load('/tmp/blah')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_uniform_unit_scaling_initializer(self):
text = "tf.uniform_unit_scaling_initializer(0.5)"
expected_text = ("tf.compat.v1.keras.initializers.VarianceScaling("
"scale=0.5, distribution=\"uniform\")")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.initializers.uniform_unit_scaling(0.5)"
expected_text = ("tf.compat.v1.keras.initializers.VarianceScaling("
"scale=0.5, distribution=\"uniform\")")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_name_scope(self):
text = "tf.name_scope(None, default_name, [some, values])"
expected_text = "tf.name_scope(name=default_name)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.name_scope(default_name=default_name, values=stuff)"
expected_text = "tf.name_scope(name=default_name)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.name_scope(name=n, default_name=d, values=s)"
expected_text = "tf.compat.v1.name_scope(name=n, default_name=d, values=s)"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("`name` passed to `name_scope`", report)
text = "tf.name_scope(name=None, values=stuff)"
_, _, errors, _ = self._upgrade(text)
self.assertIn("name_scope call with neither name nor default_name",
errors[0])
@parameterized.parameters(
# Rename parameter: delimiter -> sep and add .to_sparse()
["tf.string_split('test', delimiter=' ')",
"tf.strings.split(input='test', sep=' ').to_sparse()"],
# Rename parameter: source -> input
["tf.strings.split(source='test1')",
"tf.strings.split(input='test1').to_sparse()"],
# Use compat.v1 for skip_empty parameter.
["tf.string_split('test', ' ', True)",
"tf.compat.v1.string_split(source='test', sep=' ', skip_empty=True)"],
["tf.string_split('test', ' ', skip_empty=False)",
"tf.strings.split(input='test', sep=' ').to_sparse()"],
# Split behavior for sep=None changed. (In particular, it now splits on
# all whitespace, not just the space character)
["tf.string_split(x)",
"tf.compat.v1.string_split(source=x)"],
# Split behavior for sep='' changed:
["tf.string_split(x, '')",
"tf.strings.bytes_split(input=x).to_sparse()"],
["tf.string_split(x, sep='')",
"tf.strings.bytes_split(input=x).to_sparse()"],
["tf.string_split(x, delimiter='')",
"tf.strings.bytes_split(input=x).to_sparse()"],
["tf.string_split(x, '', result_type='RaggedTensor')",
"tf.strings.bytes_split(input=x)"],
# If sep is a variable, we can't tell if it's empty:
["tf.string_split(x, sep)",
"tf.compat.v1.string_split(source=x, sep=sep)"],
# If sep is a non-empty string literal, then we don't need compat.v1.
["tf.string_split(x, 'non-empty-sep')",
"tf.strings.split(input=x, sep='non-empty-sep').to_sparse()"],
# Add to_sparse unless result_type is RaggedTensor:
["tf.string_split(x, ' ')",
"tf.strings.split(input=x, sep=' ').to_sparse()"],
["tf.string_split(x, ' ', result_type='SparseTensor')",
"tf.strings.split(input=x, sep=' ').to_sparse()"],
["tf.string_split(x, ' ', result_type='RaggedTensor')",
"tf.strings.split(input=x, sep=' ')"],
["tf.string_split(x, ' ', result_type=x)",
"tf.compat.v1.string_split(source=x, sep=' ', result_type=x)"],
) # pyformat: disable
# TODO(b/129398290)
def DISABLED_test_string_split(self, text, expected_text):
"""Tests for transforming from tf.string_split."""
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
@parameterized.parameters(
# Add to_sparse unless result_type is RaggedTensor:
["tf.strings.split(x, sep)",
"tf.strings.split(x, sep).to_sparse()"],
["tf.strings.split(x, sep, result_type='SparseTensor')",
"tf.strings.split(x, sep).to_sparse()"],
["tf.strings.split(x, sep, result_type='RaggedTensor')",
"tf.strings.split(x, sep)"],
["tf.strings.split(x, sep, result_type=x)",
"tf.compat.v1.strings.split(x, sep, result_type=x)"],
) # pyformat: disable
def test_strings_split(self, text, expected_text):
"""Tests for transforming from tf.strings.split."""
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
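  # tf.train.sdca_* helpers are rewritten against the corresponding tf.raw_ops
  # kernels with explicit keyword arguments.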
def test_sdca_to_raw_ops(self):
text = "tf.train.sdca_fprint(input_tensor)"
expected_text = "tf.raw_ops.SdcaFprint(input=input_tensor)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.train.sdca_fprint(input, name=n)"
expected_text = "tf.raw_ops.SdcaFprint(input=input, name=n)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.train.sdca_shrink_l1(w, l, ll)"
expected_text = "tf.raw_ops.SdcaShrinkL1(weights=w, l1=l, l2=ll)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = (
"tf.train.sdca_optimizer(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o)")
expected_text = (
"tf.raw_ops.SdcaOptimizer(sparse_example_indices=a, "
"sparse_feature_indices=b, sparse_feature_values=c, dense_features=d, "
"example_weights=e, example_labels=f, sparse_indices=g, "
"sparse_weights=h, dense_weights=i, example_state_data=j, loss_type=k, "
"l1=l, l2=m, num_loss_partitions=n, num_inner_iterations=o)")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_to_addons_move(self):
small_mapping = {
"tf.contrib.layers.poincare_normalize":
"tfa.layers.PoincareNormalize",
"tf.contrib.layers.maxout":
"tfa.layers.Maxout",
"tf.contrib.layers.group_norm":
"tfa.layers.GroupNormalization",
"tf.contrib.layers.instance_norm":
"tfa.layers.InstanceNormalization",
}
for symbol, replacement in small_mapping.items():
text = "{}('stuff', *args, **kwargs)".format(symbol)
_, report, _, _ = self._upgrade(text)
self.assertIn(replacement, report)
def testXlaExperimental(self):
text = "tf.xla.experimental.jit_scope(0)"
expected_text = "tf.xla.experimental.jit_scope(0)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.xla.experimental.compile(0)"
expected_text = "tf.xla.experimental.compile(0)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testNnErosion2d(self):
text = "tf.nn.erosion2d(v, k, s, r, p)"
expected_text = "tf.nn.erosion2d(v, k, s, r, p, data_format='NHWC')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testNnDilation2d(self):
text = "tf.nn.dilation2d(v, k, s, r, p)"
expected_text = "tf.nn.dilation2d(v, k, s, r, p, data_format='NHWC')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testPywrapTensorflowWarning(self):
text = "tf.pywrap_tensorflow.foo()"
expected = "tf.pywrap_tensorflow.foo()"
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("`tf.pywrap_tensorflow` will not be distributed", errors[0])
def testKerasSaveModelFormat(self):
text = "tf.keras.models.save_model(model, path)"
expected_text = "tf.keras.models.save_model(model, path, save_format='h5')"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertNotIn(
"saves to the Tensorflow SavedModel format by default", report)
_, report, _, _ = self._upgrade("model.save(path)")
self.assertIn(
"saves to the Tensorflow SavedModel format by default", report)
def test_distribute_strategy(self):
text = "tf.contrib.distribute.CrossDeviceOps()"
expected = "tf.distribute.CrossDeviceOps()"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
text = "tf.contrib.distribute.MirroredStrategy"
expected = "tf.contrib.distribute.MirroredStrategy"
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("migrated to tf.distribute.MirroredStrategy", errors[0])
text = "tf.distribute.MirroredStrategy"
expected = "tf.distribute.MirroredStrategy"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("tf.distribute.MirroredStrategy API has changed", report)
self.assertIn("make_dataset_iterator->experimental_distribute_dataset",
report)
text = "tf.contrib.distribute.TPUStrategy"
expected = "tf.contrib.distribute.TPUStrategy"
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("migrated to tf.distribute.experimental.TPUStrategy",
errors[0])
text = "tf.contrib.distribute.foo"
expected = "tf.contrib.distribute.foo"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("tf.contrib.distribute.* have been migrated", report)
def test_decode_raw(self):
text = "tf.io.decode_raw(bytes=[1,2,3], output_dtype=tf.int32)"
expected_text = (
"tf.io.decode_raw(input_bytes=[1,2,3], output_dtype=tf.int32)")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testRecomputeGrad(self):
text = "tf.contrib.layers.recompute_grad()"
expected = "tf.recompute_grad()"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_load_variable(self):
text = "tf.contrib.framework.load_variable('a')"
expected_text = (
"tf.train.load_variable('a')")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.contrib.framework.load_variable(checkpoint_dir='a')"
expected_text = (
"tf.train.load_variable(ckpt_dir_or_file='a')")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_import_analysis(self):
old_symbol = "tf.conj(a)"
new_symbol = "tf.math.conj(a)"
# We upgrade the base un-versioned tensorflow aliased as tf
import_header = "import tensorflow as tf\n"
text = import_header + old_symbol
expected_text = import_header + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
import_header = ("import tensorflow as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
text = import_header + old_symbol
expected_text = import_header + new_symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
    # We don't handle unaliased tensorflow imports currently,
    # so the upgrade script should log errors for these.
import_header = "import tensorflow\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("unaliased `import tensorflow`", "\n".join(errors))
# Upgrading explicitly-versioned tf code is unsafe, but we don't
# need to throw errors when we detect explicitly-versioned tf.
import_header = "import tensorflow.compat.v1 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v1` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "from tensorflow.compat import v1 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v1` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "from tensorflow.compat import v1 as tf, v2 as tf2\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v1` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "import tensorflow.compat.v2 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v2` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "from tensorflow.compat import v1 as tf1, v2 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v2` was directly imported as `tf`",
report)
self.assertEmpty(errors)
def test_api_spec_reset_between_files(self):
for old_symbol, new_symbol in [
("tf.conj(a)", "tf.math.conj(a)"),
("tf.to_int32(x)", "tf.cast(x, dtype=tf.int32)")]:
## Test that the api spec is reset in between files:
import_header = "import tensorflow.compat.v2 as tf\n"
text_a = import_header + old_symbol
expected_text_a = import_header + old_symbol
text_b = old_symbol
expected_text_b = new_symbol
results = self._upgrade_multiple([text_a, text_b])
result_a, result_b = results[0], results[1]
self.assertEqual(result_a[3], expected_text_a)
self.assertEqual(result_b[3], expected_text_b)
def test_model_to_estimator_checkpoint_warning(self):
text = "tf.keras.estimator.model_to_estimator(model)"
_, report, _, _ = self._upgrade(text)
expected_info = "will save object-based checkpoints"
self.assertIn(expected_info, report)
class TestUpgradeFiles(test_util.TensorFlowTestCase):
def testInplace(self):
"""Check to make sure we don't have a file system race."""
temp_file = tempfile.NamedTemporaryFile("w", delete=False)
original = "tf.conj(a)\n"
upgraded = "tf.math.conj(a)\n"
temp_file.write(original)
temp_file.close()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
upgrader.process_file(temp_file.name, temp_file.name)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
if __name__ == "__main__":
test_lib.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/compatibility/tf_upgrade_v2_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts according to an API change specification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import collections
import os
import re
import shutil
import sys
import tempfile
import traceback
import pasta
import six
# Some regular expressions we will need for parsing
FIND_OPEN = re.compile(r"^\s*(\[).*$")
FIND_STRING_CHARS = re.compile(r"['\"]")
INFO = "INFO"
WARNING = "WARNING"
ERROR = "ERROR"
ImportRename = collections.namedtuple(
"ImportRename", ["new_name", "excluded_prefixes"])
def full_name_node(name, ctx=ast.Load()):
"""Make an Attribute or Name node for name.
Translate a qualified name into nested Attribute nodes (and a Name node).
Args:
name: The name to translate to a node.
ctx: What context this name is used in. Defaults to Load()
Returns:
A Name or Attribute node.
"""
names = name.split(".")
names.reverse()
node = ast.Name(id=names.pop(), ctx=ast.Load())
while names:
node = ast.Attribute(value=node, attr=names.pop(), ctx=ast.Load())
# Change outermost ctx to the one given to us (inner ones should be Load).
node.ctx = ctx
return node
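# Editor-added illustrative sketch; the helper below is hypothetical and not
# part of the original module. It shows the shape of the node produced by
# full_name_node: nested Attribute nodes wrapping an innermost Name.
def _example_full_name_node():
  example = full_name_node("tf.math.conj")
  assert isinstance(example, ast.Attribute)
  assert example.attr == "conj"
  assert example.value.attr == "math"
  assert example.value.value.id == "tf"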
def get_arg_value(node, arg_name, arg_pos=None):
"""Get the value of an argument from a ast.Call node.
This function goes through the positional and keyword arguments to check
whether a given argument was used, and if so, returns its value (the node
representing its value).
  This cannot introspect *args or **kwargs, but it safely handles *args in
Python3.5+.
Args:
node: The ast.Call node to extract arg values from.
arg_name: The name of the argument to extract.
arg_pos: The position of the argument (in case it's passed as a positional
argument).
Returns:
A tuple (arg_present, arg_value) containing a boolean indicating whether
the argument is present, and its value in case it is.
"""
# Check keyword args
if arg_name is not None:
for kw in node.keywords:
if kw.arg == arg_name:
return (True, kw.value)
# Check positional args
if arg_pos is not None:
idx = 0
for arg in node.args:
if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred):
continue # Can't parse Starred
if idx == arg_pos:
return (True, arg)
idx += 1
return (False, None)
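# Editor-added illustrative sketch; the helper below is hypothetical and not
# part of the original module. It demonstrates the (present, value) contract of
# get_arg_value on a hand-parsed call node.
def _example_get_arg_value():
  call = ast.parse("f(1, axis=2)", mode="eval").body  # an ast.Call node
  present, _ = get_arg_value(call, "axis")            # found as a keyword arg
  assert present
  present, _ = get_arg_value(call, None, arg_pos=0)   # found positionally
  assert present
  present, value = get_arg_value(call, "missing")     # not passed at all
  assert not present and value is None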
def uses_star_args_in_call(node):
"""Check if an ast.Call node uses arbitrary-length positional *args.
This function works with the AST call node format of Python3.5+
as well as the different AST format of earlier versions of Python.
Args:
node: The ast.Call node to check arg values for.
Returns:
    True if the node uses starred variadic positional args (*args).
False if it does not.
"""
if sys.version_info[:2] >= (3, 5):
# Check for an *args usage in python 3.5+
for arg in node.args:
if isinstance(arg, ast.Starred):
return True
else:
if node.starargs:
return True
return False
def uses_star_kwargs_in_call(node):
"""Check if an ast.Call node uses arbitrary-length **kwargs.
This function works with the AST call node format of Python3.5+
as well as the different AST format of earlier versions of Python.
Args:
node: The ast.Call node to check arg values for.
Returns:
    True if the node uses starred variadic keyword args (**kwargs).
False if it does not.
"""
if sys.version_info[:2] >= (3, 5):
# Check for a **kwarg usage in python 3.5+
for keyword in node.keywords:
if keyword.arg is None:
return True
else:
if node.kwargs:
return True
return False
def uses_star_args_or_kwargs_in_call(node):
"""Check if an ast.Call node uses arbitrary-length *args or **kwargs.
This function works with the AST call node format of Python3.5+
as well as the different AST format of earlier versions of Python.
Args:
node: The ast.Call node to check arg values for.
Returns:
True if the node uses starred variadic positional args or keyword args.
False if it does not.
"""
return uses_star_args_in_call(node) or uses_star_kwargs_in_call(node)
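# Editor-added illustrative sketch; the helper below is hypothetical and not
# part of the original module. Calls made with *args/**kwargs cannot be
# rewritten safely, which is exactly what these detectors flag.
def _example_star_detection():
  plain = ast.parse("f(a, b=1)", mode="eval").body
  starred = ast.parse("f(*args, **kwargs)", mode="eval").body
  assert not uses_star_args_or_kwargs_in_call(plain)
  assert uses_star_args_or_kwargs_in_call(starred)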
def excluded_from_module_rename(module, import_rename_spec):
"""Check if this module import should not be renamed.
Args:
module: (string) module name.
import_rename_spec: ImportRename instance.
Returns:
True if this import should not be renamed according to the
import_rename_spec.
"""
for excluded_prefix in import_rename_spec.excluded_prefixes:
if module.startswith(excluded_prefix):
return True
return False
class APIChangeSpec(object):
"""This class defines the transformations that need to happen.
This class must provide the following fields:
* `function_keyword_renames`: maps function names to a map of old -> new
argument names
* `symbol_renames`: maps function names to new function names
* `change_to_function`: a set of function names that have changed (for
notifications)
* `function_reorders`: maps functions whose argument order has changed to the
list of arguments in the new order
* `function_warnings`: maps full names of functions to warnings that will be
printed out if the function is used. (e.g. tf.nn.convolution())
* `function_transformers`: maps function names to custom handlers
* `module_deprecations`: maps module names to warnings that will be printed
if the module is still used after all other transformations have run
* `import_renames`: maps import name (must be a short name without '.')
to ImportRename instance.
For an example, see `TFAPIChangeSpec`.
"""
def preprocess(self, root_node): # pylint: disable=unused-argument
"""Preprocess a parse tree. Return any produced logs and errors."""
return [], []
def clear_preprocessing(self):
"""Restore this APIChangeSpec to before it preprocessed a file.
This is needed if preprocessing a file changed any rewriting rules.
"""
pass
class NoUpdateSpec(APIChangeSpec):
"""A specification of an API change which doesn't change anything."""
def __init__(self):
self.function_handle = {}
self.function_reorders = {}
self.function_keyword_renames = {}
self.symbol_renames = {}
self.function_warnings = {}
self.change_to_function = {}
self.module_deprecations = {}
self.function_transformers = {}
self.import_renames = {}
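# Editor-added illustrative sketch; this class is hypothetical and not part of
# the original module. A minimal spec only needs to populate the fields it
# uses; a single entry in symbol_renames is enough for ASTCodeUpgrader to
# rewrite "tf.conj(a)" into "tf.math.conj(a)".
class _ExampleRenameSpec(NoUpdateSpec):
  def __init__(self):
    NoUpdateSpec.__init__(self)
    self.symbol_renames = {"tf.conj": "tf.math.conj"}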
class _PastaEditVisitor(ast.NodeVisitor):
"""AST Visitor that processes function calls.
Updates function calls from old API version to new API version using a given
change spec.
"""
def __init__(self, api_change_spec):
self._api_change_spec = api_change_spec
self._log = [] # Holds 4-tuples: severity, line, col, msg.
self._stack = [] # Allow easy access to parents.
# Overridden to maintain a stack of nodes to allow for parent access
def visit(self, node):
self._stack.append(node)
super(_PastaEditVisitor, self).visit(node)
self._stack.pop()
@property
def errors(self):
return [log for log in self._log if log[0] == ERROR]
@property
def warnings(self):
return [log for log in self._log if log[0] == WARNING]
@property
def warnings_and_errors(self):
return [log for log in self._log if log[0] in (WARNING, ERROR)]
@property
def info(self):
return [log for log in self._log if log[0] == INFO]
@property
def log(self):
return self._log
def add_log(self, severity, lineno, col, msg):
self._log.append((severity, lineno, col, msg))
print("%s line %d:%d: %s" % (severity, lineno, col, msg))
def add_logs(self, logs):
"""Record a log and print it.
The log should be a tuple `(severity, lineno, col_offset, msg)`, which will
be printed and recorded. It is part of the log available in the `self.log`
property.
Args:
logs: The logs to add. Must be a list of tuples
`(severity, lineno, col_offset, msg)`.
"""
self._log.extend(logs)
for log in logs:
print("%s line %d:%d: %s" % log)
def _get_applicable_entries(self, transformer_field, full_name, name):
"""Get all list entries indexed by name that apply to full_name or name."""
# Transformers are indexed to full name, name, or no name
# as a performance optimization.
function_transformers = getattr(self._api_change_spec,
transformer_field, {})
glob_name = "*." + name if name else None
transformers = []
if full_name in function_transformers:
transformers.append(function_transformers[full_name])
if glob_name in function_transformers:
transformers.append(function_transformers[glob_name])
if "*" in function_transformers:
transformers.append(function_transformers["*"])
return transformers
def _get_applicable_dict(self, transformer_field, full_name, name):
"""Get all dict entries indexed by name that apply to full_name or name."""
# Transformers are indexed to full name, name, or no name
# as a performance optimization.
function_transformers = getattr(self._api_change_spec,
transformer_field, {})
glob_name = "*." + name if name else None
transformers = function_transformers.get("*", {}).copy()
transformers.update(function_transformers.get(glob_name, {}))
transformers.update(function_transformers.get(full_name, {}))
return transformers
def _get_full_name(self, node):
"""Traverse an Attribute node to generate a full name, e.g., "tf.foo.bar".
This is the inverse of `full_name_node`.
Args:
node: A Node of type Attribute.
Returns:
a '.'-delimited full-name or None if node was not Attribute or Name.
      i.e. `(foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".
"""
curr = node
items = []
while not isinstance(curr, ast.Name):
if not isinstance(curr, ast.Attribute):
return None
items.append(curr.attr)
curr = curr.value
items.append(curr.id)
return ".".join(reversed(items))
def _maybe_add_warning(self, node, full_name):
"""Adds an error to be printed about full_name at node."""
function_warnings = self._api_change_spec.function_warnings
if full_name in function_warnings:
level, message = function_warnings[full_name]
message = message.replace("<function name>", full_name)
self.add_log(level, node.lineno, node.col_offset,
"%s requires manual check. %s" % (full_name, message))
return True
else:
return False
def _maybe_add_module_deprecation_warning(self, node, full_name, whole_name):
"""Adds a warning if full_name is a deprecated module."""
warnings = self._api_change_spec.module_deprecations
if full_name in warnings:
level, message = warnings[full_name]
message = message.replace("<function name>", whole_name)
self.add_log(level, node.lineno, node.col_offset,
"Using member %s in deprecated module %s. %s" % (whole_name,
full_name,
message))
return True
else:
return False
def _maybe_add_call_warning(self, node, full_name, name):
"""Print a warning when specific functions are called with selected args.
    The function _maybe_add_warning matches the full name of the called
function, e.g., tf.foo.bar(). This function matches the function name that
is called, as long as the function is an attribute. For example,
`tf.foo.bar()` and `foo.bar()` are matched, but not `bar()`.
Args:
node: ast.Call object
full_name: The precomputed full name of the callable, if one exists, None
otherwise.
name: The precomputed name of the callable, if one exists, None otherwise.
Returns:
Whether an error was recorded.
"""
# Only look for *.-warnings here, the other will be handled by the Attribute
# visitor. Also, do not warn for bare functions, only if the call func is
# an attribute.
warned = False
if isinstance(node.func, ast.Attribute):
warned = self._maybe_add_warning(node, "*." + name)
    # All arg warnings are handled here, since only here do we have the args.
arg_warnings = self._get_applicable_dict("function_arg_warnings",
full_name, name)
variadic_args = uses_star_args_or_kwargs_in_call(node)
for (kwarg, arg), (level, warning) in sorted(arg_warnings.items()):
present, _ = get_arg_value(node, kwarg, arg) or variadic_args
if present:
warned = True
warning_message = warning.replace("<function name>", full_name or name)
template = "%s called with %s argument, requires manual check: %s"
if variadic_args:
template = ("%s called with *args or **kwargs that may include %s, "
"requires manual check: %s")
self.add_log(level, node.lineno, node.col_offset,
template % (full_name or name, kwarg, warning_message))
return warned
def _maybe_rename(self, parent, node, full_name):
"""Replace node (Attribute or Name) with a node representing full_name."""
new_name = self._api_change_spec.symbol_renames.get(full_name, None)
if new_name:
self.add_log(INFO, node.lineno, node.col_offset,
"Renamed %r to %r" % (full_name, new_name))
new_node = full_name_node(new_name, node.ctx)
ast.copy_location(new_node, node)
pasta.ast_utils.replace_child(parent, node, new_node)
return True
else:
return False
def _maybe_change_to_function_call(self, parent, node, full_name):
"""Wraps node (typically, an Attribute or Expr) in a Call."""
if full_name in self._api_change_spec.change_to_function:
if not isinstance(parent, ast.Call):
# ast.Call's constructor is really picky about how many arguments it
# wants, and also, it changed between Py2 and Py3.
if six.PY2:
new_node = ast.Call(node, [], [], None, None)
else:
new_node = ast.Call(node, [], [])
pasta.ast_utils.replace_child(parent, node, new_node)
ast.copy_location(new_node, node)
self.add_log(INFO, node.lineno, node.col_offset,
"Changed %r to a function call" % full_name)
return True
return False
def _maybe_add_arg_names(self, node, full_name):
"""Make args into keyword args if function called full_name requires it."""
function_reorders = self._api_change_spec.function_reorders
if full_name in function_reorders:
if uses_star_args_in_call(node):
self.add_log(WARNING, node.lineno, node.col_offset,
"(Manual check required) upgrading %s may require "
"re-ordering the call arguments, but it was passed "
"variable-length positional *args. The upgrade "
"script cannot handle these automatically." % full_name)
reordered = function_reorders[full_name]
new_keywords = []
idx = 0
for arg in node.args:
if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred):
continue # Can't move Starred to keywords
keyword_arg = reordered[idx]
keyword = ast.keyword(arg=keyword_arg, value=arg)
new_keywords.append(keyword)
idx += 1
if new_keywords:
self.add_log(INFO, node.lineno, node.col_offset,
"Added keywords to args of function %r" % full_name)
node.args = []
node.keywords = new_keywords + (node.keywords or [])
return True
return False
def _maybe_modify_args(self, node, full_name, name):
"""Rename keyword args if the function called full_name requires it."""
renamed_keywords = self._get_applicable_dict("function_keyword_renames",
full_name, name)
if not renamed_keywords:
return False
if uses_star_kwargs_in_call(node):
self.add_log(WARNING, node.lineno, node.col_offset,
"(Manual check required) upgrading %s may require "
"renaming or removing call arguments, but it was passed "
"variable-length *args or **kwargs. The upgrade "
"script cannot handle these automatically." %
(full_name or name))
modified = False
new_keywords = []
for keyword in node.keywords:
argkey = keyword.arg
if argkey in renamed_keywords:
modified = True
if renamed_keywords[argkey] is None:
lineno = getattr(keyword, "lineno", node.lineno)
col_offset = getattr(keyword, "col_offset", node.col_offset)
self.add_log(INFO, lineno, col_offset,
"Removed argument %s for function %s" % (
argkey, full_name or name))
else:
keyword.arg = renamed_keywords[argkey]
lineno = getattr(keyword, "lineno", node.lineno)
col_offset = getattr(keyword, "col_offset", node.col_offset)
self.add_log(INFO, lineno, col_offset,
"Renamed keyword argument for %s from %s to %s" % (
full_name, argkey, renamed_keywords[argkey]))
new_keywords.append(keyword)
else:
new_keywords.append(keyword)
if modified:
node.keywords = new_keywords
return modified
def visit_Call(self, node): # pylint: disable=invalid-name
"""Handle visiting a call node in the AST.
Args:
node: Current Node
"""
assert self._stack[-1] is node
# Get the name for this call, so we can index stuff with it.
full_name = self._get_full_name(node.func)
if full_name:
name = full_name.split(".")[-1]
elif isinstance(node.func, ast.Name):
name = node.func.id
elif isinstance(node.func, ast.Attribute):
name = node.func.attr
else:
name = None
# Call standard transformers for this node.
# Make sure warnings come first, since args or names triggering warnings
# may be removed by the other transformations.
self._maybe_add_call_warning(node, full_name, name)
# Make all args into kwargs
self._maybe_add_arg_names(node, full_name)
# Argument name changes or deletions
self._maybe_modify_args(node, full_name, name)
# Call transformers. These have the ability to modify the node, and if they
# do, will return the new node they created (or the same node if they just
    # changed it). They are given the parent, but we will take care of
# integrating their changes into the parent if they return a new node.
#
# These are matched on the old name, since renaming is performed by the
# Attribute visitor, which happens later.
transformers = self._get_applicable_entries("function_transformers",
full_name, name)
parent = self._stack[-2]
if transformers:
if uses_star_args_or_kwargs_in_call(node):
self.add_log(WARNING, node.lineno, node.col_offset,
"(Manual check required) upgrading %s may require "
"modifying call arguments, but it was passed "
"variable-length *args or **kwargs. The upgrade "
"script cannot handle these automatically." %
(full_name or name))
for transformer in transformers:
logs = []
new_node = transformer(parent, node, full_name, name, logs)
self.add_logs(logs)
if new_node and new_node is not node:
pasta.ast_utils.replace_child(parent, node, new_node)
node = new_node
self._stack[-1] = node
self.generic_visit(node)
def visit_Attribute(self, node): # pylint: disable=invalid-name
"""Handle bare Attributes i.e. [tf.foo, tf.bar]."""
assert self._stack[-1] is node
full_name = self._get_full_name(node)
if full_name:
parent = self._stack[-2]
# Make sure the warning comes first, otherwise the name may have changed
self._maybe_add_warning(node, full_name)
      # Once we have made a modification, the node is invalid and not worth
      # inspecting further. Also, we only perform modifications for simple
      # nodes, so there'd be no point in descending further.
if self._maybe_rename(parent, node, full_name):
return
if self._maybe_change_to_function_call(parent, node, full_name):
return
# The isinstance check is enough -- a bare Attribute is never root.
i = 2
while isinstance(self._stack[-i], ast.Attribute):
i += 1
whole_name = pasta.dump(self._stack[-(i-1)])
self._maybe_add_module_deprecation_warning(node, full_name, whole_name)
self.generic_visit(node)
def visit_Import(self, node): # pylint: disable=invalid-name
"""Handle visiting an import node in the AST.
Args:
node: Current Node
"""
new_aliases = []
import_updated = False
import_renames = getattr(self._api_change_spec, "import_renames", {})
inserts_after_imports = getattr(self._api_change_spec,
"inserts_after_imports", {})
# This loop processes imports in the format
# import foo as f, bar as b
for import_alias in node.names:
# Look for rename based on first component of from-import.
# i.e. based on foo in foo.bar.
import_first_component = import_alias.name.split(".")[0]
import_rename_spec = import_renames.get(import_first_component, None)
if not import_rename_spec or excluded_from_module_rename(
import_alias.name, import_rename_spec):
new_aliases.append(import_alias) # no change needed
continue
new_name = (
import_rename_spec.new_name +
import_alias.name[len(import_first_component):])
# If current import is
# import foo
# then new import should preserve imported name:
# import new_foo as foo
# This happens when module has just one component.
new_asname = import_alias.asname
if not new_asname and "." not in import_alias.name:
new_asname = import_alias.name
new_alias = ast.alias(name=new_name, asname=new_asname)
new_aliases.append(new_alias)
import_updated = True
# Insert any followup lines that should happen after this import.
full_import = (import_alias.name, import_alias.asname)
insert_offset = 1
for line_to_insert in inserts_after_imports.get(full_import, []):
assert self._stack[-1] is node
parent = self._stack[-2]
new_line_node = pasta.parse(line_to_insert)
ast.copy_location(new_line_node, node)
parent.body.insert(
parent.body.index(node) + insert_offset, new_line_node)
insert_offset += 1
# Insert a newline after the import if necessary
old_suffix = pasta.base.formatting.get(node, "suffix")
if old_suffix is None:
old_suffix = os.linesep
if os.linesep not in old_suffix:
pasta.base.formatting.set(node, "suffix", old_suffix + os.linesep)
# Apply indentation to new node.
pasta.base.formatting.set(new_line_node, "prefix",
pasta.base.formatting.get(node, "prefix"))
pasta.base.formatting.set(new_line_node, "suffix", os.linesep)
self.add_log(
INFO, node.lineno, node.col_offset,
"Adding `%s` after import of %s" %
(new_line_node, import_alias.name))
# Replace the node if at least one import needs to be updated.
if import_updated:
assert self._stack[-1] is node
parent = self._stack[-2]
new_node = ast.Import(new_aliases)
ast.copy_location(new_node, node)
pasta.ast_utils.replace_child(parent, node, new_node)
self.add_log(
INFO, node.lineno, node.col_offset,
"Changed import from %r to %r." %
(pasta.dump(node), pasta.dump(new_node)))
self.generic_visit(node)
def visit_ImportFrom(self, node): # pylint: disable=invalid-name
"""Handle visiting an import-from node in the AST.
Args:
node: Current Node
"""
if not node.module:
self.generic_visit(node)
return
from_import = node.module
# Look for rename based on first component of from-import.
# i.e. based on foo in foo.bar.
from_import_first_component = from_import.split(".")[0]
import_renames = getattr(self._api_change_spec, "import_renames", {})
import_rename_spec = import_renames.get(from_import_first_component, None)
if not import_rename_spec:
self.generic_visit(node)
return
# Split module aliases into the ones that require import update
# and those that don't. For e.g. if we want to rename "a" to "b"
# unless we import "a.c" in the following:
# from a import c, d
# we want to update import for "d" but not for "c".
updated_aliases = []
same_aliases = []
for import_alias in node.names:
full_module_name = "%s.%s" % (from_import, import_alias.name)
if excluded_from_module_rename(full_module_name, import_rename_spec):
same_aliases.append(import_alias)
else:
updated_aliases.append(import_alias)
if not updated_aliases:
self.generic_visit(node)
return
assert self._stack[-1] is node
parent = self._stack[-2]
# Replace first component of from-import with new name.
new_from_import = (
import_rename_spec.new_name +
from_import[len(from_import_first_component):])
updated_node = ast.ImportFrom(new_from_import, updated_aliases, node.level)
ast.copy_location(updated_node, node)
pasta.ast_utils.replace_child(parent, node, updated_node)
# If some imports had to stay the same, add another import for them.
additional_import_log = ""
if same_aliases:
same_node = ast.ImportFrom(from_import, same_aliases, node.level,
col_offset=node.col_offset, lineno=node.lineno)
ast.copy_location(same_node, node)
parent.body.insert(parent.body.index(updated_node), same_node)
# Apply indentation to new node.
pasta.base.formatting.set(
same_node, "prefix",
pasta.base.formatting.get(updated_node, "prefix"))
additional_import_log = " and %r" % pasta.dump(same_node)
self.add_log(
INFO, node.lineno, node.col_offset,
"Changed import from %r to %r%s." %
(pasta.dump(node),
pasta.dump(updated_node),
additional_import_log))
self.generic_visit(node)
class AnalysisResult(object):
"""This class represents an analysis result and how it should be logged.
This class must provide the following fields:
* `log_level`: The log level to which this detection should be logged
* `log_message`: The message that should be logged for this detection
For an example, see `VersionedTFImport`.
"""
class APIAnalysisSpec(object):
"""This class defines how `AnalysisResult`s should be generated.
It specifies how to map imports and symbols to `AnalysisResult`s.
This class must provide the following fields:
* `symbols_to_detect`: maps function names to `AnalysisResult`s
* `imports_to_detect`: maps imports represented as (full module name, alias)
    tuples to `AnalysisResult`s
For an example, see `TFAPIImportAnalysisSpec`.
"""
class PastaAnalyzeVisitor(_PastaEditVisitor):
"""AST Visitor that looks for specific API usage without editing anything.
This is used before any rewriting is done to detect if any symbols are used
that require changing imports or disabling rewriting altogether.
"""
def __init__(self, api_analysis_spec):
super(PastaAnalyzeVisitor, self).__init__(NoUpdateSpec())
self._api_analysis_spec = api_analysis_spec
self._results = [] # Holds AnalysisResult objects
@property
def results(self):
return self._results
def add_result(self, analysis_result):
self._results.append(analysis_result)
def visit_Attribute(self, node): # pylint: disable=invalid-name
"""Handle bare Attributes i.e. [tf.foo, tf.bar]."""
full_name = self._get_full_name(node)
if full_name:
detection = self._api_analysis_spec.symbols_to_detect.get(full_name, None)
if detection:
self.add_result(detection)
self.add_log(
detection.log_level, node.lineno, node.col_offset,
detection.log_message)
self.generic_visit(node)
def visit_Import(self, node): # pylint: disable=invalid-name
"""Handle visiting an import node in the AST.
Args:
node: Current Node
"""
for import_alias in node.names:
      # Detect based on the full import name and alias.
full_import = (import_alias.name, import_alias.asname)
detection = (self._api_analysis_spec
.imports_to_detect.get(full_import, None))
if detection:
self.add_result(detection)
self.add_log(
detection.log_level, node.lineno, node.col_offset,
detection.log_message)
self.generic_visit(node)
def visit_ImportFrom(self, node): # pylint: disable=invalid-name
"""Handle visiting an import-from node in the AST.
Args:
node: Current Node
"""
if not node.module:
self.generic_visit(node)
return
from_import = node.module
for import_alias in node.names:
      # Detect based on the full module name and alias.
full_module_name = "%s.%s" % (from_import, import_alias.name)
full_import = (full_module_name, import_alias.asname)
detection = (self._api_analysis_spec
.imports_to_detect.get(full_import, None))
if detection:
self.add_result(detection)
self.add_log(
detection.log_level, node.lineno, node.col_offset,
detection.log_message)
self.generic_visit(node)
class ASTCodeUpgrader(object):
"""Handles upgrading a set of Python files using a given API change spec."""
def __init__(self, api_change_spec):
if not isinstance(api_change_spec, APIChangeSpec):
raise TypeError("Must pass APIChangeSpec to ASTCodeUpgrader, got %s" %
type(api_change_spec))
self._api_change_spec = api_change_spec
def process_file(self, in_filename, out_filename):
"""Process the given python file for incompatible changes.
Args:
in_filename: filename to parse
out_filename: output file to write to
Returns:
A tuple representing number of files processed, log of actions, errors
"""
    # Write to a temporary file, just in case we are doing an in-place modify.
# pylint: disable=g-backslash-continuation
with open(in_filename, "r") as in_file, \
tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
ret = self.process_opened_file(in_filename, in_file, out_filename,
temp_file)
# pylint: enable=g-backslash-continuation
shutil.move(temp_file.name, out_filename)
return ret
def format_log(self, log, in_filename):
log_string = "%d:%d: %s: %s" % (log[1], log[2], log[0], log[3])
if in_filename:
return in_filename + ":" + log_string
else:
return log_string
def update_string_pasta(self, text, in_filename):
"""Updates a file using pasta."""
try:
t = pasta.parse(text)
except (SyntaxError, ValueError, TypeError):
log = ["ERROR: Failed to parse.\n" + traceback.format_exc()]
return 0, "", log, []
preprocess_logs, preprocess_errors = self._api_change_spec.preprocess(t)
visitor = _PastaEditVisitor(self._api_change_spec)
visitor.visit(t)
self._api_change_spec.clear_preprocessing()
logs = [self.format_log(log, None) for log in (preprocess_logs +
visitor.log)]
errors = [self.format_log(error, in_filename)
for error in (preprocess_errors +
visitor.warnings_and_errors)]
return 1, pasta.dump(t), logs, errors
def _format_log(self, log, in_filename, out_filename):
text = "-" * 80 + "\n"
text += "Processing file %r\n outputting to %r\n" % (in_filename,
out_filename)
text += "-" * 80 + "\n\n"
text += "\n".join(log) + "\n"
text += "-" * 80 + "\n\n"
return text
def process_opened_file(self, in_filename, in_file, out_filename, out_file):
"""Process the given python file for incompatible changes.
This function is split out to facilitate StringIO testing from
tf_upgrade_test.py.
Args:
in_filename: filename to parse
in_file: opened file (or StringIO)
out_filename: output file to write to
out_file: opened file (or StringIO)
Returns:
A tuple representing number of files processed, log of actions, errors
"""
lines = in_file.readlines()
processed_file, new_file_content, log, process_errors = (
self.update_string_pasta("".join(lines), in_filename))
if out_file and processed_file:
out_file.write(new_file_content)
return (processed_file,
self._format_log(log, in_filename, out_filename),
process_errors)
def process_tree(self, root_directory, output_root_directory,
copy_other_files):
"""Processes upgrades on an entire tree of python files in place.
    Note that only Python files are processed. If you have custom code in
    other languages, you will need to upgrade it manually.
Args:
root_directory: Directory to walk and process.
output_root_directory: Directory to use as base.
copy_other_files: Copy files that are not touched by this converter.
Returns:
A tuple of files processed, the report string for all files, and a dict
mapping filenames to errors encountered in that file.
"""
if output_root_directory == root_directory:
return self.process_tree_inplace(root_directory)
# make sure output directory doesn't exist
if output_root_directory and os.path.exists(output_root_directory):
print("Output directory %r must not already exist." %
(output_root_directory))
sys.exit(1)
# make sure output directory does not overlap with root_directory
norm_root = os.path.split(os.path.normpath(root_directory))
norm_output = os.path.split(os.path.normpath(output_root_directory))
if norm_root == norm_output:
print("Output directory %r same as input directory %r" %
(root_directory, output_root_directory))
sys.exit(1)
# Collect list of files to process (we do this to correctly handle if the
# user puts the output directory in some sub directory of the input dir)
files_to_process = []
files_to_copy = []
for dir_name, _, file_list in os.walk(root_directory):
py_files = [f for f in file_list if f.endswith(".py")]
copy_files = [f for f in file_list if not f.endswith(".py")]
for filename in py_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(output_root_directory,
os.path.relpath(fullpath,
root_directory))
files_to_process.append((fullpath, fullpath_output))
if copy_other_files:
for filename in copy_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(output_root_directory,
os.path.relpath(
fullpath, root_directory))
files_to_copy.append((fullpath, fullpath_output))
file_count = 0
tree_errors = {}
report = ""
report += ("=" * 80) + "\n"
report += "Input tree: %r\n" % root_directory
report += ("=" * 80) + "\n"
for input_path, output_path in files_to_process:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
if os.path.islink(input_path):
link_target = os.readlink(input_path)
link_target_output = os.path.join(
output_root_directory, os.path.relpath(link_target, root_directory))
if (link_target, link_target_output) in files_to_process:
# Create a link to the new location of the target file
os.symlink(link_target_output, output_path)
else:
report += "Copying symlink %s without modifying its target %s" % (
input_path, link_target)
os.symlink(link_target, output_path)
continue
file_count += 1
_, l_report, l_errors = self.process_file(input_path, output_path)
tree_errors[input_path] = l_errors
report += l_report
for input_path, output_path in files_to_copy:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
shutil.copy(input_path, output_path)
return file_count, report, tree_errors
def process_tree_inplace(self, root_directory):
"""Process a directory of python files in place."""
files_to_process = []
for dir_name, _, file_list in os.walk(root_directory):
py_files = [os.path.join(dir_name,
f) for f in file_list if f.endswith(".py")]
files_to_process += py_files
file_count = 0
tree_errors = {}
report = ""
report += ("=" * 80) + "\n"
report += "Input tree: %r\n" % root_directory
report += ("=" * 80) + "\n"
for path in files_to_process:
if os.path.islink(path):
report += "Skipping symlink %s.\n" % path
continue
file_count += 1
_, l_report, l_errors = self.process_file(path, path)
tree_errors[path] = l_errors
report += l_report
return file_count, report, tree_errors
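# Editor-added illustrative sketch; the helper below is hypothetical and not
# part of the original module. It upgrades a snippet held in memory using the
# minimal _ExampleRenameSpec defined above, without touching any files; the
# testInplace test in tf_upgrade_v2_test.py exercises the same rename via the
# full TFAPIChangeSpec.
def _example_upgrade_string():
  upgrader = ASTCodeUpgrader(_ExampleRenameSpec())
  count, new_text, _, errors = upgrader.update_string_pasta(
      "tf.conj(a)\n", "<string>")
  assert count == 1 and not errors
  assert new_text == "tf.math.conj(a)\n"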
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/compatibility/ast_edits.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from 1.* TensorFlow to 2.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import copy
import functools
import sys
import pasta
from tensorflow.tools.compatibility import all_renames_v2
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import module_deprecations_v2
from tensorflow.tools.compatibility import reorders_v2
# These pylint warnings are a mistake.
# pylint: disable=g-explicit-bool-comparison,g-bool-id-comparison
class UnaliasedTFImport(ast_edits.AnalysisResult):
def __init__(self):
self.log_level = ast_edits.ERROR
self.log_message = ("The tf_upgrade_v2 script detected an unaliased "
"`import tensorflow`. The script can only run when "
"importing with `import tensorflow as tf`.")
class VersionedTFImport(ast_edits.AnalysisResult):
def __init__(self, version):
self.log_level = ast_edits.INFO
self.log_message = ("Not upgrading symbols because `tensorflow." + version
+ "` was directly imported as `tf`.")
class TFAPIImportAnalysisSpec(ast_edits.APIAnalysisSpec):
def __init__(self):
self.symbols_to_detect = {}
self.imports_to_detect = {
("tensorflow", None): UnaliasedTFImport(),
("tensorflow.compat.v1", "tf"): VersionedTFImport("compat.v1"),
("tensorflow.compat.v2", "tf"): VersionedTFImport("compat.v2"),
}
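# Editor-added illustrative sketch; the helper below is hypothetical and not
# part of the original module. Running the analysis pass over a file that
# imports a versioned tf module yields a VersionedTFImport result, which is
# how the upgrader knows not to rewrite symbols in such files.
def _example_import_analysis():
  visitor = ast_edits.PastaAnalyzeVisitor(TFAPIImportAnalysisSpec())
  visitor.visit(pasta.parse("import tensorflow.compat.v1 as tf\n"))
  assert any(isinstance(r, VersionedTFImport) for r in visitor.results)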
class TFAPIChangeSpec(ast_edits.NoUpdateSpec):
"""List of maps that describe what changed in the API."""
def __init__(self):
# Maps from a function name to a dictionary that describes how to
# map from an old argument keyword to the new argument keyword.
# If the new argument is None, it will be removed.
# Only keyword args are handled, so make sure to also put any function in
# function_reorders to ensure that all args are made into keywords first.
self.function_keyword_renames = {
# TODO(b/129398290)
# "tf.string_split": {
# "delimiter": "sep",
# },
"tf.test.assert_equal_graph_def": {
"checkpoint_v2": None,
"hash_table_shared_name": None,
},
"tf.autograph.to_code": {
"arg_types": None,
"arg_values": None,
"indentation": None,
},
"tf.autograph.to_graph": {
"arg_types": None,
"arg_values": None,
},
"tf.nn.embedding_lookup": {
"validate_indices": None,
},
"tf.image.sample_distorted_bounding_box": {
"seed2": None,
},
"tf.gradients": {
"colocate_gradients_with_ops": None,
},
"tf.hessians": {
"colocate_gradients_with_ops": None,
},
"*.minimize": {
"colocate_gradients_with_ops": None,
},
"*.compute_gradients": {
"colocate_gradients_with_ops": None,
},
"tf.cond": {
"strict": None,
"fn1": "true_fn",
"fn2": "false_fn"
},
"tf.argmin": {
"dimension": "axis",
},
"tf.argmax": {
"dimension": "axis",
},
"tf.arg_min": {
"dimension": "axis",
},
"tf.arg_max": {
"dimension": "axis",
},
"tf.math.argmin": {
"dimension": "axis",
},
"tf.math.argmax": {
"dimension": "axis",
},
"tf.image.crop_and_resize": {
"box_ind": "box_indices",
},
"tf.extract_image_patches": {
"ksizes": "sizes",
},
"tf.image.extract_image_patches": {
"ksizes": "sizes",
},
"tf.image.resize": {
"align_corners": None,
},
"tf.image.resize_images": {
"align_corners": None,
},
"tf.expand_dims": {
"dim": "axis",
},
"tf.batch_to_space": {
"block_size": "block_shape",
},
"tf.space_to_batch": {
"block_size": "block_shape",
},
"tf.nn.space_to_batch": {
"block_size": "block_shape",
},
"tf.constant": {
"verify_shape": "verify_shape_is_now_always_true",
},
"tf.convert_to_tensor": {
"preferred_dtype": "dtype_hint"
},
"tf.nn.softmax_cross_entropy_with_logits": {
"dim": "axis",
"_sentinel": None,
},
"tf.nn.softmax_cross_entropy_with_logits_v2": {
"dim": "axis"
},
"tf.linalg.l2_normalize": {
"dim": "axis",
},
"tf.linalg.norm": {
"keep_dims": "keepdims",
},
"tf.norm": {
"keep_dims": "keepdims",
},
"tf.load_file_system_library": {
"library_filename": "library_location",
},
"tf.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.math.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.nn.erosion2d": {
"kernel": "filters",
"rates": "dilations",
},
"tf.math.l2_normalize": {
"dim": "axis",
},
"tf.math.log_softmax": {
"dim": "axis",
},
"tf.math.softmax": {
"dim": "axis"
},
"tf.nn.l2_normalize": {
"dim": "axis",
},
"tf.nn.log_softmax": {
"dim": "axis",
},
"tf.nn.moments": {
"keep_dims": "keepdims",
},
"tf.nn.pool": {
"dilation_rate": "dilations"
},
"tf.nn.separable_conv2d": {
"rate": "dilations"
},
"tf.nn.depthwise_conv2d": {
"rate": "dilations"
},
"tf.nn.softmax": {
"dim": "axis"
},
"tf.nn.sufficient_statistics": {
"keep_dims": "keepdims"
},
"tf.debugging.assert_all_finite": {
"t": "x",
"msg": "message",
},
"tf.sparse.add": {
"thresh": "threshold",
},
"tf.sparse_add": {
"thresh": "threshold",
},
"tf.sparse.concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse_concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse.split": {
"split_dim": "axis",
},
"tf.sparse_split": {
"split_dim": "axis",
},
"tf.sparse.reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse.reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.nn.max_pool_with_argmax": {
"Targmax": "output_dtype",
},
"tf.nn.max_pool": {
"value": "input"
},
"tf.nn.avg_pool": {
"value": "input"
},
"tf.nn.avg_pool2d": {
"value": "input"
},
"tf.multinomial": {
"output_dtype": "dtype",
},
"tf.random.multinomial": {
"output_dtype": "dtype",
},
"tf.reverse_sequence": {
"seq_dim": "seq_axis",
"batch_dim": "batch_axis",
},
"tf.nn.batch_norm_with_global_normalization": {
"t": "input",
"m": "mean",
"v": "variance",
},
"tf.nn.dilation2d": {
"filter": "filters",
"rates": "dilations",
},
"tf.nn.conv3d": {
"filter": "filters"
},
"tf.zeros_like": {
"tensor": "input",
},
"tf.ones_like": {
"tensor": "input",
},
"tf.nn.conv2d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.conv3d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.convolution": {
"filter": "filters",
"dilation_rate": "dilations",
},
"tf.gfile.Exists": {
"filename": "path",
},
"tf.gfile.Remove": {
"filename": "path",
},
"tf.gfile.Stat": {
"filename": "path",
},
"tf.gfile.Glob": {
"filename": "pattern",
},
"tf.gfile.MkDir": {
"dirname": "path",
},
"tf.gfile.MakeDirs": {
"dirname": "path",
},
"tf.gfile.DeleteRecursively": {
"dirname": "path",
},
"tf.gfile.IsDirectory": {
"dirname": "path",
},
"tf.gfile.ListDirectory": {
"dirname": "path",
},
"tf.gfile.Copy": {
"oldpath": "src",
"newpath": "dst",
},
"tf.gfile.Rename": {
"oldname": "src",
"newname": "dst",
},
"tf.gfile.Walk": {
"in_order": "topdown",
},
"tf.random.stateless_multinomial": {
"output_dtype": "dtype",
},
"tf.string_to_number": {
"string_tensor": "input",
},
"tf.strings.to_number": {
"string_tensor": "input",
},
"tf.string_to_hash_bucket": {
"string_tensor": "input",
},
"tf.strings.to_hash_bucket": {
"string_tensor": "input",
},
"tf.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.strings.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.squeeze": {
"squeeze_dims": "axis",
},
"tf.nn.weighted_moments": {
"keep_dims": "keepdims"
},
"tf.nn.conv1d": {
"value": "input",
"use_cudnn_on_gpu": None,
},
"tf.nn.conv2d": {
"filter": "filters",
"use_cudnn_on_gpu": None,
},
"tf.nn.conv2d_backprop_input": {
"use_cudnn_on_gpu": None,
"input_sizes": "output_shape",
"out_backprop": "input",
"filter": "filters",
},
"tf.contrib.summary.audio": {
"tensor": "data",
"family": None,
},
"tf.contrib.summary.create_file_writer": {
"name": None,
},
"tf.contrib.summary.generic": {
"name": "tag",
"tensor": "data",
"family": None,
},
"tf.contrib.summary.histogram": {
"tensor": "data",
"family": None,
},
"tf.contrib.summary.image": {
"tensor": "data",
"bad_color": None,
"max_images": "max_outputs",
"family": None,
},
"tf.contrib.summary.scalar": {
"tensor": "data",
"family": None,
},
"tf.nn.weighted_cross_entropy_with_logits": {
"targets": "labels",
},
"tf.decode_raw": {
"bytes": "input_bytes",
},
"tf.io.decode_raw": {
"bytes": "input_bytes",
},
"tf.contrib.framework.load_variable": {
"checkpoint_dir": "ckpt_dir_or_file",
}
}
# Mapping from function to the new name of the function
# Add additional renames not in renames_v2.py to all_renames_v2.py.
self.symbol_renames = all_renames_v2.symbol_renames
self.import_renames = {}
# Variables that should be changed to functions.
self.change_to_function = {}
# pylint: disable=line-too-long
# This list should just contain names of functions that had
# their arguments reordered. After adding a function name to the list
# run the following to update reorders_v2.py:
# bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
# bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
# pylint: enable=line-too-long
self.reordered_function_names = {
"tf.io.serialize_sparse",
"tf.io.serialize_many_sparse",
"tf.argmax",
"tf.argmin",
"tf.batch_to_space",
"tf.cond",
"tf.nn.space_to_batch",
"tf.boolean_mask",
"tf.convert_to_tensor",
"tf.nn.conv1d",
"tf.nn.conv2d",
"tf.nn.conv2d_backprop_input",
"tf.nn.ctc_beam_search_decoder",
"tf.nn.moments",
"tf.nn.convolution",
"tf.nn.crelu",
"tf.nn.weighted_moments",
"tf.nn.pool",
"tf.nn.separable_conv2d",
"tf.nn.depthwise_conv2d",
"tf.multinomial",
"tf.random.multinomial",
"tf.pad",
"tf.quantize_v2",
"tf.feature_column.categorical_column_with_vocabulary_file",
"tf.shape",
"tf.size",
# TODO(b/129398290)
# "tf.string_split",
"tf.random.poisson",
"tf.sparse.add",
"tf.sparse_add",
"tf.sparse.concat",
"tf.sparse_concat",
"tf.sparse.segment_mean",
"tf.sparse.segment_sqrt_n",
"tf.sparse.segment_sum",
"tf.sparse_matmul",
"tf.sparse.reduce_max",
"tf.sparse_reduce_max",
"tf.io.decode_csv",
"tf.strings.length",
"tf.strings.reduce_join",
"tf.strings.substr",
"tf.substr",
"tf.transpose",
"tf.tuple",
"tf.parse_example",
"tf.parse_single_example",
"tf.io.parse_example",
"tf.io.parse_single_example",
"tf.while_loop",
"tf.reduce_all",
"tf.math.reduce_all",
"tf.reduce_any",
"tf.math.reduce_any",
"tf.reduce_min",
"tf.math.reduce_min",
"tf.reduce_max",
"tf.math.reduce_max",
"tf.reduce_sum",
"tf.math.reduce_sum",
"tf.reduce_mean",
"tf.math.reduce_mean",
"tf.reduce_prod",
"tf.math.reduce_prod",
"tf.reduce_logsumexp",
"tf.math.reduce_logsumexp",
"tf.reduce_join",
"tf.confusion_matrix",
"tf.math.confusion_matrix",
"tf.math.in_top_k",
"tf.nn.depth_to_space",
"tf.nn.embedding_lookup",
"tf.nn.embedding_lookup_sparse",
"tf.nn.in_top_k",
"tf.nn.space_to_depth",
"tf.test.assert_equal_graph_def",
"tf.linalg.norm",
"tf.norm",
"tf.reverse_sequence",
"tf.sparse_split",
# tf.nn.softmax_cross_entropy_with_logits *must* be called with
# keyword arguments. Add keyword arguments in rare case when they
# are not specified.
"tf.nn.softmax_cross_entropy_with_logits",
"tf.nn.fractional_avg_pool",
"tf.nn.fractional_max_pool",
"tf.image.sample_distorted_bounding_box",
"tf.gradients",
"tf.hessians",
"tf.nn.max_pool",
"tf.nn.avg_pool",
"tf.estimator.LinearClassifier",
"tf.estimator.LinearRegressor",
"tf.estimator.DNNLinearCombinedClassifier",
"tf.estimator.DNNLinearCombinedRegressor",
"tf.estimator.DNNRegressor",
"tf.estimator.DNNClassifier",
"tf.estimator.BaselineClassifier",
"tf.estimator.BaselineRegressor",
"tf.initializers.uniform_unit_scaling",
"tf.uniform_unit_scaling_initializer",
"tf.train.sdca_fprint",
"tf.train.sdca_optimizer",
"tf.train.sdca_shrink_l1",
"tf.data.experimental.TensorStructure",
"tf.data.experimental.SparseTensorStructure",
"tf.data.experimental.RaggedTensorStructure",
"tf.data.experimental.TensorArrayStructure",
}
# Manual mapping of function names to be reordered to their list of argument
# names, in order. Only use this if argument names cannot be autodetected,
# e.g. if the functions are in contrib.
self.manual_function_reorders = {
"tf.contrib.summary.audio": [
"name", "tensor", "sample_rate", "max_outputs", "family", "step"],
"tf.contrib.summary.create_file_writer": [
"logdir", "max_queue", "flush_millis", "filename_suffix", "name"],
"tf.contrib.summary.generic": [
"name", "tensor", "metadata", "family", "step"],
"tf.contrib.summary.histogram": [
"name", "tensor", "family", "step"],
"tf.contrib.summary.image": [
"name", "tensor", "bad_color", "max_images", "family", "step"],
"tf.contrib.summary.scalar": [
"name", "tensor", "family", "step"],
}
# Functions that were reordered should be changed to the new keyword args
# for safety, if positional arguments are used. If you have reversed the
# positional arguments yourself, this could do the wrong thing.
self.function_reorders = dict(reorders_v2.reorders)
self.function_reorders.update(self.manual_function_reorders)
decay_function_comment = (
ast_edits.INFO,
"To use learning rate decay schedules with TensorFlow 2.0, switch to "
"the schedules in `tf.keras.optimizers.schedules`.\n"
)
assert_return_type_comment = (
ast_edits.INFO,
"<function name> has been changed to return None, the "
"data argument has been removed, and arguments have been reordered."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
assert_rank_comment = (
ast_edits.INFO,
"<function name> has been changed to return None, and"
" the data and summarize arguments have been removed."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
contrib_layers_layer_norm_comment = (
ast_edits.WARNING,
"(Manual edit required) `tf.contrib.layers.layer_norm` has been "
"deprecated, and its implementation has been integrated with "
"`tf.keras.layers.LayerNormalization` in TensorFlow 2.0. "
"Note that, the default value of `epsilon` is changed to `1e-3` in the "
"new API from `1e-12`, and this may introduce numerical differences. "
"Please check the new API and use that instead."
)
contrib_estimator_head_comment = (
ast_edits.WARNING,
"(Manual edit required) `tf.contrib.estimator.*_head` has been "
"deprecated, and its implementation has been integrated with "
"`tf.estimator.*Head` in TensorFlow 2.0. "
"Please check the new API and use that instead."
)
initializers_no_dtype_comment = (
ast_edits.INFO, "Initializers no longer have the "
"dtype argument in the constructor or partition_info argument in the "
"__call__ method.\nThe calls have been converted to compat.v1 for "
"safety (even though they may already have been correct).")
metrics_comment = (
ast_edits.INFO,
"tf.metrics have been replaced with object oriented versions in"
" TF 2.0 and after. The metric function calls have been converted to "
"compat.v1 for backward compatibility. Please update these calls to "
"the TF 2.0 versions.")
losses_comment = (
ast_edits.INFO,
"tf.losses have been replaced with object oriented versions in"
" TF 2.0 and after. The loss function calls have been converted to "
"compat.v1 for backward compatibility. Please update these calls to "
"the TF 2.0 versions.")
# This could be done with a _rename_if_arg_not_found_transformer
deprecate_partition_strategy_comment = (
ast_edits.WARNING,
"`partition_strategy` has been removed from <function name>. "
" The 'div' strategy will be used by default.")
# make change instead
uniform_unit_scaling_initializer_comment = (
ast_edits.ERROR,
"uniform_unit_scaling_initializer has been removed. Please use"
" tf.initializers.variance_scaling instead with distribution=uniform "
"to get equivalent behaviour.")
# Make change instead (issue warning about strip_...)
export_saved_model_renamed = (
ast_edits.ERROR,
"(Manual edit required) Please rename the method export_savedmodel() "
"to export_saved_model(). Two things to note:\n\t(1) The argument "
"strip_default_attributes has been removed. The function will always "
"strip the default attributes from ops. If this breaks your code, "
"please switch to tf.compat.v1.estimator.Estimator.\n\t(2) This change "
"only effects core estimator. If you are using "
"tf.contrib.learn.Estimator, please switch to using core estimator.")
summary_api_comment = (
ast_edits.INFO,
"The TF 1.x summary API cannot be automatically migrated to TF 2.0, so "
"symbols have been converted to tf.compat.v1.summary.* and must be "
"migrated manually. Typical usage will only require changes to the "
"summary writing logic, not to individual calls like scalar(). "
"For examples of the new summary API, see the Effective TF 2.0 "
"migration document or check the TF 2.0 TensorBoard tutorials.")
contrib_summary_comment = (
ast_edits.WARNING,
"tf.contrib.summary.* functions have been migrated best-effort to "
"tf.compat.v2.summary.* equivalents where possible, but the resulting "
"code is not guaranteed to work, so please check carefully. For more "
"information about the new summary API, see the Effective TF 2.0 "
"migration document or check the updated TensorBoard tutorials.")
contrib_summary_family_arg_comment = (
ast_edits.WARNING,
"<function name> replacement does not accept a 'family' argument; "
"instead regular name scoping should be used. This call site specifies "
"a family argument that has been removed on conversion, so the emitted "
"tag names may be incorrect without manual editing.")
contrib_create_file_writer_comment = (
ast_edits.WARNING,
"tf.contrib.summary.create_file_writer() has been ported to the new "
"tf.compat.v2.summary.create_file_writer(), which no longer re-uses "
"existing event files for the same logdir; instead it always opens a "
"new writer/file. The python writer objects must be re-used explicitly "
"if the reusing behavior is desired.")
contrib_summary_record_every_n_comment = (
ast_edits.ERROR,
"(Manual edit required) "
"tf.contrib.summary.record_summaries_every_n_global_steps(n, step) "
"should be replaced by a call to tf.compat.v2.summary.record_if() with "
"the argument `lambda: tf.math.equal(0, global_step % n)` (or in graph "
"mode, the lambda body can be used directly). If no global step was "
"passed, instead use tf.compat.v1.train.get_or_create_global_step().")
contrib_summary_graph_comment = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.summary.graph() has no direct "
"equivalent in TF 2.0 because manual graph construction has been "
"superseded by use of tf.function. To log tf.function execution graphs "
"to the summary writer, use the new tf.compat.v2.summary.trace_* "
"functions instead.")
contrib_summary_import_event_comment = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.summary.import_event() has no "
"direct equivalent in TF 2.0. For a similar experimental feature, try "
"tf.compat.v2.summary.experimental.write_raw_pb() which also accepts "
"serialized summary protocol buffer input, but for tf.Summary "
"protobufs rather than tf.Events.")
keras_default_save_format_comment = (
ast_edits.WARNING,
"(This warning is only applicable if the code saves a tf.Keras model) "
"Keras model.save now saves to the Tensorflow SavedModel format by "
"default, instead of HDF5. To continue saving to HDF5, add the "
"argument save_format='h5' to the save() function.")
distribute_strategy_api_changes = (
"If you're using the strategy with a "
"custom training loop, note the following changes in methods: "
"make_dataset_iterator->experimental_distribute_dataset, "
"experimental_make_numpy_iterator->experimental_make_numpy_dataset, "
"extended.call_for_each_replica->experimental_run_v2, "
"reduce requires an axis argument, "
"unwrap->experimental_local_results "
"experimental_initialize and experimental_finalize no longer needed ")
contrib_mirrored_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.distribute.MirroredStrategy has "
"been migrated to tf.distribute.MirroredStrategy. Things to note: "
"Constructor arguments have changed. If you are using "
"MirroredStrategy with Keras training framework, the input provided to "
"`model.fit` will be assumed to have global batch size and split "
"across the replicas. " + distribute_strategy_api_changes)
core_mirrored_strategy_warning = (
ast_edits.WARNING,
"(Manual edit may be required) tf.distribute.MirroredStrategy API has "
"changed. " + distribute_strategy_api_changes)
contrib_one_device_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.distribute.OneDeviceStrategy has "
"been migrated to tf.distribute.OneDeviceStrategy. " +
distribute_strategy_api_changes)
contrib_tpu_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.distribute.TPUStrategy has "
"been migrated to tf.distribute.experimental.TPUStrategy. Note the "
"slight changes in constructor. " + distribute_strategy_api_changes)
contrib_collective_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) "
"tf.contrib.distribute.CollectiveAllReduceStrategy has "
"been migrated to "
"tf.distribute.experimental.MultiWorkerMirroredStrategy. Note the "
"changes in constructor. " + distribute_strategy_api_changes)
contrib_ps_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) "
"tf.contrib.distribute.ParameterServerStrategy has "
"been migrated to "
"tf.distribute.experimental.ParameterServerStrategy (multi machine) "
" and tf.distribute.experimental.CentralStorageStrategy (one machine). "
"Note the changes in constructors. " + distribute_strategy_api_changes)
# Function warnings. <function name> placeholder inside warnings will be
# replaced by function name.
# You can use *. to add items which do not check the FQN, and apply to e.g.,
# methods.
self.function_warnings = {
"*.export_savedmodel":
export_saved_model_renamed,
"*.save":
keras_default_save_format_comment,
"tf.assert_equal":
assert_return_type_comment,
"tf.assert_none_equal":
assert_return_type_comment,
"tf.assert_negative":
assert_return_type_comment,
"tf.assert_positive":
assert_return_type_comment,
"tf.assert_non_negative":
assert_return_type_comment,
"tf.assert_non_positive":
assert_return_type_comment,
"tf.assert_near":
assert_return_type_comment,
"tf.assert_less":
assert_return_type_comment,
"tf.assert_less_equal":
assert_return_type_comment,
"tf.assert_greater":
assert_return_type_comment,
"tf.assert_greater_equal":
assert_return_type_comment,
"tf.assert_integer":
assert_return_type_comment,
"tf.assert_type":
assert_return_type_comment,
"tf.assert_scalar":
assert_return_type_comment,
"tf.assert_rank":
assert_rank_comment,
"tf.assert_rank_at_least":
assert_rank_comment,
"tf.assert_rank_in":
assert_rank_comment,
"tf.contrib.layers.layer_norm":
contrib_layers_layer_norm_comment,
"tf.contrib.estimator.binary_classification_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.logistic_regression_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.multi_class_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.multi_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.multi_label_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.poisson_regression_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.regression_head":
contrib_estimator_head_comment,
"tf.contrib.summary.all_summary_ops":
contrib_summary_comment,
"tf.contrib.summary.audio":
contrib_summary_comment,
"tf.contrib.summary.create_file_writer":
contrib_create_file_writer_comment,
"tf.contrib.summary.generic":
contrib_summary_comment,
"tf.contrib.summary.graph":
contrib_summary_graph_comment,
"tf.contrib.summary.histogram":
contrib_summary_comment,
"tf.contrib.summary.import_event":
contrib_summary_import_event_comment,
"tf.contrib.summary.image":
contrib_summary_comment,
"tf.contrib.summary.record_summaries_every_n_global_steps":
contrib_summary_record_every_n_comment,
"tf.contrib.summary.scalar":
contrib_summary_comment,
"tf.debugging.assert_equal":
assert_return_type_comment,
"tf.debugging.assert_greater":
assert_return_type_comment,
"tf.debugging.assert_greater_equal":
assert_return_type_comment,
"tf.debugging.assert_integer":
assert_return_type_comment,
"tf.debugging.assert_less":
assert_return_type_comment,
"tf.debugging.assert_less_equal":
assert_return_type_comment,
"tf.debugging.assert_near":
assert_return_type_comment,
"tf.debugging.assert_negative":
assert_return_type_comment,
"tf.debugging.assert_non_negative":
assert_return_type_comment,
"tf.debugging.assert_non_positive":
assert_return_type_comment,
"tf.debugging.assert_none_equal":
assert_return_type_comment,
"tf.debugging.assert_positive":
assert_return_type_comment,
"tf.debugging.assert_type":
assert_return_type_comment,
"tf.debugging.assert_scalar":
assert_return_type_comment,
"tf.debugging.assert_rank":
assert_rank_comment,
"tf.debugging.assert_rank_at_least":
assert_rank_comment,
"tf.debugging.assert_rank_in":
assert_rank_comment,
"tf.train.exponential_decay":
decay_function_comment,
"tf.train.piecewise_constant_decay":
decay_function_comment,
"tf.train.polynomial_decay":
decay_function_comment,
"tf.train.natural_exp_decay":
decay_function_comment,
"tf.train.inverse_time_decay":
decay_function_comment,
"tf.train.cosine_decay":
decay_function_comment,
"tf.train.cosine_decay_restarts":
decay_function_comment,
"tf.train.linear_cosine_decay":
decay_function_comment,
"tf.train.noisy_linear_cosine_decay":
decay_function_comment,
"tf.nn.embedding_lookup":
deprecate_partition_strategy_comment,
"tf.nn.embedding_lookup_sparse":
deprecate_partition_strategy_comment,
"tf.nn.nce_loss":
deprecate_partition_strategy_comment,
"tf.nn.safe_embedding_lookup_sparse":
deprecate_partition_strategy_comment,
"tf.nn.sampled_softmax_loss":
deprecate_partition_strategy_comment,
"tf.keras.estimator.model_to_estimator":
(ast_edits.WARNING,
"Estimators from <function name> will save object-based "
"checkpoints (format used by `keras_model.save_weights` and "
"`keras_model.load_weights`) by default in 2.0. To continue "
"saving name-based checkpoints, set `checkpoint_format='saver'`."),
"tf.keras.initializers.Zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.Ones":
initializers_no_dtype_comment,
"tf.keras.initializers.ones":
initializers_no_dtype_comment,
"tf.keras.initializers.Constant":
initializers_no_dtype_comment,
"tf.keras.initializers.constant":
initializers_no_dtype_comment,
"tf.keras.initializers.VarianceScaling":
initializers_no_dtype_comment,
"tf.keras.initializers.Orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.Identity":
initializers_no_dtype_comment,
"tf.keras.initializers.identity":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.initializers.zeros":
initializers_no_dtype_comment,
"tf.zeros_initializer":
initializers_no_dtype_comment,
"tf.initializers.ones":
initializers_no_dtype_comment,
"tf.ones_initializer":
initializers_no_dtype_comment,
"tf.initializers.constant":
initializers_no_dtype_comment,
"tf.constant_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_uniform":
initializers_no_dtype_comment,
"tf.random_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_normal":
initializers_no_dtype_comment,
"tf.random_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.truncated_normal":
initializers_no_dtype_comment,
"tf.truncated_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.variance_scaling":
initializers_no_dtype_comment,
"tf.variance_scaling_initializer":
initializers_no_dtype_comment,
"tf.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.orthogonal_initializer":
initializers_no_dtype_comment,
"tf.initializers.identity":
initializers_no_dtype_comment,
"tf.glorot_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.glorot_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.losses.absolute_difference":
losses_comment,
"tf.losses.add_loss":
losses_comment,
"tf.losses.compute_weighted_loss":
losses_comment,
"tf.losses.cosine_distance":
losses_comment,
"tf.losses.get_losses":
losses_comment,
"tf.losses.get_regularization_loss":
losses_comment,
"tf.losses.get_regularization_losses":
losses_comment,
"tf.losses.get_total_loss":
losses_comment,
"tf.losses.hinge_loss":
losses_comment,
"tf.losses.huber_loss":
losses_comment,
"tf.losses.log_loss":
losses_comment,
"tf.losses.mean_pairwise_squared_error":
losses_comment,
"tf.losses.mean_squared_error":
losses_comment,
"tf.losses.sigmoid_cross_entropy":
losses_comment,
"tf.losses.softmax_cross_entropy":
losses_comment,
"tf.losses.sparse_softmax_cross_entropy":
losses_comment,
"tf.metrics.accuracy":
metrics_comment,
"tf.metrics.auc":
metrics_comment,
"tf.metrics.average_precision_at_k":
metrics_comment,
"tf.metrics.false_negatives":
metrics_comment,
"tf.metrics.false_negatives_at_thresholds":
metrics_comment,
"tf.metrics.false_positives":
metrics_comment,
"tf.metrics.false_positives_at_thresholds":
metrics_comment,
"tf.metrics.mean":
metrics_comment,
"tf.metrics.mean_absolute_error":
metrics_comment,
"tf.metrics.mean_cosine_distance":
metrics_comment,
"tf.metrics.mean_iou":
metrics_comment,
"tf.metrics.mean_per_class_accuracy":
metrics_comment,
"tf.metrics.mean_relative_error":
metrics_comment,
"tf.metrics.mean_squared_error":
metrics_comment,
"tf.metrics.mean_tensor":
metrics_comment,
"tf.metrics.percentage_below":
metrics_comment,
"tf.metrics.precision":
metrics_comment,
"tf.metrics.precision_at_k":
metrics_comment,
"tf.metrics.precision_at_thresholds":
metrics_comment,
"tf.metrics.precision_at_top_k":
metrics_comment,
"tf.metrics.recall":
metrics_comment,
"tf.metrics.recall_at_k":
metrics_comment,
"tf.metrics.recall_at_thresholds":
metrics_comment,
"tf.metrics.recall_at_top_k":
metrics_comment,
"tf.metrics.root_mean_squared_error":
metrics_comment,
"tf.metrics.sensitivity_at_specificity":
metrics_comment,
"tf.metrics.sparse_average_precision_at_k":
metrics_comment,
"tf.metrics.sparse_precision_at_k":
metrics_comment,
"tf.metrics.specificity_at_sensitivity":
metrics_comment,
"tf.metrics.true_negatives":
metrics_comment,
"tf.metrics.true_negatives_at_thresholds":
metrics_comment,
"tf.metrics.true_positives":
metrics_comment,
"tf.metrics.true_positives_at_thresholds":
metrics_comment,
"tf.get_variable":
(ast_edits.WARNING,
"<function name> returns ResourceVariables by default in 2.0, "
"which have well-defined semantics and are stricter about shapes. "
"You can disable this behavior by passing use_resource=False, or "
"by calling tf.compat.v1.disable_resource_variables()."),
"tf.pywrap_tensorflow":
(ast_edits.ERROR,
"<function name> cannot be converted automatically. "
"`tf.pywrap_tensorflow` will not be distributed with "
"TensorFlow 2.0, please consider an alternative in public "
"TensorFlow APIs."),
"tf.contrib.distribute.MirroredStrategy":
contrib_mirrored_strategy_warning,
"tf.distribute.MirroredStrategy":
core_mirrored_strategy_warning,
"tf.contrib.distribute.OneDeviceStrategy":
contrib_one_device_strategy_warning,
"tf.contrib.distribute.TPUStrategy":
contrib_tpu_strategy_warning,
"tf.contrib.distribute.CollectiveAllReduceStrategy":
contrib_collective_strategy_warning,
"tf.contrib.distribute.ParameterServerStrategy":
contrib_ps_strategy_warning,
"tf.summary.FileWriter": summary_api_comment,
"tf.summary.FileWriterCache": summary_api_comment,
"tf.summary.Summary": summary_api_comment,
"tf.summary.audio": summary_api_comment,
"tf.summary.histogram": summary_api_comment,
"tf.summary.image": summary_api_comment,
"tf.summary.merge": summary_api_comment,
"tf.summary.merge_all": summary_api_comment,
"tf.summary.scalar": summary_api_comment,
"tf.summary.tensor_summary": summary_api_comment,
"tf.summary.text": summary_api_comment,
}
for symbol, replacement in all_renames_v2.addons_symbol_mappings.items():
warning = (
ast_edits.WARNING, (
"(Manual edit required) `{}` has been migrated to `{}` in "
"TensorFlow Addons. The API spec may have changed during the "
"migration. Please see https://github.com/tensorflow/addons "
"for more info.").format(symbol, replacement))
self.function_warnings[symbol] = warning
# Warnings that are emitted only if a specific arg is found.
self.function_arg_warnings = {
"tf.nn.conv1d": {
("use_cudnn_on_gpu", 4): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d": {
("use_cudnn_on_gpu", 4): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d_backprop_filter": {
("use_cudnn_on_gpu", 5): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d_backprop_input": {
("use_cudnn_on_gpu", 5): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.gradients": {
("colocate_gradients_with_ops", 4): (
ast_edits.INFO,
"tf.gradients no longer takes "
"'colocate_gradients_with_ops' argument, it behaves as if it "
"was set to True."),
},
"*.minimize": {
("colocate_gradients_with_ops", 5): (
ast_edits.INFO,
"Optimizer.minimize no longer takes "
"'colocate_gradients_with_ops' argument, it behaves as if it "
"was set to True."),
},
"*.compute_gradients": {
("colocate_gradients_with_ops", 4): (
ast_edits.INFO,
"Optimizer.compute_gradients no "
"longer takes 'colocate_gradients_with_ops' argument, it "
"behaves as if it was set to True."),
},
"tf.cond": {
("strict", 3): (
ast_edits.WARNING,
"tf.cond no longer takes 'strict' argument, it behaves as "
"if was set to True.")
},
"tf.contrib.summary.audio": {
("family", 4): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.create_file_writer": {
("name", 4): (
ast_edits.WARNING,
"tf.contrib.summary.create_file_writer() no longer supports "
"implicit writer re-use based on shared logdirs or resource "
"names; this call site passed a 'name' argument that has been "
"removed. The new tf.compat.v2.summary.create_file_writer() "
"replacement has a 'name' parameter but the semantics are "
"the usual ones to name the op itself and do not control "
"writer re-use; writers must be manually re-used if desired.")
},
"tf.contrib.summary.generic": {
("name", 0): (
ast_edits.WARNING,
"tf.contrib.summary.generic() takes a 'name' argument for the "
"op name that also determines the emitted tag (prefixed by any "
"active name scopes), but tf.compat.v2.summary.write(), which "
"replaces it, separates these into 'tag' and 'name' arguments. "
"The 'name' argument here has been converted to 'tag' to "
"preserve a meaningful tag, but any name scopes will not be "
"reflected in the tag without manual editing."),
("family", 3): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.histogram": {
("family", 2): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.image": {
("bad_color", 2): (
ast_edits.WARNING,
"tf.contrib.summary.image no longer takes the 'bad_color' "
"argument; caller must now preprocess if needed. This call "
"site specifies a bad_color argument so it cannot be converted "
"safely."),
("family", 4): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.scalar": {
("family", 2): contrib_summary_family_arg_comment,
},
"tf.image.resize": {
("align_corners",
3): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize."),
},
"tf.image.resize_bilinear": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_bilinear."),
},
"tf.image.resize_area": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_area."),
},
"tf.image.resize_bicubic": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_bicubic."),
},
"tf.image.resize_nearest_neighbor": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_nearest_neighbor."),
},
}
# Specially handled functions
# Each transformer is a callable which will be called with the arguments
# transformer(parent, node, full_name, name, logs)
# Where logs is a list to which (level, line, col, msg) tuples can be
# appended, full_name is the FQN of the function called (or None if that is
# unknown), name is the name of the function called (or None if that is
# unknown). node is an ast.Call node representing this function call, and
# parent is its parent in the AST.
# The function may modify node (but not parent), and must return
# - None, if nothing was modified
# - node, if node was modified in place (make sure to use
# pasta.ast_utils.replace_child to swap out children, otherwise formatting
# may get messy)
# - a replacement for node, if the whole call node was replaced. The caller
# will take care of changing parent.
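# A minimal sketch of a transformer that follows this contract (a
# hypothetical `_noop_transformer`, shown only to illustrate the signature
# and the return convention; it is not registered below):
#
#   def _noop_transformer(parent, node, full_name, name, logs):
#     logs.append((ast_edits.INFO, node.lineno, node.col_offset,
#                  "Saw a call to %s." % (full_name or name)))
#     return None  # nothing was modified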
canned_estimator_msg_optimizer = (
"tf.keras.optimizers.* only, so the call was converted to compat.v1. "
"Please note that tf.train.Optimizers have one-to-one correspondents "
"in tf.keras.optimizers, so you may be able to convert to the new "
"optimizers directly (See https://www.tensorflow.org/api_docs/python"
"/tf/keras/optimizers). Checkpoint compatibility is not guaranteed, "
"but there is a checkpoint converter tool that you can use.")
canned_estimator_msg = (
"no longer takes `input_layer_partitioner` arg, and it supports "
+ canned_estimator_msg_optimizer)
self.function_transformers = {
"*.make_initializable_iterator": _iterator_transformer,
"*.make_one_shot_iterator": _iterator_transformer,
"tf.nn.dropout": _dropout_transformer,
"tf.to_bfloat16": _cast_transformer,
"tf.to_complex128": _cast_transformer,
"tf.to_complex64": _cast_transformer,
"tf.to_double": _cast_transformer,
"tf.to_float": _cast_transformer,
"tf.to_int32": _cast_transformer,
"tf.to_int64": _cast_transformer,
"tf.nn.softmax_cross_entropy_with_logits":
_softmax_cross_entropy_with_logits_transformer,
"tf.image.extract_glimpse": _extract_glimpse_transformer,
"tf.image.resize_area": _image_resize_transformer,
"tf.image.resize_bicubic": _image_resize_transformer,
"tf.image.resize_bilinear": _image_resize_transformer,
"tf.image.resize_nearest_neighbor": _image_resize_transformer,
"tf.nn.fractional_avg_pool": _pool_seed_transformer,
"tf.nn.fractional_max_pool": _pool_seed_transformer,
"tf.name_scope": _name_scope_transformer,
# TODO(b/129398290)
# "tf.string_split": _string_split_transformer,
"tf.strings.split": _string_split_rtype_transformer,
"tf.estimator.BaselineEstimator":
functools.partial(
_rename_if_arg_found_transformer,
arg_name="optimizer",
message=("tf.estimator.BaselineEstimator supports "
+ canned_estimator_msg_optimizer),
),
"tf.estimator.BaselineClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["optimizer"],
message=("tf.estimator.BaselineClassifier supports "
+ canned_estimator_msg_optimizer),
),
"tf.estimator.BaselineRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message=("tf.estimator.BaselineRegressor supports "
+ canned_estimator_msg_optimizer),
),
"tf.estimator.DNNEstimator":
functools.partial(
_rename_if_any_arg_found_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.DNNEstimator no longer takes "
"input_layer_partitioner, so the call was converted to "
"compat.v1."
),
"tf.estimator.DNNClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.DNNClassifier " + canned_estimator_msg,
),
"tf.estimator.DNNRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.DNNRegressor " + canned_estimator_msg,
),
"tf.estimator.LinearEstimator":
functools.partial(
_rename_if_any_arg_found_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.LinearEstimator " + canned_estimator_msg,
),
"tf.estimator.LinearClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.LinearClassifier " + canned_estimator_msg,
),
"tf.estimator.LinearRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.LinearRegressor " + canned_estimator_msg,
),
"tf.estimator.DNNLinearCombinedEstimator":
functools.partial(
_rename_if_any_arg_found_transformer,
arg_names=[
"input_layer_partitioner", "dnn_optimizer",
"linear_optimizer"
],
message=("tf.estimator.DNNLinearCombinedEstimator "
+ canned_estimator_msg),
),
"tf.estimator.DNNLinearCombinedClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=[
"input_layer_partitioner", "dnn_optimizer",
"linear_optimizer"
],
message=("tf.estimator.DNNLinearCombinedClassifier "
+ canned_estimator_msg),
),
"tf.estimator.DNNLinearCombinedRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=[
"input_layer_partitioner", "dnn_optimizer",
"linear_optimizer"
],
message=("tf.estimator.DNNLinearCombinedRegressor "
+ canned_estimator_msg),
),
"tf.device": functools.partial(
_rename_if_arg_found_transformer, arg_name="device_name",
arg_ok_predicate=_is_ast_str, remove_if_ok=False,
message="tf.device no longer takes functions as an argument. "
"We could not determine that the argument value is a string, so "
"the call was converted to compat.v1."),
"tf.zeros_like": functools.partial(
_rename_if_arg_found_transformer, arg_name="optimize",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.zeros_like no longer takes an optimize argument, and "
"behaves as if optimize=True. This call site specifies something "
"other than optimize=True, so it was converted to compat.v1."),
"tf.ones_like": functools.partial(
_rename_if_arg_found_transformer, arg_name="optimize",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.ones_like no longer takes an optimize argument, and "
"behaves as if optimize=True. This call site specifies something "
"other than optimize=True, so it was converted to compat.v1."),
"tf.while_loop": functools.partial(
_rename_if_arg_found_transformer,
arg_name="return_same_structure",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.while_loop no longer takes 'return_same_structure' "
"argument and behaves as if return_same_structure=True. This call "
"site specifies something other than return_same_structure=True, "
"so it was converted to compat.v1."),
"tf.nn.ctc_beam_search_decoder": functools.partial(
_rename_if_arg_found_transformer,
arg_name="merge_repeated",
arg_ok_predicate=_is_ast_false, remove_if_ok=True,
message="tf.nn.ctc_beam_search_decoder no longer takes the "
"'merge_repeated' argument and behaves as if merge_repeated=False. "
"This call site specifies something other than "
"merge_repeated=False, so it was converted to compat.v1."),
"tf.nn.dilation2d": functools.partial(
_add_argument_transformer,
arg_name="data_format",
arg_value_ast=ast.Str("NHWC")),
"tf.nn.erosion2d": functools.partial(
_add_argument_transformer,
arg_name="data_format",
arg_value_ast=ast.Str("NHWC")),
"tf.contrib.summary.always_record_summaries": functools.partial(
_add_summary_recording_cond_transformer, cond="True"),
"tf.contrib.summary.audio": _add_summary_step_transformer,
"tf.contrib.summary.generic": _add_summary_step_transformer,
"tf.contrib.summary.histogram": _add_summary_step_transformer,
"tf.contrib.summary.image": _add_summary_step_transformer,
"tf.contrib.summary.never_record_summaries": functools.partial(
_add_summary_recording_cond_transformer, cond="False"),
"tf.contrib.summary.scalar": _add_summary_step_transformer,
"tf.contrib.layers.l1_regularizer":
_contrib_layers_l1_regularizer_transformer,
"tf.contrib.layers.l2_regularizer":
_contrib_layers_l2_regularizer_transformer,
"tf.contrib.layers.xavier_initializer":
_contrib_layers_xavier_initializer_transformer,
"tf.contrib.layers.xavier_initializer_conv2d":
_contrib_layers_xavier_initializer_transformer,
"tf.contrib.layers.variance_scaling_initializer":
_contrib_layers_variance_scaling_initializer_transformer,
"tf.initializers.uniform_unit_scaling":
_add_uniform_scaling_initializer_transformer,
"tf.uniform_unit_scaling_initializer":
_add_uniform_scaling_initializer_transformer,
"slim.l1_regularizer":
_contrib_layers_l1_regularizer_transformer,
"slim.l2_regularizer":
_contrib_layers_l2_regularizer_transformer,
"slim.xavier_initializer":
_contrib_layers_xavier_initializer_transformer,
"slim.xavier_initializer_conv2d":
_contrib_layers_xavier_initializer_transformer,
"slim.variance_scaling_initializer":
_contrib_layers_variance_scaling_initializer_transformer,
"tf.keras.models.save_model": functools.partial(
_add_argument_transformer,
arg_name="save_format",
arg_value_ast=ast.Str("h5")),
}
self.module_deprecations = module_deprecations_v2.MODULE_DEPRECATIONS
def preprocess(self, root_node):
visitor = ast_edits.PastaAnalyzeVisitor(TFAPIImportAnalysisSpec())
visitor.visit(root_node)
detections = set(visitor.results)
# If we have detected the presence of imports of specific TF versions,
# We want to modify the update spec to check only module deprecations
# and skip all other conversions.
if detections:
self.function_handle = {}
self.function_reorders = {}
self.function_keyword_renames = {}
self.symbol_renames = {}
self.function_warnings = {}
self.change_to_function = {}
self.module_deprecations = module_deprecations_v2.MODULE_DEPRECATIONS
self.function_transformers = {}
self.import_renames = {}
return visitor.log, visitor.warnings_and_errors
def clear_preprocessing(self):
self.__init__()
def _is_ast_str(node):
"""Determine whether this node represents a string."""
allowed_types = [ast.Str]
if hasattr(ast, "Bytes"):
allowed_types += [ast.Bytes]
if hasattr(ast, "JoinedStr"):
allowed_types += [ast.JoinedStr]
if hasattr(ast, "FormattedValue"):
allowed_types += [ast.FormattedValue]
return isinstance(node, tuple(allowed_types))
def _is_ast_true(node):
if hasattr(ast, "NameConstant"):
return isinstance(node, ast.NameConstant) and node.value is True
else:
return isinstance(node, ast.Name) and node.id == "True"
def _is_ast_false(node):
if hasattr(ast, "NameConstant"):
return isinstance(node, ast.NameConstant) and node.value is False
else:
return isinstance(node, ast.Name) and node.id == "False"
# Lots of unused arguments below, since these are called in a standard manner.
# pylint: disable=unused-argument
def _rename_if_arg_found_transformer(parent, node, full_name, name, logs,
arg_name=None,
arg_ok_predicate=None,
remove_if_ok=False,
message=None):
"""Replaces the given call with tf.compat.v1 if the given arg is found.
This requires the function to be called with all named args, so for using
this transformer, the function should also be added to the function reorders
so that positional arguments are converted to keyword arguments first.
If the arg is not found, the call site is left alone.
If the arg is found, and if arg_ok_predicate is given, it is called with
the ast Expression representing the argument value found. If it returns
True, the function is left alone.
If the arg is found, arg_ok_predicate is not None and returns ok, and
remove_if_ok is True, the argument is removed from the call.
Otherwise, `compat.v1` is inserted between tf and the function name.
Args:
parent: Parent of node.
node: ast.Call node to maybe modify.
full_name: full name of function to modify
name: name of function to modify
logs: list of logs to append to
arg_name: name of the argument to look for
arg_ok_predicate: predicate callable with the ast of the argument value,
returns whether the argument value is allowed.
remove_if_ok: remove the argument if present and ok as determined by
arg_ok_predicate.
message: message to print if a non-ok arg is found (and hence, the function
is renamed to its compat.v1 version).
Returns:
node, if it was modified, else None.
"""
# Check whether arg is there.
arg_present, arg_value = ast_edits.get_arg_value(node, arg_name)
if not arg_present:
return
# Check whether arg is problematic (and if not, maybe remove it).
if arg_ok_predicate and arg_ok_predicate(arg_value):
if remove_if_ok:
for i, kw in enumerate(node.keywords):
if kw.arg == arg_name:
node.keywords.pop(i)
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Removed argument %s for function %s" % (
arg_name, full_name or name)))
break
return node
else:
return
# All conditions met, insert v1 and log what we did.
# We must have a full name, so the func is an attribute.
new_name = full_name.replace("tf.", "tf.compat.v1.", 1)
node.func = ast_edits.full_name_node(new_name)
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Renaming %s to %s because argument %s is present. %s" %
(full_name, new_name, arg_name, message if message is not None else "")
))
return node
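# For instance, as registered in function_transformers above for
# tf.zeros_like (arg_name="optimize", arg_ok_predicate=_is_ast_true,
# remove_if_ok=True), this transformer roughly rewrites:
#   tf.zeros_like(t, optimize=True)   ->  tf.zeros_like(t)
#   tf.zeros_like(t, optimize=False)  ->  tf.compat.v1.zeros_like(t, optimize=False)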
def _add_argument_transformer(parent, node, full_name, name, logs,
arg_name, arg_value_ast):
"""Adds an argument (as a final kwarg arg_name=arg_value_ast)."""
node.keywords.append(ast.keyword(arg=arg_name, value=arg_value_ast))
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Adding argument '%s' to call to %s." % (pasta.dump(node.keywords[-1]),
full_name or name)
))
return node
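# For example, as registered above for tf.keras.models.save_model with
# arg_name="save_format" and arg_value_ast=ast.Str("h5"), this roughly turns
#   tf.keras.models.save_model(model, path)
# into
#   tf.keras.models.save_model(model, path, save_format='h5')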
def _iterator_transformer(parent, node, full_name, name, logs):
"""Transform iterator methods to compat function calls."""
# First, check that node.func.value is not already something we like
# (tf.compat.v1.data), or something which is handled in the rename
# (tf.data). This transformer only handles the method call to function call
# conversion.
if full_name and (full_name.startswith("tf.compat.v1.data") or
full_name.startswith("tf.data")):
return
# This should never happen, since we're only called for Attribute nodes.
if not isinstance(node.func, ast.Attribute):
return
# Transform from x.f(y) to tf.compat.v1.data.f(x, y)
# Fortunately, node.func.value should already have valid position info
node.args = [node.func.value] + node.args
node.func.value = ast_edits.full_name_node("tf.compat.v1.data")
logs.append((ast_edits.WARNING, node.lineno, node.col_offset,
"Changing dataset.%s() to tf.compat.v1.data.%s(dataset). "
"Please check this transformation.\n" % (name, name)))
return node
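# Illustrative rewrite performed by this transformer (roughly):
#   iterator = dataset.make_one_shot_iterator()
#     ->
#   iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)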
def _dropout_transformer(parent, node, full_name, name, logs):
"""Replace keep_prob with 1-rate."""
def _replace_keep_prob_node(parent, old_value):
"""Replaces old_value with 1-(old_value)."""
one = ast.Num(n=1)
one.lineno = 0
one.col_offset = 0
new_value = ast.BinOp(left=one, op=ast.Sub(),
right=old_value)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
# Put parentheses around keep_prob.value (and remove the old prefix/
# suffix, they should only be around new_value).
pasta.base.formatting.set(old_value, "prefix", "(")
pasta.base.formatting.set(old_value, "suffix", ")")
# Check if we have a keep_prob keyword arg
for keep_prob in node.keywords:
if keep_prob.arg == "keep_prob":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing keep_prob arg of tf.nn.dropout to rate\n"))
keep_prob.arg = "rate"
_replace_keep_prob_node(keep_prob, keep_prob.value)
return node
# Maybe it was a positional arg
if len(node.args) < 2:
logs.append((ast_edits.ERROR, node.lineno, node.col_offset,
"tf.nn.dropout called without arguments, so "
"automatic fix was disabled. tf.nn.dropout has changed "
"the semantics of the second argument."))
else:
_replace_keep_prob_node(node, node.args[1])
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing keep_prob arg of tf.nn.dropout to rate, and "
"recomputing value.\n"))
return node
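# Illustrative rewrites performed by this transformer (roughly):
#   tf.nn.dropout(x, keep_prob=0.75)  ->  tf.nn.dropout(x, rate=1 - (0.75))
#   tf.nn.dropout(x, 0.75)            ->  tf.nn.dropout(x, 1 - (0.75))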
def _cast_transformer(parent, node, full_name, name, logs):
"""Transforms to_int and to_float to cast(..., dtype=...)."""
# Find out the dtype to cast to from the function name
dtype_str = name[3:]
# Special cases where the full dtype is not given
if dtype_str == "float":
dtype_str = "float32"
elif dtype_str == "double":
dtype_str = "float64"
new_arg = ast.keyword(arg="dtype",
value=ast.Attribute(value=ast.Name(id="tf",
ctx=ast.Load()),
attr=dtype_str, ctx=ast.Load()))
# Ensures a valid transformation when a positional name arg is given
if len(node.args) == 2:
name_arg = ast.keyword(arg="name",
value=node.args[-1])
node.args = node.args[:-1]
node.keywords.append(name_arg)
# Python3 ast requires the args for the Attribute, but codegen will mess up
# the arg order if we just set them to 0.
new_arg.value.lineno = node.lineno
new_arg.value.col_offset = node.col_offset+100
node.keywords.append(new_arg)
if isinstance(node.func, ast.Attribute):
node.func.attr = "cast"
else:
assert isinstance(node.func, ast.Name)
node.func.id = "cast"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changed %s call to tf.cast(..., dtype=tf.%s)." % (full_name,
dtype_str)))
return node
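# Illustrative rewrites performed by this transformer (roughly):
#   tf.to_float(x)            ->  tf.cast(x, dtype=tf.float32)
#   tf.to_int32(x, "casted")  ->  tf.cast(x, name="casted", dtype=tf.int32)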
def _softmax_cross_entropy_with_logits_transformer(
parent, node, full_name, name, logs):
"""Wrap labels argument with stop_gradients."""
def _wrap_label(parent, old_value):
"""Wrap labels with tf.stop_gradient."""
already_stop_grad = (isinstance(old_value, ast.Call) and
isinstance(old_value.func, ast.Attribute) and
old_value.func.attr == "stop_gradient" and
isinstance(old_value.func.value, ast.Name) and
old_value.func.value.id == "tf")
if already_stop_grad:
return False
try:
new_value = ast.Call(
ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
[old_value], [])
except TypeError:
new_value = ast.Call(
ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
[old_value], [], None, None)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
return True
# Check if we have a labels keyword arg
for karg in node.keywords:
if karg.arg == "labels":
if _wrap_label(karg, karg.value):
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing labels arg of "
"tf.nn.softmax_cross_entropy_with_logits to "
"tf.stop_gradient(labels). Please check this "
"transformation.\n"))
return node
return node
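# Illustrative rewrite performed by this transformer (roughly):
#   tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=z)
#     ->
#   tf.nn.softmax_cross_entropy_with_logits(labels=tf.stop_gradient(y), logits=z)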
def _image_resize_transformer(parent, node, full_name, name, logs):
"""Transforms image.resize_* to image.resize(..., method=*, ...)."""
resize_method = name[7:].upper()
new_arg = ast.keyword(arg="method",
value=ast.Attribute(
value=ast.Attribute(
value=ast.Attribute(
value=ast.Name(id="tf", ctx=ast.Load()),
attr="image", ctx=ast.Load()),
attr="ResizeMethod", ctx=ast.Load()),
attr=resize_method, ctx=ast.Load()))
# Ensures a valid transformation when a positional name arg is given
if len(node.args) == 4:
pos_arg = ast.keyword(arg="preserve_aspect_ratio",
value=node.args[-1])
node.args = node.args[:-1]
node.keywords.append(pos_arg)
if len(node.args) == 3:
pos_arg = ast.keyword(arg="align_corners",
value=node.args[-1])
node.args = node.args[:-1]
new_keywords = []
for kw in node.keywords:
if kw.arg != "align_corners":
new_keywords.append(kw)
node.keywords = new_keywords
# Python3 ast requires the args for the Attribute, but codegen will mess up
# the arg order if we just set them to 0.
new_arg.value.lineno = node.lineno
new_arg.value.col_offset = node.col_offset+100
node.keywords.append(new_arg)
if isinstance(node.func, ast.Attribute):
node.func.attr = "resize"
else:
assert isinstance(node.func, ast.Name)
node.func.id = "resize"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changed %s call to tf.image.resize(..., "
"method=tf.image.ResizeMethod.%s)." % (full_name,
resize_method)))
return node
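# Illustrative rewrite performed by this transformer (roughly); any
# align_corners argument is dropped, which is covered by the separate arg
# warning above:
#   tf.image.resize_bilinear(images, size, align_corners=False)
#     ->
#   tf.image.resize(images, size, method=tf.image.ResizeMethod.BILINEAR)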
def _pool_seed_transformer(parent, node, full_name, name, logs):
"""Removes seed2 and deterministic, and adds non-zero seed if needed."""
# This requires that this function uses all kwargs (add to renames!).
seed_arg = None
deterministic = False
modified = False
new_keywords = []
for kw in node.keywords:
if sys.version_info[:2] >= (3, 5) and isinstance(kw, ast.Starred):
pass
elif kw.arg == "seed":
seed_arg = kw
elif kw.arg == "seed2" or kw.arg == "deterministic":
lineno = getattr(kw, "lineno", node.lineno)
col_offset = getattr(kw, "col_offset", node.col_offset)
logs.append((ast_edits.INFO, lineno, col_offset,
"Removed argument %s for function %s" % (
kw.arg, full_name or name)))
if kw.arg == "deterministic":
if not _is_ast_false(kw.value):
deterministic = True
modified = True
continue
new_keywords.append(kw)
if deterministic:
if seed_arg is None:
new_keywords.append(ast.keyword(arg="seed", value=ast.Num(42)))
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Adding seed=42 to call to %s since determinism was requested" % (
full_name or name)
))
else:
logs.append((
ast_edits.WARNING, node.lineno, node.col_offset,
"The deterministic argument is deprecated for %s, pass a "
"non-zero seed for determinism. The deterministic argument is "
"present, possibly not False, and the seed is already set. The "
"converter cannot determine whether it is nonzero, please check."
% (full_name or name)))
if modified:
node.keywords = new_keywords
return node
else:
return
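# Illustrative rewrite performed by this transformer (roughly, assuming the
# call has already been converted to keyword arguments):
#   tf.nn.fractional_max_pool(x, pooling_ratio=r, seed2=s, deterministic=True)
#     ->
#   tf.nn.fractional_max_pool(x, pooling_ratio=r, seed=42)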
def _extract_glimpse_transformer(parent, node, full_name, name, logs):
def _replace_uniform_noise_node(parent, old_value):
"""Replaces old_value with 'uniform' or 'guassian'."""
uniform = ast.Str(s="uniform")
gaussian = ast.Str(s="gaussian")
new_value = ast.IfExp(body=uniform, test=old_value, orelse=gaussian)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
# Put parentheses around noise.value.test (and remove the old prefix/
# suffix, they should only be around new_value.test), so that:
# "uniform" if (a if b else c) else "gaussian" is valid.
pasta.base.formatting.set(new_value.test, "prefix", "(")
pasta.base.formatting.set(new_value.test, "suffix", ")")
# Check if we have a uniform_noise keyword arg
for uniform_noise in node.keywords:
if uniform_noise.arg == "uniform_noise":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing uniform_noise arg of tf.image.extract_glimpse "
"to noise, and recomputing value. Please check this "
"transformation.\n"))
uniform_noise.arg = "noise"
value = "uniform" if uniform_noise.value else "gaussian"
_replace_uniform_noise_node(uniform_noise, uniform_noise.value)
return node
# Since `noise`/`uniform_noise` is optional arg, nothing needs to be
# done if len(node.args) < 6 (uniform_noise is the sixth positional arg).
if len(node.args) >= 6:
_replace_uniform_noise_node(node, node.args[5])
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing uniform_noise arg of tf.image.extract_glimpse to "
"noise, and recomputing value.\n"))
return node
def _add_summary_step_transformer(parent, node, full_name, name, logs):
"""Adds a step argument to the summary API call if not specified.
The inserted argument value is tf.compat.v1.train.get_or_create_global_step().
"""
for keyword_arg in node.keywords:
if keyword_arg.arg == "step":
return node
default_value = "tf.compat.v1.train.get_or_create_global_step()"
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
ast_value = pasta.parse(default_value)
node.keywords.append(ast.keyword(arg="step", value=ast_value))
logs.append((
ast_edits.WARNING, node.lineno, node.col_offset,
"Summary API writing function %s now requires a 'step' argument; "
"inserting default of %s." % (full_name or name, default_value)))
return node
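# Illustrative rewrite performed by this transformer (roughly; the rename to
# tf.compat.v2.summary.* happens separately through the symbol renames):
#   tf.contrib.summary.scalar("loss", loss)
#     ->
#   tf.contrib.summary.scalar("loss", loss,
#                             step=tf.compat.v1.train.get_or_create_global_step())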
def _add_summary_recording_cond_transformer(parent, node, full_name, name, logs,
cond):
"""Adds cond argument to tf.contrib.summary.xxx_record_summaries().
This is in anticipation of them being renamed to tf.summary.record_if(), which
requires the cond argument.
"""
node.args.append(pasta.parse(cond))
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Adding `%s` argument to %s in anticipation of it being renamed to "
"tf.compat.v2.summary.record_if()" % (cond, full_name or name)))
return node
def _add_loss_reduction_transformer(parent, node, full_name, name, logs):
"""Adds a loss_reduction argument if not specified.
The default value of the loss_reduction argument for tf.estimator.*Classifier
and tf.estimator.*Regressor changed to SUM_OVER_BATCH_SIZE, so we update
existing calls to use the old default value `tf.keras.losses.Reduction.SUM`.
Note: to apply this transformation, symbol must be added
to reordered_function_names above.
"""
for keyword_arg in node.keywords:
if keyword_arg.arg == "loss_reduction":
return node
default_value = "tf.keras.losses.Reduction.SUM"
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
ast_value = pasta.parse(default_value)
node.keywords.append(ast.keyword(arg="loss_reduction", value=ast_value))
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"%s: Default value of loss_reduction has been changed to "
"SUM_OVER_BATCH_SIZE; inserting old default value %s.\n"
% (full_name or name, default_value)))
return node
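# Illustrative rewrite performed by this transformer (roughly):
#   tf.estimator.LinearClassifier(feature_columns=cols)
#     ->
#   tf.estimator.LinearClassifier(feature_columns=cols,
#                                 loss_reduction=tf.keras.losses.Reduction.SUM)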
def _rename_if_any_arg_found_transformer(
parent,
node,
full_name,
name,
logs,
arg_names=None,
arg_ok_predicate=None,
remove_if_ok=False,
message=None):
"""Replaces the given call with tf.compat.v1 if any of the arg_names is found.
Args:
parent: Parent of node.
node: ast.Call node to modify.
full_name: full name of function to modify.
name: name of function to modify.
logs: list of logs to append to.
arg_names: list of names of the argument to look for.
arg_ok_predicate: predicate callable with the ast of the argument value,
returns whether the argument value is allowed.
remove_if_ok: remove the argument if present and ok as determined by
arg_ok_predicate.
message: message to print if a non-ok arg is found (and hence, the function
is renamed to its compat.v1 version).
Returns:
node, if it was modified, else None.
"""
for arg_name in arg_names:
rename_node = _rename_if_arg_found_transformer(parent, node,
full_name, name, logs,
arg_name, arg_ok_predicate,
remove_if_ok, message)
node = rename_node if rename_node else node
return node
def _rename_if_arg_found_and_add_loss_reduction_transformer(
parent,
node,
full_name,
name,
logs,
arg_names=None,
arg_ok_predicate=None,
remove_if_ok=False,
message=None):
"""Combination of _rename_if_arg_found and _add_loss_reduction transformers.
Args:
parent: Parent of node.
node: ast.Call node to maybe modify.
full_name: full name of function to modify
name: name of function to modify
logs: list of logs to append to
arg_names: list of names of the argument to look for
arg_ok_predicate: predicate callable with the ast of the argument value,
returns whether the argument value is allowed.
remove_if_ok: remove the argument if present and ok as determined by
arg_ok_predicate.
message: message to print if a non-ok arg is found (and hence, the function
is renamed to its compat.v1 version).
Returns:
node, if it was modified, else None.
"""
node = _add_loss_reduction_transformer(parent, node, full_name, name, logs)
for arg_name in arg_names:
rename_node = _rename_if_arg_found_transformer(parent, node, full_name,
name, logs, arg_name,
arg_ok_predicate,
remove_if_ok, message)
node = rename_node if rename_node else node
return node
def _add_uniform_scaling_initializer_transformer(
parent, node, full_name, name, logs):
"""Updates references to uniform_unit_scaling_initializer.
Transforms:
tf.uniform_unit_scaling_initializer(factor, seed, dtype) to
tf.compat.v1.keras.initializers.VarianceScaling(
scale=factor, distribution="uniform", seed=seed)
Note: to apply this transformation, symbol must be added
to reordered_function_names above.
"""
for keyword_arg in node.keywords:
if keyword_arg.arg == "factor":
keyword_arg.arg = "scale"
distribution_value = "\"uniform\""
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
ast_value = pasta.parse(distribution_value)
node.keywords.append(ast.keyword(arg="distribution", value=ast_value))
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "VarianceScaling"
return node
def _contrib_layers_xavier_initializer_transformer(
parent, node, full_name, name, logs):
"""Updates references to contrib.layers.xavier_initializer.
Transforms:
tf.contrib.layers.xavier_initializer(uniform, seed, dtype) to
tf.compat.v1.keras.initializers.VarianceScaling(
scale=1.0, mode="fan_avg",
distribution=("uniform" if uniform else "truncated_normal"),
seed=seed, dtype=dtype)
Returns: The new node
"""
def _get_distribution(old_value):
"""Returns an AST matching the following:
("uniform" if (old_value) else "truncated_normal")
"""
dist = pasta.parse("\"uniform\" if old_value else \"truncated_normal\"")
ifexpr = dist.body[0].value
pasta.ast_utils.replace_child(ifexpr, ifexpr.test, old_value)
pasta.base.formatting.set(dist, "prefix", "(")
pasta.base.formatting.set(dist, "suffix", ")")
return dist
found_distribution = False
for keyword_arg in node.keywords:
if keyword_arg.arg == "uniform":
found_distribution = True
keyword_arg.arg = "distribution"
old_value = keyword_arg.value
new_value = _get_distribution(keyword_arg.value)
pasta.ast_utils.replace_child(keyword_arg, old_value, new_value)
pasta.base.formatting.set(keyword_arg.value, "prefix", "(")
pasta.base.formatting.set(keyword_arg.value, "suffix", ")")
new_keywords = []
scale = pasta.parse("1.0")
new_keywords.append(ast.keyword(arg="scale", value=scale))
mode = pasta.parse("\"fan_avg\"")
new_keywords.append(ast.keyword(arg="mode", value=mode))
if len(node.args) >= 1:
found_distribution = True
dist = _get_distribution(node.args[0])
new_keywords.append(ast.keyword(arg="distribution", value=dist))
if not found_distribution:
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
uniform_dist = pasta.parse("\"uniform\"")
new_keywords.append(ast.keyword(arg="distribution", value=uniform_dist))
if len(node.args) >= 2:
new_keywords.append(ast.keyword(arg="seed", value=node.args[1]))
if len(node.args) >= 3:
new_keywords.append(ast.keyword(arg="dtype", value=node.args[2]))
node.args = []
node.keywords = new_keywords + node.keywords
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "VarianceScaling"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing tf.contrib.layers xavier initializer"
" to a tf.compat.v1.keras.initializers.VarianceScaling and"
" converting arguments.\n"))
return node
def _contrib_layers_variance_scaling_initializer_transformer(
parent, node, full_name, name, logs):
"""Updates references to contrib.layers.variance_scaling_initializer.
Transforms:
tf.contrib.layers.variance_scaling_initializer(
factor, mode, uniform, seed, dtype
) to
tf.compat.v1.keras.initializers.VarianceScaling(
scale=factor, mode=mode.lower(),
distribution=("uniform" if uniform else "truncated_normal"),
seed=seed, dtype=dtype)
It also handles the case where no factor is provided, in which case scale
is set to 2.0 to match contrib's default instead of tf.keras.initializers'
default of 1.0.
"""
def _replace_distribution(parent, old_value):
"""Replaces old_value: ("uniform" if (old_value) else "truncated_normal")"""
new_value = pasta.parse(
"\"uniform\" if old_value else \"truncated_normal\"")
ifexpr = new_value.body[0].value
pasta.ast_utils.replace_child(ifexpr, ifexpr.test, old_value)
pasta.ast_utils.replace_child(parent, old_value, new_value)
pasta.base.formatting.set(new_value, "prefix", "(")
pasta.base.formatting.set(new_value, "suffix", ")")
def _replace_mode(parent, old_value):
"""Replaces old_value with (old_value).lower()."""
new_value = pasta.parse("mode.lower()")
mode = new_value.body[0].value.func
pasta.ast_utils.replace_child(mode, mode.value, old_value)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
# Put parentheses around keep_prob.value (and remove the old prefix/
# suffix, they should only be around new_value).
pasta.base.formatting.set(old_value, "prefix", "(")
pasta.base.formatting.set(old_value, "suffix", ")")
# Need to keep track of scale because slim & keras
# have different defaults
found_scale = False
for keyword_arg in node.keywords:
if keyword_arg.arg == "factor":
keyword_arg.arg = "scale"
found_scale = True
if keyword_arg.arg == "mode":
_replace_mode(keyword_arg, keyword_arg.value)
if keyword_arg.arg == "uniform":
keyword_arg.arg = "distribution"
_replace_distribution(keyword_arg, keyword_arg.value)
# Handle any detected positional arguments
if len(node.args) >= 1:
found_scale = True
if len(node.args) >= 2:
_replace_mode(node, node.args[1])
if len(node.args) >= 3:
_replace_distribution(node, node.args[2])
# If no scale was provided, make tf 2.0 use slim's default factor
if not found_scale:
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
scale_value = pasta.parse("2.0")
node.keywords = ([ast.keyword(arg="scale", value=scale_value)]
+ node.keywords)
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "VarianceScaling"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing tf.contrib.layers.variance_scaling_initializer"
" to a tf.compat.v1.keras.initializers.VarianceScaling and"
" converting arguments.\n"))
return node
def _contrib_layers_l1_regularizer_transformer(
parent, node, full_name, name, logs):
"""Replace slim l1 regularizer with Keras one.
This entails renaming the 'scale' arg to 'l' and dropping any
provided scope arg.
"""
# Check if we have a scale or scope keyword arg
scope_keyword = None
for keyword in node.keywords:
if keyword.arg == "scale":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Renaming scale arg of regularizer\n"))
keyword.arg = "l"
if keyword.arg == "scope":
scope_keyword = keyword
# Remove the scope keyword or arg if it is present
if scope_keyword:
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Dropping scope arg from tf.contrib.layers.l1_regularizer,"
" because it is unsupported in tf.keras.regularizers.l1\n"))
node.keywords.remove(scope_keyword)
if len(node.args) > 1:
node.args = node.args[:1]
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Dropping scope arg from tf.contrib.layers.l1_regularizer,"
" because it is unsupported in tf.keras.regularizers.l1\n"))
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.keras.regularizers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "l1"
return node
def _contrib_layers_l2_regularizer_transformer(
parent, node, full_name, name, logs):
"""Replace slim l2 regularizer with Keras one, with l=0.5*scale.
Also drops the scope argument.
"""
def _replace_scale_node(parent, old_value):
"""Replaces old_value with 0.5*(old_value)."""
half = ast.Num(n=0.5)
half.lineno = 0
half.col_offset = 0
new_value = ast.BinOp(left=half, op=ast.Mult(),
right=old_value)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
# Put parentheses around scale.value (and remove the old prefix/
# suffix, they should only be around new_value).
pasta.base.formatting.set(old_value, "prefix", "(")
pasta.base.formatting.set(old_value, "suffix", ")")
# Check if we have a scale or scope keyword arg
scope_keyword = None
for keyword in node.keywords:
if keyword.arg == "scale":
keyword.arg = "l"
_replace_scale_node(keyword, keyword.value)
if keyword.arg == "scope":
scope_keyword = keyword
# Maybe it was a positional arg
if len(node.args) >= 1:
_replace_scale_node(node, node.args[0])
# Remove the scope keyword or arg if it is present
if scope_keyword:
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Dropping scope arg from tf.contrib.layers.l2_regularizer,"
" because it is unsupported in tf.keras.regularizers.l2\n"))
node.keywords.remove(scope_keyword)
if len(node.args) > 1:
node.args = node.args[:1]
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Dropping scope arg from tf.contrib.layers.l2_regularizer,"
" because it is unsupported in tf.keras.regularizers.l2\n"))
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Multiplying scale arg of tf.contrib.layers.l2_regularizer"
" by half to what tf.keras.regularizers.l2 expects.\n"))
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.keras.regularizers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "l2"
return node
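# Illustrative sketch (added comment, not part of the original converter):
# the transformer above rewrites, for example,
#   tf.contrib.layers.l2_regularizer(scale=0.1)
# into roughly
#   tf.keras.regularizers.l2(l=0.5 * (0.1))
# so the halved scale matches what tf.keras.regularizers.l2 expects.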
def _name_scope_transformer(parent, node, full_name, name, logs):
"""Fix name scope invocation to use 'default_name' and omit 'values' args."""
name_found, name = ast_edits.get_arg_value(node, "name", 0)
default_found, default_name = ast_edits.get_arg_value(node, "default_name", 1)
# If an actual name was given...
if name_found and pasta.dump(name) != "None":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"`name` passed to `name_scope`. Because you may be re-entering"
                 " an existing scope, it is not safe to convert automatically;"
                 " the v2 name_scope does not support re-entering scopes by"
" name.\n"))
# Rename to compat.v1
new_name = "tf.compat.v1.name_scope"
logs.append((ast_edits.INFO, node.func.lineno, node.func.col_offset,
"Renamed %r to %r" % (full_name, new_name)))
new_name_node = ast_edits.full_name_node(new_name, node.func.ctx)
ast.copy_location(new_name_node, node.func)
pasta.ast_utils.replace_child(node, node.func, new_name_node)
return node
if default_found:
# New name scope doesn't have name, but it has a default name. We use
# name=default_name, and values can be dropped (it's only for
# error reporting and useless outside of graph mode).
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Using default_name as name in call to name_scope.\n"))
# Remove all args other than name
node.args = []
node.keywords = [ast.keyword(arg="name", value=default_name)]
return node
logs.append((ast_edits.ERROR, node.lineno, node.col_offset,
"name_scope call with neither name nor default_name cannot be "
"converted properly."))
def _rename_to_compat_v1(node, full_name, logs, reason):
new_name = full_name.replace("tf.", "tf.compat.v1.", 1)
return _rename_func(node, full_name, new_name, logs, reason)
def _rename_func(node, full_name, new_name, logs, reason):
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Renamed %r to %r: %s" % (full_name, new_name, reason)))
new_name_node = ast_edits.full_name_node(new_name, node.func.ctx)
ast.copy_location(new_name_node, node.func)
pasta.ast_utils.replace_child(node, node.func, new_name_node)
return node
def _string_split_transformer(parent, node, full_name, name, logs):
"""Update tf.string_split arguments: skip_empty, sep, result_type, source."""
# Check the skip_empty parameter: if not false, then use compat.v1.
for i, kw in enumerate(node.keywords):
if kw.arg == "skip_empty":
if _is_ast_false(kw.value):
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"removed argument skip_empty for tf.string_split."))
node.keywords.pop(i)
break
else:
return _rename_to_compat_v1(
node, full_name, logs, "tf.string_split's replacement no longer "
"takes the skip_empty argument.")
# Check the sep parameter: if it's definitely an empty string, use
# tf.strings.bytes_split(). If we can't tell, then use compat.v1.
found_sep = False
for i, kw in enumerate(node.keywords):
if kw.arg == "sep":
found_sep = True
if isinstance(kw.value, ast.Str):
if kw.value.s == "":
node = _rename_func(
node, full_name, "tf.strings.bytes_split", logs,
"Splitting bytes is not handled by tf.strings.bytes_split().")
node.keywords.pop(i)
else:
return _rename_to_compat_v1(
node, full_name, logs,
"The semantics for tf.string_split's sep parameter have changed "
"when sep is the empty string; but sep is not a string literal, "
"so we can't tell if it's an empty string.")
if not found_sep:
return _rename_to_compat_v1(
node, full_name, logs,
"The semantics for tf.string_split's sep parameter have changed "
"when sep unspecified: it now splits on all whitespace, not just "
"the space character.")
# Check the result_type parameter
return _string_split_rtype_transformer(parent, node, full_name, name, logs)
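# Illustrative sketch (added comment, not part of the original converter):
# for a call like
#   tf.string_split(source=x, sep=",", skip_empty=False)
# the argument handling above drops skip_empty=False, keeps sep, renames
# source to input, and appends .to_sparse() (via the result_type transformer
# below) because strings.split now returns a RaggedTensor; the symbol rename
# itself is handled by the renames map, and calls whose new semantics cannot
# be verified fall back to tf.compat.v1.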
def _string_split_rtype_transformer(parent, node, full_name, name, logs):
"""Update tf.strings.split arguments: result_type, source."""
# Remove the "result_type" argument.
need_to_sparse = True
for i, kw in enumerate(node.keywords):
if kw.arg == "result_type":
if (isinstance(kw.value, ast.Str) and
kw.value.s in ("RaggedTensor", "SparseTensor")):
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Removed argument result_type=%r for function %s" %
(kw.value.s, full_name or name)))
node.keywords.pop(i)
if kw.value.s == "RaggedTensor":
need_to_sparse = False
else:
return _rename_to_compat_v1(
node, full_name, logs,
"%s no longer takes the result_type parameter." % full_name)
break
for i, kw in enumerate(node.keywords):
if kw.arg == "source":
kw.arg = "input"
# If necessary, add a call to .to_sparse() to convert the output of
# strings.split from a RaggedTensor to a SparseTensor.
if need_to_sparse:
if (isinstance(parent, ast.Attribute) and parent.attr == "to_sparse"):
return # Prevent infinite recursion (since child nodes are transformed)
logs.append(
(ast_edits.INFO, node.lineno, node.col_offset,
"Adding call to RaggedTensor.to_sparse() to result of strings.split, "
"since it now returns a RaggedTensor."))
node = ast.Attribute(value=copy.deepcopy(node), attr="to_sparse")
try:
node = ast.Call(node, [], [])
except TypeError:
node = ast.Call(node, [], [], None, None)
return node
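# Illustrative sketch (added comment, not part of the original converter):
# a call like
#   tf.strings.split(source=x, result_type="RaggedTensor")
# becomes roughly tf.strings.split(input=x), while result_type="SparseTensor"
# (or omitting result_type entirely) additionally gets .to_sparse() appended
# to preserve the old SparseTensor-returning behavior.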
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/compatibility/tf_upgrade_v2.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf upgrader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import six
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade
class TestUpgrade(test_util.TensorFlowTestCase):
"""Test various APIs that have been changed in 1.0.
We also test whether a converted file is executable. test_file_v0_11.py
aims to exhaustively test that API changes are convertible and actually
work when run with current TensorFlow.
"""
def _upgrade(self, old_file_text):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade.TFAPIChangeSpec())
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return count, report, errors, out_file.getvalue()
def testParseError(self):
_, report, unused_errors, unused_new_text = self._upgrade(
"import tensorflow as tf\na + \n")
self.assertTrue(report.find("Failed to parse") != -1)
def testReport(self):
text = "tf.mul(a, b)\n"
_, report, unused_errors, unused_new_text = self._upgrade(text)
    # This is not a complete test, but it is a sanity check that a report
    # is being generated with some information.
self.assertTrue(report.find("Renamed function `tf.mul` to `tf.multiply`"))
def testRename(self):
text = "tf.mul(a, tf.sub(b, c))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.multiply(a, tf.subtract(b, c))\n")
def testRenamePack(self):
text = "tf.pack(a)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.stack(a)\n")
text = "tf.unpack(a)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.unstack(a)\n")
def testReorder(self):
text = "tf.concat(a, b)\ntf.split(a, b, c)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.concat(axis=a, values=b)\n"
"tf.split(axis=a, num_or_size_splits=b, value=c)\n")
def testConcatReorderWithKeywordArgs(self):
text = "tf.concat(concat_dim=a, values=b)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.concat(axis=a, values=b)\n")
text = "tf.concat(values=b, concat_dim=a)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.concat(values=b, axis=a)\n")
text = "tf.concat(a, values=b)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.concat(axis=a, values=b)\n")
def testConcatReorderNested(self):
text = "tf.concat(a, tf.concat(c, d))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text, "tf.concat(axis=a, values=tf.concat(axis=c, values=d))\n")
def testInitializers(self):
text = ("tf.zeros_initializer;tf.zeros_initializer ()\n"
"tf.ones_initializer;tf.ones_initializer ()\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text, "tf.zeros_initializer();tf.zeros_initializer ()\n"
"tf.ones_initializer();tf.ones_initializer ()\n")
def testKeyword(self):
text = "tf.reduce_any(a, reduction_indices=[1, 2])\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.reduce_any(a, axis=[1, 2])\n")
def testComplexExpression(self):
text = "(foo + bar)[a].word()"
_ = self._upgrade(text)
def testReverse(self):
text = "tf.reverse(a, b)\n"
_, unused_report, errors, new_text = self._upgrade(text)
    self.assertEqual(text, new_text)
self.assertIn("tf.reverse requires manual check", errors[0])
def testListComprehension(self):
def _test(input, output): # pylint: disable=redefined-builtin
_, unused_report, errors, new_text = self._upgrade(input)
self.assertEqual(new_text, output)
_test("tf.concat(0, \t[x for x in y])\n",
"tf.concat(axis=0, \tvalues=[x for x in y])\n")
_test("tf.concat(0,[x for x in y])\n",
"tf.concat(axis=0,values=[x for x in y])\n")
_test("tf.concat(0,[\nx for x in y])\n",
"tf.concat(axis=0,values=[\nx for x in y])\n")
_test("tf.concat(0,[\n \tx for x in y])\n",
"tf.concat(axis=0,values=[\n \tx for x in y])\n")
# TODO(aselle): Explicitly not testing command line interface and process_tree
# for now, since this is a one off utility.
class TestUpgradeFiles(test_util.TensorFlowTestCase):
def testInplace(self):
"""Check to make sure we don't have a file system race."""
temp_file = tempfile.NamedTemporaryFile("w", delete=False)
original = "tf.mul(a, b)\n"
upgraded = "tf.multiply(a, b)\n"
temp_file.write(original)
temp_file.close()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade.TFAPIChangeSpec())
upgrader.process_file(temp_file.name, temp_file.name)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
if __name__ == "__main__":
test_lib.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/compatibility/tf_upgrade_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""List of renames to apply when converting from TF 1.0 to TF 2.0.
THIS FILE IS AUTOGENERATED: To update, please run:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
This file should be updated whenever a function is added to
self.reordered_function_names in tf_upgrade_v2.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
reorders = {
'tf.argmax': ['input', 'axis', 'name', 'dimension', 'output_type'],
'tf.argmin': ['input', 'axis', 'name', 'dimension', 'output_type'],
'tf.batch_to_space': ['input', 'crops', 'block_size', 'name', 'block_shape'],
'tf.boolean_mask': ['tensor', 'mask', 'name', 'axis'],
'tf.cond': ['pred', 'true_fn', 'false_fn', 'strict', 'name', 'fn1', 'fn2'],
'tf.confusion_matrix': ['labels', 'predictions', 'num_classes', 'dtype', 'name', 'weights'],
'tf.convert_to_tensor': ['value', 'dtype', 'name', 'preferred_dtype', 'dtype_hint'],
'tf.data.experimental.RaggedTensorStructure': ['dtype', 'shape', 'ragged_rank'],
'tf.data.experimental.SparseTensorStructure': ['dtype', 'shape'],
'tf.data.experimental.TensorArrayStructure': ['dtype', 'element_shape', 'dynamic_size', 'infer_shape'],
'tf.data.experimental.TensorStructure': ['dtype', 'shape'],
'tf.decode_csv': ['records', 'record_defaults', 'field_delim', 'use_quote_delim', 'name', 'na_value', 'select_cols'],
'tf.depth_to_space': ['input', 'block_size', 'name', 'data_format'],
'tf.estimator.BaselineClassifier': ['model_dir', 'n_classes', 'weight_column', 'label_vocabulary', 'optimizer', 'config', 'loss_reduction'],
'tf.estimator.BaselineRegressor': ['model_dir', 'label_dimension', 'weight_column', 'optimizer', 'config', 'loss_reduction'],
'tf.estimator.DNNClassifier': ['hidden_units', 'feature_columns', 'model_dir', 'n_classes', 'weight_column', 'label_vocabulary', 'optimizer', 'activation_fn', 'dropout', 'input_layer_partitioner', 'config', 'warm_start_from', 'loss_reduction', 'batch_norm'],
'tf.estimator.DNNLinearCombinedClassifier': ['model_dir', 'linear_feature_columns', 'linear_optimizer', 'dnn_feature_columns', 'dnn_optimizer', 'dnn_hidden_units', 'dnn_activation_fn', 'dnn_dropout', 'n_classes', 'weight_column', 'label_vocabulary', 'input_layer_partitioner', 'config', 'warm_start_from', 'loss_reduction', 'batch_norm', 'linear_sparse_combiner'],
'tf.estimator.DNNLinearCombinedRegressor': ['model_dir', 'linear_feature_columns', 'linear_optimizer', 'dnn_feature_columns', 'dnn_optimizer', 'dnn_hidden_units', 'dnn_activation_fn', 'dnn_dropout', 'label_dimension', 'weight_column', 'input_layer_partitioner', 'config', 'warm_start_from', 'loss_reduction', 'batch_norm', 'linear_sparse_combiner'],
'tf.estimator.DNNRegressor': ['hidden_units', 'feature_columns', 'model_dir', 'label_dimension', 'weight_column', 'optimizer', 'activation_fn', 'dropout', 'input_layer_partitioner', 'config', 'warm_start_from', 'loss_reduction', 'batch_norm'],
'tf.estimator.LinearClassifier': ['feature_columns', 'model_dir', 'n_classes', 'weight_column', 'label_vocabulary', 'optimizer', 'config', 'partitioner', 'warm_start_from', 'loss_reduction', 'sparse_combiner'],
'tf.estimator.LinearRegressor': ['feature_columns', 'model_dir', 'label_dimension', 'weight_column', 'optimizer', 'config', 'partitioner', 'warm_start_from', 'loss_reduction', 'sparse_combiner'],
'tf.feature_column.categorical_column_with_vocabulary_file': ['key', 'vocabulary_file', 'vocabulary_size', 'num_oov_buckets', 'default_value', 'dtype'],
'tf.gradients': ['ys', 'xs', 'grad_ys', 'name', 'colocate_gradients_with_ops', 'gate_gradients', 'aggregation_method', 'stop_gradients', 'unconnected_gradients'],
'tf.hessians': ['ys', 'xs', 'name', 'colocate_gradients_with_ops', 'gate_gradients', 'aggregation_method'],
'tf.image.sample_distorted_bounding_box': ['image_size', 'bounding_boxes', 'seed', 'seed2', 'min_object_covered', 'aspect_ratio_range', 'area_range', 'max_attempts', 'use_image_if_no_bounding_boxes', 'name'],
'tf.initializers.uniform_unit_scaling': ['factor', 'seed', 'dtype'],
'tf.io.decode_csv': ['records', 'record_defaults', 'field_delim', 'use_quote_delim', 'name', 'na_value', 'select_cols'],
'tf.io.parse_example': ['serialized', 'features', 'name', 'example_names'],
'tf.io.parse_single_example': ['serialized', 'features', 'name', 'example_names'],
'tf.io.serialize_many_sparse': ['sp_input', 'name', 'out_type'],
'tf.io.serialize_sparse': ['sp_input', 'name', 'out_type'],
'tf.linalg.norm': ['tensor', 'ord', 'axis', 'keepdims', 'name', 'keep_dims'],
'tf.math.argmax': ['input', 'axis', 'name', 'dimension', 'output_type'],
'tf.math.argmin': ['input', 'axis', 'name', 'dimension', 'output_type'],
'tf.math.confusion_matrix': ['labels', 'predictions', 'num_classes', 'dtype', 'name', 'weights'],
'tf.math.in_top_k': ['predictions', 'targets', 'k', 'name'],
'tf.math.reduce_all': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_any': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_logsumexp': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_max': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_mean': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_min': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_prod': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_sum': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.multinomial': ['logits', 'num_samples', 'seed', 'name', 'output_dtype'],
'tf.nn.avg_pool': ['value', 'ksize', 'strides', 'padding', 'data_format', 'name', 'input'],
'tf.nn.avg_pool2d': ['value', 'ksize', 'strides', 'padding', 'data_format', 'name', 'input'],
'tf.nn.conv1d': ['value', 'filters', 'stride', 'padding', 'use_cudnn_on_gpu', 'data_format', 'name', 'input', 'dilations'],
'tf.nn.conv2d': ['input', 'filter', 'strides', 'padding', 'use_cudnn_on_gpu', 'data_format', 'dilations', 'name', 'filters'],
'tf.nn.conv2d_backprop_input': ['input_sizes', 'filter', 'out_backprop', 'strides', 'padding', 'use_cudnn_on_gpu', 'data_format', 'dilations', 'name', 'filters'],
'tf.nn.convolution': ['input', 'filter', 'padding', 'strides', 'dilation_rate', 'name', 'data_format', 'filters', 'dilations'],
'tf.nn.crelu': ['features', 'name', 'axis'],
'tf.nn.ctc_beam_search_decoder': ['inputs', 'sequence_length', 'beam_width', 'top_paths', 'merge_repeated'],
'tf.nn.depth_to_space': ['input', 'block_size', 'name', 'data_format'],
'tf.nn.depthwise_conv2d': ['input', 'filter', 'strides', 'padding', 'rate', 'name', 'data_format', 'dilations'],
'tf.nn.embedding_lookup': ['params', 'ids', 'partition_strategy', 'name', 'validate_indices', 'max_norm'],
'tf.nn.embedding_lookup_sparse': ['params', 'sp_ids', 'sp_weights', 'partition_strategy', 'name', 'combiner', 'max_norm'],
'tf.nn.fractional_avg_pool': ['value', 'pooling_ratio', 'pseudo_random', 'overlapping', 'deterministic', 'seed', 'seed2', 'name'],
'tf.nn.fractional_max_pool': ['value', 'pooling_ratio', 'pseudo_random', 'overlapping', 'deterministic', 'seed', 'seed2', 'name'],
'tf.nn.in_top_k': ['predictions', 'targets', 'k', 'name'],
'tf.nn.max_pool': ['value', 'ksize', 'strides', 'padding', 'data_format', 'name', 'input'],
'tf.nn.moments': ['x', 'axes', 'shift', 'name', 'keep_dims', 'keepdims'],
'tf.nn.pool': ['input', 'window_shape', 'pooling_type', 'padding', 'dilation_rate', 'strides', 'name', 'data_format', 'dilations'],
'tf.nn.separable_conv2d': ['input', 'depthwise_filter', 'pointwise_filter', 'strides', 'padding', 'rate', 'name', 'data_format', 'dilations'],
'tf.nn.softmax_cross_entropy_with_logits': ['_sentinel', 'labels', 'logits', 'dim', 'name', 'axis'],
'tf.nn.space_to_batch': ['input', 'paddings', 'block_size', 'name', 'block_shape'],
'tf.nn.space_to_depth': ['input', 'block_size', 'name', 'data_format'],
'tf.nn.weighted_moments': ['x', 'axes', 'frequency_weights', 'name', 'keep_dims', 'keepdims'],
'tf.norm': ['tensor', 'ord', 'axis', 'keepdims', 'name', 'keep_dims'],
'tf.pad': ['tensor', 'paddings', 'mode', 'name', 'constant_values'],
'tf.parse_example': ['serialized', 'features', 'name', 'example_names'],
'tf.parse_single_example': ['serialized', 'features', 'name', 'example_names'],
'tf.quantize_v2': ['input', 'min_range', 'max_range', 'T', 'mode', 'name', 'round_mode'],
'tf.random.multinomial': ['logits', 'num_samples', 'seed', 'name', 'output_dtype'],
'tf.random.poisson': ['lam', 'shape', 'dtype', 'seed', 'name'],
'tf.random_poisson': ['lam', 'shape', 'dtype', 'seed', 'name'],
'tf.reduce_all': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_any': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_join': ['inputs', 'axis', 'keep_dims', 'separator', 'name', 'reduction_indices', 'keepdims'],
'tf.reduce_logsumexp': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_max': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_mean': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_min': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_prod': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_sum': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reverse_sequence': ['input', 'seq_lengths', 'seq_axis', 'batch_axis', 'name', 'seq_dim', 'batch_dim'],
'tf.serialize_many_sparse': ['sp_input', 'name', 'out_type'],
'tf.serialize_sparse': ['sp_input', 'name', 'out_type'],
'tf.shape': ['input', 'name', 'out_type'],
'tf.size': ['input', 'name', 'out_type'],
'tf.space_to_batch': ['input', 'paddings', 'block_size', 'name', 'block_shape'],
'tf.space_to_depth': ['input', 'block_size', 'name', 'data_format'],
'tf.sparse.add': ['a', 'b', 'threshold', 'thresh'],
'tf.sparse.concat': ['axis', 'sp_inputs', 'name', 'expand_nonconcat_dim', 'concat_dim', 'expand_nonconcat_dims'],
'tf.sparse.reduce_max': ['sp_input', 'axis', 'keepdims', 'reduction_axes', 'keep_dims'],
'tf.sparse.segment_mean': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse.segment_sqrt_n': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse.segment_sum': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse.split': ['keyword_required', 'sp_input', 'num_split', 'axis', 'name', 'split_dim'],
'tf.sparse_add': ['a', 'b', 'threshold', 'thresh'],
'tf.sparse_concat': ['axis', 'sp_inputs', 'name', 'expand_nonconcat_dim', 'concat_dim', 'expand_nonconcat_dims'],
'tf.sparse_matmul': ['a', 'b', 'transpose_a', 'transpose_b', 'a_is_sparse', 'b_is_sparse', 'name'],
'tf.sparse_reduce_max': ['sp_input', 'axis', 'keepdims', 'reduction_axes', 'keep_dims'],
'tf.sparse_segment_mean': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse_segment_sqrt_n': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse_segment_sum': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse_split': ['keyword_required', 'sp_input', 'num_split', 'axis', 'name', 'split_dim'],
'tf.strings.length': ['input', 'name', 'unit'],
'tf.strings.reduce_join': ['inputs', 'axis', 'keep_dims', 'separator', 'name', 'reduction_indices', 'keepdims'],
'tf.strings.substr': ['input', 'pos', 'len', 'name', 'unit'],
'tf.substr': ['input', 'pos', 'len', 'name', 'unit'],
'tf.test.assert_equal_graph_def': ['actual', 'expected', 'checkpoint_v2', 'hash_table_shared_name'],
'tf.train.sdca_fprint': ['input', 'name'],
'tf.train.sdca_optimizer': ['sparse_example_indices', 'sparse_feature_indices', 'sparse_feature_values', 'dense_features', 'example_weights', 'example_labels', 'sparse_indices', 'sparse_weights', 'dense_weights', 'example_state_data', 'loss_type', 'l1', 'l2', 'num_loss_partitions', 'num_inner_iterations', 'adaptative', 'name'],
'tf.train.sdca_shrink_l1': ['weights', 'l1', 'l2', 'name'],
'tf.transpose': ['a', 'perm', 'name', 'conjugate'],
'tf.tuple': ['tensors', 'name', 'control_inputs'],
'tf.uniform_unit_scaling_initializer': ['factor', 'seed', 'dtype'],
'tf.while_loop': ['cond', 'body', 'loop_vars', 'shape_invariants', 'parallel_iterations', 'back_prop', 'swap_memory', 'name', 'maximum_iterations', 'return_same_structure']
}
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/compatibility/reorders_v2.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""A module to support operations on ipynb files"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import re
import shutil
import tempfile
CodeLine = collections.namedtuple("CodeLine", ["cell_number", "code"])
def is_python(cell):
"""Checks if the cell consists of Python code."""
return (cell["cell_type"] == "code" # code cells only
and cell["source"] # non-empty cells
and not cell["source"][0].startswith("%%")) # multiline eg: %%bash
def process_file(in_filename, out_filename, upgrader):
"""The function where we inject the support for ipynb upgrade."""
print("Extracting code lines from original notebook")
raw_code, notebook = _get_code(in_filename)
raw_lines = [cl.code for cl in raw_code]
  # The function follows the original flow from `upgrader.process_file`
with tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
processed_file, new_file_content, log, process_errors = (
upgrader.update_string_pasta("\n".join(raw_lines), in_filename))
if temp_file and processed_file:
new_notebook = _update_notebook(notebook, raw_code,
new_file_content.split("\n"))
json.dump(new_notebook, temp_file)
else:
raise SyntaxError(
"Was not able to process the file: \n%s\n" % "".join(log))
files_processed = processed_file
report_text = upgrader._format_log(log, in_filename, out_filename)
errors = process_errors
shutil.move(temp_file.name, out_filename)
return files_processed, report_text, errors
def skip_magic(code_line, magic_list):
"""Checks if the cell has magic, that is not Python-based.
Args:
code_line: A line of Python code
magic_list: A list of jupyter "magic" exceptions
Returns:
If the line jupyter "magic" line, not Python line
>>> skip_magic('!ls -laF', ['%', '!', '?'])
True
"""
for magic in magic_list:
if code_line.startswith(magic):
return True
return False
def check_line_split(code_line):
r"""Checks if a line was split with `\`.
Args:
code_line: A line of Python code
Returns:
If the line was split with `\`
>>> skip_magic("!gcloud ml-engine models create ${MODEL} \\\n")
True
"""
return re.search(r"\\\s*\n$", code_line)
def _get_code(input_file):
"""Loads the ipynb file and returns a list of CodeLines."""
raw_code = []
with open(input_file) as in_file:
notebook = json.load(in_file)
cell_index = 0
for cell in notebook["cells"]:
if is_python(cell):
cell_lines = cell["source"]
is_line_split = False
for line_idx, code_line in enumerate(cell_lines):
        # Jupyter cells can contain more than Python code; such lines are
        # temporarily turned into comments so the upgrader can skip them.
if skip_magic(code_line, ["%", "!", "?"]) or is_line_split:
# Found a special character, need to "encode"
code_line = "###!!!" + code_line
# if this cell ends with `\` -> skip the next line
is_line_split = check_line_split(code_line)
if is_line_split:
is_line_split = check_line_split(code_line)
        # People sometimes leave a trailing \n at the end of a cell so that
        # later edits touch only related lines and the diff stays small;
        # encode that newline as well.
if (line_idx == len(cell_lines) - 1) and code_line.endswith("\n"):
code_line = code_line.replace("\n", "###===")
        # A line may also begin with `\n` followed by content; the replace
        # below handles that case too.
raw_code.append(
CodeLine(cell_index,
code_line.rstrip().replace("\n", "###===")))
cell_index += 1
return raw_code, notebook
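# Illustrative example (added comment, not part of the original module):
# a shell line such as "!pip install foo" inside a code cell is stored as
# "###!!!!pip install foo" so the upgrader treats it as a comment, and a
# trailing newline on the last line of a cell is stored as "###===";
# _update_notebook below strips these markers again after conversion.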
def _update_notebook(original_notebook, original_raw_lines, updated_code_lines):
"""Updates notebook, once migration is done."""
new_notebook = copy.deepcopy(original_notebook)
# validate that the number of lines is the same
assert len(original_raw_lines) == len(updated_code_lines), \
("The lengths of input and converted files are not the same: "
"{} vs {}".format(len(original_raw_lines), len(updated_code_lines)))
code_cell_idx = 0
for cell in new_notebook["cells"]:
if not is_python(cell):
continue
applicable_lines = [
idx for idx, code_line in enumerate(original_raw_lines)
if code_line.cell_number == code_cell_idx
]
new_code = [updated_code_lines[idx] for idx in applicable_lines]
cell["source"] = "\n".join(new_code).replace("###!!!", "").replace(
"###===", "\n")
code_cell_idx += 1
return new_notebook
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/compatibility/ipynb.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf upgrader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
_TEST_VERSION = 1
class TestUpgrade(test_util.TensorFlowTestCase):
"""Test various APIs that have been changed in 2.0."""
@classmethod
def setUpClass(cls):
cls._tf_api_version = 1 if hasattr(tf, 'contrib') else 2
def setUp(self):
tf.compat.v1.enable_v2_behavior()
def testRenames(self):
self.assertAllClose(1.04719755, tf.acos(0.5))
self.assertAllClose(0.5, tf.rsqrt(4.0))
def testSerializeSparseTensor(self):
sp_input = tf.SparseTensor(
indices=tf.constant([[1]], dtype=tf.int64),
values=tf.constant([2], dtype=tf.int64),
dense_shape=[2])
with self.cached_session():
serialized_sp = tf.serialize_sparse(sp_input, 'serialize_name', tf.string)
self.assertEqual((3,), serialized_sp.shape)
self.assertTrue(serialized_sp[0].numpy()) # check non-empty
def testSerializeManySparse(self):
sp_input = tf.SparseTensor(
indices=tf.constant([[0, 1]], dtype=tf.int64),
values=tf.constant([2], dtype=tf.int64),
dense_shape=[1, 2])
with self.cached_session():
serialized_sp = tf.serialize_many_sparse(
sp_input, 'serialize_name', tf.string)
self.assertEqual((1, 3), serialized_sp.shape)
def testArgMaxMin(self):
self.assertAllClose(
[1],
tf.argmax([[1, 3, 2]], name='abc', dimension=1))
self.assertAllClose(
[0, 0, 0],
tf.argmax([[1, 3, 2]], dimension=0))
self.assertAllClose(
[0],
tf.argmin([[1, 3, 2]], name='abc', dimension=1))
def testSoftmaxCrossEntropyWithLogits(self):
out = tf.nn.softmax_cross_entropy_with_logits(
logits=[0.1, 0.8], labels=[0, 1])
self.assertAllClose(out, 0.40318608)
out = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=[0.1, 0.8], labels=[0, 1])
self.assertAllClose(out, 0.40318608)
def testLinearClassifier(self):
if _TEST_VERSION == 2 and self._tf_api_version == 1:
# Skip if we converted this file to v2 but running with tf v1.
# In this case, conversion script adds reference to
# tf.keras.losses.Reduction which is not available in v1.
self.skipTest(
'After converting to 2.0, this test does not work with '
'TensorFlow 1.x.')
return
feature_column = tf.feature_column.numeric_column(
'feature', shape=(1,))
classifier = tf.estimator.LinearClassifier(
n_classes=2, feature_columns=[feature_column])
data = {'feature': [1, 20, 3]}
target = [0, 1, 0]
classifier.train(
input_fn=lambda: (data, target),
steps=100)
scores = classifier.evaluate(
input_fn=lambda: (data, target),
steps=100)
self.assertGreater(scores['accuracy'], 0.99)
def testUniformUnitScalingInitializer(self):
init = tf.initializers.uniform_unit_scaling(0.5, seed=1)
self.assertArrayNear(
[-0.45200047, 0.72815341],
init((2,)).numpy(),
err=1e-6)
if __name__ == "__main__":
test_lib.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/compatibility/testdata/test_file_v1_12.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf upgrader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
class TestUpgrade(test_util.TensorFlowTestCase):
"""Test various APIs that have been changed in 1.0.
This test will not run in current TensorFlow, but did run in 0.11.
This file is intended to be converted by a genrule() that uses the converter
so that a 1.0 compatible version of this file is generated. That is run as
a unit test if the converter is successful.
"""
@test_util.run_v1_only("b/120545219")
def testArgRenames(self):
with self.cached_session():
a = [[1., 2., 3.], [4., 5., 6.]]
b = [[True, False, False], [False, True, True]]
dim0 = [1]
dim1 = [1]
self.assertAllEqual(
tf.reduce_any(
b, reduction_indices=dim0).eval(), [True, True])
self.assertAllEqual(
tf.reduce_all(
b, reduction_indices=[0]).eval(), [False, False, False])
self.assertAllEqual(
tf.reduce_all(
b, reduction_indices=dim1).eval(), [False, False])
self.assertAllEqual(
tf.reduce_sum(
a, reduction_indices=[1]).eval(), [6., 15.])
self.assertAllEqual(
tf.reduce_sum(
a, reduction_indices=[0, 1]).eval(), 21.0)
self.assertAllEqual(tf.reduce_sum(a, [0, 1]).eval(), 21.0)
self.assertAllEqual(
tf.reduce_prod(
a, reduction_indices=[1]).eval(), [6., 120.])
self.assertAllEqual(
tf.reduce_prod(
a, reduction_indices=[0, 1]).eval(), 720.0)
self.assertAllEqual(tf.reduce_prod(a, [0, 1]).eval(), 720.0)
self.assertAllEqual(
tf.reduce_mean(
a, reduction_indices=[1]).eval(), [2., 5.])
self.assertAllEqual(
tf.reduce_mean(
a, reduction_indices=[0, 1]).eval(), 3.5)
self.assertAllEqual(tf.reduce_mean(a, [0, 1]).eval(), 3.5)
self.assertAllEqual(
tf.reduce_min(
a, reduction_indices=[1]).eval(), [1., 4.])
self.assertAllEqual(
tf.reduce_min(
a, reduction_indices=[0, 1]).eval(), 1.0)
self.assertAllEqual(tf.reduce_min(a, [0, 1]).eval(), 1.0)
self.assertAllEqual(
tf.reduce_max(
a, reduction_indices=[1]).eval(), [3., 6.])
self.assertAllEqual(
tf.reduce_max(
a, reduction_indices=[0, 1]).eval(), 6.0)
self.assertAllEqual(tf.reduce_max(a, [0, 1]).eval(), 6.0)
self.assertAllClose(tf.reduce_logsumexp(a, reduction_indices=[1]).eval(),
[3.40760589, 6.40760612])
self.assertAllClose(
tf.reduce_logsumexp(a, reduction_indices=[0, 1]).eval(),
6.45619344711)
self.assertAllClose(
tf.reduce_logsumexp(a, [0, 1]).eval(), 6.45619344711)
self.assertAllEqual(
tf.expand_dims([[1, 2], [3, 4]], axis=1).eval(),
[[[1, 2]], [[3, 4]]])
@test_util.run_v1_only("b/120545219")
def testArgMinMax(self):
with self.cached_session():
self.assertAllEqual(
tf.argmin([[1, 2, 3], [4, 1, 0]], dimension=1).eval(),
[0, 2])
self.assertAllEqual(
tf.argmin([[1, 2, 3], [4, 1, 0]], dimension=0).eval(),
[0, 1, 1])
self.assertAllEqual(
tf.argmax([[1, 2, 3], [4, 1, 0]], dimension=1).eval(),
[2, 0])
self.assertAllEqual(
tf.argmax([[1, 2, 3], [4, 1, 0]], dimension=0).eval(),
[1, 0, 0])
@test_util.run_v1_only("b/120545219")
def testExpandAndSqueeze(self):
with self.cached_session():
# TODO(aselle): sparse_split, sparse_reduce_sum,
# sparse_reduce_sum_sparse, reduce_join
a = [[1, 2, 3]]
self.assertAllEqual(tf.expand_dims(tf.squeeze(a, [0]), 0).eval(),
a)
self.assertAllEqual(tf.squeeze(tf.expand_dims(a, 1), [1]).eval(),
a)
self.assertAllEqual(
tf.expand_dims(tf.squeeze([[1, 2, 3]], axis=[0]), dim=0).eval(), a)
self.assertAllEqual(
tf.squeeze(tf.expand_dims([[1, 2, 3]], dim=1), axis=[1]).eval(), a)
self.assertAllEqual(
tf.squeeze(tf.expand_dims([[1, 2, 3]], dim=1), axis=[1]).eval(), a)
@test_util.run_v1_only("b/120545219")
def testArithmeticRenames(self):
with self.cached_session() as s:
stuff = tf.split(1, 2, [[1, 2, 3, 4], [4, 5, 6, 7]])
vals = s.run(stuff)
self.assertAllEqual(vals,
[[[1, 2], [4, 5]], [[3, 4], [6, 7]]])
self.assertAllEqual(
tf.neg(tf.mul(tf.add(1, 2), tf.sub(5, 3))).eval(),
-6)
self.assertAllEqual(
s.run(tf.listdiff([1, 2, 3], [3, 3, 4]))[0], [1, 2])
self.assertAllEqual(
tf.list_diff([1, 2, 3], [3, 3, 4])[0].eval(), [1, 2])
a = [[1., 2., 3.], [4., 5., 6.]]
foo = np.where(np.less(a, 2), np.negative(a), a)
self.assertAllEqual(
tf.select(tf.less(a, 2), tf.neg(a), a).eval(),
foo)
self.assertAllEqual(
tf.complex_abs(tf.constant(3 + 4.j)).eval(),
5)
# # TODO(aselle): (tf.batch_*)
# ]
@test_util.run_v1_only("b/120545219")
def testBatchAndSvd(self):
with self.cached_session():
mat = [[1., 2.], [2., 3.]]
batched_mat = tf.expand_dims(mat, [0])
result = tf.matmul(mat, mat).eval()
result_batched = tf.batch_matmul(batched_mat, batched_mat).eval()
self.assertAllEqual(result_batched, np.expand_dims(result, 0))
self.assertAllEqual(
tf.svd(mat, False, True).eval(),
tf.svd(mat, compute_uv=False, full_matrices=True).eval())
@test_util.run_v1_only("b/120545219")
def testCrossEntropy(self):
# TODO(aselle): Test sparse_softmax_...
with self.cached_session():
labels = [.8, .5, .2, .1]
logits = [.9, .1, .3, .1]
self.assertAllEqual(
tf.nn.softmax_cross_entropy_with_logits(
logits, labels).eval(),
tf.nn.softmax_cross_entropy_with_logits(
labels=labels, logits=logits).eval())
self.assertAllEqual(
tf.nn.sigmoid_cross_entropy_with_logits(
logits, labels).eval(),
tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=logits).eval())
@test_util.run_v1_only("b/120545219")
def testVariables(self):
with self.cached_session() as s:
# make some variables
_ = [tf.Variable([1, 2, 3], dtype=tf.float32),
tf.Variable([1, 2, 3], dtype=tf.int32)]
s.run(tf.global_variables_initializer())
_ = [v.name for v in tf.all_variables()]
_ = [v.name for v in tf.local_variables()]
@test_util.run_v1_only("b/120545219")
def testSummaries(self):
with self.cached_session() as s:
var = tf.Variable([1, 2, 3], dtype=tf.float32)
s.run(tf.global_variables_initializer())
x, y = np.meshgrid(np.linspace(-10, 10, 256), np.linspace(-10, 10, 256))
image = np.sin(x**2 + y**2) / np.sqrt(x**2 + y**2) * .5 + .5
image = image[None, :, :, None]
# make a dummy sound
freq = 440 # A = 440Hz
sampling_frequency = 11000
audio = np.sin(2 * np.pi * np.linspace(0, 1, sampling_frequency) * freq)
audio = audio[None, :, None]
test_dir = tempfile.mkdtemp()
# test summaries
writer = tf.train.SummaryWriter(test_dir)
summaries = [
tf.scalar_summary("scalar_var", var[0]),
tf.scalar_summary("scalar_reduce_var", tf.reduce_sum(var)),
tf.histogram_summary("var_histogram", var),
tf.image_summary("sin_image", image),
tf.audio_summary("sin_wave", audio, sampling_frequency),
]
run_summaries = s.run(summaries)
writer.add_summary(s.run(tf.merge_summary(inputs=run_summaries)))
# This is redundant, but we want to be able to rewrite the command
writer.add_summary(s.run(tf.merge_all_summaries()))
writer.close()
shutil.rmtree(test_dir)
if __name__ == "__main__":
test_lib.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/compatibility/testdata/test_file_v0_11.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""Script for updating tensorflow/tools/compatibility/reorders_v2.py.
To update reorders_v2.py, run:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
"""
# pylint: enable=line-too-long
import tensorflow as tf
# This import is needed so that TensorFlow python modules are in sys.modules.
from tensorflow import python as tf_python # pylint: disable=unused-import
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import app
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.compatibility import tf_upgrade_v2
_OUTPUT_FILE_PATH = 'third_party/tensorflow/tools/compatibility/reorders_v2.py'
_FILE_HEADER = """# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
\"\"\"List of renames to apply when converting from TF 1.0 to TF 2.0.
THIS FILE IS AUTOGENERATED: To update, please run:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
This file should be updated whenever a function is added to
self.reordered_function_names in tf_upgrade_v2.py.
\"\"\"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""
def collect_function_arg_names(function_names):
"""Determines argument names for reordered function signatures.
Args:
function_names: Functions to collect arguments for.
Returns:
Dictionary mapping function name to its arguments.
"""
# Map from reordered function name to its arguments.
function_to_args = {}
def visit(unused_path, unused_parent, children):
"""Visitor that collects arguments for reordered functions."""
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v1 = tf_export.get_v1_names(attr)
api_names_v1 = ['tf.%s' % name for name in api_names_v1]
matches_function_names = any(
name in function_names for name in api_names_v1)
if matches_function_names:
if tf_inspect.isclass(attr):
# Get constructor arguments if attr is a class
arg_list = tf_inspect.getargspec(
getattr(attr, '__init__'))[0]
arg_list = arg_list[1:] # skip 'self' argument
else:
# Get function arguments.
# getargspec returns a tuple of (args, varargs, keywords, defaults)
# we just look at args.
arg_list = tf_inspect.getargspec(attr)[0]
for name in api_names_v1:
function_to_args[name] = arg_list
visitor = public_api.PublicAPIVisitor(visit)
visitor.do_not_descend_map['tf'].append('contrib')
visitor.do_not_descend_map['tf.compat'] = ['v1', 'v2']
traverse.traverse(tf, visitor)
return function_to_args
def get_reorder_line(name, arg_list):
return ' \'%s\': %s' % (name, str(arg_list))
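# Illustrative usage (added comment, not part of the original script):
#   get_reorder_line('tf.argmax', ['input', 'axis', 'name'])
# returns an indented dictionary entry of the form
#   'tf.argmax': ['input', 'axis', 'name']
# ready to be joined into the generated reorders map.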
def update_reorders_v2(output_file_path):
"""Writes a Python dictionary mapping function name to argument order.
Args:
output_file_path: File path to write output to. Any existing contents
would be replaced.
"""
reordered_function_names = (
tf_upgrade_v2.TFAPIChangeSpec().reordered_function_names)
all_reorders = collect_function_arg_names(reordered_function_names)
# List of reorder lines to write to output file in the form:
# 'tf.function_name': ['arg1', 'arg2', ...]
rename_lines = [
get_reorder_line(name, arg_names)
for name, arg_names in all_reorders.items()]
renames_file_text = '%sreorders = {\n%s\n}\n' % (
_FILE_HEADER, ',\n'.join(sorted(rename_lines)))
file_io.write_string_to_file(output_file_path, renames_file_text)
def main(unused_argv):
update_reorders_v2(_OUTPUT_FILE_PATH)
if __name__ == '__main__':
app.run(main=main)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/compatibility/update/generate_v2_reorders_map.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""Script for updating tensorflow/tools/compatibility/renames_v2.py.
To update renames_v2.py, run:
bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
pyformat --in_place third_party/tensorflow/tools/compatibility/renames_v2.py
"""
# pylint: enable=line-too-long
import sys
import tensorflow as tf
# This import is needed so that TensorFlow python modules are in sys.modules.
from tensorflow import python as tf_python # pylint: disable=unused-import
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import app
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.compatibility import all_renames_v2
_OUTPUT_FILE_PATH = 'third_party/tensorflow/tools/compatibility/renames_v2.py'
_FILE_HEADER = """# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
\"\"\"List of renames to apply when converting from TF 1.0 to TF 2.0.
THIS FILE IS AUTOGENERATED: To update, please run:
bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
This file should be updated whenever endpoints are deprecated.
\"\"\"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""
def get_canonical_name(v2_names, v1_name):
if v2_names:
return v2_names[0]
return 'compat.v1.%s' % v1_name
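# Illustrative usage (added comment, not part of the original script):
#   get_canonical_name(['math.add'], 'add')  -> 'math.add'
#   get_canonical_name([], 'mul')            -> 'compat.v1.mul'
# i.e. the first v2 name wins, and symbols without a v2 endpoint are mapped
# into the compat.v1 namespace.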
def get_all_v2_names():
"""Get a set of function/class names available in TensorFlow 2.0."""
v2_names = set() # All op names in TensorFlow 2.0
def visit(unused_path, unused_parent, children):
"""Visitor that collects TF 2.0 names."""
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v2 = tf_export.get_v2_names(attr)
for name in api_names_v2:
v2_names.add(name)
visitor = public_api.PublicAPIVisitor(visit)
visitor.do_not_descend_map['tf'].append('contrib')
visitor.do_not_descend_map['tf.compat'] = ['v1']
traverse.traverse(tf.compat.v2, visitor)
return v2_names
def collect_constant_renames():
"""Looks for constants that need to be renamed in TF 2.0.
Returns:
Set of tuples of the form (current name, new name).
"""
renames = set()
for module in sys.modules.values():
constants_v1_list = tf_export.get_v1_constants(module)
constants_v2_list = tf_export.get_v2_constants(module)
# _tf_api_constants attribute contains a list of tuples:
# (api_names_list, constant_name)
# We want to find API names that are in V1 but not in V2 for the same
# constant_names.
# First, we convert constants_v1_list and constants_v2_list to
# dictionaries for easier lookup.
constants_v1 = {constant_name: api_names
for api_names, constant_name in constants_v1_list}
constants_v2 = {constant_name: api_names
for api_names, constant_name in constants_v2_list}
# Second, we look for names that are in V1 but not in V2.
for constant_name, api_names_v1 in constants_v1.items():
api_names_v2 = constants_v2[constant_name]
for name in api_names_v1:
if name not in api_names_v2:
renames.add((name, get_canonical_name(api_names_v2, name)))
return renames
def collect_function_renames():
"""Looks for functions/classes that need to be renamed in TF 2.0.
Returns:
Set of tuples of the form (current name, new name).
"""
# Set of rename lines to write to output file in the form:
# 'tf.deprecated_name': 'tf.canonical_name'
renames = set()
def visit(unused_path, unused_parent, children):
"""Visitor that collects rename strings to add to rename_line_set."""
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v1 = tf_export.get_v1_names(attr)
api_names_v2 = tf_export.get_v2_names(attr)
deprecated_api_names = set(api_names_v1) - set(api_names_v2)
for name in deprecated_api_names:
renames.add((name, get_canonical_name(api_names_v2, name)))
visitor = public_api.PublicAPIVisitor(visit)
visitor.do_not_descend_map['tf'].append('contrib')
visitor.do_not_descend_map['tf.compat'] = ['v1', 'v2']
traverse.traverse(tf, visitor)
# It is possible that a different function is exported with the
# same name. For e.g. when creating a different function to
# rename arguments. Exclude it from renames in this case.
v2_names = get_all_v2_names()
renames = set((name, new_name) for name, new_name in renames
if name not in v2_names)
return renames
def get_rename_line(name, canonical_name):
return ' \'tf.%s\': \'tf.%s\'' % (name, canonical_name)
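# Illustrative usage (added comment, not part of the original script):
#   get_rename_line('mul', 'math.multiply')
# returns an indented dictionary entry of the form
#   'tf.mul': 'tf.math.multiply'
# ready to be joined into the generated renames map.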
def update_renames_v2(output_file_path):
"""Writes a Python dictionary mapping deprecated to canonical API names.
Args:
output_file_path: File path to write output to. Any existing contents
would be replaced.
"""
function_renames = collect_function_renames()
constant_renames = collect_constant_renames()
all_renames = function_renames.union(constant_renames)
manual_renames = set(
all_renames_v2.manual_symbol_renames.keys())
# List of rename lines to write to output file in the form:
# 'tf.deprecated_name': 'tf.canonical_name'
rename_lines = [
get_rename_line(name, canonical_name)
for name, canonical_name in all_renames
if 'tf.' + name not in manual_renames]
renames_file_text = '%srenames = {\n%s\n}\n' % (
_FILE_HEADER, ',\n'.join(sorted(rename_lines)))
file_io.write_string_to_file(output_file_path, renames_file_text)
def main(unused_argv):
update_renames_v2(_OUTPUT_FILE_PATH)
if __name__ == '__main__':
app.run(main=main)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/compatibility/update/generate_v2_renames_map.py
|
#!/usr/bin/python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Automatically update TensorFlow version in source files
#
# Usage:
# ./tensorflow/tools/ci_build/update_version.py --version 1.4.0-rc1
# ./tensorflow/tools/ci_build/update_version.py --nightly
#
"""Update version of TensorFlow script."""
# pylint: disable=superfluous-parens
import argparse
import os
import re
import subprocess
import time
# File parameters.
TF_SRC_DIR = "tensorflow"
VERSION_H = "%s/core/public/version.h" % TF_SRC_DIR
SETUP_PY = "%s/tools/pip_package/setup.py" % TF_SRC_DIR
README_MD = "./README.md"
TENSORFLOW_BZL = "%s/tensorflow.bzl" % TF_SRC_DIR
RELEVANT_FILES = [TF_SRC_DIR, VERSION_H, SETUP_PY, README_MD]
# Version type parameters.
NIGHTLY_VERSION = 1
REGULAR_VERSION = 0
def check_existence(filename):
"""Check the existence of file or dir."""
if not os.path.exists(filename):
raise RuntimeError("%s not found. Are you under the TensorFlow source root"
" directory?")
def check_all_files():
"""Check all relevant files necessary for upgrade."""
for file_name in RELEVANT_FILES:
check_existence(file_name)
def replace_string_in_line(search, replace, filename):
"""Replace with sed when regex is required."""
with open(filename, "r") as source:
content = source.read()
with open(filename, "w") as source:
source.write(re.sub(search, replace, content))
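# Illustrative usage (added comment, not part of the original script):
#   replace_string_in_line("#define TF_PATCH_VERSION 0",
#                          "#define TF_PATCH_VERSION 1", VERSION_H)
# Note that `search` is passed to re.sub, so it is treated as a regular
# expression rather than a plain string.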
class Version(object):
"""Version class object that stores SemVer version information."""
def __init__(self, major, minor, patch, identifier_string, version_type):
"""Constructor.
Args:
major: major string eg. (1)
minor: minor string eg. (3)
patch: patch string eg. (1)
identifier_string: extension string eg. (-rc0)
version_type: version parameter ((REGULAR|NIGHTLY)_VERSION)
"""
self.major = major
self.minor = minor
self.patch = patch
self.identifier_string = identifier_string
self.version_type = version_type
self._update_string()
def _update_string(self):
self.string = "%s.%s.%s%s" % (self.major,
self.minor,
self.patch,
self.identifier_string)
def __str__(self):
return self.string
def set_identifier_string(self, identifier_string):
self.identifier_string = identifier_string
self._update_string()
@property
def pep_440_str(self):
if self.version_type == REGULAR_VERSION:
return_string = "%s.%s.%s%s" % (self.major,
self.minor,
self.patch,
self.identifier_string)
return return_string.replace("-", "")
else:
return_string = "%s.%s.%s" % (self.major,
self.minor,
self.identifier_string)
return return_string.replace("-", "")
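  # Illustrative example (added comment, not part of the original script):
  # for a REGULAR_VERSION of "1.4.0-rc1", pep_440_str is "1.4.0rc1"; for a
  # hypothetical nightly with identifier_string "-dev20230101", it would be
  # "1.4.dev20230101" (the patch level is dropped for nightlies).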
@staticmethod
def parse_from_string(string, version_type):
"""Returns version object from Semver string.
Args:
string: version string
version_type: version parameter
Raises:
RuntimeError: If the version string is not valid.
"""
# Check validity of new version string.
if not re.search(r"[0-9]+\.[0-9]+\.[a-zA-Z0-9]+", string):
raise RuntimeError("Invalid version string: %s" % string)
major, minor, extension = string.split(".", 2)
# Isolate patch and identifier string if identifier string exists.
extension_split = extension.split("-", 1)
patch = extension_split[0]
if len(extension_split) == 2:
identifier_string = "-" + extension_split[1]
else:
identifier_string = ""
return Version(major,
minor,
patch,
identifier_string,
version_type)
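# Illustrative sketch (not part of the release tooling above): how a SemVer
# string round-trips through the Version helper. The sample version string is
# hypothetical and used only for demonstration.
def _example_version_parsing():
  """Show how Version.parse_from_string splits a string like "1.15.5-rc1"."""
  v = Version.parse_from_string("1.15.5-rc1", REGULAR_VERSION)
  # The components are kept as strings; the identifier keeps its leading dash.
  assert (v.major, v.minor, v.patch) == ("1", "15", "5")
  assert v.identifier_string == "-rc1"
  # str() rebuilds the SemVer form; pep_440_str drops the dash for pip.
  assert str(v) == "1.15.5-rc1"
  assert v.pep_440_str == "1.15.5rc1"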
def get_current_semver_version():
"""Returns a Version object of current version.
Returns:
version: Version object of current SemVer string based on information from
core/public/version.h
"""
# Get current version information.
version_file = open(VERSION_H, "r")
for line in version_file:
major_match = re.search("^#define TF_MAJOR_VERSION ([0-9]+)", line)
minor_match = re.search("^#define TF_MINOR_VERSION ([0-9]+)", line)
patch_match = re.search("^#define TF_PATCH_VERSION ([0-9]+)", line)
extension_match = re.search("^#define TF_VERSION_SUFFIX \"(.*)\"", line)
if major_match:
old_major = major_match.group(1)
if minor_match:
old_minor = minor_match.group(1)
if patch_match:
old_patch_num = patch_match.group(1)
if extension_match:
old_extension = extension_match.group(1)
break
if "dev" in old_extension:
version_type = NIGHTLY_VERSION
else:
version_type = REGULAR_VERSION
return Version(old_major,
old_minor,
old_patch_num,
old_extension,
version_type)
def update_version_h(old_version, new_version):
"""Update tensorflow/core/public/version.h."""
replace_string_in_line("#define TF_MAJOR_VERSION %s" % old_version.major,
"#define TF_MAJOR_VERSION %s" % new_version.major,
VERSION_H)
replace_string_in_line("#define TF_MINOR_VERSION %s" % old_version.minor,
"#define TF_MINOR_VERSION %s" % new_version.minor,
VERSION_H)
replace_string_in_line("#define TF_PATCH_VERSION %s" % old_version.patch,
"#define TF_PATCH_VERSION %s" % new_version.patch,
VERSION_H)
replace_string_in_line(
"#define TF_VERSION_SUFFIX \"%s\"" % old_version.identifier_string,
"#define TF_VERSION_SUFFIX \"%s\"" % new_version.identifier_string,
VERSION_H)
def update_setup_dot_py(old_version, new_version):
"""Update setup.py."""
replace_string_in_line("_VERSION = '%s'" % old_version.string,
"_VERSION = '%s'" % new_version.string, SETUP_PY)
def update_readme(old_version, new_version):
"""Update README."""
pep_440_str = new_version.pep_440_str
replace_string_in_line(r"%s\.%s\.([[:alnum:]]+)-" % (old_version.major,
old_version.minor),
"%s-" % pep_440_str, README_MD)
def update_tensorflow_bzl(old_version, new_version):
"""Update tensorflow.bzl."""
old_mmp = "%s.%s.%s" % (old_version.major, old_version.minor,
old_version.patch)
new_mmp = "%s.%s.%s" % (new_version.major, new_version.minor,
new_version.patch)
replace_string_in_line('VERSION = "%s"' % old_mmp,
'VERSION = "%s"' % new_mmp, TENSORFLOW_BZL)
def major_minor_change(old_version, new_version):
"""Check if a major or minor change occurred."""
major_mismatch = old_version.major != new_version.major
minor_mismatch = old_version.minor != new_version.minor
if major_mismatch or minor_mismatch:
return True
return False
def check_for_lingering_string(lingering_string):
"""Check for given lingering strings."""
formatted_string = lingering_string.replace(".", r"\.")
try:
linger_str_output = subprocess.check_output(
["grep", "-rnoH", formatted_string, TF_SRC_DIR])
linger_strs = linger_str_output.decode("utf8").split("\n")
except subprocess.CalledProcessError:
linger_strs = []
if linger_strs:
print("WARNING: Below are potentially instances of lingering old version "
"string \"%s\" in source directory \"%s/\" that are not "
"updated by this script. Please check them manually!"
% (lingering_string, TF_SRC_DIR))
for linger_str in linger_strs:
print(linger_str)
else:
print("No lingering old version strings \"%s\" found in source directory"
" \"%s/\". Good." % (lingering_string, TF_SRC_DIR))
def check_for_old_version(old_version, new_version):
"""Check for old version references."""
for old_ver in [old_version.string, old_version.pep_440_str]:
check_for_lingering_string(old_ver)
if major_minor_change(old_version, new_version):
old_r_major_minor = "r%s.%s" % (old_version.major, old_version.minor)
check_for_lingering_string(old_r_major_minor)
def main():
"""This script updates all instances of version in the tensorflow directory.
Requirements:
version: The version tag
OR
nightly: Create a nightly tag with current date
Raises:
RuntimeError: If the script is not being run from tf source dir
"""
parser = argparse.ArgumentParser(description="Cherry picking automation.")
# Arg information
parser.add_argument("--version",
help="<new_major_ver>.<new_minor_ver>.<new_patch_ver>",
default="")
parser.add_argument("--nightly",
help="disable the service provisioning step",
action="store_true")
args = parser.parse_args()
check_all_files()
old_version = get_current_semver_version()
if args.nightly:
if args.version:
new_version = Version.parse_from_string(args.version, NIGHTLY_VERSION)
new_version.set_identifier_string("-dev" + time.strftime("%Y%m%d"))
else:
# Dev minor version is one ahead of official.
nightly_minor_ver = int(old_version.minor) + 1
new_version = Version(old_version.major,
str(nightly_minor_ver),
old_version.patch,
"-dev" + time.strftime("%Y%m%d"),
NIGHTLY_VERSION)
else:
new_version = Version.parse_from_string(args.version, REGULAR_VERSION)
update_version_h(old_version, new_version)
update_setup_dot_py(old_version, new_version)
update_readme(old_version, new_version)
update_tensorflow_bzl(old_version, new_version)
# Print transition details.
print("Major: %s -> %s" % (old_version.major, new_version.major))
print("Minor: %s -> %s" % (old_version.minor, new_version.minor))
print("Patch: %s -> %s\n" % (old_version.patch, new_version.patch))
check_for_old_version(old_version, new_version)
if __name__ == "__main__":
main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/ci_build/update_version.py
|
#!/usr/bin/python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Automatically copy TensorFlow binaries
#
# Usage:
# ./tensorflow/tools/ci_build/copy_binary.py --filename
# tf_nightly/tf_nightly_gpu-1.4.0.dev20170914-cp35-cp35m-manylinux1_x86_64.whl
# --new_py_ver 36
#
"""Copy binaries of TensorFlow for different python versions."""
# pylint: disable=superfluous-parens
import argparse
import os
import re
import shutil
import tempfile
import zipfile
TF_NIGHTLY_REGEX = (r"(.+)(tf_nightly.*)-(\d\.[\d]{1,2}"
r"\.\d.dev[\d]{0,8})-(.+)\.whl")
BINARY_STRING_TEMPLATE = "%s-%s-%s.whl"
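# Illustrative sketch (not part of this script): how TF_NIGHTLY_REGEX splits a
# nightly wheel path. The sample filename mirrors the usage note above and is
# purely for demonstration.
def _example_wheel_name_parsing():
  """Show the four capture groups for a sample nightly wheel path."""
  sample = ("tf_nightly/tf_nightly_gpu-1.4.0.dev20170914-"
            "cp35-cp35m-manylinux1_x86_64.whl")
  groups = re.search(TF_NIGHTLY_REGEX, sample)
  assert groups.group(1) == "tf_nightly/"                    # directory
  assert groups.group(2) == "tf_nightly_gpu"                 # package
  assert groups.group(3) == "1.4.0.dev20170914"              # version
  assert groups.group(4) == "cp35-cp35m-manylinux1_x86_64"   # python/abi tag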
def check_existence(filename):
"""Check the existence of file or dir."""
if not os.path.exists(filename):
raise RuntimeError("%s not found." % filename)
def copy_binary(directory, origin_tag, new_tag, version, package):
"""Rename and copy binaries for different python versions.
Arguments:
directory: string of directory
origin_tag: str of the old python version tag
new_tag: str of the new tag
version: the version of the package
package: str, name of the package
"""
print("Rename and copy binaries with %s to %s." % (origin_tag, new_tag))
origin_binary = BINARY_STRING_TEMPLATE % (package, version, origin_tag)
new_binary = BINARY_STRING_TEMPLATE % (package, version, new_tag)
zip_ref = zipfile.ZipFile(os.path.join(directory, origin_binary), "r")
try:
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
zip_ref.extractall()
zip_ref.close()
old_py_ver = re.search(r"(cp\d\d-cp\d\d)", origin_tag).group(1)
new_py_ver = re.search(r"(cp\d\d-cp\d\d)", new_tag).group(1)
wheel_file = os.path.join(
tmpdir, "%s-%s.dist-info" % (package, version), "WHEEL")
with open(wheel_file, "r") as f:
content = f.read()
with open(wheel_file, "w") as f:
f.write(content.replace(old_py_ver, new_py_ver))
zout = zipfile.ZipFile(directory + new_binary, "w", zipfile.ZIP_DEFLATED)
zip_these_files = [
"%s-%s.dist-info" % (package, version),
"%s-%s.data" % (package, version),
"tensorflow",
"tensorflow_core",
]
for dirname in zip_these_files:
for root, _, files in os.walk(dirname):
for filename in files:
zout.write(os.path.join(root, filename))
zout.close()
finally:
shutil.rmtree(tmpdir)
def main():
"""This script copies binaries.
Requirements:
filename: The path to the whl file
AND
new_py_ver: The new two-digit python version tag (e.g. 36)
Raises:
RuntimeError: If the whl file was not found
"""
parser = argparse.ArgumentParser(description="Cherry picking automation.")
# Arg information
parser.add_argument(
"--filename", help="path to whl file we are copying", required=True)
parser.add_argument(
"--new_py_ver", help="two digit py version eg. 27 or 33", required=True)
args = parser.parse_args()
# Argument checking
args.filename = os.path.abspath(args.filename)
check_existence(args.filename)
regex_groups = re.search(TF_NIGHTLY_REGEX, args.filename)
directory = regex_groups.group(1)
package = regex_groups.group(2)
version = regex_groups.group(3)
origin_tag = regex_groups.group(4)
old_py_ver = re.search(r"(cp\d\d)", origin_tag).group(1)
# Create new tags
new_tag = origin_tag.replace(old_py_ver, "cp" + args.new_py_ver)
# Copy the binary with the info we have
copy_binary(directory, origin_tag, new_tag, version, package)
if __name__ == "__main__":
main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/ci_build/copy_binary.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configure build environment for certain Intel platforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import subprocess
NEHALEM_CPU_INSTRUCTIONS = [
"MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE4.1", "SSE4.2", "POPCNT"
]
SANDYBRIDGE_CPU_INSTRUCTIONS = NEHALEM_CPU_INSTRUCTIONS[:]
SANDYBRIDGE_CPU_INSTRUCTIONS.extend(["AVX", "AES", "PCLMUL"])
HASWELL_CPU_INSTRUCTIONS = SANDYBRIDGE_CPU_INSTRUCTIONS[:]
HASWELL_CPU_INSTRUCTIONS.extend(
["FSGSBASE", "RDRND", "FMA", "BMI", "BMI2", "F16C", "MOVBE", "AVX2"])
SKYLAKE_CPU_INSTRUCTIONS = HASWELL_CPU_INSTRUCTIONS[:]
SKYLAKE_CPU_INSTRUCTIONS.extend([
"PKU", "RDSEED", "ADCX", "PREFETCHW", "CLFLUSHOPT", "XSAVEC", "XSAVES",
"AVX512F", "CLWB", "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512CD"
])
ICELAKE_CPU_INSTRUCTIONS = SKYLAKE_CPU_INSTRUCTIONS[:]
ICELAKE_CPU_INSTRUCTIONS.extend([
"AVX512VBMI", "AVX512IFMA", "SHA", "CLWB", "UMIP", "RDPID", "GFNI",
"AVX512VBMI2", "AVX512VPOPCNTDQ", "AVX512BITALG", "AVX512VNNI",
"VPCLMULQDQ", "VAES"
])
BASIC_BUILD_OPTS = ["--cxxopt=-D_GLIBCXX_USE_CXX11_ABI=0", "--copt=-O3"]
SECURE_BUILD_OPTS = [
"--copt=-Wformat", "--copt=-Wformat-security", "--copt=-fstack-protector",
"--copt=-fPIC", "--copt=-fpic", "--linkopt=-znoexecstack",
"--linkopt=-zrelro", "--linkopt=-znow", "--linkopt=-fstack-protector"
]
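# Illustrative sketch (not part of this script): the instruction lists above
# are lowercased into bazel "--copt=-m<flag>" options by set_build_args below,
# e.g. "SSE4.1" becomes "--copt=-msse4.1". A hypothetical helper showing that
# mapping:
def _example_platform_copts(platform_flags):
  """Return the bazel copt strings for a list of CPU instruction names."""
  return ["--copt=-m{}".format(flag.lower()) for flag in platform_flags]
# _example_platform_copts(NEHALEM_CPU_INSTRUCTIONS)[:3] would yield
# ["--copt=-mmmx", "--copt=-msse", "--copt=-msse2"].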
class BuildEnvSetter(object):
"""Prepares the proper environment settings for various Intel platforms."""
default_platform_ = "haswell"
PLATFORMS = {
"nehalem": {
"min_gcc_major_version": "4",
"min_gcc_minor_version": "8",
"flags": NEHALEM_CPU_INSTRUCTIONS
},
"sandybridge": {
"min_gcc_major_version": "4",
"min_gcc_minor_version": "8",
"flags": SANDYBRIDGE_CPU_INSTRUCTIONS
},
"haswell": {
"min_gcc_major_version": "4",
"min_gcc_minor_version": "8",
"flags": HASWELL_CPU_INSTRUCTIONS
},
"skylake": {
"min_gcc_major_version": "6",
"min_gcc_minor_version": "0",
"flags": SKYLAKE_CPU_INSTRUCTIONS
},
"icelake": {
"min_gcc_major_version": "8",
"min_gcc_minor_version": "0",
"flags": ICELAKE_CPU_INSTRUCTIONS
}
}
def __init__(self):
self.args = None
self.bazel_flags_ = "build "
self.go()
def gcc_version_ok(self, min_gcc_major_version, min_gcc_minor_version):
"""Make sure the GCC version installed on the machine is acceptable."""
# check to see if gcc is present
gcc_path = ""
gcc_path_cmd = "command -v gcc"
try:
print("gcc_path_cmd = {}".format(gcc_path_cmd))
gcc_path = subprocess.check_output(gcc_path_cmd, shell=True,
stderr=subprocess.STDOUT).\
strip()
print("gcc located here: {}".format(gcc_path))
if not os.access(gcc_path, os.F_OK | os.X_OK):
raise ValueError(
"{} does not exist or is not executable.".format(gcc_path))
gcc_output = subprocess.check_output(
[gcc_path, "-dumpfullversion", "-dumpversion"],
stderr=subprocess.STDOUT).strip()
# handle python2 vs 3 (bytes vs str type)
if isinstance(gcc_output, bytes):
gcc_output = gcc_output.decode("utf-8")
print("gcc version: {}".format(gcc_output))
gcc_info = gcc_output.split(".")
# Compare numerically so that e.g. gcc 10 is not treated as older than gcc 4.
if int(gcc_info[0]) < int(min_gcc_major_version):
print("Your MAJOR version of GCC is too old: {}; "
"it must be at least {}.{}".format(gcc_info[0],
min_gcc_major_version,
min_gcc_minor_version))
return False
elif int(gcc_info[0]) == int(min_gcc_major_version):
if int(gcc_info[1]) < int(min_gcc_minor_version):
print("Your MINOR version of GCC is too old: {}; "
"it must be at least {}.{}".format(gcc_info[1],
min_gcc_major_version,
min_gcc_minor_version))
return False
return True
else:
self._debug("gcc version OK: {}.{}".format(gcc_info[0], gcc_info[1]))
return True
except subprocess.CalledProcessError as e:
print("Problem getting gcc info: {}".format(e))
return False
def parse_args(self):
"""Set up argument parser, and parse CLI args."""
arg_parser = argparse.ArgumentParser(
description="Parse the arguments for the "
"TensorFlow build environment "
" setter")
arg_parser.add_argument(
"--disable-mkl",
dest="disable_mkl",
help="Turn off MKL. By default the compiler flag "
"--config=mkl is enabled.",
action="store_true")
arg_parser.add_argument(
"--disable-v2",
dest="disable_v2",
help="Don't build TensorFlow v2. By default the "
" compiler flag --config=v2 is enabled.",
action="store_true")
arg_parser.add_argument(
"-s",
"--secure-build",
dest="secure_build",
help="Enable secure build flags.",
action="store_true")
arg_parser.add_argument(
"-p",
"--platform",
choices=self.PLATFORMS.keys(),
help="The target platform.",
dest="target_platform",
default=self.default_platform_)
arg_parser.add_argument(
"-f",
"--bazelrc-file",
dest="bazelrc_file",
help="The full path to the bazelrc file into which "
"the build command will be written. The path "
"will be relative to the container "
" environment.",
required=True)
self.args = arg_parser.parse_args()
def validate_args(self):
if os.path.exists(self.args.bazelrc_file):
if os.path.isfile(self.args.bazelrc_file):
self._debug("The file {} exists and will be deleted.".format(
self.args.bazelrc_file))
elif os.path.isdir(self.args.bazelrc_file):
raise ValueError("{} is not a valid file name".format(
self.args.bazelrc_file))
return True
def set_build_args(self):
"""Generate Bazel build flags."""
for flag in BASIC_BUILD_OPTS:
self.bazel_flags_ += "{} ".format(flag)
if self.args.secure_build:
for flag in SECURE_BUILD_OPTS:
self.bazel_flags_ += "{} ".format(flag)
for flag in self.PLATFORMS.get(self.args.target_platform)["flags"]:
self.bazel_flags_ += "--copt=-m{} ".format(flag.lower())
if not self.args.disable_mkl:
self.bazel_flags_ += "--config=mkl "
if not self.args.disable_v2:
self.bazel_flags_ += "--config=v2 "
def write_build_args(self):
self._debug("Writing build flags: {}".format(self.bazel_flags_))
with open(self.args.bazelrc_file, "w") as f:
f.write(self.bazel_flags_)
def _debug(self, msg):
print(msg)
def go(self):
self.parse_args()
target_platform = self.PLATFORMS.get(self.args.target_platform)
if self.validate_args() and \
self.gcc_version_ok(target_platform["min_gcc_major_version"],
target_platform["min_gcc_minor_version"]):
self.set_build_args()
self.write_build_args()
else:
print("Error.")
env_setter = BuildEnvSetter()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/ci_build/linux/mkl/set-build-env.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Check that TensorFlow python files have certain __future__ imports.
This makes it easier to find Python 2.7 / Python 3.x incompatibility bugs.
In particular, this test makes it illegal to write a Python file that
doesn't import division from __future__, which can catch subtle division
bugs in Python 3.
Note: We can't use tf.test in this file because it needs to run in an
environment that doesn't include license-free gen_blah_ops.py files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import re
import six
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
FUTURES_PATTERN = re.compile(r'^from __future__ import (\w+)\s*$')
FUTURES_PATTERN_2 = re.compile(
r'^from __future__ import (\w+), (\w+), (\w+)\s*$')
FUTURES_PATTERN_3 = re.compile(r'^from __future__ import (\w+) as \w+\s*$')
REQUIRED_FUTURES = frozenset(['absolute_import', 'division', 'print_function'])
WHITELIST = [
'python/platform/control_imports.py',
'tools/docker/jupyter_notebook_config.py',
'tools/ci_build/update_version.py',
'tools/ci_build/copy_binary.py',
]
# Tests that must *not* import division
OLD_DIVISION = [
'python/framework/tensor_shape_div_test.py',
'python/kernel_tests/division_past_test.py',
]
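# Illustrative sketch (not part of this test): which import styles the three
# patterns above recognize. The example lines are hypothetical.
def _example_future_patterns():
  """Show one matching line per __future__ import pattern."""
  assert FUTURES_PATTERN.match("from __future__ import division\n")
  assert FUTURES_PATTERN_2.match(
      "from __future__ import absolute_import, division, print_function\n")
  assert FUTURES_PATTERN_3.match("from __future__ import division as div\n")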
def check_file(path, old_division):
futures = set()
count = 0
for line in open(path, encoding='utf-8') if six.PY3 else open(path):
count += 1
m = FUTURES_PATTERN.match(line)
if not m:
m = FUTURES_PATTERN_3.match(line)
if m:
futures.add(m.group(1))
else:
m = FUTURES_PATTERN_2.match(line)
if m:
for entry in m.groups():
futures.add(entry)
if not count:
return # Skip empty files
if old_division:
# This file checks correct behavior without importing division
# from __future__, so make sure it's doing that.
expected = set(['absolute_import', 'print_function'])
if futures != expected:
raise AssertionError(('Incorrect futures for old_division file:\n'
' expected = %s\n got = %s') %
(' '.join(expected), ' '.join(futures)))
else:
missing = REQUIRED_FUTURES - futures
if missing:
raise AssertionError('Missing futures: %s' % ' '.join(missing))
def main():
# Make sure BASE_DIR ends with tensorflow. If it doesn't, we probably
# computed the wrong directory.
if os.path.split(BASE_DIR)[-1] != 'tensorflow':
raise AssertionError("BASE_DIR = '%s' doesn't end with tensorflow" %
BASE_DIR)
# Verify that all files have futures
whitelist = frozenset(os.path.join(BASE_DIR, w) for w in WHITELIST)
old_division = frozenset(os.path.join(BASE_DIR, w) for w in OLD_DIVISION)
for root, _, filenames in os.walk(BASE_DIR):
for f in fnmatch.filter(filenames, '*.py'):
path = os.path.join(root, f)
if path not in whitelist:
try:
check_file(path, old_division=path in old_division)
except AssertionError as e:
short_path = path[len(BASE_DIR) + 1:]
raise AssertionError('Error in %s: %s' % (short_path, str(e)))
if __name__ == '__main__':
main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/test/check_futures_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for getting system information during TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import multiprocessing
import platform
import re
import socket
# pylint: disable=g-bad-import-order
# Note: cpuinfo and psutil are not installed for you in the TensorFlow
# OSS tree. They are installable via pip.
import cpuinfo
import psutil
# pylint: enable=g-bad-import-order
from tensorflow.core.util import test_log_pb2
from tensorflow.python.client import device_lib
from tensorflow.python.framework import errors
from tensorflow.python.platform import gfile
from tensorflow.tools.test import gpu_info_lib
def gather_machine_configuration():
"""Gather Machine Configuration. This is the top level fn of this library."""
config = test_log_pb2.MachineConfiguration()
config.cpu_info.CopyFrom(gather_cpu_info())
config.platform_info.CopyFrom(gather_platform_info())
# gather_available_device_info must come before gather_gpu_devices
# because the latter may access libcudart directly, which confuses
# TensorFlow StreamExecutor.
for d in gather_available_device_info():
config.available_device_info.add().CopyFrom(d)
for gpu in gpu_info_lib.gather_gpu_devices():
config.device_info.add().Pack(gpu)
config.memory_info.CopyFrom(gather_memory_info())
config.hostname = gather_hostname()
return config
def gather_hostname():
return socket.gethostname()
def gather_memory_info():
"""Gather memory info."""
mem_info = test_log_pb2.MemoryInfo()
vmem = psutil.virtual_memory()
mem_info.total = vmem.total
mem_info.available = vmem.available
return mem_info
def gather_cpu_info():
"""Gather CPU Information. Assumes all CPUs are the same."""
cpu_info = test_log_pb2.CPUInfo()
cpu_info.num_cores = multiprocessing.cpu_count()
# Gather num_cores_allowed
try:
with gfile.GFile('/proc/self/status', 'rb') as fh:
nc = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', fh.read())
if nc: # e.g. 'ff' => 8, 'fff' => 12
cpu_info.num_cores_allowed = (
bin(int(nc.group(1).replace(',', ''), 16)).count('1'))
except errors.OpError:
pass
finally:
if cpu_info.num_cores_allowed == 0:
cpu_info.num_cores_allowed = cpu_info.num_cores
# Gather the rest
info = cpuinfo.get_cpu_info()
cpu_info.cpu_info = info['brand']
cpu_info.num_cores = info['count']
cpu_info.mhz_per_cpu = info['hz_advertised_raw'][0] / 1.0e6
l2_cache_size = re.match(r'(\d+)', str(info.get('l2_cache_size', '')))
if l2_cache_size:
# If a value is returned, it's in KB
cpu_info.cache_size['L2'] = int(l2_cache_size.group(0)) * 1024
# Try to get the CPU governor
try:
cpu_governors = set([
gfile.GFile(f, 'r').readline().rstrip()
for f in glob.glob(
'/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor')
])
if cpu_governors:
if len(cpu_governors) > 1:
cpu_info.cpu_governor = 'mixed'
else:
cpu_info.cpu_governor = list(cpu_governors)[0]
except errors.OpError:
pass
return cpu_info
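# Illustrative sketch (not part of this library): the Cpus_allowed parsing
# above counts set bits in the hex affinity mask, so a mask of 'ff' means 8
# allowed cores. The mask values below are made-up examples.
def _example_cpus_allowed_count(mask):
  """Return the number of CPUs allowed by a /proc Cpus_allowed hex mask."""
  return bin(int(mask.replace(',', ''), 16)).count('1')
# _example_cpus_allowed_count('ff') == 8; _example_cpus_allowed_count('fff') == 12.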
def gather_available_device_info():
"""Gather list of devices available to TensorFlow.
Returns:
A list of test_log_pb2.AvailableDeviceInfo messages.
"""
device_info_list = []
devices = device_lib.list_local_devices()
for d in devices:
device_info = test_log_pb2.AvailableDeviceInfo()
device_info.name = d.name
device_info.type = d.device_type
device_info.memory_limit = d.memory_limit
device_info.physical_description = d.physical_device_desc
device_info_list.append(device_info)
return device_info_list
def gather_platform_info():
"""Gather platform info."""
platform_info = test_log_pb2.PlatformInfo()
(platform_info.bits, platform_info.linkage) = platform.architecture()
platform_info.machine = platform.machine()
platform_info.release = platform.release()
platform_info.system = platform.system()
platform_info.version = platform.version()
return platform_info
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/test/system_info_lib.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for getting system information during TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import app
from tensorflow.tools.test import system_info_lib
def main(unused_args):
config = system_info_lib.gather_machine_configuration()
print(config)
if __name__ == "__main__":
app.run()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/test/system_info.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Command to upload benchmark test results to a cloud datastore.
This uploader script is typically run periodically as a cron job. It locates,
in a specified data directory, files that contain benchmark test results. The
results are written by the "run_and_gather_logs.py" script using the JSON-format
serialization of the "TestResults" protobuf message (core/util/test_log.proto).
For each file, the uploader reads the "TestResults" data, transforms it into
the schema used in the datastore (see below), and uploads it to the datastore.
After processing a file, the uploader moves it to a specified archive directory
for safe-keeping.
The uploader uses file-level exclusive locking (non-blocking flock) which allows
multiple instances of this script to run concurrently if desired, splitting the
task among them, each one processing and archiving different files.
The "TestResults" object contains test metadata and multiple benchmark entries.
The datastore schema splits this information into two Kinds (like tables), one
holding the test metadata in a single "Test" Entity (like rows), and one holding
each related benchmark entry in a separate "Entry" Entity. Datastore creates a
unique ID (retrieval key) for each Entity, and this ID is always returned along
with the data when an Entity is fetched.
* Test:
- test: unique name of this test (string)
- start: start time of this test run (datetime)
- info: JSON-encoded test metadata (string, not indexed)
* Entry:
- test: unique name of this test (string)
- entry: unique name of this benchmark entry within this test (string)
- start: start time of this test run (datetime)
- timing: average time (usec) per iteration of this test/entry run (float)
- info: JSON-encoded entry metadata (string, not indexed)
A few composite indexes are created (upload_test_benchmarks_index.yaml) for fast
retrieval of benchmark data and reduced I/O to the client without adding a lot
of indexing and storage burden:
* Test: (test, start) is indexed to fetch recent start times for a given test.
* Entry: (test, entry, start, timing) is indexed to use projection and only
fetch the recent (start, timing) data for a given test/entry benchmark.
Example retrieval GQL statements:
* Get the recent start times for a given test:
SELECT start FROM Test WHERE test = <test-name> AND
start >= <recent-datetime> LIMIT <count>
* Get the recent timings for a given benchmark:
SELECT start, timing FROM Entry WHERE test = <test-name> AND
entry = <entry-name> AND start >= <recent-datetime> LIMIT <count>
* Get all test names uniquified (e.g. display a list of available tests):
SELECT DISTINCT ON (test) test FROM Test
* For a given test (from the list above), get all its entry names. The list of
entry names can be extracted from the test "info" metadata for a given test
name and start time (e.g. pick the latest start time for that test).
SELECT * FROM Test WHERE test = <test-name> AND start = <latest-datetime>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import fcntl
import json
import os
import shutil
from six import text_type
from google.cloud import datastore
def is_real_file(dirpath, fname):
fpath = os.path.join(dirpath, fname)
return os.path.isfile(fpath) and not os.path.islink(fpath)
def get_mtime(dirpath, fname):
fpath = os.path.join(dirpath, fname)
return os.stat(fpath).st_mtime
def list_files_by_mtime(dirpath):
"""Return a list of files in the directory, sorted in increasing "mtime".
Return a list of files in the given directory, sorted from older to newer file
according to their modification times. Only return actual files, skipping
directories, symbolic links, pipes, etc.
Args:
dirpath: directory pathname
Returns:
A list of file names relative to the given directory path.
"""
files = [f for f in os.listdir(dirpath) if is_real_file(dirpath, f)]
return sorted(files, key=lambda f: get_mtime(dirpath, f))
# Note: The file locking code uses flock() instead of lockf() because benchmark
# files are only opened for reading (not writing) and we still want exclusive
# locks on them. This imposes the limitation that the data directory must be
# local, not NFS-mounted.
def lock(fd):
fcntl.flock(fd, fcntl.LOCK_EX)
def unlock(fd):
fcntl.flock(fd, fcntl.LOCK_UN)
def trylock(fd):
try:
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
return True
except Exception: # pylint: disable=broad-except
return False
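# Illustrative sketch (not part of this uploader): the non-blocking trylock
# above is what lets several uploader instances skip files another process is
# already handling, as upload_benchmark_files does below. The path here is
# hypothetical.
def _example_trylock_usage(path="/tmp/example-benchmark.json"):
  """Read a file only if no other process currently holds its lock."""
  with open(path, "r") as fd:
    if not trylock(fd):
      return None  # another uploader holds the lock; skip this file
    return fd.read()  # safe to read; the lock is released when fd closes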
def upload_benchmark_data(client, data):
"""Parse benchmark data and use the client to upload it to the datastore.
Parse the given benchmark data from the serialized JSON-format used to write
the test results file. Create the different datastore Entities from that data
and upload them to the datastore in a batch using the client connection.
Args:
client: datastore client connection
data: JSON-encoded benchmark data
"""
test_result = json.loads(data)
test_name = text_type(test_result["name"])
start_time = datetime.datetime.utcfromtimestamp(
float(test_result["startTime"]))
batch = []
# Create the Test Entity containing all the test information as a
# non-indexed JSON blob.
t_key = client.key("Test")
t_val = datastore.Entity(t_key, exclude_from_indexes=["info"])
t_val.update({
"test": test_name,
"start": start_time,
"info": text_type(data)
})
batch.append(t_val)
# Create one Entry Entity for each benchmark entry. The wall-clock timing is
# the attribute to be fetched and displayed. The full entry information is
# also stored as a non-indexed JSON blob.
for ent in test_result["entries"].get("entry", []):
ent_name = text_type(ent["name"])
e_key = client.key("Entry")
e_val = datastore.Entity(e_key, exclude_from_indexes=["info"])
e_val.update({
"test": test_name,
"start": start_time,
"entry": ent_name,
"timing": ent["wallTime"],
"info": text_type(json.dumps(ent))
})
batch.append(e_val)
# Put the whole batch of Entities in the datastore.
client.put_multi(batch)
def upload_benchmark_files(opts):
"""Find benchmark files, process them, and upload their data to the datastore.
Locate benchmark files in the data directory, process them, and upload their
data to the datastore. After processing each file, move it to the archive
directory for safe-keeping. Each file is locked for processing, which allows
multiple uploader instances to run concurrently if needed, each one handling
different benchmark files, skipping those already locked by another.
Args:
opts: command line options object
Note: To use locking, the file is first opened, then its descriptor is used to
lock and read it. The lock is released when the file is closed. Do not open
that same file a 2nd time while the lock is already held, because when that
2nd file descriptor is closed, the lock will be released prematurely.
"""
client = datastore.Client()
for fname in list_files_by_mtime(opts.datadir):
fpath = os.path.join(opts.datadir, fname)
try:
with open(fpath, "r") as fd:
if trylock(fd):
upload_benchmark_data(client, fd.read())
shutil.move(fpath, os.path.join(opts.archivedir, fname))
# unlock(fd) -- When "with open()" closes fd, the lock is released.
except Exception as e: # pylint: disable=broad-except
print("Cannot process '%s', skipping. Error: %s" % (fpath, e))
def parse_cmd_line():
"""Parse command line options.
Returns:
The parsed arguments object.
"""
desc = "Upload benchmark results to datastore."
opts = [
("-a", "--archivedir", str, None, True,
"Directory where benchmark files are archived."),
("-d", "--datadir", str, None, True,
"Directory of benchmark files to upload."),
]
parser = argparse.ArgumentParser(description=desc)
for opt in opts:
parser.add_argument(opt[0], opt[1], type=opt[2], default=opt[3],
required=opt[4], help=opt[5])
return parser.parse_args()
def main():
options = parse_cmd_line()
# Check that credentials are specified to access the datastore.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS"):
raise ValueError("GOOGLE_APPLICATION_CREDENTIALS env. var. is not set.")
upload_benchmark_files(options)
if __name__ == "__main__":
main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/test/upload_test_benchmarks.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/test/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test runner for TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import shlex
try:
  from string import maketrans  # Python 2
except ImportError:
  maketrans = str.maketrans  # Python 3: string.maketrans was removed
import sys
import time
from google.protobuf import json_format
from google.protobuf import text_format
from tensorflow.core.util import test_log_pb2
from tensorflow.python.platform import app
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.tools.test import run_and_gather_logs_lib
# pylint: disable=g-import-not-at-top
# pylint: disable=g-bad-import-order
# pylint: disable=unused-import
# Note: cpuinfo and psutil are not installed for you in the TensorFlow
# OSS tree. They are installable via pip.
try:
import cpuinfo
import psutil
except ImportError as e:
tf_logging.error("\n\n\nERROR: Unable to import necessary library: {}. "
"Issuing a soft exit.\n\n\n".format(e))
sys.exit(0)
# pylint: enable=g-bad-import-order
# pylint: enable=unused-import
FLAGS = None
def gather_build_configuration():
build_config = test_log_pb2.BuildConfiguration()
build_config.mode = FLAGS.compilation_mode
# Include all flags except includes
cc_flags = [
flag for flag in shlex.split(FLAGS.cc_flags) if not flag.startswith("-i")
]
build_config.cc_flags.extend(cc_flags)
return build_config
def main(unused_args):
name = FLAGS.name
test_name = FLAGS.test_name
test_args = FLAGS.test_args
benchmark_type = FLAGS.benchmark_type
test_results, _ = run_and_gather_logs_lib.run_and_gather_logs(
name, test_name=test_name, test_args=test_args,
benchmark_type=benchmark_type)
# Additional bits we receive from bazel
test_results.build_configuration.CopyFrom(gather_build_configuration())
if not FLAGS.test_log_output_dir:
print(text_format.MessageToString(test_results))
return
if FLAGS.test_log_output_filename:
file_name = FLAGS.test_log_output_filename
else:
file_name = (name.strip("/").translate(maketrans("/:", "__")) +
time.strftime("%Y%m%d%H%M%S", time.gmtime()))
if FLAGS.test_log_output_use_tmpdir:
tmpdir = test.get_temp_dir()
output_path = os.path.join(tmpdir, FLAGS.test_log_output_dir, file_name)
else:
output_path = os.path.join(
os.path.abspath(FLAGS.test_log_output_dir), file_name)
json_test_results = json_format.MessageToJson(test_results)
gfile.GFile(output_path + ".json", "w").write(json_test_results)
tf_logging.info("Test results written to: %s" % output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register(
"type", "bool", lambda v: v.lower() in ("true", "t", "y", "yes"))
parser.add_argument(
"--name", type=str, default="", help="Benchmark target identifier.")
parser.add_argument(
"--test_name", type=str, default="", help="Test target to run.")
parser.add_argument(
"--benchmark_type",
type=str,
default="",
help="BenchmarkType enum string (benchmark type).")
parser.add_argument(
"--test_args",
type=str,
default="",
help="Test arguments, space separated.")
parser.add_argument(
"--test_log_output_use_tmpdir",
type="bool",
nargs="?",
const=True,
default=False,
help="Store the log output into tmpdir?")
parser.add_argument(
"--compilation_mode",
type=str,
default="",
help="Mode used during this build (e.g. opt, dbg).")
parser.add_argument(
"--cc_flags",
type=str,
default="",
help="CC flags used during this build.")
parser.add_argument(
"--test_log_output_dir",
type=str,
default="",
help="Directory to write benchmark results to.")
parser.add_argument(
"--test_log_output_filename",
type=str,
default="",
help="Filename to output benchmark results to. If the filename is not "
"specified, it will be automatically created based on --name "
"and current time.")
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/test/run_and_gather_logs.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for getting system information during TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes as ct
import platform
from tensorflow.core.util import test_log_pb2
from tensorflow.python.framework import errors
from tensorflow.python.platform import gfile
def _gather_gpu_devices_proc():
"""Try to gather NVidia GPU device information via /proc/driver."""
dev_info = []
for f in gfile.Glob("/proc/driver/nvidia/gpus/*/information"):
bus_id = f.split("/")[5]
key_values = dict(line.rstrip().replace("\t", "").split(":", 1)
for line in gfile.GFile(f, "r"))
key_values = dict((k.lower(), v.strip(" ").rstrip(" "))
for (k, v) in key_values.items())
info = test_log_pb2.GPUInfo()
info.model = key_values.get("model", "Unknown")
info.uuid = key_values.get("gpu uuid", "Unknown")
info.bus_id = bus_id
dev_info.append(info)
return dev_info
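# Illustrative sketch (not part of this library): how one line of a
# /proc/driver/nvidia/gpus/*/information file is turned into a key/value pair
# by the parsing above. The sample line is hypothetical.
def _example_proc_gpu_line(line="Model:\t Tesla V100-SXM2-16GB\n"):
  """Split one /proc information line into a lowercased key and clean value."""
  key, value = line.rstrip().replace("\t", "").split(":", 1)
  return key.lower(), value.strip(" ")
# -> ("model", "Tesla V100-SXM2-16GB")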
class CUDADeviceProperties(ct.Structure):
# See $CUDA_HOME/include/cuda_runtime_api.h for the definition of
# the cudaDeviceProp struct.
_fields_ = [
("name", ct.c_char * 256),
("totalGlobalMem", ct.c_size_t),
("sharedMemPerBlock", ct.c_size_t),
("regsPerBlock", ct.c_int),
("warpSize", ct.c_int),
("memPitch", ct.c_size_t),
("maxThreadsPerBlock", ct.c_int),
("maxThreadsDim", ct.c_int * 3),
("maxGridSize", ct.c_int * 3),
("clockRate", ct.c_int),
("totalConstMem", ct.c_size_t),
("major", ct.c_int),
("minor", ct.c_int),
("textureAlignment", ct.c_size_t),
("texturePitchAlignment", ct.c_size_t),
("deviceOverlap", ct.c_int),
("multiProcessorCount", ct.c_int),
("kernelExecTimeoutEnabled", ct.c_int),
("integrated", ct.c_int),
("canMapHostMemory", ct.c_int),
("computeMode", ct.c_int),
("maxTexture1D", ct.c_int),
("maxTexture1DMipmap", ct.c_int),
("maxTexture1DLinear", ct.c_int),
("maxTexture2D", ct.c_int * 2),
("maxTexture2DMipmap", ct.c_int * 2),
("maxTexture2DLinear", ct.c_int * 3),
("maxTexture2DGather", ct.c_int * 2),
("maxTexture3D", ct.c_int * 3),
("maxTexture3DAlt", ct.c_int * 3),
("maxTextureCubemap", ct.c_int),
("maxTexture1DLayered", ct.c_int * 2),
("maxTexture2DLayered", ct.c_int * 3),
("maxTextureCubemapLayered", ct.c_int * 2),
("maxSurface1D", ct.c_int),
("maxSurface2D", ct.c_int * 2),
("maxSurface3D", ct.c_int * 3),
("maxSurface1DLayered", ct.c_int * 2),
("maxSurface2DLayered", ct.c_int * 3),
("maxSurfaceCubemap", ct.c_int),
("maxSurfaceCubemapLayered", ct.c_int * 2),
("surfaceAlignment", ct.c_size_t),
("concurrentKernels", ct.c_int),
("ECCEnabled", ct.c_int),
("pciBusID", ct.c_int),
("pciDeviceID", ct.c_int),
("pciDomainID", ct.c_int),
("tccDriver", ct.c_int),
("asyncEngineCount", ct.c_int),
("unifiedAddressing", ct.c_int),
("memoryClockRate", ct.c_int),
("memoryBusWidth", ct.c_int),
("l2CacheSize", ct.c_int),
("maxThreadsPerMultiProcessor", ct.c_int),
("streamPrioritiesSupported", ct.c_int),
("globalL1CacheSupported", ct.c_int),
("localL1CacheSupported", ct.c_int),
("sharedMemPerMultiprocessor", ct.c_size_t),
("regsPerMultiprocessor", ct.c_int),
("managedMemSupported", ct.c_int),
("isMultiGpuBoard", ct.c_int),
("multiGpuBoardGroupID", ct.c_int),
# Pad with extra space to avoid dereference crashes if future
# versions of CUDA extend the size of this struct.
("__future_buffer", ct.c_char * 4096)
]
def _gather_gpu_devices_cudart():
"""Try to gather NVidia GPU device information via libcudart."""
dev_info = []
system = platform.system()
if system == "Linux":
libcudart = ct.cdll.LoadLibrary("libcudart.so")
elif system == "Darwin":
libcudart = ct.cdll.LoadLibrary("libcudart.dylib")
elif system == "Windows":
libcudart = ct.windll.LoadLibrary("libcudart.dll")
else:
raise NotImplementedError("Cannot identify system.")
version = ct.c_int()
rc = libcudart.cudaRuntimeGetVersion(ct.byref(version))
if rc != 0:
raise ValueError("Could not get version")
if version.value < 6050:
raise NotImplementedError("CUDA version must be between >= 6.5")
device_count = ct.c_int()
libcudart.cudaGetDeviceCount(ct.byref(device_count))
for i in range(device_count.value):
properties = CUDADeviceProperties()
rc = libcudart.cudaGetDeviceProperties(ct.byref(properties), i)
if rc != 0:
raise ValueError("Could not get device properties")
pci_bus_id = " " * 13
rc = libcudart.cudaDeviceGetPCIBusId(ct.c_char_p(pci_bus_id), 13, i)
if rc != 0:
raise ValueError("Could not get device PCI bus id")
info = test_log_pb2.GPUInfo() # No UUID available
info.model = properties.name
info.bus_id = pci_bus_id
dev_info.append(info)
del properties
return dev_info
def gather_gpu_devices():
"""Gather gpu device info.
Returns:
A list of test_log_pb2.GPUInfo messages.
"""
try:
# Prefer using /proc if possible, it provides the UUID.
dev_info = _gather_gpu_devices_proc()
if not dev_info:
raise ValueError("No devices found")
return dev_info
except (IOError, ValueError, errors.OpError):
pass
try:
# Fall back on using libcudart
return _gather_gpu_devices_cudart()
except (OSError, ValueError, NotImplementedError, errors.OpError):
return []
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/test/gpu_info_lib.py
|
#!/usr/bin/python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Test that checks if we have any issues with case insensitive filesystems.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
ERROR_MESSAGE = """
Files with same name but different case detected in directory: {}
"""
def main():
# Make sure BASE_DIR ends with tensorflow. If it doesn't, we probably
# computed the wrong directory.
if os.path.split(BASE_DIR)[-1] != 'tensorflow':
raise AssertionError(
"BASE_DIR = '%s' doesn't end with tensorflow" % BASE_DIR)
for dirpath, dirnames, filenames in os.walk(BASE_DIR, followlinks=True):
lowercase_directories = [x.lower() for x in dirnames]
lowercase_files = [x.lower() for x in filenames]
lowercase_dir_contents = lowercase_directories + lowercase_files
if len(lowercase_dir_contents) != len(set(lowercase_dir_contents)):
raise AssertionError(ERROR_MESSAGE.format(dirpath))
if __name__ == '__main__':
main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/test/file_name_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for getting system information during TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import shlex
import subprocess
import tempfile
import time
from tensorflow.core.util import test_log_pb2
from tensorflow.python.platform import gfile
from tensorflow.tools.test import gpu_info_lib
from tensorflow.tools.test import system_info_lib
class MissingLogsError(Exception):
pass
def get_git_commit_sha():
"""Get git commit SHA for this build.
Attempt to get the SHA from environment variable GIT_COMMIT, which should
be available on Jenkins build agents.
Returns:
SHA hash of the git commit used for the build, if available
"""
return os.getenv("GIT_COMMIT")
def process_test_logs(name, test_name, test_args, benchmark_type,
start_time, run_time, log_files):
"""Gather test information and put it in a TestResults proto.
Args:
name: Benchmark target identifier.
test_name: A unique bazel target, e.g. "//path/to:test"
test_args: A string containing all arguments to run the target with.
benchmark_type: A string representing the BenchmarkType enum; the
benchmark type for this target.
start_time: Test starting time (epoch)
run_time: Wall time that the test ran for
log_files: Paths to the log files
Returns:
A TestResults proto
"""
results = test_log_pb2.TestResults()
results.name = name
results.target = test_name
results.start_time = start_time
results.run_time = run_time
results.benchmark_type = test_log_pb2.TestResults.BenchmarkType.Value(
benchmark_type.upper())
# Gather source code information
git_sha = get_git_commit_sha()
if git_sha:
results.commit_id.hash = git_sha
results.entries.CopyFrom(process_benchmarks(log_files))
results.run_configuration.argument.extend(test_args)
results.machine_configuration.CopyFrom(
system_info_lib.gather_machine_configuration())
return results
def process_benchmarks(log_files):
benchmarks = test_log_pb2.BenchmarkEntries()
for f in log_files:
content = gfile.GFile(f, "rb").read()
if benchmarks.MergeFromString(content) != len(content):
raise Exception("Failed parsing benchmark entry from %s" % f)
return benchmarks
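# Illustrative sketch (not part of this library): the test_name validation in
# run_and_gather_logs below accepts only a single fully qualified bazel target.
# The labels here are hypothetical examples.
def _example_valid_test_name(test_name):
  """Mirror the target check: exactly one explicit "//package:target" label."""
  return (bool(test_name) and test_name.startswith("//") and
          ".." not in test_name and not test_name.endswith(":") and
          not test_name.endswith(":all") and not test_name.endswith("...") and
          len(test_name.split(":")) == 2)
# _example_valid_test_name("//tensorflow/python:some_benchmark") -> True
# _example_valid_test_name("//tensorflow/python/...") -> False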
def run_and_gather_logs(name, test_name, test_args,
benchmark_type):
"""Run the bazel test given by test_name. Gather and return the logs.
Args:
name: Benchmark target identifier.
test_name: A unique bazel target, e.g. "//path/to:test"
test_args: A string containing all arguments to run the target with.
benchmark_type: A string representing the BenchmarkType enum; the
benchmark type for this target.
Returns:
A tuple (test_results, mangled_test_name), where
test_results: A test_log_pb2.TestResults proto
test_adjusted_name: Unique benchmark name that consists of
benchmark name optionally followed by GPU type.
Raises:
ValueError: If the test_name is not a valid target.
subprocess.CalledProcessError: If the target itself fails.
IOError: If there are problems gathering test log output from the test.
MissingLogsError: If we couldn't find benchmark logs.
"""
if not (test_name and test_name.startswith("//") and ".." not in test_name and
not test_name.endswith(":") and not test_name.endswith(":all") and
not test_name.endswith("...") and len(test_name.split(":")) == 2):
raise ValueError("Expected test_name parameter with a unique test, e.g.: "
"--test_name=//path/to:test")
test_executable = test_name.rstrip().strip("/").replace(":", "/")
if gfile.Exists(os.path.join("bazel-bin", test_executable)):
# Running in standalone mode from core of the repository
test_executable = os.path.join("bazel-bin", test_executable)
else:
# Hopefully running in sandboxed mode
test_executable = os.path.join(".", test_executable)
test_adjusted_name = name
gpu_config = gpu_info_lib.gather_gpu_devices()
if gpu_config:
gpu_name = gpu_config[0].model
gpu_short_name_match = re.search(r"Tesla (K40|K80|P100|V100)", gpu_name)
if gpu_short_name_match:
gpu_short_name = gpu_short_name_match.group(0)
test_adjusted_name = name + "|" + gpu_short_name.replace(" ", "_")
temp_directory = tempfile.mkdtemp(prefix="run_and_gather_logs")
mangled_test_name = (test_adjusted_name.strip("/")
.replace("|", "_").replace("/", "_").replace(":", "_"))
test_file_prefix = os.path.join(temp_directory, mangled_test_name)
test_file_prefix = "%s." % test_file_prefix
try:
if not gfile.Exists(test_executable):
raise ValueError("Executable does not exist: %s" % test_executable)
test_args = shlex.split(test_args)
# This key is defined in tf/core/util/reporter.h as
# TestReporter::kTestReporterEnv.
os.environ["TEST_REPORT_FILE_PREFIX"] = test_file_prefix
start_time = time.time()
subprocess.check_call([test_executable] + test_args)
run_time = time.time() - start_time
log_files = gfile.Glob("{}*".format(test_file_prefix))
if not log_files:
raise MissingLogsError("No log files found at %s." % test_file_prefix)
return (process_test_logs(
test_adjusted_name,
test_name=test_name,
test_args=test_args,
benchmark_type=benchmark_type,
start_time=int(start_time),
run_time=run_time,
log_files=log_files), test_adjusted_name)
finally:
try:
gfile.DeleteRecursively(temp_directory)
except OSError:
pass
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/test/run_and_gather_logs_lib.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests to check that py_test are properly loaded in BUILD files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import subprocess
os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')))
def check_output_despite_error(args):
"""Get output of args from command line, even if there are errors.
Args:
args: a list of command line args.
Returns:
output as string.
"""
try:
output = subprocess.check_output(args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
output = e.output
return output.strip()
def main():
# Get all py_test target, note bazel query result will also include
# cuda_py_test etc.
try:
targets = subprocess.check_output([
'bazel', 'query',
'kind(py_test, //tensorflow/contrib/... + '
'//tensorflow/python/... - '
'//tensorflow/contrib/tensorboard/...)']).strip()
except subprocess.CalledProcessError as e:
targets = e.output
targets = targets.decode("utf-8") if isinstance(targets, bytes) else targets
# Only keep py_test targets, and filter out targets with 'no_pip' tag.
valid_targets = []
for target in targets.split('\n'):
kind = check_output_despite_error(['buildozer', 'print kind', target])
if kind == 'py_test':
tags = check_output_despite_error(['buildozer', 'print tags', target])
if 'no_pip' not in tags:
valid_targets.append(target)
# Get all BUILD files for all valid targets.
build_files = set()
for target in valid_targets:
build_files.add(os.path.join(target[2:].split(':')[0], 'BUILD'))
# Check if BUILD files load py_test.
files_missing_load = []
for build_file in build_files:
updated_build_file = subprocess.check_output(
['buildozer', '-stdout', 'new_load //tensorflow:tensorflow.bzl py_test',
build_file])
updated_build_file = (updated_build_file.decode('utf-8')
    if isinstance(updated_build_file, bytes) else updated_build_file)
with open(build_file, 'r') as f:
if f.read() != updated_build_file:
files_missing_load.append(build_file)
if files_missing_load:
raise RuntimeError('The following files are missing %s:\n %s' % (
'load("//tensorflow:tensorflow.bzl", "py_test").\nThis load statement'
' is needed because otherwise pip tests will try to use their '
'dependencies, which are not visible to them.',
'\n'.join(files_missing_load)))
else:
print('TEST PASSED.')
if __name__ == '__main__':
main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/pip_package/check_load_py_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Start a simple interactive console with TensorFlow available."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import code
import sys
def main(_):
"""Run an interactive console."""
code.interact()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/pip_package/simple_console_for_windows.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow is an open source machine learning framework for everyone.
TensorFlow is an open source software library for high performance numerical
computation. Its flexible architecture allows easy deployment of computation
across a variety of platforms (CPUs, GPUs, TPUs), and from desktops to clusters
of servers to mobile and edge devices.
Originally developed by researchers and engineers from the Google Brain team
within Google's AI organization, it comes with strong support for machine
learning and deep learning and the flexible numerical computation core is used
across many other scientific domains.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import re
import sys
from setuptools import Command
from setuptools import find_packages
from setuptools import setup
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
DOCLINES = __doc__.split('\n')
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
# Also update tensorflow/tensorflow.bzl and
# tensorflow/core/public/version.h
_VERSION = '1.15.5'
import setupnovernormalize
VERSION = _VERSION.replace('-', '')+"+nv"+os.getenv('RELEASE_VERSION', "")
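# For example, with RELEASE_VERSION='23.03' the resulting version string is
# '1.15.5+nv23.03'; any '-' in _VERSION is dropped first for pip compatibility.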
REQUIRED_PACKAGES = [
'absl-py >= 0.9.0',
'astunparse == 1.6.3',
'backports.weakref >= 1.0rc1;python_version<"3.4"',
'enum34 >= 1.1.6;python_version<"3.4"',
# functools comes with python3, need to install the backport for python2
'functools32 >= 3.2.3;python_version<"3"',
'astor == 0.8.1',
'gast == 0.3.3',
'google_pasta >= 0.1.6',
'keras_applications >= 1.0.8',
'keras_preprocessing >= 1.0.5',
# mock comes with unittest.mock for python3, need to install for python2
'mock >= 2.0.0;python_version<"3"',
'numpy ~= 1.19.0;python_version<"3.7"',
'numpy >= 1.22.0, < 1.24;python_version>="3.7"',
'opt_einsum >= 2.3.2',
'six >= 1.10.0',
'protobuf >= 3.6.1, < 4.0.0',
'tensorboard >= 1.15.0, < 1.16.0',
'tensorflow-estimator == 1.15.1',
'termcolor >= 1.1.0',
# python3 requires wheel 0.26
'wheel >= 0.26;python_version>="3"',
'wheel;python_version<"3"',
'wrapt >= 1.11.1',
'h5py == 2.10.0'
]
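# The ';python_version<"3"' style suffixes above are PEP 508 environment
# markers, so pip only installs those backports on interpreters that match.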
EXTRA_PACKAGES = {}
if sys.byteorder == 'little':
# grpcio does not build correctly on big-endian machines due to lack of
# BoringSSL support.
# See https://github.com/tensorflow/tensorflow/issues/17882.
REQUIRED_PACKAGES.append('grpcio >= 1.8.6')
project_name = 'tensorflow'
if '--project_name' in sys.argv:
project_name_idx = sys.argv.index('--project_name')
project_name = sys.argv[project_name_idx + 1]
sys.argv.remove('--project_name')
sys.argv.pop(project_name_idx)
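# For example, a hypothetical invocation such as
#   python setup.py bdist_wheel --project_name nvidia_tensorflow
# builds the same wheel contents under the 'nvidia_tensorflow' project name.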
# tf-nightly should depend on tb-nightly
if 'tf_nightly' in project_name:
for i, pkg in enumerate(REQUIRED_PACKAGES):
if 'tensorboard' in pkg:
REQUIRED_PACKAGES[i] = 'tb-nightly >= 1.15.0a0, < 1.16.0a0'
elif 'tensorflow_estimator' in pkg and '2.0' in project_name:
REQUIRED_PACKAGES[i] = 'tensorflow-estimator-2.0-preview'
elif 'tensorflow_estimator' in pkg:
REQUIRED_PACKAGES[i] = 'tf-estimator-nightly'
if 'nvidia_tensorflow' == project_name:
require_exact_versions = os.getenv('REQUIRE_EXACT_VERSIONS','') == '1'
def get_version_specifier(version_var_name, include_cuda_maj=True):
requested_version = os.getenv(version_var_name, '')
package_suffix = ''
if include_cuda_maj:
cuda_ver = os.getenv('CUDA_VERSION', '')
      # Allow CUDA_MAJMIN to be overridden for this specific package by using a
      # requested_version like "8.3.2.44+cuda11.5".
override_cuda_ver = requested_version.split('+cuda')
if len(override_cuda_ver) == 2:
requested_version, cuda_ver = override_cuda_ver
# Only major version is included
package_suffix = cuda_ver.split('.')[0]
if require_exact_versions:
if not requested_version:
raise Exception("No version was set in env var %s, but REQUIRE_EXACT_VERSIONS was set." % version_var_name)
return package_suffix + ' == ' + requested_version
if len(requested_version) == 0:
return package_suffix
# Require compatible release >=maj.min,==maj.*
major, minor = requested_version.split('.')[:2]
return '{package_suffix} ~= {major}.{minor}'.format(package_suffix=package_suffix, major=major, minor=minor)
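  # Illustrative values only (real versions come from the build environment):
  # with CUDA_VERSION='11.8.0' and CUDNN_VERSION='8.6.0',
  # 'nvidia-cudnn-cu' + get_version_specifier('CUDNN_VERSION') yields
  # 'nvidia-cudnn-cu11 ~= 8.6'; with REQUIRE_EXACT_VERSIONS='1' it yields
  # 'nvidia-cudnn-cu11 == 8.6.0' instead.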
def append_nv_release(pkg_name, pkg_env_var, skip_if_none=False):
if os.getenv(pkg_env_var,'') == '':
return '' if skip_if_none else pkg_name
else:
return pkg_name + '==' + os.getenv(pkg_env_var) + '+nv' + os.getenv('RELEASE_VERSION')
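  # Illustrative values only: with DALI_VERSION='1.12.0' and
  # RELEASE_VERSION='23.03', append_nv_release('nvidia-dali-nvtf-plugin',
  # 'DALI_VERSION', skip_if_none=True) returns
  # 'nvidia-dali-nvtf-plugin==1.12.0+nv23.03'; with DALI_VERSION unset it
  # returns '' instead of pinning the package.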
REQUIRED_PACKAGES += [
'nvidia-cuda-runtime-cu' + get_version_specifier('CUDARUNTIME_VERSION'),
'nvidia-cublas-cu' + get_version_specifier('CUBLAS_VERSION'),
'nvidia-cufft-cu' + get_version_specifier('CUFFT_VERSION'),
'nvidia-cudnn-cu' + get_version_specifier('CUDNN_VERSION'),
'nvidia-curand-cu' + get_version_specifier('CURAND_VERSION'),
'nvidia-cusolver-cu' + get_version_specifier('CUSOLVER_VERSION'),
'nvidia-cusparse-cu' + get_version_specifier('CUSPARSE_VERSION'),
'nvidia-nccl-cu' + get_version_specifier('NCCL_VERSION'),
'nvidia-cuda-cupti-cu' + get_version_specifier('CUPTI_VERSION'),
'nvidia-cuda-nvcc-cu' + get_version_specifier('NVCC_VERSION'),
'tensorrt' + get_version_specifier('TRT_VERSION', include_cuda_maj=False)
]
REQUIRED_PACKAGES += [append_nv_release('nvidia-dali-nvtf-plugin', 'DALI_VERSION', skip_if_none=True)]
EXTRA_PACKAGES['horovod'] = [append_nv_release('nvidia-horovod', 'HOROVOD_VERSION')]
# pylint: disable=line-too-long
CONSOLE_SCRIPTS = [
'saved_model_cli = tensorflow.python.tools.saved_model_cli:main',
# We need to keep the TensorBoard command, even though the console script
# is now declared by the tensorboard pip package. If we remove the
# TensorBoard command, pip will inappropriately remove it during install,
# even though the command is not removed, just moved to a different wheel.
'tensorboard = tensorboard.main:run_main',
'tf_upgrade_v2 = tensorflow.tools.compatibility.tf_upgrade_v2_main:main',
'estimator_ckpt_converter = tensorflow_estimator.python.estimator.tools.checkpoint_converter:main',
]
# pylint: enable=line-too-long
# Only keep freeze_graph console script in 1.X.
if _VERSION.startswith('1.') and '_2.0' not in project_name:
CONSOLE_SCRIPTS.append(
'freeze_graph = tensorflow.python.tools.freeze_graph:run_main')
# remove the tensorboard console script if building tf_nightly
if 'tf_nightly' in project_name:
CONSOLE_SCRIPTS.remove('tensorboard = tensorboard.main:run_main')
TEST_PACKAGES = [
'scipy >= 0.15.1',
]
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
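  # Returning True forces bdist_wheel to produce a platform-specific wheel
  # rather than a pure-Python one, since the native runtime library is shipped
  # as package data instead of a declared extension module.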
class InstallCommand(InstallCommandBase):
"""Override the dir where the headers go."""
def finalize_options(self):
ret = InstallCommandBase.finalize_options(self)
self.install_headers = os.path.join(self.install_purelib, 'tensorflow_core',
'include')
self.install_lib = self.install_platlib
return ret
class InstallHeaders(Command):
"""Override how headers are copied.
The install_headers that comes with setuptools copies all files to
the same directory. But we need the files to be in a specific directory
hierarchy for -I <include_dir> to work correctly.
"""
description = 'install C/C++ header files'
user_options = [('install-dir=', 'd',
'directory to install header files to'),
('force', 'f',
'force installation (overwrite existing files)'),
]
boolean_options = ['force']
def initialize_options(self):
self.install_dir = None
self.force = 0
self.outfiles = []
def finalize_options(self):
self.set_undefined_options('install',
('install_headers', 'install_dir'),
('force', 'force'))
def mkdir_and_copy_file(self, header):
install_dir = os.path.join(self.install_dir, os.path.dirname(header))
# Get rid of some extra intervening directories so we can have fewer
# directories for -I
install_dir = re.sub('/google/protobuf_archive/src', '', install_dir)
install_dir = re.sub('/include/tensorflow_core/', '/include/tensorflow/',
install_dir)
# Copy external code headers into tensorflow_core/include.
# A symlink would do, but the wheel file that gets created ignores
# symlink within the directory hierarchy.
# NOTE(keveman): Figure out how to customize bdist_wheel package so
# we can do the symlink.
external_header_locations = [
'tensorflow_core/include/external/eigen_archive/',
'tensorflow_core/include/external/com_google_absl/',
]
for location in external_header_locations:
if location in install_dir:
extra_dir = install_dir.replace(location, '')
if not os.path.exists(extra_dir):
self.mkpath(extra_dir)
self.copy_file(header, extra_dir)
if not os.path.exists(install_dir):
self.mkpath(install_dir)
return self.copy_file(header, install_dir)
def run(self):
hdrs = self.distribution.headers
if not hdrs:
return
self.mkpath(self.install_dir)
for header in hdrs:
(out, _) = self.mkdir_and_copy_file(header)
self.outfiles.append(out)
def get_inputs(self):
return self.distribution.headers or []
def get_outputs(self):
return self.outfiles
def find_files(pattern, root):
"""Return all the files matching pattern below root dir."""
for dirpath, _, files in os.walk(root):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(dirpath, filename)
so_lib_paths = [
i for i in os.listdir('.')
if os.path.isdir(i) and fnmatch.fnmatch(i, '_solib_*')
]
matches = []
for path in so_lib_paths:
matches.extend(
['../' + x for x in find_files('*', path) if '.py' not in x]
)
if os.name == 'nt':
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.pyd'
else:
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.so'
headers = (
list(find_files('*.h', 'tensorflow_core/core')) +
list(find_files('*.h', 'tensorflow_core/stream_executor')) +
list(find_files('*.h', 'google/com_google_protobuf/src')) +
list(find_files('*.inc', 'google/com_google_protobuf/src')) +
list(find_files('*', 'third_party/eigen3')) + list(
find_files('*.h', 'tensorflow_core/include/external/com_google_absl')) +
list(
find_files('*.inc', 'tensorflow_core/include/external/com_google_absl'))
+ list(find_files('*', 'tensorflow_core/include/external/eigen_archive')))
setup(
name=project_name,
version=VERSION,
build_tag=os.getenv('CI_PIPELINE_ID', ""),
description=DOCLINES[0],
long_description='\n'.join(DOCLINES[2:]),
url='https://www.tensorflow.org/',
download_url='https://github.com/tensorflow/tensorflow/tags',
author='Google Inc.',
author_email='packages@tensorflow.org',
# Contained modules and scripts.
packages=find_packages(),
entry_points={
'console_scripts': CONSOLE_SCRIPTS,
},
headers=headers,
install_requires=REQUIRED_PACKAGES,
extras_require=EXTRA_PACKAGES,
tests_require=REQUIRED_PACKAGES + TEST_PACKAGES,
# Add in any packaged data.
include_package_data=True,
package_data={
'tensorflow': [
EXTENSION_NAME,
] + matches,
},
zip_safe=False,
distclass=BinaryDistribution,
cmdclass={
'install_headers': InstallHeaders,
'install': InstallCommand,
},
# PyPI package information.
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
keywords='tensorflow tensor machine learning',
)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/pip_package/setup.py
|