code stringlengths 17 6.64M |
|---|
def randomString(stringLength=5):
    """Return a random string of lowercase ASCII letters.

    Fixes the original implementation, which returned a *list* of characters
    instead of the string its name promises.

    Args:
        stringLength (int): Number of characters to generate (default: 5).

    Returns:
        str. Random string of ``stringLength`` lowercase letters.
    """
    letters = string.ascii_lowercase
    return ''.join(random.choice(letters) for _ in range(stringLength))
|
def do_decode(decoder, src_sentences, trgt_sentences=None, num_log=1):
    """Decode every source sentence with ``decoder`` and log the results.

    Args:
        decoder: Decoder object exposing ``set_current_sen_id``, ``decode``
            and an ``apply_predictor_count`` counter.
        src_sentences: Iterable of source sentences to decode.
        trgt_sentences: Accepted for interface compatibility but not used
            in this function.
        num_log (int): Number of top hypotheses to log per sentence.

    Returns:
        list. One entry per source sentence, each the hypothesis list
        returned by ``decoder.decode``.
    """
    results = []
    start_time = time.time()
    logging.info('Start time: %s' % start_time)
    for idx, src in enumerate(src_sentences):
        decoder.set_current_sen_id(idx)
        logging.info('Next sentence (ID: %d): %s' % (idx + 1, ''.join(src)))
        decoder.apply_predictor_count = 0
        hypo_start = time.time()
        hypos = decoder.decode(src)
        results.append(hypos)
        if not hypos:
            logging.error('No translation found for ID %d!' % (idx + 1))
            logging.info('Stats (ID: %d): score=<not-found> num_expansions=%d time=%.2f' % (idx + 1, decoder.apply_predictor_count, time.time() - hypo_start))
            # Substitute a dummy so the logging loop below still runs.
            hypos = [_generate_dummy_hypo()]
        for hypo in sorted(hypos, reverse=True)[:num_log]:
            logging.info('Decoded (ID: %d): %s' % (idx + 1, hypo.trgt_sentence))
            logging.info('Stats (ID: %d): score=%f inc=%f num_expansions=%d time=%.2f perplexity=%.2f' % (idx + 1, hypo.total_score, hypo.base_score, decoder.apply_predictor_count, time.time() - hypo_start, utils.perplexity(hypo.score_breakdown)))
    return results
|
def compare_decoders(decoder1, decoder2, src_sentences, early_stopping=False, num_log=1):
    """Run two decoders on the same input and assert their scores agree.

    With ``early_stopping`` only the best hypothesis per sentence is
    compared; otherwise hypotheses are compared pairwise in sorted order.
    """
    hypos_a = do_decode(decoder1, src_sentences, num_log=num_log)
    print('-------------------')
    hypos_b = do_decode(decoder2, src_sentences, num_log=num_log)
    for sens_a, sens_b in zip(hypos_a, hypos_b):
        if early_stopping:
            assert max(sens_a).total_score == max(sens_b).total_score
        else:
            for hyp_a, hyp_b in zip(sorted(sens_a), sorted(sens_b)):
                assert hyp_a.total_score == hyp_b.total_score
    logging.info('Sets returned are equal!')
|
def test_utils():
    """Sanity-check ``utils.log_minus``/``utils.log_add`` against direct
    computation on random value pairs.

    TODO: add more tests
    """
    from arsenal.maths import assert_equal
    for a, b in np.random.uniform(0, 10, size=(100, 2)):
        # Ensure a >= b so that a - b is non-negative for the log.
        if a < b:
            a, b = b, a
        assert_equal(np.log(a - b), utils.log_minus(np.log(a), np.log(b)), 'log sub')
        assert_equal(np.log(a + b), utils.log_add(np.log(a), np.log(b)), 'log add')
|
def str2bool(v):
    """For making the ``ArgumentParser`` understand boolean values."""
    truthy = ('yes', 'true', 't', '1')
    return v.lower() in truthy
|
def run_diagnostics():
    """Check availability of external libraries."""
    green = '\x1b[92m'
    red = '\x1b[91m'
    reset = '\x1b[0m'
    if sys.version_info > (3, 0):
        print('Checking Python3.... %sOK (%s)%s' % (green, platform.python_version(), reset))
    else:
        print('Checking Python3.... %sNOT FOUND %s%s' % (red, sys.version_info, reset))
        print('Please upgrade to Python 3!')
    try:
        import torch
    except ImportError:
        print('Checking PyTorch.... %sNOT FOUND%s' % (red, reset))
        print('PyTorch is not available. This affects the following components: Predictors: fairseq, onmtpy. Check the documentation for further instructions.')
    else:
        print('Checking PyTorch.... %sOK (%s)%s' % (green, torch.__version__, reset))
    try:
        import fairseq
    except ImportError:
        print('Checking fairseq.... %sNOT FOUND%s' % (red, reset))
        print('fairseq is not available. This affects the following components: Predictors: fairseq. Check the documentation for further instructions.')
    else:
        print('Checking fairseq.... %sOK (%s)%s' % (green, fairseq.__version__, reset))
|
def get_parser():
    """Get the parser object which is used to build the configuration
    argument ``args``. This is a helper method for ``get_args()``
    TODO: Decentralize configuration

    Returns:
        ArgumentParser. The pre-filled parser object
    """
    parser = argparse.ArgumentParser()
    # Make the custom 'bool' type available so options can use type='bool'.
    parser.register('type', 'bool', str2bool)
    # --- General options: I/O, logging, indexing ---
    group = parser.add_argument_group('General options')
    group.add_argument('--config_file', help='Configuration file in standard .ini format. NOTE: Configuration file overrides command line arguments')
    group.add_argument('--run_diagnostics', default=False, action='store_true', help='Run diagnostics and check availability of external libraries.')
    group.add_argument('--verbosity', default='info', choices=['debug', 'info', 'warn', 'error'], help='Log level: debug,info,warn,error')
    group.add_argument('--range', default='', help='Defines the range of sentences to be processed. Syntax is equal to HiFSTs printstrings and lmerts idxrange parameter: <start-idx>:<end-idx> (both inclusive, start with 1). E.g. 2:5 means: skip the first sentence, process next 4 sentences. If this points to a file, we grap sentence IDs to translate from that file and delete the decoded IDs. This can be used for distributed decoding.')
    group.add_argument('--input_file', default='', help="Path to source. This is expected to be a plain text file with one source sentence in each line. Words need to be appropriately formatted for the specified preprocessing procedure, e.g. use word IDs instead of their string representations if preprocessing set to 'id'")
    group.add_argument('--trgt_file', default='', help="Path to target test set. This is expected to be a plain text file with one source sentence in each line. Words need to be appropriately formatted for the specified preprocessing procedure, e.g. use word IDs instead of their string representations if preprocessing set to 'id'")
    group.add_argument('--indexing_scheme', default='fairseq', choices=['t2t', 'fairseq'], help="This parameter defines the reserved IDs.\n\n* 't2t': unk: 3, <s>: 2, </s>: 1.\n* 'fairseq': unk: 3, <s>: 0, </s>: 2.")
    group.add_argument('--ignore_sanity_checks', default=False, type='bool', help='SGNMT terminates when a sanity check fails by default. Set this to true to ignore sanity checks.')
    group.add_argument('--input_method', default='file', choices=['dummy', 'file', 'shell', 'stdin'], help="This parameter controls how the input to SGNMT is provided. SGNMT supports three modes:\n\n* 'dummy': Use dummy source sentences.\n* 'file': Read test sentences from a plain text filespecified by --src_test.\n* 'shell': Start SGNMT in an interactive shell.\n* 'stdin': Test sentences are read from stdin\n\n")
    group.add_argument('--log_sum', default='log', choices=['tropical', 'log'], help="Controls how to compute the sum in the log space, i.e. how to compute log(exp(l1)+exp(l2)) for log values l1,l2.\n\n* 'tropical': approximate with max(l1,l2)\n* 'log': Use logsumexp in scipy")
    group.add_argument('--n_cpu_threads', default=(- 1), type=int, help='Set the number of CPU threads.')
    # --- Decoding options: search strategy and UID regularizers ---
    group = parser.add_argument_group('Decoding options')
    group.add_argument('--decoder', default=None, choices=decoding.DECODER_REGISTRY.keys(), help='Strategy for traversing the search space which is spanned by the predictors.\n')
    group.add_argument('--beam', default=0, type=int, help="Size of beam. For 'dijkstra' it limits the capacity of the queue. Use --beam 0 for unlimited capacity.")
    group.add_argument('--allow_unk_in_output', default=True, type='bool', help='If false, remove all UNKs in the final posteriors. Predictor distributions can still produce UNKs, but they have to be replaced by other words by other predictors')
    group.add_argument('--max_len_factor', default=2.0, type=float, help='Limits the length of hypotheses to avoid infinity loops in search strategies for unbounded search spaces. The length of any translation is limited to max_len_factor times the length of the source sentence.')
    group.add_argument('--early_stopping', default=True, type='bool', help='Use this parameter if you are only interested in the first best decoding result. This option has a different effect depending on the used --decoder. For the beam decoder, it means stopping decoding when the best active hypothesis ends with </s>. If false, do not stop until all hypotheses end with EOS. For the dfs and restarting decoders, early stopping enables admissible pruning of branches when the accumulated score already exceeded the currently best score. DO NOT USE early stopping in combination with the dfs or restarting decoder when your predictors can produce positive scores!')
    group.add_argument('--gumbel', action='store_true', default=False, help='Add gumbel random variable as in Kool et. al 2019. effectively makex beam search random sampling')
    group.add_argument('--temperature', default=1.0, type=float, metavar='N', help='temperature for generation')
    group.add_argument('--seed', default=0, type=int, help='Random seed to use for numpy operations')
    group.add_argument('--length_normalization', default=False, action='store_true', help='Normalize hypothesis score by length. Parameter sets strength of penalty (should be positive to encourage UID)')
    group.add_argument('--variance_regularizer', default=0.0, type=float, help='UID variance regularizer. Parameter sets strength of penalty (should be positive to encourage UID)')
    group.add_argument('--greedy_regularizer', default=0.0, type=float, help='UID greedy regularizer. Parameter sets strength of penalty (should be positive to encourage UID)')
    group.add_argument('--local_variance_regularizer', default=0.0, type=float, help='UID local variance regularizer. Parameter sets strength of penalty (should be positive to encourage UID)')
    group.add_argument('--max_regularizer', default=0.0, type=float, help='UID max regularizer. Parameter sets strength of penalty (should be positive to encourage UID)')
    group.add_argument('--square_regularizer', default=0.0, type=float, help='UID squared regularizer. Parameter sets strength of penalty (should be positive to encourage UID)')
    # --- Output options: formats, paths, word maps, pre/post-processing ---
    group = parser.add_argument_group('Output options')
    group.add_argument('--nbest', default=0, type=int, help='Maximum number of hypotheses in the output files. Set to 0 to output all hypotheses found by the decoder. If you use the beam or astar decoder, this option is limited by the beam size.')
    group.add_argument('--num_log', default=1, type=int, help='Maximum number of hypotheses to log')
    group.add_argument('--output_path', default='sgnmt-out.%s', help='Path to the output files generated by SGNMT. You can use the placeholder %%s for the format specifier')
    group.add_argument('--outputs', default='', help="Comma separated list of output formats: \n\n* 'text': First best translations in plain text format\n* 'nbest': Moses' n-best format with separate scores for each predictor.\n* 'nbest_sep': nbest translations in plain text output to individual files based off of 'output_path'\n* 'score': writes scores of hypotheses to file; output is line-by-line.\n* 'ngram': MBR-style n-gram posteriors.\n\nFor extract_scores_along_reference.py, select one of the following output formats:\nThe path to the output files can be specified with --output_path")
    group.add_argument('--remove_eos', default=True, type='bool', help='Whether to remove </S> symbol on output.')
    group.add_argument('--src_wmap', default='', help='Path to the source side word map (Format: <word> <id>). See --preprocessing and --postprocessing for more details.')
    group.add_argument('--trg_wmap', default='', help='Path to the source side word map (Format: <word> <id>). See --preprocessing and --postprocessing for more details.')
    group.add_argument('--wmap', default='', help='Sets --src_wmap and --trg_wmap at the same time')
    group.add_argument('--preprocessing', default='id', choices=['id', 'word', 'char', 'bpe', 'bpe@@'], help="Preprocessing strategy for source sentences.\n* 'id': Input sentences are expected in indexed representation (321 123 456 4444 ...).\n* 'word': Apply --src_wmap on the input.\n* 'char': Split into characters, then apply (character-level) --src_wmap.\n* 'bpe': Apply Sennrich's subword_nmt segmentation \nSGNMT style (as in $SGNMT/scripts/subword-nmt)\n* 'bpe@@': Apply Sennrich's subword_nmt segmentation with original default values (removing </w>, using @@ separator)\n")
    group.add_argument('--postprocessing', default='id', choices=['id', 'word', 'bpe@@', 'wmap', 'char', 'subword_nmt', 'bpe_'], help='Postprocessing strategy for output sentences. See --preprocessing for more.')
    group.add_argument('--bpe_codes', default='', help="Must be set if preprocessing=bpe. Path to the BPE codes file from Sennrich's subword_nmt.")
    group.add_argument('--add_incomplete', default=False, type='bool', help='If nbest hypotheses are not found, add incomplete hypotheses to output')
    group.add_argument('--no_statistics', default=False, action='store_true', help='Do not compute or print hypothesis UID statistics. Cannot be set if using UID decoding.')
    # --- Predictor options ---
    group = parser.add_argument_group('General predictor options')
    group.add_argument('--predictor', default='fairseq', help="Predictors are scoring modules which define a distribution over target words given the history and some side information like the source sentence. If vocabulary sizes differ among predictors, we fill in gaps with predictor UNK scores.:\n\n* 'fairseq': fairseq predictor.\n Options: fairseq_path, fairseq_user_dir, fairseq_lang_pair, n_cpu_threads")
    return parser
|
def parse_args(parser):
    """Two-phase argument parsing.

    First parse the known (general) arguments, then let the selected
    decoder and predictor register their own options before the final
    parse.
    """
    known_args, _unknown = parser.parse_known_args()
    if known_args.decoder is not None:
        decoding.DECODER_REGISTRY[known_args.decoder].add_args(parser)
    if known_args.predictor is not None:
        import predictors
        predictors.PREDICTOR_REGISTRY[known_args.predictor].add_args(parser)
    return parser.parse_args()
|
def get_args():
    """Build the argument parser and return the fully parsed configuration."""
    return parse_args(get_parser())
|
def validate_args(args):
    """Some rudimentary sanity checks for configuration options.
    This method directly prints help messages to the user and raises an
    ``AttributeError`` when a sanity check fails and
    ``--ignore_sanity_checks`` is not set.

    Args:
        args (object): Configuration as returned by ``get_args``

    Raises:
        AttributeError. If any sanity check failed and sanity checks are
        not ignored.
    """
    if args.range and args.input_method == 'shell':
        logging.warn("The --range parameter can lead to unexpected behavior in 'shell' mode.")
    sanity_check_failed = False
    if args.input_method == 'dummy' and args.max_len_factor < 10:
        logging.warn('You are using the dummy input method but a low value for max_len_factor (%d). This means that decoding will not consider hypotheses longer than %d tokens. Consider increasing max_len_factor to the length longest relevant hypothesis' % (args.max_len_factor, args.max_len_factor))
        sanity_check_failed = True
    if 'fairseq' in args.predictor and args.indexing_scheme != 'fairseq':
        logging.warn('You are using the fairseq predictor, but indexing_scheme is not set to fairseq.')
        sanity_check_failed = True
    if args.preprocessing != 'id' and not args.wmap and not args.src_wmap:
        logging.warn('Your preprocessing method needs a source wmap.')
        sanity_check_failed = True
    if args.postprocessing != 'id' and not args.wmap and not args.trg_wmap:
        logging.warn('Your postprocessing method needs a target wmap.')
        sanity_check_failed = True
    if args.no_statistics and any([args.variance_regularizer, args.square_regularizer, args.max_regularizer, args.local_variance_regularizer, args.greedy_regularizer]):
        logging.warn('Cannot turn off stats computation when using UID decoding')
        sanity_check_failed = True
    # BUGFIX: --decoder defaults to None; the original unguarded
    # ``'sampling' in args.decoder`` raised TypeError in that case.
    if args.gumbel or (args.decoder is not None and 'sampling' in args.decoder):
        if not args.nbest:
            logging.warn('Must set nbest equivalent to number of desired samples when using gumbel or sampling decoders; beam size will not be used.')
            sanity_check_failed = True
    elif args.beam == 0:
        logging.warn('Beam size of 0 used. No hypotheses will be pruned!')
    if sanity_check_failed and not args.ignore_sanity_checks:
        raise AttributeError('Sanity check failed (see warnings). If you want to proceed despite these warnings, use --ignore_sanity_checks.')
|
def switch_to_fairseq_indexing():
    """Calling this method overrides the global definitions of the
    reserved word ids ``GO_ID``, ``EOS_ID``, and ``UNK_ID``
    with the fairseq indexing scheme.
    """
    global GO_ID, EOS_ID, UNK_ID
    GO_ID = 0
    EOS_ID = 2
    UNK_ID = 3
|
def switch_to_t2t_indexing():
    """Calling this method overrides the global definitions of the
    reserved word ids ``GO_ID``, ``EOS_ID``, and ``UNK_ID``
    with the tensor2tensor indexing scheme. This scheme is used in all
    t2t models.
    """
    global GO_ID, EOS_ID, UNK_ID
    GO_ID = 2
    EOS_ID = 1
    UNK_ID = 3
|
def log_sum_tropical_semiring(vals):
    """Approximates summation in log space with the max.

    Args:
        vals (set): List or set of numerical values
    """
    return max(v for v in vals)
|
def log_sum_log_semiring(vals):
    """Uses the ``logsumexp`` function in scipy to calculate the log of
    the sum of a set of log values.

    Args:
        vals (set): List or set of numerical values
    """
    return logsumexp(np.asarray(list(vals)))
|
def oov_to_unk(seq, vocab_size, unk_idx=None):
    """Replace every token id >= ``vocab_size`` with ``unk_idx``.

    Falls back to the module-level ``UNK_ID`` when ``unk_idx`` is None.
    """
    if unk_idx is None:
        unk_idx = UNK_ID
    mapped = []
    for tok in seq:
        mapped.append(tok if tok < vocab_size else unk_idx)
    return mapped
|
def argmax_n(arr, n):
    """Get indices of the ``n`` maximum entries in ``arr``. The
    parameter ``arr`` can be a dictionary. The returned index set is
    not guaranteed to be sorted.

    Args:
        arr (list,array,dict): Set of numerical values
        n (int): Number of values to retrieve

    Returns:
        List of indices or keys of the ``n`` maximum entries in ``arr``
    """
    if isinstance(arr, dict):
        return sorted(arr, key=arr.get, reverse=True)[:n]
    if len(arr) <= n:
        return range(len(arr))
    # Move CUDA tensors to the host before partitioning with numpy.
    if hasattr(arr, 'is_cuda') and arr.is_cuda:
        arr = arr.cpu()
    return np.argpartition(arr, -n)[-n:]
|
def max_(arr):
    """Return the maximum value stored in ``arr``.

    For a dict the maximum is taken over the *values*; lists use the
    builtin ``max``; anything else falls through to ``np.max``.

    Args:
        arr (list,array,dict): Set of numerical values

    Returns:
        Maximum value in ``arr``
    """
    if isinstance(arr, dict):
        arr = list(arr.values())
    if isinstance(arr, list):
        return max(arr)
    return np.max(arr)
|
def argmax(arr):
    """Get the index of the maximum entry in ``arr``. The parameter can
    be a dictionary.

    Args:
        arr (list,array,dict): Set of numerical values

    Returns:
        Index or key of the maximum entry in ``arr``
    """
    if not isinstance(arr, dict):
        return np.argmax(arr)
    best_key, _best_val = max(arr.items(), key=operator.itemgetter(1))
    return best_key
|
def flattened(X):
    """flattens list of lists"""
    out = []
    for sub in X:
        out.extend(sub)
    return out
|
def as_ndarray(X, pad=(- 1), min_length=0):
    """turns list of lists into ndarray, right-padding shorter rows
    with ``pad`` up to max(longest row, min_length)"""
    width = max(max(len(row) for row in X), min_length)
    return np.array([row + [pad] * (width - len(row)) for row in X])
|
def log1mexp_basic(x, ignore_zero=False):
    """Vectorizable implementation of log(1-exp(x)).

    With ``ignore_zero`` the divide-by-zero warning from log(0) (x == 0)
    is suppressed.
    """
    if not ignore_zero:
        return np.log1p(-np.exp(x))
    with np.errstate(divide='ignore'):
        return np.log1p(-np.exp(x))
|
def log1pexp_basic(x, ignore_zero=False):
    """Vectorizable implementation of log(1+exp(x)).

    With ``ignore_zero`` numpy divide warnings are suppressed.
    """
    if not ignore_zero:
        return np.log1p(np.exp(x))
    with np.errstate(divide='ignore'):
        return np.log1p(np.exp(x))
|
def log1pexp(x):
    """Numerically stable implementation of log(1+exp(x)) aka softmax(0,x).

    -log1pexp(-x) is log(sigmoid(x))

    Source:
        http://cran.r-project.org/web/packages/Rmpfr/vignettes/log1mexp-note.pdf
    """
    # Branch cutoffs follow the Rmpfr note: each regime uses the cheapest
    # expression that is accurate there.
    if x > 33.3:
        return x
    if x > 18:
        return x + np.exp(-x)
    if x > -37:
        return np.log1p(np.exp(x))
    return np.exp(x)
|
def log1mexp(x):
    """Numerically stable implementation of log(1-exp(x))

    Note: function is finite for x < 0.

    Source:
        http://cran.r-project.org/web/packages/Rmpfr/vignettes/log1mexp-note.pdf
    """
    if x >= 0:
        return np.nan
    a = -x
    # expm1 form is more accurate for small magnitudes, log1p for large.
    if a <= 0.693:
        return np.log(-np.expm1(-a))
    return np.log1p(-np.exp(-a))
|
def log_add(x, y):
    """Addition of 2 values in log space.
    Need separate checks for inf because inf-inf=nan
    """
    if x == NEG_INF:
        return y
    if y == NEG_INF:
        return x
    # Anchor on the larger operand (ties go to x) for stability.
    hi, lo = (x, y) if y <= x else (y, x)
    return hi + log1pexp(lo - hi)
|
def log_minus(x, y):
    """Subtraction of 2 values in log space.
    Need separate checks for inf because inf-inf=nan
    """
    if x == y:
        return NEG_INF
    if y > x:
        # log(exp(x) - exp(y)) is undefined for y > x; warn unless the
        # difference is within machine epsilon.
        if y - x > MACHINE_EPS:
            logging.warn('Using function log_minus for invalid values')
        return np.nan
    return x + log1mexp(y - x)
|
def logsigmoid(x):
    """log(sigmoid(x)) = -log(1+exp(-x)) = -log1pexp(-x)"""
    neg_x = -x
    return -log1pexp(neg_x)
|
def signed_log_add(x, y, sign_x, sign_y):
    """Add two signed numbers represented as (sign, log-magnitude).

    Returns a (sign, log_magnitude) tuple whose sign is that of the
    operand with the larger log magnitude.
    """
    if y > x:
        big, small = y, x
        sign_big, sign_small = sign_y, sign_x
    else:
        big, small = x, y
        sign_big, sign_small = sign_x, sign_y
    # Opposite signs subtract magnitudes; equal signs add them.
    if sign_big != sign_small:
        return (sign_big, log_minus(big, small))
    return (sign_big, log_add(big, small))
|
def softmax(x, temperature=1.0):
    """Temperature-scaled softmax, computed by exponentiating the
    log-softmax for numerical stability."""
    log_probs = log_softmax(x, temperature=temperature)
    return np.exp(log_probs)
|
def log_softmax(x, temperature=1.0):
    """Temperature-scaled log-softmax.

    Non-finite entries (nan/inf after shifting) get zero weight in the
    normalizer so they do not poison the logsumexp.
    """
    scaled = x / temperature
    shifted = scaled - np.max(scaled)
    weights = np.isfinite(shifted).astype(int)
    return shifted - logsumexp(shifted, b=weights)
|
def binary_search(a, x):
    """Return the index of ``x`` in the sorted sequence ``a``, or -1 if
    ``x`` is not present."""
    pos = bisect_left(a, x)
    if pos == len(a) or a[pos] != x:
        return -1
    return pos
|
def perplexity(arr):
    """Perplexity ``2 ** (-mean(arr))`` of a list of log2 scores.

    Returns the module-level ``INF`` for an empty list.
    """
    if len(arr) == 0:
        return INF
    total = sum(arr)
    return 2 ** (-total / len(arr))
|
def prod(iterable):
    """Product of all elements; 1.0 for an empty iterable."""
    result = 1.0
    for factor in iterable:
        result = result * factor
    return result
|
def common_viewkeys(obj):
    """Can be used to iterate over the keys or indices of a mapping.
    Works with numpy arrays, lists, and dicts. Code taken from
    http://stackoverflow.com/questions/12325608/iterate-over-a-dict-or-list-in-python
    """
    if not isinstance(obj, dict):
        return range(len(obj))
    return obj.keys()
|
def common_iterable(obj):
    """Can be used to iterate over the key-value pairs of a mapping.
    Works with numpy arrays, lists, and dicts. Code taken from
    http://stackoverflow.com/questions/12325608/iterate-over-a-dict-or-list-in-python
    """
    if isinstance(obj, dict):
        yield from obj.items()
    else:
        yield from enumerate(obj)
|
def common_get(obj, key, default):
    """Can be used to access an element via the index or key.
    Works with numpy arrays, lists, and dicts.

    Args:
        ``obj`` (list,array,dict): Mapping
        ``key`` (int): Index or key of the element to retrieve
        ``default`` (object): Default return value if ``key`` not found

    Returns:
        ``obj[key]`` if ``key`` in ``obj``, otherwise ``default``
    """
    if isinstance(obj, dict):
        return obj.get(key, default)
    if key < len(obj):
        return obj[key]
    return default
|
def common_contains(obj, key):
    """Checks the existence of a key or index in a mapping.
    Works with numpy arrays, lists, and dicts.

    Args:
        ``obj`` (list,array,dict): Mapping
        ``key`` (int): Index or key of the element to retrieve

    Returns:
        ``True`` if ``key`` in ``obj``, otherwise ``False``
    """
    if not isinstance(obj, dict):
        return key < len(obj)
    return key in obj
|
def get_path(tmpl, sub=1):
    """Replaces the %d placeholder in ``tmpl`` with ``sub``. If ``tmpl``
    does not contain %d, return ``tmpl`` unmodified.

    Args:
        tmpl (string): Path, potentially with %d placeholder
        sub (int): Substitution for %d

    Returns:
        string. ``tmpl`` with %d replaced with ``sub`` if present
    """
    try:
        return tmpl % sub
    except TypeError:
        # No placeholder in the template; return it as-is.
        return tmpl
|
def split_comma(s, func=None):
    """Splits a string at commas and removes blanks."""
    if not s:
        return []
    stripped = (part.strip() for part in s.split(','))
    if func is None:
        return list(stripped)
    return [func(part) for part in stripped]
|
def ngrams(sen, n):
    """All ``n``-grams (as token tuples) of a space-tokenized sentence string."""
    tokens = sen.split(' ')
    return [tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]
|
def distinct_ngrams(hypos, n):
    """Fraction of ``n``-grams pooled over all hypotheses that are distinct.

    Returns 0 when no n-grams can be extracted at all.
    """
    pooled = []
    for hyp in hypos:
        pooled.extend(ngrams(hyp, n))
    if not pooled:
        return 0
    return float(len(set(pooled))) / len(pooled)
|
def ngram_diversity(hypos):
    """Mean distinct-n-gram ratio over n = 1..4."""
    scores = [distinct_ngrams(hypos, order) for order in range(1, 5)]
    return sum(scores) / 4
|
def hamming_distance(hypo, other_hypos, pad=(- 1)):
    """Total Hamming distance between ``hypo`` and a batch of hypotheses.

    ``other_hypos`` may be a padded 2-D ndarray (``hypo`` is padded with
    ``pad`` to its width) or a list of sequences, where length differences
    count as mismatches. Logs a warning for any other type.
    """
    if isinstance(other_hypos, np.ndarray):
        width = other_hypos.shape[1]
        if len(hypo) != width:
            hypo = np.array(hypo + [pad] * (width - len(hypo)))
        return (hypo != other_hypos).sum()
    if isinstance(other_hypos, list):
        total = 0
        for other in other_hypos:
            shorter = min(len(other), len(hypo))
            longer = max(len(other), len(hypo))
            mismatches = sum(a != b for a, b in zip(other[:shorter], hypo[:shorter]))
            total += (longer - shorter) + mismatches
        return total
    logging.warn('No implementation for type: ' + str(type(other_hypos)))
|
def sentence_bleu(sentence, reference, detokenizer=None):
    """
    Utility function for calculating sentence BLEU.
    Expects sentence and reference as list of tokens.
    Reference may be list of multiple references
    """
    # Promote a single reference to a one-element list of references.
    if (not isinstance(reference[0], list)):
        reference = [reference]
    if (detokenizer is not None):
        # NOTE(review): this branch calls .split() on ``sentence`` and each
        # reference, i.e. here they must be whitespace-joined strings rather
        # than token lists as the docstring says -- confirm against callers.
        sentence = detokenizer(sentence.split())
        reference = [detokenizer(r.split()) for r in reference]
    # NOTE(review): ``reference`` is already a list of references; wrapping it
    # again as ``[reference]`` hands sacrebleu a nested list -- verify this
    # matches the sacrebleu.sentence_bleu signature in use.
    return sacrebleu.sentence_bleu(sentence, [reference]).score
|
def entropy(distribution, base=np.e):
    """Shannon entropy of a probability ``distribution`` in log base ``base``.

    BUGFIX: the original called ``np.log(distribution, base=base)`` --
    ``np.log`` has no ``base`` keyword (its second argument is ``out``),
    so the call always raised a TypeError. The base change is now applied
    explicitly via log(p) / log(base).

    Args:
        distribution: Array-like of probabilities.
        base (float): Logarithm base (default: natural log).

    Returns:
        float. ``-sum(p * log_base(p))``.
    """
    logs = np.log(distribution) / np.log(base)
    return -sum(distribution * logs)
|
def log_entropy(log_distribution, base=np.e):
    """Entropy computed from log-probabilities given in log base ``base``."""
    probs = base ** log_distribution
    return -sum(probs * log_distribution)
|
class Discriminator(object):
    """Feed-forward discriminator head (TensorFlow 1.x graph code).

    Projects ``encoder_rnn_output`` through one ReLU hidden layer of width
    100 to ``FLAGS.LABEL_CLASS`` logits and exposes:
      * ``discriminator_predict`` -- argmax class id (gradient-stopped),
      * ``discriminator_prob`` -- softmax probabilities,
      * ``discriminator_sampling_onehot`` -- a Gumbel-softmax sample.
    """

    def __init__(self, encoder_rnn_output, temperature, is_training=True, ru=False):
        # NOTE(review): ``ru`` is accepted but never used in this class.
        with tf.variable_scope('Discriminator_input'):
            self.encoder_rnn_output = encoder_rnn_output
            self.temperature = temperature
            self.is_training = is_training
        # First affine layer: RNN_SIZE -> 100.
        with tf.variable_scope('discriminator_linear1'):
            discriminator_W1 = tf.get_variable(name='discriminator_W1', shape=(FLAGS.RNN_SIZE, 100), dtype=tf.float32, initializer=tf.random_normal_initializer(stddev=0.1))
            discriminator_b1 = tf.get_variable(name='discriminator_b1', shape=100, dtype=tf.float32)
        # Second affine layer: 100 -> LABEL_CLASS logits.
        with tf.variable_scope('discriminator_linear2'):
            discriminator_W2 = tf.get_variable(name='discriminator_W2', shape=(100, FLAGS.LABEL_CLASS), dtype=tf.float32, initializer=tf.random_normal_initializer(stddev=0.1))
            discriminator_b2 = tf.get_variable(name='discriminator_b2', shape=FLAGS.LABEL_CLASS, dtype=tf.float32)
        with tf.name_scope('hidden'):
            h = tf.nn.relu((tf.matmul(self.encoder_rnn_output, discriminator_W1) + discriminator_b1))
        with tf.name_scope('discriminator_output'):
            self.discriminator_logits = (tf.matmul(h, discriminator_W2) + discriminator_b2)
            # stop_gradient keeps the hard prediction out of backprop.
            self.discriminator_predict = tf.stop_gradient(tf.argmax(self.discriminator_logits, 1))
            self.discriminator_prob = tf.nn.softmax(self.discriminator_logits, name='discriminator_softmax')
        with tf.name_scope('sampling'):
            self.discriminator_sampling_onehot = self.gumbel_softmax(self.discriminator_logits, self.temperature)

    def gumbel_softmax(self, logits, temperature, dim=(- 1)):
        """Differentiable (soft) one-hot sample via the Gumbel-softmax trick."""
        # minval > 0 avoids log(0); -log(-log(U)) is Gumbel(0, 1) noise.
        u = tf.random_uniform((FLAGS.BATCH_SIZE, FLAGS.LABEL_CLASS), minval=np.finfo(np.float32).tiny)
        g = (- tf.log((- tf.log(u))))
        onehot = tf.nn.softmax(tf.div((logits + g), temperature), dim=dim, name='discriminator_gumbel_softmax')
        return onehot
|
class Encoder_cvae(object):
    """RNN encoder for the conditional VAE (TensorFlow 1.x graph code).

    Embeds the per-timestep input tokens, projects them into the RNN input
    space, unrolls a multi-layer layer-norm LSTM for ``FLAGS.SEQ_LEN``
    steps and exposes the final hidden state as ``encoder_rnn_output``.
    """

    def __init__(self, embedding, encoder_input_list, is_training=True, ru=False):
        # NOTE(review): ``ru`` is accepted but never used in this class.
        with tf.name_scope('encoder_input'):
            self.embedding = embedding
            self.encoder_input_list = encoder_input_list
            self.is_training = is_training
        with tf.variable_scope('encoder_rnn'):
            with tf.variable_scope('rnn_input_weight'):
                # Affine projection of word embeddings into the RNN input space.
                self.rnn_input_W = tf.get_variable(name='rnn_input_W', shape=(FLAGS.EMBED_SIZE, FLAGS.RNN_SIZE), dtype=tf.float32, initializer=tf.random_normal_initializer(stddev=0.1))
                self.rnn_input_b = tf.get_variable(name='rnn_input_b', shape=FLAGS.RNN_SIZE, dtype=tf.float32)
            with tf.variable_scope('encoder_rnn'):
                cell = tf.contrib.rnn.LayerNormBasicLSTMCell(FLAGS.RNN_SIZE)
                if self.is_training:
                    # Output dropout only while training.
                    cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=FLAGS.ENCODER_DROPOUT_KEEP)
                # NOTE(review): ``[cell] * FLAGS.RNN_NUM`` reuses the same cell
                # object for every layer -- confirm this sharing is intended.
                self.cell = tf.contrib.rnn.MultiRNNCell(([cell] * FLAGS.RNN_NUM))
                self.init_states = [cell.zero_state(FLAGS.BATCH_SIZE, tf.float32) for _ in range(FLAGS.RNN_NUM)]
                self.states = [tf.placeholder(tf.float32, FLAGS.BATCH_SIZE, name='state') for _ in range(FLAGS.RNN_NUM)]
        with tf.name_scope('encoder_rnn_output'):
            self.encoder_rnn_output = self.rnn_train_predict()

    def rnn_train_predict(self):
        """Unroll the RNN for ``FLAGS.SEQ_LEN`` steps; return the final hidden state."""
        pred = []
        state = self.init_states
        for i in range(FLAGS.SEQ_LEN):
            with tf.name_scope('encoder_input_embedding'):
                encoder_input = self.encoder_input_list[i]
                encoder_input_embedding = tf.nn.embedding_lookup(self.embedding, encoder_input)
                assert (encoder_input_embedding.shape == (FLAGS.BATCH_SIZE, FLAGS.EMBED_SIZE))
            with tf.name_scope('rnn_input'):
                rnn_input = tf.nn.relu((tf.matmul(encoder_input_embedding, self.rnn_input_W) + self.rnn_input_b))
                assert (rnn_input.shape == (FLAGS.BATCH_SIZE, FLAGS.RNN_SIZE))
            with tf.name_scope('rnn_predict'):
                (step_pred, state) = self.cell(rnn_input, state)
                assert (state[(- 1)][1].shape == (FLAGS.BATCH_SIZE, FLAGS.RNN_SIZE))
                assert (step_pred.shape == (FLAGS.BATCH_SIZE, FLAGS.RNN_SIZE))
                pred.append(step_pred)
        # Only the last layer's final hidden state is returned; the per-step
        # outputs collected in ``pred`` are unused.
        return state[(- 1)][1]
|
class Sampler(object):
    """Reparameterized latent sampler (TensorFlow 1.x graph code).

    Concatenates the encoder output with a one-hot label, maps the result
    through a ReLU hidden layer to ``mu`` and ``logvar``, and draws
    ``latent_variables = mu + exp(0.5 * logvar) * z`` (reparameterization
    trick).
    """

    def __init__(self, encoder_rnn_output, label_onehot, is_training=True):
        self.encoder_rnn_output = encoder_rnn_output
        self.label_onehot = label_onehot
        self.is_training = is_training
        with tf.variable_scope('encoder_linear1'):
            # Hidden layer over [encoder output ; label one-hot].
            context_to_hidden_W = tf.get_variable(name='context_to_hidden_W', shape=[(FLAGS.RNN_SIZE + FLAGS.LABEL_CLASS), 100], dtype=tf.float32, initializer=tf.random_normal_initializer(stddev=0.1))
            context_to_hidden_b = tf.get_variable(name='context_to_hidden_b', shape=[100], dtype=tf.float32)
        with tf.variable_scope('encoder_linear2'):
            # Separate heads for the posterior mean and log-variance.
            context_to_mu_W = tf.get_variable(name='context_to_mu_W', shape=[100, FLAGS.LATENT_VARIABLE_SIZE], dtype=tf.float32, initializer=tf.random_normal_initializer(stddev=0.1))
            context_to_mu_b = tf.get_variable(name='context_to_mu_b', shape=[FLAGS.LATENT_VARIABLE_SIZE], dtype=tf.float32)
            context_to_logvar_W = tf.get_variable(name='context_to_logvar_W', shape=[100, FLAGS.LATENT_VARIABLE_SIZE], dtype=tf.float32, initializer=tf.random_normal_initializer(stddev=0.1))
            context_to_logvar_b = tf.get_variable(name='context_to_logvar_b', shape=[FLAGS.LATENT_VARIABLE_SIZE], dtype=tf.float32)
        with tf.name_scope('rnn_output_and_label'):
            rnn_output_and_label = tf.concat((encoder_rnn_output, self.label_onehot), axis=1, name='concat_encoder_rnn_output_and_label')
        with tf.name_scope('sampler_hiddenstate'):
            h = tf.nn.relu((tf.matmul(rnn_output_and_label, context_to_hidden_W) + context_to_hidden_b))
        with tf.name_scope('mu'):
            self.mu = (tf.matmul(h, context_to_mu_W) + context_to_mu_b)
        with tf.name_scope('log_var'):
            self.logvar = (tf.matmul(h, context_to_logvar_W) + context_to_logvar_b)
        with tf.name_scope('z'):
            # NOTE(review): noise is truncated normal rather than standard
            # normal -- confirm this deviation from the usual VAE prior is
            # intended.
            z = tf.truncated_normal((FLAGS.BATCH_SIZE, FLAGS.LATENT_VARIABLE_SIZE), stddev=1.0)
        with tf.name_scope('latent_variables'):
            self.latent_variables = (self.mu + (tf.exp((0.5 * self.logvar)) * z))
|
class Encoder_vae(object):
def __init__(self, embedding, encoder_input_list, is_training=True, ru=False):
with tf.variable_scope('Encoder_input'):
self.embedding = embedding
self.encoder_input_list = encoder_input_list
self.is_training = is_training
with tf.variable_scope('encoder_rnn'):
with tf.variable_scope('rnn_input_weight'):
self.rnn_input_W = tf.get_variable(name='rnn_input_W', shape=(FLAGS.EMBED_SIZE, FLAGS.RNN_SIZE), dtype=tf.float32, initializer=tf.random_normal_initializer(stddev=0.1))
self.rnn_input_b = tf.get_variable(name='rnn_input_b', shape=FLAGS.RNN_SIZE, dtype=tf.float32)
with tf.variable_scope('encoder_rnn'):
cell = tf.contrib.rnn.LayerNormBasicLSTMCell(FLAGS.RNN_SIZE)
if self.is_training:
cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=FLAGS.ENCODER_DROPOUT_KEEP)
self.cell = tf.contrib.rnn.MultiRNNCell(([cell] * FLAGS.RNN_NUM))
self.init_states = [cell.zero_state(FLAGS.BATCH_SIZE, tf.float32) for _ in range(FLAGS.RNN_NUM)]
self.states = [tf.placeholder(tf.float32, FLAGS.BATCH_SIZE, name='state') for _ in range(FLAGS.RNN_NUM)]
with tf.variable_scope('encoder_rnn_output'):
self.encoder_rnn_output = self.rnn_train_predict()
with tf.variable_scope('encoder_linear1'):
context_to_hidden_W = tf.get_variable(name='context_to_hidden_W', shape=[FLAGS.RNN_SIZE, 100], dtype=tf.float32, initializer=tf.random_normal_initializer(stddev=0.1))
context_to_hidden_b = tf.get_variable(name='context_to_hidden_b', shape=[100], dtype=tf.float32)
with tf.variable_scope('encoder_linear2'):
context_to_mu_W = tf.get_variable(name='context_to_mu_W', shape=[100, FLAGS.LATENT_VARIABLE_SIZE], dtype=tf.float32, initializer=tf.random_normal_initializer(stddev=0.1))
context_to_mu_b = tf.get_variable(name='context_to_mu_b', shape=[FLAGS.LATENT_VARIABLE_SIZE], dtype=tf.float32)
context_to_logvar_W = tf.get_variable(name='context_to_logvar_W', shape=[100, FLAGS.LATENT_VARIABLE_SIZE], dtype=tf.float32, initializer=tf.random_normal_initializer(stddev=0.1))
context_to_logvar_b = tf.get_variable(name='context_to_logvar_b', shape=[FLAGS.LATENT_VARIABLE_SIZE], dtype=tf.float32)
with tf.name_scope('hiddenstate'):
h = tf.nn.relu((tf.matmul(self.encoder_rnn_output, context_to_hidden_W) + context_to_hidden_b))
with tf.name_scope('mu'):
self.mu = (tf.matmul(h, context_to_mu_W) + context_to_mu_b)
with tf.name_scope('log_var'):
self.logvar = (tf.matmul(h, context_to_logvar_W) + context_to_logvar_b)
with tf.name_scope('z'):
z = tf.truncated_normal((FLAGS.BATCH_SIZE, FLAGS.LATENT_VARIABLE_SIZE), stddev=1.0)
with tf.name_scope('latent_variables'):
self.latent_variables = (self.mu + (tf.exp((0.5 * self.logvar)) * z))
def rnn_train_predict(self):
    """Run the encoder RNN over the full input sequence.

    Feeds each timestep's embedded token through a ReLU projection and the
    (multi-layer) LSTM cell, and returns the hidden state `h` of the top
    layer after the final timestep.

    Returns:
        Tensor of shape (BATCH_SIZE, RNN_SIZE): final top-layer hidden state.
    """
    # NOTE: the original version also accumulated every per-step output in a
    # list that was never used; that dead accumulation has been removed.
    state = self.init_states
    for i in range(FLAGS.SEQ_LEN):
        with tf.name_scope('encoder_input_embedding'):
            encoder_input = self.encoder_input_list[i]
            encoder_input_embedding = tf.nn.embedding_lookup(self.embedding, encoder_input)
            assert (encoder_input_embedding.shape == (FLAGS.BATCH_SIZE, FLAGS.EMBED_SIZE))
        with tf.name_scope('rnn_input'):
            rnn_input = tf.nn.relu((tf.matmul(encoder_input_embedding, self.rnn_input_W) + self.rnn_input_b))
            assert (rnn_input.shape == (FLAGS.BATCH_SIZE, FLAGS.RNN_SIZE))
        with tf.name_scope('rnn_predict'):
            (step_pred, state) = self.cell(rnn_input, state)
            assert (state[(- 1)][1].shape == (FLAGS.BATCH_SIZE, FLAGS.RNN_SIZE))
            assert (step_pred.shape == (FLAGS.BATCH_SIZE, FLAGS.RNN_SIZE))
    # state[-1] is the (c, h) tuple of the top LSTM layer; [1] selects h.
    return state[(- 1)][1]
|
class Semi_VAE(object):
    """Semi-supervised sequence VAE with a label discriminator (M2-style).

    Builds a TF1 graph in one of three modes, selected by the constructor
    flags (the caller creates several instances that share variables via
    `tf.variable_scope(..., reuse=True)`):
      * labeled training   (is_training=True,  without_label=False)
      * unlabeled training (is_training=True,  without_label=True) — the
        label one-hot is sampled from the discriminator (Gumbel softmax)
      * evaluation         (is_training=False) — latent variables are fed
        through a placeholder instead of being sampled.

    Fix vs. original: the optimizer section computed `tf.trainable_variables()`
    twice; the redundant call has been removed.
    """

    def __init__(self, batchloader, is_training=True, without_label=False, ru=False):
        """Construct the graph.

        Args:
            batchloader: data source, forwarded to the decoder.
            is_training: build the training graph (sampled latents + optimizer).
            without_label: no ground-truth labels; use the discriminator sample.
            ru: flag forwarded to the encoder/decoder (reuse-related; see callers).
        """
        self.batchloader = batchloader
        self.ru = ru
        self.is_training = is_training
        self.without_label = without_label
        self.lr = tf.placeholder(tf.float32, shape=(), name='learning_rate')
        self.gumbel_temperature = tf.placeholder(tf.float32, shape=(), name='gumbel_temperature')
        with tf.name_scope('Placeholders'):
            self.encoder_input = tf.placeholder(tf.int64, shape=(FLAGS.BATCH_SIZE, FLAGS.SEQ_LEN), name='encoder_input')
            self.decoder_input = tf.placeholder(tf.int64, shape=(FLAGS.BATCH_SIZE, FLAGS.SEQ_LEN), name='decoder_input')
            self.target = tf.placeholder(tf.int64, shape=(FLAGS.BATCH_SIZE, FLAGS.SEQ_LEN), name='target')
            # Transpose to time-major and split into per-timestep tensors.
            encoder_input_t = tf.transpose(self.encoder_input, perm=[1, 0])
            self.encoder_input_list = []
            decoder_input_t = tf.transpose(self.decoder_input, perm=[1, 0])
            self.decoder_input_list = []
            target_t = tf.transpose(self.target, perm=[1, 0])
            self.target_list = []
            self.step = tf.placeholder(tf.float32, shape=(), name='step')
            for i in range(FLAGS.SEQ_LEN):
                self.encoder_input_list.append(encoder_input_t[i])
                assert (self.encoder_input_list[i].shape == FLAGS.BATCH_SIZE)
                self.decoder_input_list.append(decoder_input_t[i])
                assert (self.decoder_input_list[i].shape == FLAGS.BATCH_SIZE)
                self.target_list.append(target_t[i])
                assert (self.target_list[i].shape == FLAGS.BATCH_SIZE)
            if (not without_label):
                self.label = tf.placeholder(tf.int64, shape=FLAGS.BATCH_SIZE, name='label')
                self.label_onehot = tf.one_hot(self.label, FLAGS.LABEL_CLASS, name='label_onehot')
                assert (self.label_onehot.shape == (FLAGS.BATCH_SIZE, FLAGS.LABEL_CLASS))
        with tf.variable_scope('Embedding'):
            self.embedding = tf.get_variable(name='embedding', shape=[FLAGS.VOCAB_SIZE, FLAGS.EMBED_SIZE], dtype=tf.float32, initializer=tf.random_normal_initializer(stddev=0.1))
        with tf.variable_scope('Encoder'):
            self.encoder = Encoder[FLAGS.ENCODER_NAME](self.embedding, self.encoder_input_list, is_training=self.is_training, ru=self.ru)
        with tf.variable_scope('Discriminator'):
            self.discriminator = Discriminator(self.encoder.encoder_rnn_output, self.gumbel_temperature)
        if self.without_label:
            # No ground truth: use the discriminator's Gumbel-softmax sample
            # as the label one-hot.
            self.label_onehot = self.discriminator.discriminator_sampling_onehot
            assert (self.label_onehot.shape == (FLAGS.BATCH_SIZE, FLAGS.LABEL_CLASS))
        with tf.name_scope('Latent_variables'):
            self.sampler = Sampler(self.encoder.encoder_rnn_output, self.label_onehot, is_training=self.is_training)
            if self.is_training:
                self.latent_variables = self.sampler.latent_variables
            else:
                # At evaluation time latents are supplied by the caller.
                self.latent_variables = tf.placeholder(tf.float32, shape=(FLAGS.BATCH_SIZE, FLAGS.LATENT_VARIABLE_SIZE), name='latent_variables_input')
        with tf.variable_scope('Decoder'):
            self.decoder = Decoder[FLAGS.DECODER_NAME](self.decoder_input, self.latent_variables, self.label_onehot, self.embedding, self.batchloader, is_training=self.is_training, ru=self.ru)
        with tf.name_scope('Loss'):
            if (not self.without_label):
                discriminator_correct = tf.equal(self.discriminator.discriminator_predict, self.label)
                self.discriminator_accuracy = tf.reduce_mean(tf.cast(discriminator_correct, tf.float32))
                # Scaled by SEQ_LEN to stay comparable with the summed
                # per-timestep reconstruction loss.
                self.discriminator_loss = (tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.discriminator.discriminator_logits, labels=self.label, name='labeled_discriminator_cross_entropy')) * FLAGS.SEQ_LEN)
            else:
                # KL(q(y|x) || uniform prior) for the unlabeled objective;
                # 1e-06 guards the log against zero probabilities.
                true_y = tf.fill([FLAGS.BATCH_SIZE, FLAGS.LABEL_CLASS], (1 / FLAGS.LABEL_CLASS), name='true_y_distribution')
                self.kld2 = tf.reduce_mean(tf.reduce_sum((self.discriminator.discriminator_prob * (tf.log((self.discriminator.discriminator_prob + 1e-06)) - tf.log(true_y))), axis=1))
            self.logits = self.decoder.logits
            # KL(q(z|x,y) || N(0, I)).
            self.kld = tf.reduce_mean(((- 0.5) * tf.reduce_sum((((self.sampler.logvar - tf.square(self.sampler.mu)) - tf.exp(self.sampler.logvar)) + 1), axis=1)))
            # Linear KL annealing from INIT_KLD_WEIGHT to 1 between
            # KLD_ANNEAL_START and KLD_ANNEAL_END steps.
            self.kld_weight = tf.clip_by_value((FLAGS.INIT_KLD_WEIGHT + (((1 - FLAGS.INIT_KLD_WEIGHT) * (self.step - FLAGS.KLD_ANNEAL_START)) / (FLAGS.KLD_ANNEAL_END - FLAGS.KLD_ANNEAL_START))), 0, 1)
            reconst_losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets) for (logits, targets) in zip(self.logits, self.target_list)]
            self.reconst_loss = (tf.reduce_mean(reconst_losses) * FLAGS.SEQ_LEN)
            if (not self.without_label):
                # log(1/K): constant uniform label-prior term of the ELBO.
                self.loss = (((self.reconst_loss + (self.kld_weight * self.kld)) + tf.log((1 / FLAGS.LABEL_CLASS))) + self.discriminator_loss)
            else:
                self.loss = ((self.reconst_loss + (self.kld_weight * self.kld)) + self.kld2)
        with tf.name_scope('Summary'):
            if (self.is_training and (not self.without_label)):
                reconst_loss_summary = tf.summary.scalar('labeled_reconst_loss', self.reconst_loss, family='train_loss')
                kld_summary = tf.summary.scalar('labeled_kld', self.kld, family='kld')
                disc_loss_summary = tf.summary.scalar('labeled_disc_train_loss', self.discriminator_loss, family='disc_loss')
                disc_acc_summary = tf.summary.scalar('labeled_disc_train_acc', self.discriminator_accuracy, family='disc_acc')
                kld_weight_summary = tf.summary.scalar('kld_weight', self.kld_weight, family='parameters')
                mu_summary = tf.summary.histogram('labeled_mu', tf.reduce_mean(self.sampler.mu, 0))
                var_summary = tf.summary.histogram('labeled_var', tf.reduce_mean(tf.exp(self.sampler.logvar), 0))
                lr_summary = tf.summary.scalar('lr', self.lr, family='parameters')
                self.merged_summary = tf.summary.merge([reconst_loss_summary, kld_summary, disc_loss_summary, disc_acc_summary, kld_weight_summary, mu_summary, var_summary, lr_summary])
            elif (self.is_training and self.without_label):
                reconst_loss_summary = tf.summary.scalar('unlabeled_reconst_loss', self.reconst_loss, family='train_loss')
                kld_summary = tf.summary.scalar('unlabeled_kld', self.kld, family='kld')
                gumbel_summary = tf.summary.scalar('gumbel_temperature', self.gumbel_temperature, family='parameters')
                kld2_summary = tf.summary.scalar('unlabeled_kld2', self.kld2, family='kld')
                mu_summary = tf.summary.histogram('unlabeled_mu', tf.reduce_mean(self.sampler.mu, 0))
                var_summary = tf.summary.histogram('unlabeled_var', tf.reduce_mean(tf.exp(self.sampler.logvar), 0))
                self.merged_summary = tf.summary.merge([reconst_loss_summary, kld_summary, gumbel_summary, kld2_summary, mu_summary, var_summary])
            else:
                valid_reconst_loss_summary = tf.summary.scalar('valid_reconst_loss', self.reconst_loss, family='valid_loss')
                disc_loss_summary = tf.summary.scalar('disc_valid_loss', self.discriminator_loss, family='disc_loss')
                disc_acc_summary = tf.summary.scalar('disc_valid_acc', self.discriminator_accuracy, family='disc_acc')
                self.merged_summary = tf.summary.merge([valid_reconst_loss_summary, disc_loss_summary, disc_acc_summary])
        if self.is_training:
            with tf.name_scope('Optimizer'):
                tvars = tf.trainable_variables()
                (grads, _) = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), FLAGS.MAX_GRAD)
                optimizer = tf.train.AdamOptimizer(self.lr, beta1=0.5)
                self.train_op = optimizer.apply_gradients(zip(grads, tvars))
|
class Simple_VAE(object):
    """Plain (unsupervised) sequence VAE.

    Builds the TF1 graph: encoder -> gaussian latent -> decoder, with a
    KL-annealed ELBO loss. At evaluation time (`is_training=False`) the
    latent variables are fed through a placeholder instead of being sampled
    from the encoder.

    Fix vs. original: the optimizer section computed `tf.trainable_variables()`
    twice; the redundant call has been removed.
    """

    def __init__(self, batchloader, is_training=True, ru=False):
        """Construct the graph.

        Args:
            batchloader: data source, forwarded to the decoder.
            is_training: build the training graph (sampled latents + optimizer).
            ru: flag forwarded to the encoder/decoder (reuse-related; see callers).
        """
        self.batchloader = batchloader
        self.ru = ru
        self.is_training = is_training
        self.lr = tf.placeholder(tf.float32, shape=(), name='learning_rate')
        with tf.name_scope('Placeholders'):
            self.encoder_input = tf.placeholder(tf.int64, shape=(FLAGS.BATCH_SIZE, FLAGS.SEQ_LEN), name='encoder_input')
            self.decoder_input = tf.placeholder(tf.int64, shape=(FLAGS.BATCH_SIZE, FLAGS.SEQ_LEN), name='decoder_input')
            self.target = tf.placeholder(tf.int64, shape=(FLAGS.BATCH_SIZE, FLAGS.SEQ_LEN), name='target')
            # Transpose to time-major and split into per-timestep tensors.
            encoder_input_t = tf.transpose(self.encoder_input, perm=[1, 0])
            self.encoder_input_list = []
            decoder_input_t = tf.transpose(self.decoder_input, perm=[1, 0])
            self.decoder_input_list = []
            target_t = tf.transpose(self.target, perm=[1, 0])
            self.target_list = []
            self.step = tf.placeholder(tf.float32, shape=(), name='step')
            for i in range(FLAGS.SEQ_LEN):
                self.encoder_input_list.append(encoder_input_t[i])
                assert (self.encoder_input_list[i].shape == FLAGS.BATCH_SIZE)
                self.decoder_input_list.append(decoder_input_t[i])
                assert (self.decoder_input_list[i].shape == FLAGS.BATCH_SIZE)
                self.target_list.append(target_t[i])
                assert (self.target_list[i].shape == FLAGS.BATCH_SIZE)
        with tf.variable_scope('Embedding'):
            self.embedding = tf.get_variable(name='embedding', shape=[FLAGS.VOCAB_SIZE, FLAGS.EMBED_SIZE], dtype=tf.float32, initializer=tf.random_normal_initializer(stddev=0.1))
        with tf.variable_scope('Encoder'):
            self.encoder = Encoder[FLAGS.ENCODER_NAME](self.embedding, self.encoder_input_list, is_training=self.is_training, ru=self.ru)
        with tf.name_scope('Latent_variables'):
            if self.is_training:
                self.latent_variables = self.encoder.latent_variables
            else:
                # At evaluation time latents are supplied by the caller.
                self.latent_variables = tf.placeholder(tf.float32, shape=(FLAGS.BATCH_SIZE, FLAGS.LATENT_VARIABLE_SIZE), name='latent_variables_input')
        with tf.variable_scope('Decoder'):
            self.decoder = Decoder[FLAGS.DECODER_NAME](self.decoder_input, self.latent_variables, self.embedding, self.batchloader, is_training=self.is_training, ru=self.ru)
        with tf.name_scope('Loss'):
            self.logits = self.decoder.logits
            # KL(q(z|x) || N(0, I)).
            self.kld = tf.reduce_mean(((- 0.5) * tf.reduce_sum((((self.encoder.logvar - tf.square(self.encoder.mu)) - tf.exp(self.encoder.logvar)) + 1), axis=1)))
            # Linear KL annealing from INIT_KLD_WEIGHT to 1 between
            # KLD_ANNEAL_START and KLD_ANNEAL_END steps.
            self.kld_weight = tf.clip_by_value((FLAGS.INIT_KLD_WEIGHT + (((1 - FLAGS.INIT_KLD_WEIGHT) * (self.step - FLAGS.KLD_ANNEAL_START)) / (FLAGS.KLD_ANNEAL_END - FLAGS.KLD_ANNEAL_START))), 0, 1)
            reconst_losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets) for (logits, targets) in zip(self.logits, self.target_list)]
            self.reconst_loss = (tf.reduce_mean(reconst_losses) * FLAGS.SEQ_LEN)
            self.loss = (self.reconst_loss + (self.kld_weight * self.kld))
        with tf.name_scope('Summary'):
            if is_training:
                loss_summary = tf.summary.scalar('loss', self.loss, family='train_loss')
                reconst_loss_summary = tf.summary.scalar('reconst_loss', self.reconst_loss, family='train_loss')
                kld_summary = tf.summary.scalar('kld', self.kld, family='kld')
                kld_weight_summary = tf.summary.scalar('kld_weight', self.kld_weight, family='parameters')
                mu_summary = tf.summary.histogram('mu', tf.reduce_mean(self.encoder.mu, 0))
                var_summary = tf.summary.histogram('var', tf.reduce_mean(tf.exp(self.encoder.logvar), 0))
                lr_summary = tf.summary.scalar('lr', self.lr, family='parameters')
                self.merged_summary = tf.summary.merge([loss_summary, reconst_loss_summary, kld_summary, kld_weight_summary, mu_summary, var_summary, lr_summary])
            else:
                valid_reconst_loss_summary = tf.summary.scalar('valid_reconst_loss', self.reconst_loss, family='valid_loss')
                self.merged_summary = tf.summary.merge([valid_reconst_loss_summary])
        if self.is_training:
            with tf.name_scope('Optimizer'):
                tvars = tf.trainable_variables()
                (grads, _) = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), FLAGS.MAX_GRAD)
                optimizer = tf.train.AdamOptimizer(self.lr, beta1=0.5)
                self.train_op = optimizer.apply_gradients(zip(grads, tvars))
|
def sampling():
    """Sample SAMPLE_NUM sentences from the trained VAE prior and log them.

    Restores the checkpoint, draws z ~ N(0, I) in batches, decodes each batch,
    and writes the generated texts to SAVE_FILE via `log_and_print`.
    """
    batchloader = BatchLoader(with_label=True)
    sess_conf = tf.ConfigProto(gpu_options=tf.GPUOptions())
    with tf.Graph().as_default():
        with tf.Session(config=sess_conf) as sess:
            with tf.variable_scope('VAE'):
                vae_restored = VAE[FLAGS.VAE_NAME](batchloader, is_training=False, ru=False)
            saver = tf.train.Saver()
            saver.restore(sess, (MODEL_DIR + '/model40.ckpt'))
            # `itr` full batches plus (possibly) one partial batch of `res`.
            itr = (SAMPLE_NUM // FLAGS.BATCH_SIZE)
            res = (SAMPLE_NUM - (itr * FLAGS.BATCH_SIZE))
            generated_texts = []
            lv_list = []
            for i in range((itr + 1)):
                if ((i == itr) and (res == 0)):
                    # SAMPLE_NUM divides evenly: the trailing partial batch
                    # would generate nothing, so skip the wasted decode.
                    break
                z = np.random.normal(loc=0.0, scale=1.0, size=[FLAGS.BATCH_SIZE, FLAGS.LATENT_VARIABLE_SIZE])
                sample_logits = sess.run(vae_restored.logits, feed_dict={vae_restored.latent_variables: z})
                lv_list.extend(z)
                # Only keep `res` samples from the final (partial) batch.
                if (i == itr):
                    sample_num = res
                else:
                    sample_num = FLAGS.BATCH_SIZE
                sample_texts = batchloader.logits2str(logits=sample_logits, sample_num=sample_num)
                generated_texts.extend(sample_texts)
            for i in range(SAMPLE_NUM):
                log_and_print(SAVE_FILE, generated_texts[i])
|
def log_and_print(log_file, logstr, br=True):
    """Echo *logstr* to stdout and append it to *log_file*.

    Args:
        log_file: path of the log file (opened in append mode).
        logstr: message to record.
        br: when True, a trailing newline is appended to the file entry.
    """
    print(logstr)
    line = (logstr + '\n') if br else logstr
    with open(log_file, 'a') as fp:
        fp.write(line)
|
def main():
    # Train the semi-supervised VAE: each step runs one labeled update and one
    # unlabeled update, logs periodically, and checkpoints once per epoch.
    # NOTE(review): this file contains two `main` definitions (see the second
    # one below); in a single module the later definition would shadow this
    # one — these appear to be concatenated scripts.
    os.mkdir(FLAGS.LOG_DIR)
    os.mkdir((FLAGS.LOG_DIR + '/model'))
    log_file = (FLAGS.LOG_DIR + '/log.txt')
    # Snapshot the configuration next to the logs for reproducibility.
    shutil.copyfile('config.py', (FLAGS.LOG_DIR + '/config.py'))
    shutil.copyfile('README.md', (FLAGS.LOG_DIR + '/README.md'))
    sess_conf = tf.ConfigProto(gpu_options=tf.GPUOptions())
    with tf.Graph().as_default():
        with tf.Session(config=sess_conf) as sess:
            batchloader = BatchLoader(with_label=True)
            # Three graph views over the same variables (reuse=True):
            # labeled training, unlabeled training, and evaluation.
            with tf.variable_scope('VAE'):
                vae_labeled = VAE[FLAGS.VAE_NAME](batchloader, is_training=True, without_label=False, ru=False)
            with tf.variable_scope('VAE', reuse=True):
                vae_unlabeled = VAE[FLAGS.VAE_NAME](batchloader, is_training=True, without_label=True, ru=True)
            with tf.variable_scope('VAE', reuse=True):
                vae_test = VAE[FLAGS.VAE_NAME](batchloader, is_training=False, without_label=False, ru=True)
            saver = tf.train.Saver()
            summary_writer = tf.summary.FileWriter(FLAGS.LOG_DIR, sess.graph)
            sess.run(tf.global_variables_initializer())
            log_and_print(log_file, 'start training')
            # Running accumulators, reset at every log point.
            loss_sum = []
            labeled_kld_sum = []
            labeled_reconst_loss_sum = []
            discriminator_loss_sum = []
            unlabeled_kld_sum = []
            unlabeled_reconst_loss_sum = []
            lr = FLAGS.LEARNING_RATE
            step = 0
            gumbel_temperature = 1.0
            for epoch in range(FLAGS.EPOCH):
                log_and_print(log_file, ('epoch %d' % (epoch + 1)))
                # Halve the learning rate on even epochs once decay starts.
                if ((epoch >= FLAGS.LR_DECAY_START) and ((epoch % 2) == 0)):
                    lr *= 0.5
                for batch in range(FLAGS.BATCHES_PER_EPOCH):
                    step += 1
                    # Anneal the Gumbel-softmax temperature every 100 steps,
                    # floored at 0.5.
                    if ((step % 100) == 99):
                        gumbel_temperature = max(0.5, np.exp(((- 1e-05) * step)))
                    (labeled_encoder_input, labeled_decoder_input, labeled_target, label, unlabeled_encoder_input, unlabeled_decoder_input, unlabeled_target) = batchloader.next_batch(FLAGS.BATCH_SIZE, 'train')
                    # --- labeled update ---
                    labeled_feed_dict = {vae_labeled.encoder_input: labeled_encoder_input, vae_labeled.decoder_input: labeled_decoder_input, vae_labeled.target: labeled_target, vae_labeled.label: label, vae_labeled.step: step, vae_labeled.lr: lr}
                    (labeled_logits, labeled_loss, labeled_reconst_loss, labeled_kld, discriminator_loss, discriminator_accuracy, labeled_merged_summary, _) = sess.run([vae_labeled.logits, vae_labeled.loss, vae_labeled.reconst_loss, vae_labeled.kld, vae_labeled.discriminator_loss, vae_labeled.discriminator_accuracy, vae_labeled.merged_summary, vae_labeled.train_op], feed_dict=labeled_feed_dict)
                    labeled_reconst_loss_sum.append(labeled_reconst_loss)
                    labeled_kld_sum.append(labeled_kld)
                    discriminator_loss_sum.append(discriminator_loss)
                    summary_writer.add_summary(labeled_merged_summary, step)
                    # --- unlabeled update ---
                    unlabeled_feed_dict = {vae_unlabeled.encoder_input: unlabeled_encoder_input, vae_unlabeled.decoder_input: unlabeled_decoder_input, vae_unlabeled.target: unlabeled_target, vae_unlabeled.step: step, vae_unlabeled.lr: lr, vae_unlabeled.gumbel_temperature: gumbel_temperature}
                    (unlabeled_logits, unlabeled_loss, unlabeled_reconst_loss, unlabeled_kld, unlabeled_merged_summary, _) = sess.run([vae_unlabeled.logits, vae_unlabeled.loss, vae_unlabeled.reconst_loss, vae_unlabeled.kld, vae_unlabeled.merged_summary, vae_unlabeled.train_op], feed_dict=unlabeled_feed_dict)
                    unlabeled_reconst_loss_sum.append(unlabeled_reconst_loss)
                    unlabeled_kld_sum.append(unlabeled_kld)
                    loss_sum.append((labeled_loss + unlabeled_loss))
                    summary_writer.add_summary(unlabeled_merged_summary, step)
                    # Log early (batch 10) and then every 100 batches.
                    if ((batch == 9) or ((batch % 100) == 99)):
                        log_and_print(log_file, ('epoch %d batch %d' % ((epoch + 1), (batch + 1))), br=False)
                        ave_loss = np.average(loss_sum)
                        log_and_print(log_file, ('\tloss: %f' % ave_loss), br=False)
                        labeled_ave_rnnloss = np.average(labeled_reconst_loss_sum)
                        log_and_print(log_file, ('\tlabeled_reconst_loss: %f' % labeled_ave_rnnloss), br=False)
                        labeled_ave_kld = np.average(labeled_kld_sum)
                        log_and_print(log_file, ('\tlabeled_kld %f' % labeled_ave_kld), br=True)
                        unlabeled_ave_rnnloss = np.average(unlabeled_reconst_loss_sum)
                        log_and_print(log_file, ('\tunlabeled_reconst_loss: %f' % unlabeled_ave_rnnloss), br=False)
                        unlabeled_ave_kld = np.average(unlabeled_kld_sum)
                        log_and_print(log_file, ('\tunlabeled_kld %f' % unlabeled_ave_kld), br=True)
                        ave_disc_loss = np.average(discriminator_loss_sum)
                        log_and_print(log_file, ('\tdisc_loss %f' % ave_disc_loss), br=True)
                        # Reset the accumulators for the next window.
                        loss_sum = []
                        labeled_kld_sum = []
                        labeled_reconst_loss_sum = []
                        discriminator_loss_sum = []
                        unlabeled_kld_sum = []
                        unlabeled_reconst_loss_sum = []
                        # Show a training reconstruction sample.
                        (sample_train_input, sample_train_input_list) = sess.run([vae_labeled.encoder_input, vae_labeled.encoder_input_list], feed_dict=labeled_feed_dict)
                        encoder_input_texts = batchloader.logits2str(sample_train_input_list, 1, onehot=False, numpy=True)
                        log_and_print(log_file, ('\ttrain input: %s' % encoder_input_texts[0]))
                        sample_train_outputs = batchloader.logits2str(labeled_logits, 1)
                        log_and_print(log_file, ('\ttrain output: %s' % sample_train_outputs[0]))
                        # Re-decode the same input through the eval graph.
                        train_latent_variables = sess.run(vae_test.sampler.latent_variables, feed_dict={vae_test.encoder_input: sample_train_input, vae_test.label: label})
                        sample_logits = sess.run(vae_test.logits, feed_dict={vae_test.latent_variables: train_latent_variables, vae_test.label: label})
                        train_valid_samples = batchloader.logits2str(sample_logits, 1)
                        print(('\ttrain valid output: %s' % train_valid_samples[0]))
                        # Validation: encode a held-out batch, then decode it.
                        (sample_input, _, sample_target, sample_label) = batchloader.next_batch(FLAGS.BATCH_SIZE, 'test')
                        (sample_input_list, sample_latent_variables, discriminator_loss, discriminator_accuracy) = sess.run([vae_test.encoder_input_list, vae_test.sampler.latent_variables, vae_test.discriminator_loss, vae_test.discriminator_accuracy], feed_dict={vae_test.encoder_input: sample_input, vae_test.label: sample_label})
                        (sample_logits, valid_loss, merged_summary) = sess.run([vae_test.logits, vae_test.reconst_loss, vae_test.merged_summary], feed_dict={vae_test.encoder_input: sample_input, vae_test.target: sample_target, vae_test.label: sample_label, vae_test.latent_variables: sample_latent_variables})
                        log_and_print(log_file, ('\tvalid loss: %f' % valid_loss))
                        sample_input_texts = batchloader.logits2str(sample_input_list, 1, onehot=False, numpy=True)
                        sample_output_texts = batchloader.logits2str(sample_logits, 1)
                        log_and_print(log_file, ('\tsample input: %s' % sample_input_texts[0]))
                        log_and_print(log_file, ('\tsample output: %s' % sample_output_texts[0]))
                        summary_writer.add_summary(merged_summary, step)
                # Checkpoint at the end of every epoch.
                save_path = saver.save(sess, (FLAGS.LOG_DIR + ('/model/model%d.ckpt' % (epoch + 1))))
                log_and_print(log_file, ('Model saved in file %s' % save_path))
|
def log_and_print(log_file, logstr, br=True):
    """Print a message and append it to a log file.

    Args:
        log_file: path to the log file; opened in append mode.
        logstr: text to log.
        br: append a trailing newline to the file entry when True.
    """
    print(logstr)
    if br:
        entry = logstr + '\n'
    else:
        entry = logstr
    with open(log_file, 'a') as handle:
        handle.write(entry)
|
def main():
    # Train the plain (unsupervised) VAE, log every 100 batches, and
    # checkpoint once per epoch.
    # NOTE(review): second `main` in this file (concatenated scripts); it
    # shadows the earlier definition if both live in one module.
    os.mkdir(FLAGS.LOG_DIR)
    os.mkdir((FLAGS.LOG_DIR + '/model'))
    log_file = (FLAGS.LOG_DIR + '/log.txt')
    # Snapshot the configuration next to the logs for reproducibility.
    shutil.copyfile('config.py', (FLAGS.LOG_DIR + '/config.py'))
    shutil.copyfile('README.md', (FLAGS.LOG_DIR + '/README.md'))
    sess_conf = tf.ConfigProto(gpu_options=tf.GPUOptions())
    with tf.Graph().as_default():
        with tf.Session(config=sess_conf) as sess:
            batchloader = BatchLoader(with_label=False)
            # Two graph views over shared variables: train and eval.
            with tf.variable_scope('VAE'):
                vae = VAE[FLAGS.VAE_NAME](batchloader, is_training=True, ru=False)
            with tf.variable_scope('VAE', reuse=True):
                vae_test = VAE[FLAGS.VAE_NAME](batchloader, is_training=False, ru=True)
            saver = tf.train.Saver()
            summary_writer = tf.summary.FileWriter(FLAGS.LOG_DIR, sess.graph)
            sess.run(tf.global_variables_initializer())
            log_and_print(log_file, 'start training')
            # Running accumulators, reset at every log point.
            loss_sum = []
            reconst_loss_sum = []
            kld_sum = []
            lr = FLAGS.LEARNING_RATE
            step = 0
            for epoch in range(FLAGS.EPOCH):
                log_and_print(log_file, ('epoch %d' % (epoch + 1)))
                # Halve the learning rate on even epochs once decay starts.
                if ((epoch >= FLAGS.LR_DECAY_START) and ((epoch % 2) == 0)):
                    lr *= 0.5
                for batch in range(FLAGS.BATCHES_PER_EPOCH):
                    step += 1
                    (encoder_input, decoder_input, target) = batchloader.next_batch(FLAGS.BATCH_SIZE, 'train')
                    feed_dict = {vae.encoder_input: encoder_input, vae.decoder_input: decoder_input, vae.target: target, vae.step: step, vae.lr: lr}
                    (logits, loss, reconst_loss, kld, merged_summary, _) = sess.run([vae.logits, vae.loss, vae.reconst_loss, vae.kld, vae.merged_summary, vae.train_op], feed_dict=feed_dict)
                    reconst_loss_sum.append(reconst_loss)
                    kld_sum.append(kld)
                    loss_sum.append(loss)
                    summary_writer.add_summary(merged_summary, step)
                    # Log every 100 batches.
                    if ((batch % 100) == 99):
                        log_and_print(log_file, ('epoch %d batch %d' % ((epoch + 1), (batch + 1))), br=False)
                        ave_loss = np.average(loss_sum)
                        log_and_print(log_file, ('\tloss: %f' % ave_loss), br=False)
                        ave_rnnloss = np.average(reconst_loss_sum)
                        log_and_print(log_file, ('\treconst_loss: %f' % ave_rnnloss), br=False)
                        ave_kld = np.average(kld_sum)
                        log_and_print(log_file, ('\tkld %f' % ave_kld), br=False)
                        # Reset the accumulators for the next window.
                        loss_sum = []
                        reconst_loss_sum = []
                        kld_sum = []
                        # Show a training reconstruction sample.
                        (sample_train_input, sample_train_input_list) = sess.run([vae.encoder_input, vae.encoder_input_list], feed_dict=feed_dict)
                        encoder_input_texts = batchloader.logits2str(sample_train_input_list, 1, onehot=False, numpy=True)
                        log_and_print(log_file, ('\ttrain input: %s' % encoder_input_texts[0]))
                        sample_train_outputs = batchloader.logits2str(logits, 1)
                        log_and_print(log_file, ('\ttrain output: %s' % sample_train_outputs[0]))
                        # Validation: encode a held-out batch, then decode it
                        # through the eval graph using the encoded latents.
                        (sample_input, _, sample_target) = batchloader.next_batch(FLAGS.BATCH_SIZE, 'test')
                        (sample_input_list, sample_latent_variables) = sess.run([vae_test.encoder_input_list, vae_test.encoder.latent_variables], feed_dict={vae_test.encoder_input: sample_input})
                        (sample_logits, valid_loss, merged_summary) = sess.run([vae_test.logits, vae_test.reconst_loss, vae_test.merged_summary], feed_dict={vae_test.target: sample_target, vae_test.latent_variables: sample_latent_variables})
                        log_and_print(log_file, ('\tvalid loss: %f' % valid_loss))
                        sample_input_texts = batchloader.logits2str(sample_input_list, 1, onehot=False, numpy=True)
                        sample_output_texts = batchloader.logits2str(sample_logits, 1)
                        log_and_print(log_file, ('\tsample input: %s' % sample_input_texts[0]))
                        log_and_print(log_file, ('\tsample output: %s' % sample_output_texts[0]))
                        summary_writer.add_summary(merged_summary, step)
                # Checkpoint at the end of every epoch.
                save_path = saver.save(sess, (FLAGS.LOG_DIR + ('/model/model%d.ckpt' % (epoch + 1))))
                log_and_print(log_file, ('Model saved in file %s' % save_path))
|
class BatchLoader():
    # Loads pickled integer-index character sequences (and optionally labels),
    # shuffles them, splits off a validation set, and serves random
    # mini-batches as (encoder_input, decoder_input, target[, label]) lists.
    def __init__(self, with_label=True):
        self.with_label = with_label
        # Special vocabulary tokens expected to exist in the dictionary file.
        self.go_token = '<GO>'
        self.pad_token = '<PAD>'
        self.unk_token = '<UNK>'
        with open(FLAGS.DATA_PATH, 'rb') as f:
            data = pkl.load(f)
        if self.with_label:
            with open(FLAGS.LABEL_PATH, 'rb') as f:
                self.label = pkl.load(f)
        with open(FLAGS.DICT_PATH, 'rb') as f:
            self.char_to_idx = pkl.load(f)
        # Inverse mapping for decoding indices back to characters.
        self.idx_to_char = {}
        for (char, idx) in self.char_to_idx.items():
            self.idx_to_char[idx] = char
        # Use printable placeholders for PAD/UNK when rendering text.
        self.idx_to_char[self.char_to_idx[self.pad_token]] = '_'
        self.idx_to_char[self.char_to_idx[self.unk_token]] = '??'
        # Shuffle data (and labels with the same permutation).
        indexes = np.array([i for i in range(len(data))], dtype=np.int32)
        indexes = np.random.permutation(indexes)
        data = np.array([np.copy(data[index]) for index in indexes])
        if self.with_label:
            self.label = np.array([np.copy(self.label[index]) for index in indexes])
        # First 10% of the shuffled data is held out for validation.
        self.split = (len(data) // 10)
        if with_label:
            # After validation: LABELED_NUM labeled examples, rest unlabeled.
            (self.valid_data, self.labeled_data, self.unlabeled_data) = (data[:self.split], data[self.split:(self.split + FLAGS.LABELED_NUM)], data[(self.split + FLAGS.LABELED_NUM):])
            (self.valid_label, self.train_label) = (self.label[:self.split], self.label[self.split:(self.split + FLAGS.LABELED_NUM)])
        else:
            (self.valid_data, self.train_data) = (data[:self.split], data[self.split:])
        # NOTE(review): this replaces the `go_input` *method* with its result;
        # it works because the method is only needed once, but it is fragile.
        self.go_input = self.go_input()
    def next_batch(self, batch_size, target: str):
        # Return one random mini-batch.
        #   'train' + with_label -> [enc, dec, tgt, label, enc_u, dec_u, tgt_u]
        #   'train' без label    -> [enc, dec, tgt]
        #   otherwise (valid)    -> [enc, dec, tgt(, label)]
        if (target == 'train'):
            if self.with_label:
                indexes = np.array(np.random.randint(len(self.labeled_data), size=batch_size))
                encoder_input = [np.copy(self.labeled_data[idx]).tolist() for idx in indexes]
                labeled_list = self.wrap_tensor(encoder_input)
                label = np.array([self.train_label[idx] for idx in indexes])
                labeled_list.append(label)
                # Independently sampled unlabeled half of the batch.
                indexes = np.array(np.random.randint(len(self.unlabeled_data), size=batch_size))
                encoder_input = [np.copy(self.unlabeled_data[idx]).tolist() for idx in indexes]
                unlabeled_list = self.wrap_tensor(encoder_input)
                return (labeled_list + unlabeled_list)
            else:
                indexes = np.array(np.random.randint(len(self.train_data), size=batch_size))
                encoder_input = [np.copy(self.train_data[idx]).tolist() for idx in indexes]
                return self.wrap_tensor(encoder_input)
        else:
            # Any non-'train' target samples from the validation split.
            indexes = np.array(np.random.randint(len(self.valid_data), size=batch_size))
            encoder_input = [np.copy(self.valid_data[idx]).tolist() for idx in indexes]
            if self.with_label:
                label = np.array([np.copy(self.valid_label[idx]).tolist() for idx in indexes])
                labeled_list = self.wrap_tensor(encoder_input)
                labeled_list.append(label)
                return labeled_list
            else:
                return self.wrap_tensor(encoder_input)
    def wrap_tensor(self, input):
        # Build [encoder_input, decoder_input, decoder_target] from raw
        # sequences. The decoder input is the sequence shifted right with a
        # <GO> token prepended (teacher forcing).
        encoder_input = np.copy(input)
        decoder_input = np.array([np.hstack(([self.char_to_idx[self.go_token]], line[:(len(line) - 1)])) for line in np.copy(input)])
        # Word dropout: randomly replace decoder input tokens with <UNK>,
        # never touching <GO> or <PAD>.
        if (FLAGS.DECODER_DROPWORD_KEEP < 1.0):
            r = np.random.rand(FLAGS.BATCH_SIZE, FLAGS.SEQ_LEN)
            for i in range(FLAGS.BATCH_SIZE):
                for j in range(FLAGS.SEQ_LEN):
                    if ((r[i][j] > FLAGS.DECODER_DROPWORD_KEEP) and (decoder_input[i][j] not in [self.char_to_idx[self.go_token], self.char_to_idx[self.pad_token]])):
                        decoder_input[i][j] = self.char_to_idx[self.unk_token]
        decoder_target = np.copy(input)
        return [encoder_input, decoder_input, decoder_target]
    def go_input(self):
        # A batch-sized vector of <GO> token indices (consumed in __init__).
        go_input = np.array([self.char_to_idx[self.go_token] for _ in range(FLAGS.BATCH_SIZE)])
        return go_input
    def logits2str(self, logits, sample_num, onehot=True, numpy=False):
        ' convert logits into texts\n Args:\n logits: list of np.array (if onehot: [batch_size, vocab_size] else: [batch_size])\n Output:\n list of texts (batch_size) '
        assert (sample_num <= FLAGS.BATCH_SIZE)
        generated_texts = []
        if onehot:
            # Per-timestep argmax over the vocabulary.
            indices = [np.argmax(l, 1) for l in logits]
        else:
            indices = logits
        seq_len = len(indices)
        assert (seq_len == FLAGS.SEQ_LEN)
        # indices is time-major: indices[j][i] is timestep j of sample i.
        for i in range(sample_num):
            x = ''
            for j in range(seq_len):
                x += self.idx_to_char[indices[j][i]]
            generated_texts.append(x)
        return generated_texts
|
def collect_torch_env() -> str:
    """Return a human-readable description of the PyTorch build environment."""
    try:
        from torch import __config__ as torch_config
    except ImportError:
        # Older torch without torch.__config__: fall back to collect_env.
        from torch.utils.collect_env import get_pretty_env_info
        return get_pretty_env_info()
    return torch_config.show()
|
def get_env_module() -> Tuple[str, str]:
    """Return ('ENV_MODULE', value) where value comes from the environment.

    The value is the ENV_MODULE environment variable, or '<not set>' when it
    is absent.  (Fix: the original annotation `Tuple[str]` described a
    1-tuple; this function returns a 2-tuple.)
    """
    var_name = 'ENV_MODULE'
    return (var_name, os.environ.get(var_name, '<not set>'))
|
def collect_env_info() -> str:
    """Collect environment facts (Python, PyTorch, CUDA, Pillow, cv2) as a table.

    Returns:
        A tabulated multi-line string followed by the PyTorch build config
        from `collect_torch_env()`.
    """
    data = []
    data.append(('Python', sys.version.replace('\n', '')))
    data.append(get_env_module())
    data.append(('PyTorch', torch.__version__))
    data.append(('PyTorch Debug Build', torch.version.debug))
    has_cuda = torch.cuda.is_available()
    data.append(('CUDA available', has_cuda))
    if has_cuda:
        # Fix: CUDA_VISIBLE_DEVICES may be unset even when CUDA is usable;
        # the original indexed os.environ directly and raised KeyError.
        data.append(('CUDA ID', os.environ.get('CUDA_VISIBLE_DEVICES', '<not set>')))
        devices = defaultdict(list)
        for k in range(torch.cuda.device_count()):
            devices[torch.cuda.get_device_name(k)].append(str(k))
        for (name, devids) in devices.items():
            data.append((('GPU ' + ','.join(devids)), name))
    data.append(('Pillow', PIL.__version__))
    try:
        import cv2
        data.append(('cv2', cv2.__version__))
    except ImportError:
        # OpenCV is optional; just omit the row.
        pass
    env_str = (tabulate(data) + '\n')
    env_str += collect_torch_env()
    return env_str
|
def default_argument_parser():
    """Create a simple argument parser wrapping the config file.

    Returns:
        argparse.ArgumentParser with --config-file, --train-type, positional
        `opts` (free-form config overrides) and --id (defaults to a timestamp).
    """
    from datetime import datetime

    parser = argparse.ArgumentParser(description='visual-prompt')
    parser.add_argument('--config-file', default='', metavar='FILE', help='path to config file')
    parser.add_argument('--train-type', default='', help='training types')
    parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER)
    default_run_id = datetime.now().strftime('%y%m%d-%H-%M-%S')
    parser.add_argument('--id', default=default_run_id)
    return parser
|
def logging_train_setup(args, cfg) -> None:
    """Prepare logging for a training run.

    Creates the output directory, sets up the project logger, and logs the
    environment, command-line arguments, config-file contents, and the full
    config. Also applies the cudnn benchmark setting (training only).
    """
    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        PathManager.mkdirs(output_dir)

    logger = logging.setup_logging(cfg.NUM_GPUS, get_world_size(), output_dir, name='visual_prompt')

    logger.info(f'Rank of current process: {get_rank()}. World size: {get_world_size()}')
    logger.info('Environment info:\n' + collect_env_info())
    logger.info('Command line arguments: ' + str(args))

    if getattr(args, 'config_file', '') != '':
        logger.info('Contents of args.config_file={}:\n{}'.format(args.config_file, PathManager.open(args.config_file, 'r').read()))

    logger.info('Training with config:')
    logger.info(pprint.pformat(cfg))

    # Only enable cudnn autotuning when not in eval-only mode.
    if not getattr(args, 'eval_only', False):
        torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK
|
def get_cfg():
    """Return a fresh, independent copy of the default configuration."""
    cfg = _C.clone()
    return cfg
|
class CfgNode(_CfgNode):
    """The same as `fvcore.common.config.CfgNode`, but different in:

    support manifold path
    """
    @classmethod
    def _open_cfg(cls, filename):
        # Route config-file access through PathManager so non-local
        # (e.g. manifold://) paths are supported.
        return PathManager.open(filename, 'r')
    def dump(self, *args, **kwargs):
        """Returns:
            str: a yaml string representation of the config
        """
        return super().dump(*args, **kwargs)
|
def get_testing():
    """Returns a minimal ViT configuration intended only for unit tests."""
    # Build the transformer sub-config first, then assemble the full config
    # in the same key order as the other config getters.
    transformer = ml_collections.ConfigDict()
    transformer.mlp_dim = 1
    transformer.num_heads = 1
    transformer.num_layers = 1
    transformer.attention_dropout_rate = 0.0
    transformer.dropout_rate = 0.1

    config = ml_collections.ConfigDict()
    config.patches = ml_collections.ConfigDict({'size': (16, 16)})
    config.hidden_size = 1
    config.transformer = transformer
    config.classifier = 'token'
    config.representation_size = None
    return config
|
def get_b16_config():
    """Returns the ViT-B/16 configuration."""
    # Transformer hyper-parameters for the Base model.
    transformer = ml_collections.ConfigDict()
    transformer.mlp_dim = 3072
    transformer.num_heads = 12
    transformer.num_layers = 12
    transformer.attention_dropout_rate = 0.0
    transformer.dropout_rate = 0.1

    config = ml_collections.ConfigDict()
    config.patches = ml_collections.ConfigDict({'size': (16, 16)})
    config.hidden_size = 768
    config.transformer = transformer
    config.classifier = 'token'
    config.representation_size = None
    return config
|
def get_r50_b16_config():
    """Returns the Resnet50 + ViT-B/16 configuration."""
    config = get_b16_config()
    # Hybrid backbone: patches come from a ResNet feature grid, not raw pixels.
    del config.patches.size
    config.patches.grid = (14, 14)
    resnet = ml_collections.ConfigDict()
    resnet.num_layers = (3, 4, 9)
    resnet.width_factor = 1
    config.resnet = resnet
    return config
|
def get_b32_config():
    """Returns the ViT-B/32 configuration (B/16 with 32x32 patches)."""
    cfg = get_b16_config()
    cfg.patches.size = (32, 32)
    return cfg
|
def get_b8_config():
    """Returns the ViT-B/8 configuration (B/16 with 8x8 patches).

    Fix: the original docstring incorrectly said "ViT-B/32".
    """
    config = get_b16_config()
    config.patches.size = (8, 8)
    return config
|
def get_l16_config():
    """Returns the ViT-L/16 configuration."""
    # Transformer hyper-parameters for the Large model.
    transformer = ml_collections.ConfigDict()
    transformer.mlp_dim = 4096
    transformer.num_heads = 16
    transformer.num_layers = 24
    transformer.attention_dropout_rate = 0.0
    transformer.dropout_rate = 0.1

    config = ml_collections.ConfigDict()
    config.patches = ml_collections.ConfigDict({'size': (16, 16)})
    config.hidden_size = 1024
    config.transformer = transformer
    config.classifier = 'token'
    config.representation_size = None
    return config
|
def get_l32_config():
    """Returns the ViT-L/32 configuration (L/16 with 32x32 patches)."""
    cfg = get_l16_config()
    cfg.patches.size = (32, 32)
    return cfg
|
def get_h14_config():
    """Returns the ViT-H/14 configuration.

    (Docstring fixed: it previously claimed this was the L/16 config.)
    """
    config = ml_collections.ConfigDict()
    config.patches = ml_collections.ConfigDict({'size': (14, 14)})
    config.hidden_size = 1280
    # Transformer encoder hyper-parameters for the "Huge" model.
    config.transformer = ml_collections.ConfigDict()
    config.transformer.mlp_dim = 5120
    config.transformer.num_heads = 16
    config.transformer.num_layers = 32
    config.transformer.attention_dropout_rate = 0.0
    config.transformer.dropout_rate = 0.1
    config.classifier = 'token'
    config.representation_size = None
    return config
|
class JSONDataset(torch.utils.data.Dataset):
    """Image-classification dataset whose annotations live in a JSON file.

    The per-split JSON file maps image file names (relative to
    ``get_imagedir()``) to raw class ids; ids are remapped to contiguous
    integers at construction time. Subclasses must implement
    ``get_imagedir``.
    """

    def __init__(self, cfg, split):
        assert (split in {'train', 'val', 'test'}), "Split '{}' not supported for {} dataset".format(split, cfg.DATA.NAME)
        logger.info('Constructing {} dataset {}...'.format(cfg.DATA.NAME, split))
        self.cfg = cfg
        self._split = split
        self.name = cfg.DATA.NAME
        self.data_dir = cfg.DATA.DATAPATH
        self.data_percentage = cfg.DATA.PERCENTAGE
        self._construct_imdb(cfg)
        self.transform = get_transforms(split, cfg.DATA.CROPSIZE)

    def get_anno(self):
        """Loads the split's annotation dict (image name -> raw class id)."""
        anno_path = os.path.join(self.data_dir, '{}.json'.format(self._split))
        if ('train' in self._split):
            # Reduced training sets are stored as e.g. "train_0.1.json".
            if (self.data_percentage < 1.0):
                anno_path = os.path.join(self.data_dir, '{}_{}.json'.format(self._split, self.data_percentage))
        assert os.path.exists(anno_path), '{} dir not found'.format(anno_path)
        return read_json(anno_path)

    def get_imagedir(self):
        """Returns the root directory holding this dataset's image files."""
        raise NotImplementedError()

    def _construct_imdb(self, cfg):
        """Constructs the imdb (list of {'im_path', 'class'} records)."""
        img_dir = self.get_imagedir()
        assert os.path.exists(img_dir), '{} dir not found'.format(img_dir)
        anno = self.get_anno()
        # Map raw class ids to contiguous ids, in sorted order.
        self._class_ids = sorted(list(set(anno.values())))
        self._class_id_cont_id = {v: i for (i, v) in enumerate(self._class_ids)}
        self._imdb = []
        for (img_name, cls_id) in anno.items():
            cont_id = self._class_id_cont_id[cls_id]
            im_path = os.path.join(img_dir, img_name)
            self._imdb.append({'im_path': im_path, 'class': cont_id})
        logger.info('Number of images: {}'.format(len(self._imdb)))
        logger.info('Number of classes: {}'.format(len(self._class_ids)))

    def get_info(self):
        """Returns (num_images, num_classes, dataset_name)."""
        num_imgs = len(self._imdb)
        return (num_imgs, self.get_class_num(), self.name)

    def get_class_num(self):
        return self.cfg.DATA.NUMBER_CLASSES

    def get_class_weights(self, weight_type):
        """get a list of class weight, return a list float"""
        if ('train' not in self._split):
            raise ValueError(('only getting training class distribution, ' + 'got split {} instead'.format(self._split)))
        cls_num = self.get_class_num()
        if (weight_type == 'none'):
            return ([1.0] * cls_num)
        # NOTE(review): _class_ids holds *unique* ids, so every count below
        # is 1 and the resulting weights are uniform. Counting the labels in
        # self._imdb may have been intended — confirm before changing.
        id2counts = Counter(self._class_ids)
        assert (len(id2counts) == cls_num)
        num_per_cls = np.array([id2counts[i] for i in self._class_ids])
        if (weight_type == 'inv'):
            mu = (- 1.0)
        elif (weight_type == 'inv_sqrt'):
            mu = (- 0.5)
        else:
            # Previously an unrecognized type crashed with NameError on `mu`.
            raise ValueError('Unsupported weight_type: {}'.format(weight_type))
        weight_list = (num_per_cls ** mu)
        # Normalize so the weights sum to cls_num.
        weight_list = (np.divide(weight_list, np.linalg.norm(weight_list, 1)) * cls_num)
        return weight_list.tolist()

    def __getitem__(self, index):
        # Load the image lazily from disk and apply the split's transforms.
        # (Removed a dead local `index` reassignment that was never used.)
        im = tv.datasets.folder.default_loader(self._imdb[index]['im_path'])
        label = self._imdb[index]['class']
        im = self.transform(im)
        sample = {'image': im, 'label': label}
        return sample

    def __len__(self):
        return len(self._imdb)
|
class CUB200Dataset(JSONDataset):
    """CUB_200 dataset."""

    def __init__(self, cfg, split):
        super(CUB200Dataset, self).__init__(cfg, split)

    def get_imagedir(self):
        # Images live in an "images" subdirectory of the data root.
        return os.path.join(self.data_dir, 'images')
|
class CarsDataset(JSONDataset):
    """stanford-cars dataset."""

    def __init__(self, cfg, split):
        super(CarsDataset, self).__init__(cfg, split)

    def get_imagedir(self):
        # Image paths in the annotations are relative to the data root itself.
        return self.data_dir
|
class DogsDataset(JSONDataset):
    """stanford-dogs dataset."""

    def __init__(self, cfg, split):
        super(DogsDataset, self).__init__(cfg, split)

    def get_imagedir(self):
        # Images live in an "Images" subdirectory of the data root.
        return os.path.join(self.data_dir, 'Images')
|
class FlowersDataset(JSONDataset):
    """flowers dataset."""

    def __init__(self, cfg, split):
        super(FlowersDataset, self).__init__(cfg, split)

    def get_imagedir(self):
        # Image paths in the annotations are relative to the data root itself.
        return self.data_dir
|
class NabirdsDataset(JSONDataset):
    """Nabirds dataset."""

    def __init__(self, cfg, split):
        super(NabirdsDataset, self).__init__(cfg, split)

    def get_imagedir(self):
        # Images live in an "images" subdirectory of the data root.
        return os.path.join(self.data_dir, 'images')
|
class TFDataset(torch.utils.data.Dataset):
    """Torch dataset that materializes a VTAB TF data pipeline in memory.

    The whole TF dataset for the split is drained into numpy arrays at
    construction time; ``__getitem__`` converts to normalized torch tensors.
    """

    def __init__(self, cfg, split):
        assert (split in {'train', 'val', 'test', 'trainval'}), "Split '{}' not supported for {} dataset".format(split, cfg.DATA.NAME)
        logger.info('Constructing {} dataset {}...'.format(cfg.DATA.NAME, split))
        self.cfg = cfg
        self._split = split
        self.name = cfg.DATA.NAME
        # ImageNet normalization statistics, shaped for CHW broadcasting.
        self.img_mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
        self.img_std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)
        self.get_data(cfg, split)

    def get_data(self, cfg, split):
        """Drains the TF pipeline into in-memory numpy images and int targets."""
        tf_data = build_tf_dataset(cfg, split)
        data_list = list(tf_data)
        self._image_tensor_list = [t[0].numpy().squeeze() for t in data_list]
        self._targets = [int(t[1].numpy()[0]) for t in data_list]
        self._class_ids = sorted(list(set(self._targets)))
        logger.info('Number of images: {}'.format(len(self._image_tensor_list)))
        logger.info('Number of classes: {} / {}'.format(len(self._class_ids), self.get_class_num()))
        # Drop the TF-side references once everything is copied out.
        del data_list
        del tf_data

    def get_info(self):
        """Returns (num_images, num_classes)."""
        num_imgs = len(self._image_tensor_list)
        return (num_imgs, self.get_class_num())

    def get_class_num(self):
        return self.cfg.DATA.NUMBER_CLASSES

    def get_class_weights(self, weight_type):
        """get a list of class weight, return a list float"""
        if ('train' not in self._split):
            raise ValueError(('only getting training class distribution, ' + 'got split {} instead'.format(self._split)))
        cls_num = self.get_class_num()
        if (weight_type == 'none'):
            return ([1.0] * cls_num)
        # NOTE(review): _class_ids holds *unique* ids, so every count below
        # is 1 and the resulting weights are uniform. Counting self._targets
        # may have been intended — confirm before changing.
        id2counts = Counter(self._class_ids)
        assert (len(id2counts) == cls_num)
        num_per_cls = np.array([id2counts[i] for i in self._class_ids])
        if (weight_type == 'inv'):
            mu = (- 1.0)
        elif (weight_type == 'inv_sqrt'):
            mu = (- 0.5)
        else:
            # Previously an unrecognized type crashed with NameError on `mu`.
            raise ValueError('Unsupported weight_type: {}'.format(weight_type))
        weight_list = (num_per_cls ** mu)
        # Normalize so the weights sum to cls_num.
        weight_list = (np.divide(weight_list, np.linalg.norm(weight_list, 1)) * cls_num)
        return weight_list.tolist()

    def __getitem__(self, index):
        # (Removed a dead local `index` reassignment that was never used.)
        label = self._targets[index]
        im = to_torch_imgs(self._image_tensor_list[index], self.img_mean, self.img_std)
        sample = {'image': im, 'label': label}
        return sample

    def __len__(self):
        return len(self._targets)
|
def preprocess_fn(data, size=224, input_range=(0.0, 1.0)):
    """Resizes the image and rescales pixel values into `input_range`."""
    lo, hi = input_range
    img = tf.image.resize(data['image'], [size, size])
    # Map raw [0, 255] pixels into the requested range.
    img = tf.cast(img, tf.float32) / 255.0
    data['image'] = img * (hi - lo) + lo
    return data
|
def build_tf_dataset(cfg, mode):
    """Builds a tf.data pipeline of (image, label) pairs for a VTAB dataset.

    Args:
        cfg: config node providing DATA.NAME ('vtab-<dataset>'),
            DATA.DATAPATH and DATA.CROPSIZE.
        mode: one of 'train', 'val', 'test', 'trainval'.

    Returns:
        A tf.data.Dataset yielding (image, label) tuples with batch size 1.

    Raises:
        ValueError: for an unsupported mode or an unknown VTAB dataset name.
    """
    if (mode not in ['train', 'val', 'test', 'trainval']):
        # Fixed message: previously missed a space and omitted `trainval`.
        raise ValueError('The input pipeline supports `train`, `val`, `test`, `trainval`. Provided mode is {}'.format(mode))
    vtab_dataname = cfg.DATA.NAME.split('vtab-')[(- 1)]
    data_dir = cfg.DATA.DATAPATH
    if (vtab_dataname in DATASETS):
        data_cls = Registry.lookup(('data.' + vtab_dataname))
        vtab_tf_dataloader = data_cls(data_dir=data_dir)
    else:
        # Fixed message: previously printed type(vtab_dataname), which is
        # always <class 'str'>, instead of the offending name.
        raise ValueError('Unknown dataset name for "dataset" field: {}'.format(vtab_dataname))
    # VTAB standard protocol uses 800 train / 200 val examples.
    split_name_dict = {'dataset_train_split_name': 'train800', 'dataset_val_split_name': 'val200', 'dataset_trainval_split_name': 'train800val200', 'dataset_test_split_name': 'test'}

    def _dict_to_tuple(batch):
        return (batch['image'], batch['label'])

    return vtab_tf_dataloader.get_tf_data(batch_size=1, drop_remainder=False, split_name=split_name_dict[f'dataset_{mode}_split_name'], preprocess_fn=functools.partial(preprocess_fn, input_range=(0.0, 1.0), size=cfg.DATA.CROPSIZE), for_eval=(mode != 'train'), shuffle_buffer_size=1000, prefetch=1, train_examples=None, epochs=1).map(_dict_to_tuple)
|
def to_torch_imgs(img: np.ndarray, mean: Tensor, std: Tensor) -> Tensor:
    """Converts an HWC float ndarray to a normalized CHW torch Tensor.

    BUG FIX: ``torch.from_numpy`` shares memory with the source ndarray, so
    the previous in-place ``-= mean`` / ``/= std`` mutated the caller's
    cached array — repeated access to the same image double-normalized it.
    Normalization is now done out-of-place, leaving `img` untouched.

    Args:
        img: HWC image array (float values).
        mean: per-channel mean, broadcastable to (C, 1, 1).
        std: per-channel std, broadcastable to (C, 1, 1).

    Returns:
        Normalized (C, H, W) float tensor.
    """
    t_img: Tensor = torch.from_numpy(np.transpose(img, (2, 0, 1)))
    return (t_img - mean) / std
|
def _construct_loader(cfg, split, batch_size, shuffle, drop_last):
    """Constructs the data loader for the given dataset."""
    dataset_name = cfg.DATA.NAME
    if dataset_name.startswith('vtab-'):
        # VTAB datasets are served through the TF data pipeline.
        from .datasets.tf_dataset import TFDataset
        dataset = TFDataset(cfg, split)
    else:
        assert (dataset_name in _DATASET_CATALOG.keys()), "Dataset '{}' not supported".format(dataset_name)
        dataset = _DATASET_CATALOG[dataset_name](cfg, split)
    # Multi-GPU training shards the data via a distributed sampler.
    if cfg.NUM_GPUS > 1:
        sampler = DistributedSampler(dataset)
    else:
        sampler = None
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=(False if sampler else shuffle),
        sampler=sampler,
        num_workers=cfg.DATA.NUM_WORKERS,
        pin_memory=cfg.DATA.PIN_MEMORY,
        drop_last=drop_last,
    )
|
def construct_train_loader(cfg):
    """Train loader wrapper."""
    # Drop ragged final batches only when sharding across GPUs.
    drop_last = cfg.NUM_GPUS > 1
    per_gpu_batch = int(cfg.DATA.BATCH_SIZE / cfg.NUM_GPUS)
    return _construct_loader(cfg=cfg, split='train', batch_size=per_gpu_batch, shuffle=True, drop_last=drop_last)
|
def construct_trainval_loader(cfg):
    """Train+val ('trainval') loader wrapper."""
    # Drop ragged final batches only when sharding across GPUs.
    drop_last = cfg.NUM_GPUS > 1
    per_gpu_batch = int(cfg.DATA.BATCH_SIZE / cfg.NUM_GPUS)
    return _construct_loader(cfg=cfg, split='trainval', batch_size=per_gpu_batch, shuffle=True, drop_last=drop_last)
|
def construct_test_loader(cfg):
    """Test loader wrapper."""
    per_gpu_batch = int(cfg.DATA.BATCH_SIZE / cfg.NUM_GPUS)
    return _construct_loader(cfg=cfg, split='test', batch_size=per_gpu_batch, shuffle=False, drop_last=False)
|
def construct_val_loader(cfg, batch_size=None):
    """Validation loader wrapper.

    Args:
        cfg: experiment config node.
        batch_size: optional per-loader batch size; defaults to
            DATA.BATCH_SIZE split evenly across GPUs.

    (Fix: the docstring was previously buried mid-function as a no-op
    string expression.)
    """
    if (batch_size is None):
        bs = int((cfg.DATA.BATCH_SIZE / cfg.NUM_GPUS))
    else:
        bs = batch_size
    return _construct_loader(cfg=cfg, split='val', batch_size=bs, shuffle=False, drop_last=False)
|
def shuffle(loader, cur_epoch):
    """Shuffles the data for the given epoch."""
    sampler = loader.sampler
    assert isinstance(sampler, (RandomSampler, DistributedSampler)), "Sampler type '{}' not supported".format(type(sampler))
    # RandomSampler reshuffles implicitly; DistributedSampler must be
    # reseeded with the epoch number to change its permutation.
    if isinstance(sampler, DistributedSampler):
        sampler.set_epoch(cur_epoch)
|
def get_transforms(split, size):
    """Builds torchvision transforms for the given split and crop size.

    Args:
        split: dataset split name; 'train' gets random crop/flip
            augmentation, anything else a deterministic center crop.
        size: target crop size; one of 224, 384 or 448.

    Returns:
        A tv.transforms.Compose pipeline ending in ImageNet normalization.

    Raises:
        ValueError: if `size` is not a supported crop size.
    """
    normalize = tv.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    if (size == 448):
        resize_dim = 512
        crop_dim = 448
    elif (size == 224):
        resize_dim = 256
        crop_dim = 224
    elif (size == 384):
        resize_dim = 438
        crop_dim = 384
    else:
        # Previously an unsupported size crashed with NameError on resize_dim.
        raise ValueError('Unsupported crop size: {}'.format(size))
    if (split == 'train'):
        transform = tv.transforms.Compose([tv.transforms.Resize(resize_dim), tv.transforms.RandomCrop(crop_dim), tv.transforms.RandomHorizontalFlip(0.5), tv.transforms.ToTensor(), normalize])
    else:
        transform = tv.transforms.Compose([tv.transforms.Resize(resize_dim), tv.transforms.CenterCrop(crop_dim), tv.transforms.ToTensor(), normalize])
    return transform
|
def make_get_tensors_fn(output_tensors):
    """Create a function that outputs a collection of tensors from the dataset."""
    names = tuple(output_tensors)

    def _get_fn(data):
        """Get tensors by name."""
        selected = {}
        for name in names:
            selected[name] = data[name]
        return selected

    return _get_fn
|
def make_get_and_cast_tensors_fn(output_tensors):
    """Create a function that gets and casts a set of tensors from the dataset.

    `output_tensors` maps each tensor name to one of:
      * None: fetch the tensor unchanged;
      * a tf.dtypes.DType: fetch and cast to that dtype;
      * a (new_name, dtype) 2-tuple: fetch, rename to `new_name`, and cast.

    Example:
        make_get_and_cast_tensors_fn({
            "image": None,
            "heatmap": tf.float32,
            "class/label": ("label", tf.int64),
        })

    Args:
      output_tensors: dictionary specifying the set of tensors to get and cast
        from the dataset.

    Returns:
      The function performing the operation.
    """
    def _tensors_to_cast():
        specs = []
        for (tensor_name, tensor_dtype) in output_tensors.items():
            if isinstance(tensor_dtype, tuple) and len(tensor_dtype) == 2:
                new_name, new_dtype = tensor_dtype
                specs.append((tensor_name, new_name, new_dtype))
            elif tensor_dtype is None or isinstance(tensor_dtype, tf.dtypes.DType):
                specs.append((tensor_name, tensor_name, tensor_dtype))
            else:
                raise ValueError('Values of the output_tensors dictionary must be None, tf.dtypes.DType or 2-tuples.')
        return specs

    def _get_and_cast_fn(data):
        """Get and cast tensors by name, optionally changing the name too."""
        out = {}
        for (name, new_name, new_dtype) in _tensors_to_cast():
            value = data[name]
            out[new_name] = value if new_dtype is None else tf.cast(value, new_dtype)
        return out

    return _get_and_cast_fn
|
def compose_preprocess_fn(*functions):
    """Compose two or more preprocessing functions, skipping Nones.

    Args:
      *functions: Sequence of preprocess functions to compose.

    Returns:
      The composed function; applies the inputs left to right.
    """
    def _composed_fn(x):
        result = x
        for fn in functions:
            if fn is None:
                continue
            result = fn(result)
        return result

    return _composed_fn
|
@six.add_metaclass(abc.ABCMeta)
class ImageDataInterface(object):
    """Interface to the image data classes.

    Concrete implementations expose dataset metadata (label keys, channel
    count, splits and per-split sample/class counts) plus `get_tf_data`,
    which yields the preprocessed and batched tf.data pipeline.
    """

    @property
    @abc.abstractmethod
    def default_label_key(self):
        """Returns the default label key of the dataset."""

    @property
    @abc.abstractmethod
    def label_keys(self):
        """Returns a tuple with the available label keys of the dataset."""

    @property
    @abc.abstractmethod
    def num_channels(self):
        """Returns the number of channels of the images in the dataset."""

    @property
    @abc.abstractmethod
    def splits(self):
        """Returns the splits defined in the dataset."""

    @abc.abstractmethod
    def get_num_samples(self, split_name):
        """Returns the number of images in the given split name."""

    @abc.abstractmethod
    def get_num_classes(self, label_key=None):
        """Returns the number of classes of the given label_key."""

    @abc.abstractmethod
    def get_tf_data(self, split_name, batch_size, pairwise_mix_fn=None, preprocess_fn=None, preprocess_before_filter=None, epochs=None, drop_remainder=True, for_eval=False, shuffle_buffer_size=None, prefetch=1, train_examples=None, filtered_num_samples=None, filter_fn=None, batch_preprocess_fn=None, ignore_errors=False, shuffle_files=False):
        """Provides preprocessed and batched data.

        Args:
          split_name: name of a data split to provide. Can be "train", "val",
            "trainval" or "test".
          batch_size: batch size.
          pairwise_mix_fn: a function for mixing each data with another random
            one.
          preprocess_fn: a function for preprocessing input data. It expects a
            dictionary with a key "image" associated with a 3D image tensor.
          preprocess_before_filter: a function for preprocessing input data,
            before filter_fn. It is only designed for light preprocessing,
            i.e. augment with image id. For heavy preprocessing, it's more
            efficient to do it after filter_fn.
          epochs: number of full passes through the data. If None, the data is
            provided indefinitely.
          drop_remainder: if True, the last incomplete batch of data is
            dropped. Normally, this parameter should be True, otherwise it
            leads to the unknown batch dimension, which is not compatible with
            training or evaluation on TPUs.
          for_eval: get data for evaluation. Disables shuffling.
          shuffle_buffer_size: overrides default shuffle buffer size.
          prefetch: number of batches to prefetch.
          train_examples: optional number of examples to take for training.
            If greater than available number of examples, equivalent to None
            (all). Ignored when for_eval is True.
          filtered_num_samples: required when filter_fn is set, number of
            samples after applying filter_fn.
          filter_fn: filter function for generating training subset.
          batch_preprocess_fn: optional function for preprocessing a full
            batch of input data. Analoguous to preprocess_fn with an extra
            batch-dimension on all tensors.
          ignore_errors: whether to skip images that encountered an error in
            decoding *or pre-processing*, the latter is why it is False by
            default.
          shuffle_files: whether to shuffle the dataset files or not.

        Returns:
          A tf.data.Dataset object as a dictionary containing the output
          tensors.
        """
|
class ImageData(ImageDataInterface):
    """Abstract data provider class.

    IMPORTANT: You should use ImageTfdsData below whenever is posible. We want
    to use as many datasets in TFDS as possible to ensure reproducibility of
    our experiments. Your data class should only inherit directly from this if
    you are doing experiments while creating a TFDS dataset.
    """

    @abc.abstractmethod
    def __init__(self, num_samples_splits, shuffle_buffer_size, num_preprocessing_threads, num_classes, default_label_key='label', base_preprocess_fn=None, filter_fn=None, image_decoder=None, num_channels=3):
        """Initializer for the base ImageData class.

        Args:
          num_samples_splits: a dictionary, that maps splits ("train",
            "trainval", "val", and "test") to the corresponding number of
            samples.
          shuffle_buffer_size: size of a buffer used for shuffling.
          num_preprocessing_threads: the number of parallel threads for data
            preprocessing.
          num_classes: int/dict, number of classes in this dataset for the
            `default_label_key` tensor, or dictionary with the number of
            classes in each label tensor.
          default_label_key: optional, string with the name of the tensor to
            use as label. Default is "label".
          base_preprocess_fn: optional, base preprocess function to apply in
            all cases for this dataset.
          filter_fn: optional, function to filter the examples to use in the
            dataset. DEPRECATED, soon to be removed.
          image_decoder: a function to decode image.
          num_channels: number of channels in the dataset image.
        """
        self._log_warning_if_direct_inheritance()
        self._num_samples_splits = num_samples_splits
        self._shuffle_buffer_size = shuffle_buffer_size
        self._num_preprocessing_threads = num_preprocessing_threads
        self._base_preprocess_fn = base_preprocess_fn
        self._default_label_key = default_label_key
        self._filter_fn = filter_fn
        if self._filter_fn:
            tf.logging.warning('Using deprecated filtering mechanism.')
        self._image_decoder = image_decoder
        self._num_channels = num_channels
        # Normalize num_classes into a {label_key: count} mapping.
        if isinstance(num_classes, dict):
            self._num_classes = num_classes
            if (default_label_key not in num_classes):
                raise ValueError(('No num_classes was specified for the default_label_key %r' % default_label_key))
        elif isinstance(num_classes, int):
            self._num_classes = {default_label_key: num_classes}
        else:
            raise ValueError(('"num_classes" must be a int or a dict, but type %r was given' % type(num_classes)))

    @property
    def default_label_key(self):
        return self._default_label_key

    @property
    def label_keys(self):
        return tuple(self._num_classes.keys())

    @property
    def num_channels(self):
        return self._num_channels

    @property
    def splits(self):
        return tuple(self._num_samples_splits.keys())

    def get_num_samples(self, split_name):
        return self._num_samples_splits[split_name]

    def get_num_classes(self, label_key=None):
        if (label_key is None):
            label_key = self._default_label_key
        return self._num_classes[label_key]

    def get_version(self):
        # BUG FIX: previously this *returned* the exception instance instead
        # of raising it, so callers silently received an exception object.
        raise NotImplementedError('Version is not supported outside TFDS.')

    def get_tf_data(self, split_name, batch_size, pairwise_mix_fn=None, preprocess_fn=None, preprocess_before_filter=None, epochs=None, drop_remainder=True, for_eval=False, shuffle_buffer_size=None, prefetch=1, train_examples=None, filtered_num_samples=None, filter_fn=None, batch_preprocess_fn=None, ignore_errors=False, shuffle_files=False):
        """Provides preprocessed and batched data; see ImageDataInterface."""
        data = self._get_dataset_split(split_name=split_name, shuffle_files=shuffle_files)
        if (preprocess_before_filter is not None):
            data = preprocess_before_filter(data)
        if (self._filter_fn and (filter_fn is None)):
            filter_fn = self._filter_fn
        # Resolve how many samples this pipeline will yield per epoch.
        if (filter_fn and train_examples):
            raise ValueError('You must not set both filter_fn and train_examples.')
        if filter_fn:
            tf.logging.warning('You are filtering the dataset. Notice that this may hurt your throughput, since examples still need to be decoded, and may make the result of get_num_samples() inacurate. train_examples is ignored for filtering, but only used for calculating training steps.')
            data = data.filter(filter_fn)
            num_samples = filtered_num_samples
            assert (num_samples is not None), 'You must set filtered_num_samples if filter_fn is set.'
        elif ((not for_eval) and train_examples):
            # Deterministic subset for training-set-size experiments.
            data = data.take(train_examples)
            num_samples = train_examples
        else:
            num_samples = self.get_num_samples(split_name)
        data = self._cache_data_if_possible(data, split_name=split_name, num_samples=num_samples, for_eval=for_eval)

        def print_filtered_subset(ex):
            """Print filtered subset for debug purpose."""
            if (isinstance(ex, dict) and ('id' in ex) and ('label' in ex)):
                print_op = tf.print('filtered_example:', ex['id'], ex['label'], output_stream=tf.logging.error)
                with tf.control_dependencies([print_op]):
                    ex['id'] = tf.identity(ex['id'])
            return ex

        if ((not for_eval) and filter_fn):
            data = data.map(print_filtered_subset)
        # Repeat before shuffle so epoch boundaries are also shuffled.
        if ((epochs is None) or (epochs > 1)):
            data = data.repeat(epochs)
        shuffle_buffer_size = (shuffle_buffer_size or self._shuffle_buffer_size)
        if ((not for_eval) and (shuffle_buffer_size > 1)):
            data = data.shuffle(shuffle_buffer_size)
        data = self._preprocess_and_batch_data(data, batch_size, drop_remainder, pairwise_mix_fn, preprocess_fn, ignore_errors)
        if (batch_preprocess_fn is not None):
            data = data.map(batch_preprocess_fn, self._num_preprocessing_threads)
        if (prefetch != 0):
            data = data.prefetch(prefetch)
        return data

    @abc.abstractmethod
    def _get_dataset_split(self, split_name, shuffle_files=False):
        """Return the Dataset object for the given split name.

        Args:
          split_name: Name of the dataset split to get.
          shuffle_files: Whether or not to shuffle files in the dataset.

        Returns:
          A tf.data.Dataset object containing the data for the given split.
        """

    def _log_warning_if_direct_inheritance(self):
        tf.logging.warning('You are directly inheriting from ImageData. Please, consider porting your dataset to TFDS (go/tfds) and inheriting from ImageTfdsData instead.')

    def _preprocess_and_batch_data(self, data, batch_size, drop_remainder=True, pairwise_mix_fn=None, preprocess_fn=None, ignore_errors=False):
        """Preprocesses and batches a given tf.Dataset."""
        # Always decode + base-preprocess first; user preprocess_fn runs last.
        base_preprocess_fn = compose_preprocess_fn(self._image_decoder, self._base_preprocess_fn)
        data = data.map(base_preprocess_fn, self._num_preprocessing_threads)
        if (pairwise_mix_fn is not None):
            # Pair each example with its successor for mixing (e.g. mixup).
            data = tf.data.Dataset.zip((data, data.skip(1))).map(pairwise_mix_fn, self._num_preprocessing_threads)
        if (preprocess_fn is not None):
            data = data.map(preprocess_fn, self._num_preprocessing_threads)
        if ignore_errors:
            tf.logging.info('Ignoring any image with errors.')
            data = data.apply(tf.data.experimental.ignore_errors())
        return data.batch(batch_size, drop_remainder)

    def _cache_data_if_possible(self, data, split_name, num_samples, for_eval):
        del split_name
        # Cache small training sets in memory to avoid re-decoding each epoch.
        if ((not for_eval) and (num_samples <= 150000)):
            data = data.cache()
        return data
|
class ImageTfdsData(ImageData):
    """Abstract data provider class for datasets available in Tensorflow Datasets.

    To add new datasets inherit from this class. This class implements a simple
    API that is used throughout the project and provides standardized way of
    data preprocessing and batching.
    """

    @abc.abstractmethod
    def __init__(self, dataset_builder, tfds_splits, image_key='image', **kwargs):
        """Initializer for the base ImageData class.

        Args:
          dataset_builder: tfds dataset builder object.
          tfds_splits: a dictionary, that maps splits ("train", "trainval",
            "val", and "test") to the corresponding tfds `Split` objects.
          image_key: image key.
          **kwargs: Additional keyword arguments for the ImageData class.
        """
        self._dataset_builder = dataset_builder
        self._tfds_splits = tfds_splits
        self._image_key = image_key

        # Decode images lazily so the base pipeline can skip eager decoding.
        def _image_decoder(data):
            decoder = dataset_builder.info.features[image_key].decode_example
            data[image_key] = decoder(data[image_key])
            return data

        self._image_decoder = _image_decoder
        kwargs['image_decoder'] = _image_decoder
        super(ImageTfdsData, self).__init__(**kwargs)

    def get_version(self):
        """Returns the TFDS dataset version as a string."""
        return str(self._dataset_builder.version)

    def _get_dataset_split(self, split_name, shuffle_files):
        # Skip TFDS's default image decoding; _image_decoder handles it later.
        no_decode = tfds.decode.SkipDecoding()
        return self._dataset_builder.as_dataset(split=self._tfds_splits[split_name], shuffle_files=shuffle_files, decoders={self._image_key: no_decode})

    def _log_warning_if_direct_inheritance(self):
        # Inheriting from ImageTfdsData is the supported path; no warning.
        pass
|
@Registry.register('data.caltech101', 'class')
class Caltech101(base.ImageTfdsData):
    """Provides the Caltech101 dataset.

    See the base class for additional details on the class.

    See TFDS dataset for details on the dataset:
    third_party/py/tensorflow_datasets/image/caltech.py

    The original (TFDS) dataset contains only a train and test split. We
    randomly sample _TRAIN_SPLIT_PERCENT% of the train split for our "train"
    set. The remainder of the TFDS train split becomes our "val" set. The full
    TFDS train split is called "trainval". The TFDS test split is used as our
    test set.

    Note that, in the TFDS dataset, the training split is class-balanced, but
    not the test split. Therefore, a significant difference between
    performance on the "val" and "test" sets should be expected.
    """

    def __init__(self, data_dir=None):
        builder = tfds.builder('caltech101:3.0.1', data_dir=data_dir)
        builder.download_and_prepare()
        # Carve "train"/"val" out of the TFDS train split.
        trainval_count = builder.info.splits['train'].num_examples
        train_count = (_TRAIN_SPLIT_PERCENT * trainval_count) // 100
        test_count = builder.info.splits['test'].num_examples
        num_samples_splits = dict(
            train=train_count,
            val=trainval_count - train_count,
            trainval=trainval_count,
            test=test_count,
            train800=800,
            val200=200,
            train800val200=1000,
        )
        tfds_splits = {
            'train': 'train[:{}]'.format(train_count),
            'val': 'train[{}:]'.format(train_count),
            'trainval': 'train',
            'test': 'test',
            'train800': 'train[:800]',
            'val200': 'train[{}:{}]'.format(train_count, train_count + 200),
            'train800val200': 'train[:800]+train[{}:{}]'.format(train_count, train_count + 200),
        }
        super(Caltech101, self).__init__(
            dataset_builder=builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=400,
            shuffle_buffer_size=3000,
            base_preprocess_fn=base.make_get_tensors_fn(('image', 'label')),
            num_classes=builder.info.features['label'].num_classes)
|
@Registry.register('data.cifar', 'class')
class CifarData(base.ImageTfdsData):
    """Provides Cifar10 or Cifar100 data.

    Cifar comes only with a training and test set. Therefore, the validation
    set is split out of the original training set, and the remaining examples
    are used as the "train" split. The "trainval" split corresponds to the
    original training set.

    For additional details and usage, see the base class.
    """

    def __init__(self, num_classes=10, data_dir=None, train_split_percent=None):
        if num_classes == 10:
            builder = tfds.builder('cifar10:3.*.*', data_dir=data_dir)
        elif num_classes == 100:
            builder = tfds.builder('cifar100:3.*.*', data_dir=data_dir)
        else:
            raise ValueError('Number of classes must be 10 or 100, got {}'.format(num_classes))
        builder.download_and_prepare()
        percent = train_split_percent or TRAIN_SPLIT_PERCENT
        # Carve "train"/"val" out of the TFDS train split.
        trainval_count = builder.info.splits['train'].num_examples
        test_count = builder.info.splits['test'].num_examples
        train_count = (percent * trainval_count) // 100
        num_samples_splits = {
            'train': train_count,
            'val': trainval_count - train_count,
            'trainval': trainval_count,
            'test': test_count,
            'train800': 800,
            'val200': 200,
            'train800val200': 1000,
        }
        tfds_splits = {
            'train': 'train[:{}]'.format(train_count),
            'val': 'train[{}:]'.format(train_count),
            'trainval': 'train',
            'test': 'test',
            'train800': 'train[:800]',
            'val200': 'train[{}:{}]'.format(train_count, train_count + 200),
            'train800val200': 'train[:800]+train[{}:{}]'.format(train_count, train_count + 200),
        }
        super(CifarData, self).__init__(
            dataset_builder=builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=400,
            shuffle_buffer_size=10000,
            base_preprocess_fn=base.make_get_tensors_fn(['image', 'label', 'id']),
            num_classes=builder.info.features['label'].num_classes)
|
@Registry.register('data.diabetic_retinopathy', 'class')
class RetinopathyData(base.ImageTfdsData):
'Provides Diabetic Retinopathy classification data.\n\n Retinopathy comes only with a training and test set. Therefore, the validation\n set is split out of the original training set, and the remaining examples are\n used as the "train" split. The "trainval" split corresponds to the original\n training set.\n\n For additional details and usage, see the base class.\n '
_CONFIGS_WITH_GREY_BACKGROUND = ['btgraham-300']
def __init__(self, config='btgraham-300', heavy_train_augmentation=False, data_dir=None):
'Initializer for Diabetic Retinopathy dataset.\n\n Args:\n config: Name of the TFDS config to use for this dataset.\n heavy_train_augmentation: If True, use heavy data augmentation on the\n training data. Recommended to achieve SOTA.\n data_dir: directory for downloading and storing the data.\n '
config_and_version = (config + ':3.*.*')
dataset_builder = tfds.builder('diabetic_retinopathy_detection/{}'.format(config_and_version), data_dir=data_dir)
self._config = config
self._heavy_train_augmentation = heavy_train_augmentation
dataset_builder.download_and_prepare()
tfds_splits = {'train': 'train', 'val': 'validation', 'trainval': 'train+validation', 'test': 'test', 'train800': 'train[:800]', 'val200': 'validation[:200]', 'train800val200': 'train[:800]+validation[:200]'}
train_count = dataset_builder.info.splits['train'].num_examples
val_count = dataset_builder.info.splits['validation'].num_examples
test_count = dataset_builder.info.splits['test'].num_examples
num_samples_splits = {'train': train_count, 'val': val_count, 'trainval': (train_count + val_count), 'test': test_count, 'train800': 800, 'val200': 200, 'train800val200': 1000}
super(RetinopathyData, self).__init__(dataset_builder=dataset_builder, tfds_splits=tfds_splits, num_samples_splits=num_samples_splits, num_preprocessing_threads=400, shuffle_buffer_size=10000, base_preprocess_fn=base.make_get_tensors_fn(['image', 'label']), num_classes=dataset_builder.info.features['label'].num_classes)
@property
def config(self):
return self._config
@property
def heavy_train_augmentation(self):
return self._heavy_train_augmentation
def get_tf_data(self, split_name, batch_size, preprocess_fn=None, for_eval=False, **kwargs):
if (self._heavy_train_augmentation and (not for_eval)):
preprocess_fn = base.compose_preprocess_fn(self._heavy_train_augmentation, preprocess_fn)
return super(RetinopathyData, self).get_tf_data(split_name=split_name, batch_size=batch_size, preprocess_fn=preprocess_fn, for_eval=for_eval, **kwargs)
def _sample_heavy_data_augmentation_parameters(self):
s = tf.random.uniform(shape=(), minval=(- 0.1), maxval=0.1)
a = tf.random.uniform(shape=(), minval=0.0, maxval=(2.0 * 3.1415926535))
b = (tf.random.uniform(shape=(), minval=(- 0.2), maxval=0.2) + a)
hf = tf.random.shuffle([(- 1.0), 1.0])[0]
vf = tf.random.shuffle([(- 1.0), 1.0])[0]
dx = tf.random.uniform(shape=(), minval=(- 0.1), maxval=0.1)
dy = tf.random.uniform(shape=(), minval=(- 0.1), maxval=0.1)
return (s, a, b, hf, vf, dx, dy)
def _heavy_data_augmentation_fn(self, example):
    """Perform heavy augmentation on a given input data example.

    This is the same data augmentation as the one done by Ben Graham, the
    winner of the 2015 Kaggle competition. See:
    https://github.com/btgraham/SparseConvNet/blob/a6bdb0c938b3556c1e6c23d5a014db9f404502b9/kaggleDiabetes1.cpp#L12

    Args:
      example: A dictionary containing an "image" key with the image to
        augment.

    Returns:
      The input dictionary with the key "image" containing the augmented image.

    Raises:
      ValueError: If the image is not a rank-2 or rank-3 tensor.
    """
    image = example['image']
    image_shape = tf.shape(image)
    # Only (H, W) or (H, W, C) images are supported.
    if (len(image.get_shape().as_list()) not in [2, 3]):
        raise ValueError('Input image must be a rank-2 or rank-3 tensor, but rank-{} was given'.format(len(image.get_shape().as_list())))
    height = tf.cast(image_shape[0], dtype=tf.float32)
    width = tf.cast(image_shape[1], dtype=tf.float32)
    # s: scale jitter; a/b: rotation angles whose small difference adds a
    # shear; hf/vf: +/-1 flip factors; dx/dy: translations as fractions of
    # the image size.
    (s, a, b, hf, vf, dx, dy) = self._sample_heavy_data_augmentation_parameters()
    # 2x2 linear part of the affine transform: rotation by a/b with the
    # radius scaled by (1 +/- s).
    c00 = ((1 + s) * tf.cos(a))
    c01 = ((1 + s) * tf.sin(a))
    c10 = ((s - 1) * tf.sin(b))
    c11 = ((1 - s) * tf.cos(b))
    # Fold the horizontal/vertical flip signs into the linear part.
    c00 = (c00 * hf)
    c01 = (c01 * hf)
    c10 = (c10 * vf)
    c11 = (c11 * vf)
    # Convert the relative translations into pixel units.
    dx = (width * dx)
    dy = (height * dy)
    # Image centre; the translation column below re-centres the linear
    # transform on (cx, cy) rather than the top-left corner.
    cy = (height / 2.0)
    cx = (width / 2.0)
    # 3x3 affine matrix in homogeneous coordinates.
    affine_matrix = [[c00, c01, ((((1.0 - c00) * cx) - (c01 * cy)) + dx)], [c10, c11, ((((1.0 - c11) * cy) - (c10 * cx)) + dy)], [0.0, 0.0, 1.0]]
    affine_matrix = tf.convert_to_tensor(affine_matrix, dtype=tf.float32)
    # tfa.image.transform expects the output-to-input mapping, hence the
    # matrix inverse before flattening to the 8-parameter form.
    transform = tfa_image.transform_ops.matrices_to_flat_transforms(tf.linalg.inv(affine_matrix))
    if (self._config in self._CONFIGS_WITH_GREY_BACKGROUND):
        # NOTE(review): the transform fills exposed regions with zeros;
        # normalising to [-1, 1] first presumably makes that fill decode to
        # grey (127.5) after the inverse mapping below -- confirm against
        # the grey-background preprocessing configs.
        image = tf.cast(image, dtype=tf.float32)
        image = ((image / 127.5) - 1.0)
    image = tfa_image.transform(images=image, transforms=transform)
    if (self._config in self._CONFIGS_WITH_GREY_BACKGROUND):
        # Undo the [-1, 1] normalisation and restore the uint8 range.
        image = ((1.0 + image) * 127.5)
        image = tf.cast(image, dtype=tf.uint8)
    example['image'] = image
    return example
|
@Registry.register('data.dmlab', 'class')
class DmlabData(base.ImageTfdsData):
    """Dmlab dataset.

    The Dmlab dataset contains frames observed by the agent acting in the
    DMLab environment, annotated with the distance between the agent and
    various objects present in the environment. The task evaluates the
    ability of a visual model to reason about distances from visual input in
    3D environments. The dataset consists of 360x480 color images in 6
    classes: {close, far, very far} x {positive reward, negative reward}.
    """

    def __init__(self, data_dir=None):
        builder = tfds.builder('dmlab:2.0.1', data_dir=data_dir)
        splits_info = builder.info.splits
        n_train = splits_info['train'].num_examples
        n_val = splits_info['validation'].num_examples
        n_test = splits_info['test'].num_examples
        # Map VTAB split names onto TFDS split specs.
        tfds_splits = {
            'train': 'train',
            'val': 'validation',
            'trainval': 'train+validation',
            'test': 'test',
            'train800': 'train[:800]',
            'val200': 'validation[:200]',
            'train800val200': 'train[:800]+validation[:200]',
        }
        # Example counts for each of the splits above.
        num_samples_splits = {
            'train': n_train,
            'val': n_val,
            'trainval': n_train + n_val,
            'test': n_test,
            'train800': 800,
            'val200': 200,
            'train800val200': 1000,
        }
        super().__init__(
            dataset_builder=builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=400,
            shuffle_buffer_size=10000,
            base_preprocess_fn=base.make_get_and_cast_tensors_fn({
                'image': ('image', None),
                'label': ('label', None),
            }),
            num_classes=builder.info.features['label'].num_classes,
            image_key='image')
|
# NOTE(review): the three lines below are dataset-viewer page residue, not
# Python source; commented out so the file remains parseable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.