code stringlengths 17 6.64M |
|---|
def _repo_path(repo, version):
    """
    Local path of the checkout for the given version.

    :param str repo:
    :param str|None version: None means the development working tree
    :rtype: str
    """
    if version:
        return '%s@v%s' % (_main_repo_path(repo), version)
    return _dev_repo_path(repo)
|
def _repo_remote_url(repo):
'\n :param str repo:\n :rtype: str\n '
p = repo.find('/')
assert (p >= 0)
(host, path) = (repo[:p], repo[p:])
return ('https://%s:%s' % (host, path))
|
def _simple_validate_commit_rev(rev):
    """
    Sanity-check a (possibly abbreviated) commit hash.

    :param str rev:
    """
    assert len(rev) >= _MinNumHashDigits
    assert _RevDigitsRe.match(rev)
|
def _simple_validate_date(date):
    """
    Sanity-check a date string against the allowed lengths and pattern.

    :param str date:
    """
    assert len(date) in _DateFormatAllowedLens
    assert _DateRe.match(date)
|
def _simple_validate_version(version):
    """
    Sanity-check a "<date>-<rev>" version string; None/empty is allowed
    (development working tree).

    :param str|None version:
    """
    if version:
        date, rev = version.split('-')
        _simple_validate_date(date)
        _simple_validate_commit_rev(rev)
|
def _rev_from_version(version):
    """
    Extract the commit revision from a version string.

    :param str version: e.g. "20211231-0123abcd0123", or a bare revision
    :return: e.g. "0123abcd0123"
    :rtype: str
    """
    sep_idx = version.rfind('-')
    # No separator -> the whole string is already a bare revision.
    rev = version if sep_idx < 0 else version[sep_idx + 1:]
    _simple_validate_commit_rev(rev)
    return rev
|
def _version_from_date_and_rev(date, rev):
    """
    Build the "<date>-<rev>" version string.

    :param str date:
    :param str rev:
    :rtype: str
    """
    _simple_validate_commit_rev(rev)
    return f'{date}-{rev}'
|
def _sys_git_clone_repo(repo):
    """
    Clone the remote repository into its local main path.

    :param str repo:
    """
    remote_url = _repo_remote_url(repo)
    target_path = _main_repo_path(repo)
    common.logger.info('Git clone %s', repo)
    check_call(['git', 'clone', remote_url, target_path])
|
def _sys_git_fetch(repo):
    """
    Run ``git fetch`` inside the main clone of ``repo``.

    :param str repo:
    """
    repo_dir = _main_repo_path(repo)
    common.logger.info('Git fetch %s', repo)
    check_call(['git', 'fetch'], cwd=repo_dir)
|
def _sys_git_create_repo_workdir(repo, version):
    """
    Create a Git worktree for the given version, checked out at its revision.

    :param str repo:
    :param str version:
    """
    repo_dir = _main_repo_path(repo)
    rev = _rev_from_version(version)
    worktree_path = _repo_path(repo, version)
    common.logger.info('Git add worktree %s: %s', repo, version)
    check_call(['git', 'worktree', 'add', worktree_path, rev], cwd=repo_dir)
|
def _sys_git_stat_local_rev(repo, rev):
    """
    Query the local clone for the full hash and commit date of ``rev``.

    :param str repo:
    :param str rev:
    :return: (full_rev, date), or (None, None) if git does not know the rev
    :rtype: (str|None, str|None)
    """
    main_path = _main_repo_path(repo)
    cmd = ['git', '-c', 'log.showsignature=false', 'log', '-n1', '--format=format:%H %cd', ('--date=format:%s' % _DateFormat), rev, '--']
    try:
        raw = check_output(cmd, cwd=main_path)
    except SubprocessError:
        # Unknown rev (or other git failure): report as not found.
        return None, None
    full_rev, date = raw.decode('utf8').split()
    assert full_rev.startswith(rev)
    assert len(full_rev) == _FullNumHashDigits
    _simple_validate_commit_rev(full_rev)
    _simple_validate_date(date)
    return full_rev, date
|
class _Repo():
    """
    One managed Git repository: the main local clone plus per-version worktrees.
    Cloning, disk scanning, fetching and worktree creation all happen lazily.
    """
    def __init__(self, name):
        """
        :param str name: e.g. "github.com/rwth-i6/returnn-experiments"
        """
        _simple_validate_repo_name(name)
        self.name = name
        self._cloned = False  # main clone verified to exist on disk
        self._loaded_local_work_dirs = False  # disk scanned for existing worktrees
        self._dev_work_dir = None  # cached development (unversioned) work dir
        self._work_dirs = {}  # rev (commit-hash prefix) -> _RepoWorkDir
    def get_dev_dir_path(self):
        """
        :rtype: str
        :return: path of the development working tree (clones first if needed)
        """
        self._clone()
        return _dev_repo_path(self.name)
    def _clone(self):
        # Ensure the main clone exists on disk; no-op after the first check.
        if self._cloned:
            return
        main_path = _main_repo_path(self.name)
        if (not os.path.exists(main_path)):
            _sys_git_clone_repo(self.name)
        assert os.path.exists(main_path)
        self._cloned = True
    def _load_work_dirs(self):
        # Scan the parent directory for existing "<repo>@v<version>" worktrees
        # and register them in self._work_dirs, keyed by revision.
        if self._loaded_local_work_dirs:
            return
        self._clone()
        main_path = _main_repo_path(self.name)
        p = main_path.rfind('/')
        assert (p > 0)
        (dir_name, base_name) = (main_path[:p], main_path[(p + 1):])
        prefix = (base_name + '@v')
        for d in sorted(os.listdir(dir_name)):
            if (not d.startswith(prefix)):
                continue
            version = d[len(prefix):]
            rev = _rev_from_version(version)
            if (rev in self._work_dirs):
                # Keep the first (sorted) directory found for a given revision.
                continue
            self._work_dirs[rev] = _RepoWorkDir(repo=self, version=version)
        self._loaded_local_work_dirs = True
    def _get_work_dir_stat_local(self, rev):
        """
        Try to resolve ``rev`` in the local clone; if found, create a worktree
        for it (named with the default-length rev) and register it.

        :param str rev:
        :rtype: _RepoWorkDir|None
        :return: newly created work dir, or None if the rev is unknown locally
        """
        (full_rev, date) = _sys_git_stat_local_rev(self.name, rev)
        if full_rev:
            default_rev = full_rev[:_DefaultNumHashDigits]
            assert (default_rev not in self._work_dirs)
            default_version = _version_from_date_and_rev(date=date, rev=default_rev)
            _sys_git_create_repo_workdir(self.name, version=default_version)
            work_dir = _RepoWorkDir(self, default_version)
            self._work_dirs[default_rev] = work_dir
            return work_dir
        return None
    def get_work_dir(self, version):
        """
        :param str|None version:
        :rtype: _RepoWorkDir
        :return: work dir. this makes sure that the work dir also exists
        """
        if (not version):
            # Development (unversioned) working tree.
            if self._dev_work_dir:
                return self._dev_work_dir
            self._clone()
            self._dev_work_dir = _RepoWorkDir(self, None)
            return self._dev_work_dir
        self._load_work_dirs()
        rev = _rev_from_version(version)
        if (rev in self._work_dirs):
            work_dir = self._work_dirs[rev]
            work_dir.validate_version(version)
            return work_dir
        # No exact key match: accept a cached work dir whose rev is a prefix of
        # the requested rev, or vice versa (revs may differ in length).
        for (rev_, work_dir) in self._work_dirs.items():
            if (rev_.startswith(rev) or ((len(rev) > len(rev_)) and rev.startswith(rev_))):
                work_dir.validate_version(version)
                self._work_dirs[rev] = work_dir  # also cache under the requested rev
                return work_dir
        # Unknown so far: try to resolve in the local clone, then after a fetch.
        work_dir = self._get_work_dir_stat_local(rev)
        if work_dir:
            work_dir.validate_version(version)
            return work_dir
        _sys_git_fetch(self.name)
        work_dir = self._get_work_dir_stat_local(rev)
        assert work_dir, ('Git repo %s, version %s unknown.' % (self.name, version))
        work_dir.validate_version(version)
        return work_dir
|
class _RepoWorkDir():
    """
    One checked-out working tree of a repo: either the development tree
    (version None) or a pinned "<date>-<rev>" version.
    """

    def __init__(self, repo, version):
        """
        :param str|_Repo repo:
        :param str|None version: (normalized)
        """
        if not isinstance(repo, _Repo):
            repo = _get_repo(repo)
        _simple_validate_version(version)
        self.repo = repo
        self.version = version
        if version:
            self.version_date, self.version_rev = version.split('-')
        else:
            self.version_date, self.version_rev = None, None

    def get_path(self):
        """
        :rtype: str
        :return: filesystem path of this working tree
        """
        return _repo_path(self.repo.name, self.version)

    def validate_version(self, version):
        """
        Check that ``version`` is consistent with this work dir
        (date prefix matches, and the shorter rev is a prefix of the longer).

        :param str|None version:
        :raises common.InvalidVersion: on any mismatch
        """
        if not version:
            if self.version:
                raise common.InvalidVersion(('Development version, got %s' % (version,)))
            return
        assert self.version
        date, rev = version.split('-')
        _simple_validate_date(date)
        _simple_validate_commit_rev(rev)
        if not self.version_date.startswith(date):
            raise common.InvalidVersion(('Version %s, date does not match in %s' % (self.version, version)))
        if len(rev) <= len(self.version_rev):
            revs_match = self.version_rev.startswith(rev)
        else:
            revs_match = rev.startswith(self.version_rev)
        if not revs_match:
            raise common.InvalidVersion(('Version %s, revision does not match in %s' % (self.version, version)))
|
def _get_repo(repo):
    """
    Get the (cached) :class:`_Repo` object for a repo name.

    :param str repo:
    :rtype: _Repo
    """
    assert isinstance(repo, str)
    if repo not in _repo_cache:
        _repo_cache[repo] = _Repo(repo)
    return _repo_cache[repo]
|
def _report_usage_dev_version(repo_path, stack_frame_depth):
    """
    Warn (via logger) that the development working tree is being used instead
    of a pinned version, show the calling code location, and print the version
    string the caller could pin instead.

    :param str repo_path: path of the development working tree
    :param int stack_frame_depth: stack depth of the user code relative to the caller
    """
    common.logger.warn('Access to development working tree: %s', repo_path)
    from returnn.util.basic import try_get_stack_frame
    frame = try_get_stack_frame(depth=(stack_frame_depth + 1))  # +1 for this function itself
    if frame:
        from returnn.util.better_exchook import get_source_code, add_indent_lines
        src = get_source_code(filename=frame.f_code.co_filename, lineno=frame.f_lineno, module_globals=frame.f_globals)
        common.logger.warn(' Called from: %s:%i, code:\n%s', frame.f_code.co_filename, frame.f_lineno, add_indent_lines(' ', src.rstrip()))
    else:
        common.logger.warn(' (Could not get stack frame information from calling code.)')
    from returnn.util.basic import git_commit_date, git_commit_rev, git_is_dirty
    rev = git_commit_rev(git_dir=repo_path, length=_DefaultNumHashDigits)
    commit_date = git_commit_date(git_dir=repo_path)
    commit_date = commit_date.replace('.', '')  # presumably strips "." separators from the date — confirm git_commit_date format
    # Suggested pin "<YYYYMMDD>-<short rev>"; import_str carries full date+rev.
    version = ('%s-%s' % (commit_date[:8], rev[:_MinNumHashDigits]))
    import_str = ('v%s_%s' % (commit_date, rev))
    common.logger.warn(' Current version: %s (%s)', version, import_str)
    if git_is_dirty(git_dir=repo_path):
        common.logger.warn(' (Warning, code is dirty. Commit your recent changes.)')
|
def import_(repo, path, version=None):
    """
    Import a module from a (versioned) repo checkout.

    :param str repo: e.g. "github.com/rwth-i6/returnn-experiments"
    :param str path: path inside the repo, without starting "/"
    :param str|None version: e.g. "20211231-0123abcd0123". None for development working copy
    :rtype: object|types.ModuleType
    """
    assert path
    assert not path.startswith('/')
    assert '..' not in path  # no escaping the repo dir
    repo_path = get_repo_path(repo=repo, version=version)
    mod_name = module_name(repo=repo, repo_path=repo_path, path=path, version=version)
    return importlib.import_module(mod_name)
|
class LearningRateControl(object):
    """
    Base class for learning rate control / scheduling.

    Keeps per-epoch learning rates and evaluation scores/errors in
    ``self.epoch_data`` (dict epoch -> :class:`EpochData`), optionally
    persisted to a file (``learning_rate_file`` in the config).
    """
    need_error_info = True  # whether the scheduler needs scores/errors to decide
    class EpochData():
        """
        Encapsulates all relevant information for one epoch,
        needed to perform learning rate scheduling,
        such as the individual scores (cv or train; cross-entropy or frame-error or whatever).
        """
        def __init__(self, *, learning_rate: Optional[float]=None, error: Optional[Dict[(str, float)]]=None, meta: Optional[Dict[(str, Any)]]=None, **kwargs):
            """
            :param learning_rate:
            :param error: scores (loss values) and errors (frame error rates, etc)
            :param meta: any other extra information (e.g. effective learning rate)

            Note that this is serialized as EpochData(learningRate=..., error=...),
            and we keep that for compatibility,
            so that is why we have special handling for kwargs.
            """
            if (learning_rate is None):
                # Backward compat: old serialized format used kwarg "learningRate".
                learning_rate = kwargs.pop('learningRate', None)
            if (not isinstance(learning_rate, float)):
                raise TypeError(f'EpochData: unexpected learning_rate type: {type(learning_rate)}')
            if kwargs:
                raise TypeError(f'EpochData: unexpected kwargs: {kwargs}')
            self.learning_rate = learning_rate
            if isinstance(error, float):
                # Very old format: a single float score instead of a dict.
                error = {'old_format_score': error}
            elif (error is None):
                error = {}
            if (not isinstance(error, dict)):
                raise TypeError(f'EpochData: unexpected error type: {type(error)}')
            if (meta is None):
                # Meta entries are serialized inside `error` under ":meta:"-prefixed keys;
                # split them back out here.
                meta = {k[len(':meta:'):]: v for (k, v) in error.items() if k.startswith(':meta:')}
                error = {k: v for (k, v) in error.items() if (not k.startswith(':meta:'))}
            if (not isinstance(meta, dict)):
                raise TypeError(f'EpochData: unexpected meta type: {type(meta)}')
            if any((k for k in error if k.startswith(':meta:'))):
                raise ValueError(f'EpochData: unexpected error keys: {error}')
            self.error = error
            self.meta = meta
        def __repr__(self):
            # Serialized with the legacy "learningRate" kwarg name, and meta merged
            # back into the error dict under ":meta:" keys (inverse of __init__).
            return ('EpochData(learningRate=%s, error=%s)' % (better_repr(self.learning_rate), better_repr(util.dict_joined(self.error, {f':meta:{k}': v for (k, v) in self.meta.items()}))))
    @classmethod
    def load_initial_kwargs_from_config(cls, config):
        """
        Collect constructor kwargs from the config (see :func:`__init__` for meanings).

        :type config: returnn.config.Config
        :rtype: dict[str]
        """
        return {'default_learning_rate': config.float('learning_rate', 1.0), 'min_learning_rate': config.float('min_learning_rate', 0.0), 'default_learning_rates': (config.typed_value('learning_rates') or config.float_list('learning_rates')), 'error_measure_key': (config.typed_value('learning_rate_control_error_measure') or config.value('learning_rate_control_error_measure', None)), 'relative_error_also_relative_to_learning_rate': config.bool('learning_rate_control_relative_error_relative_lr', False), 'min_num_epochs_per_new_learning_rate': config.int('learning_rate_control_min_num_epochs_per_new_lr', 0), 'relative_error_div_by_old': config.bool('newbob_relative_error_div_by_old', False), 'learning_rate_decay': config.typed_value('learning_rate_decay', config.opt_typed_value('newbob_learning_rate_decay', 0.5)), 'learning_rate_growth': config.typed_value('learning_rate_growth', config.opt_typed_value('newbob_learning_rate_growth', 1.0)), 'filename': config.value('learning_rate_file', None)}
    @classmethod
    def load_initial_from_config(cls, config):
        """
        Construct an instance configured from ``config``.

        :type config: returnn.config.Config
        :rtype: LearningRateControl
        """
        kwargs = cls.load_initial_kwargs_from_config(config)
        return cls(**kwargs)
    def __init__(self, default_learning_rate, min_learning_rate=0.0, default_learning_rates=None, error_measure_key=None, relative_error_also_relative_to_learning_rate=False, min_num_epochs_per_new_learning_rate=0, relative_error_div_by_old=False, learning_rate_decay=1.0, learning_rate_growth=1.0, filename=None):
        """
        :param float default_learning_rate: default learning rate. usually for epoch 1
        :param float min_learning_rate: decay will never go below this value
        :param list[float] | dict[int,float] default_learning_rates: learning rates
        :param str|list[str]|None error_measure_key: for get_epoch_error_value() the key for EpochData.error
            which is a dict
        :param bool relative_error_also_relative_to_learning_rate: scale relative error by lr ratio
        :param int min_num_epochs_per_new_learning_rate: if the lr was recently updated, use it for at least N epochs
        :param bool relative_error_div_by_old: if True, compute relative error as (new - old) / old.
        :param float|(float)->float learning_rate_decay: factor or generic function
        :param float|(float)->float learning_rate_growth: factor or generic function
        :param str filename: load from and save to file
        """
        self.epoch_data = {}  # epoch (int) -> EpochData
        self.filename = filename
        if filename:
            if os.path.exists(filename):
                print(('Learning-rate-control: loading file %s' % filename), file=log.v4)
                self.load()
            else:
                print(('Learning-rate-control: file %s does not exist yet' % filename), file=log.v4)
        else:
            print('Learning-rate-control: no file specified, not saving history (no proper restart possible)', file=log.v4)
        self.default_learning_rate = default_learning_rate
        self.min_learning_rate = min_learning_rate
        if default_learning_rates:
            if isinstance(default_learning_rates, list):
                # List entry i is the lr for epoch i+1 (epochs are 1-based).
                default_learning_rates = {(i + 1): v for (i, v) in enumerate(default_learning_rates)}
            if isinstance(default_learning_rates, (str, unicode)):
                # NOTE(review): eval of a config-provided string; config is assumed trusted.
                default_learning_rates = eval(default_learning_rates)
            assert isinstance(default_learning_rates, dict)
            for (epoch, v) in default_learning_rates.items():
                self.set_default_learning_rate_for_epoch(epoch, v)
        self.default_learning_rates = default_learning_rates
        self.error_measure_key = error_measure_key
        self.relative_error_also_relative_to_learning_rate = relative_error_also_relative_to_learning_rate
        self.min_num_epochs_per_new_learning_rate = min_num_epochs_per_new_learning_rate
        self.relative_error_div_by_old = relative_error_div_by_old
        self.learning_rate_decay = learning_rate_decay
        self.learning_rate_growth = learning_rate_growth
    __repr__ = simple_obj_repr
    def __str__(self):
        # Summary shows at most the first 3 and last 3 epochs of data.
        epochs = sorted(self.epoch_data.keys())
        if (len(epochs) > 6):
            epoch_str = ', '.join((([('%i: %s' % (epoch, self.epoch_data[epoch])) for epoch in epochs[:3]] + ['...']) + [('%i: %s' % (epoch, self.epoch_data[epoch])) for epoch in epochs[(- 3):]]))
        else:
            epoch_str = ', '.join([('%i: %s' % (epoch, self.epoch_data[epoch])) for epoch in epochs])
        return ('%r, epoch data: %s, error key: %s' % (self, epoch_str, self.get_error_key(epoch=1)))
    @staticmethod
    def _calc_learning_rate_update(learning_rate, update):
        """
        :param float learning_rate:
        :param None|float|(float)->float update: factor, or generic func
        :return: lr with update applied (e.g. decay factor)
        :rtype: float
        """
        if (update is None):
            return learning_rate
        if isinstance(update, float):
            return (learning_rate * update)
        assert callable(update)
        learning_rate = update(learning_rate)
        assert isinstance(learning_rate, float)
        return learning_rate
    def _calc_learning_rate_decay(self, learning_rate):
        """
        :param float learning_rate:
        :return: lr with decay applied
        :rtype: float
        """
        return self._calc_learning_rate_update(learning_rate, update=self.learning_rate_decay)
    def _calc_learning_rate_growth(self, learning_rate):
        """
        :param float learning_rate:
        :return: lr with growth applied
        :rtype: float
        """
        return self._calc_learning_rate_update(learning_rate, update=self.learning_rate_growth)
    def calc_learning_rate_decay_or_grow(self, learning_rate, decay, grow=None):
        """
        :param float learning_rate:
        :param bool decay:
        :param bool|None grow: default is not decay
        :return: lr with decay or growth applied
        :rtype: float
        """
        assert isinstance(decay, bool)
        if (grow is None):
            grow = (not decay)
        assert isinstance(grow, bool)
        assert (not (grow and decay))
        if decay:
            learning_rate = self._calc_learning_rate_decay(learning_rate)
            # Clip to the configured minimum (applies to decay only).
            if (learning_rate < self.min_learning_rate):
                learning_rate = self.min_learning_rate
        if grow:
            learning_rate = self._calc_learning_rate_growth(learning_rate)
        return learning_rate
    def calc_learning_rate_for_epoch(self, epoch):
        """
        Scheduling policy; implemented by subclasses.

        :type epoch: int
        :returns learning rate
        :rtype: float
        """
        raise NotImplementedError
    def calc_new_learning_rate_for_epoch(self, epoch):
        """
        :param int epoch:
        :return: new learning rate for this epoch
        :rtype: float
        """
        if (self.min_num_epochs_per_new_learning_rate > 1):
            last_lrs = [self.epoch_data[e].learning_rate for e in self._last_epochs_for_epoch(epoch, num_epochs=self.min_num_epochs_per_new_learning_rate)]
            # Keep the current lr if it changed within the last N epochs,
            # or if we do not have N epochs of history yet.
            if ((len(set(last_lrs)) >= 2) or (0 < len(last_lrs) < self.min_num_epochs_per_new_learning_rate)):
                return last_lrs[(- 1)]
        learning_rate = self.calc_learning_rate_for_epoch(epoch)
        return learning_rate
    def _last_epochs_for_epoch(self, epoch, num_epochs):
        """
        :param int epoch:
        :param int num_epochs:
        :return: last N epochs where we have some epoch data
        :rtype: list[int]
        """
        last_epochs = sorted([e for e in self.epoch_data.keys() if (e < epoch)])
        if (not last_epochs):
            return []
        last_epochs = last_epochs[(- num_epochs):]
        return last_epochs
    def get_learning_rate_for_epoch(self, epoch):
        """
        Cached lookup; calculates and stores the lr if not yet known.

        :type epoch: int
        :rtype: float
        """
        assert (epoch >= 1)
        if (epoch in self.epoch_data):
            return self.epoch_data[epoch].learning_rate
        learning_rate = self.calc_new_learning_rate_for_epoch(epoch)
        self.set_default_learning_rate_for_epoch(epoch, learning_rate)
        return learning_rate
    def set_default_learning_rate_for_epoch(self, epoch, learning_rate):
        """
        :type epoch: int
        :type learning_rate: float
        """
        if (epoch in self.epoch_data):
            # Only fill in if no (truthy) lr was stored for this epoch yet.
            if (not self.epoch_data[epoch].learning_rate):
                self.epoch_data[epoch].learning_rate = learning_rate
        else:
            self.epoch_data[epoch] = self.EpochData(learning_rate=learning_rate)
    def get_last_epoch(self, epoch):
        """
        :param int epoch:
        :return: last epoch before ``epoch`` where we have some epoch data
        :rtype: int
        """
        epochs = sorted([e for e in self.epoch_data.keys() if (e < epoch)])
        if (not epochs):
            return None
        return epochs[(- 1)]
    def get_most_recent_learning_rate(self, epoch, exclude_current=True):
        """
        :param int epoch:
        :param bool exclude_current:
        :return: most recent learning rate before (or including) ``epoch``
        :rtype: float
        """
        # Walk the epoch data from newest to oldest.
        for (e, data) in reversed(sorted(self.epoch_data.items())):
            assert isinstance(data, LearningRateControl.EpochData)
            if (e > epoch):
                continue
            if (exclude_current and (e == epoch)):
                continue
            if (data.learning_rate is None):
                continue
            return data.learning_rate
        return self.default_learning_rate
    def calc_relative_error(self, old_epoch, new_epoch):
        """
        :param int old_epoch:
        :param int new_epoch:
        :return: relative error between old epoch and new epoch, or None if not comparable
        :rtype: float|None
        """
        (old_key, old_error) = self.get_epoch_error_key_value(old_epoch)
        (new_key, new_error) = self.get_epoch_error_key_value(new_epoch)
        if ((old_error is None) or (new_error is None)):
            return None
        if (old_key != new_key):
            # Not comparable: error was measured under different keys.
            return None
        if self.relative_error_div_by_old:
            relative_error = ((new_error - old_error) / abs(old_error))
        else:
            relative_error = ((new_error - old_error) / abs(new_error))
        if self.relative_error_also_relative_to_learning_rate:
            learning_rate = self.get_most_recent_learning_rate(new_epoch, exclude_current=False)
            if (learning_rate > 0):
                # Scale by how far the lr already deviates from the default.
                relative_error /= (learning_rate / self.default_learning_rate)
        return relative_error
    def set_epoch_error(self, epoch, error):
        """
        :type epoch: int
        :type error: dict[str,float|dict[str,float]]
        """
        if (epoch not in self.epoch_data):
            print(('Learning rate not set for epoch %i. Assuming default.' % epoch), file=log.v4)
            self.get_learning_rate_for_epoch(epoch)  # make sure it is set
        assert isinstance(error, dict)
        error = error.copy()
        # Flatten nested dicts: {"dev": {"score": x, ...}} -> {"dev_score": x, ...};
        # a single-entry inner dict collapses to its sole value under the outer key.
        for (k, v) in list(error.items()):
            if isinstance(v, dict):
                del error[k]
                if (len(v) == 1):
                    error[k] = list(v.values())[0]
                    continue
                for (k1, v1) in v.items():
                    if (':' in k1):
                        k1 = k1[(k1.index(':') + 1):]  # strip "<prefix>:" from the inner key
                    error[((k + '_') + k1)] = v1
        for v in error.values():
            assert isinstance(v, float)
        self.epoch_data[epoch].error.update(error)
        if (epoch == 1):
            print(('Learning-rate-control: error key %r from %r' % (self.get_error_key(epoch), error)), file=log.v4)
    def get_error_key(self, epoch):
        """
        :param int epoch:
        :return: key which we should look in scores/errors, for this epoch
        :rtype: str
        """
        if (epoch not in self.epoch_data):
            if isinstance(self.error_measure_key, list):
                return self.error_measure_key[0]
            assert isinstance(self.error_measure_key, (str, type(None)))
            return self.error_measure_key
        epoch_data = self.epoch_data[epoch]
        if (not epoch_data.error):
            return None
        if ((len(epoch_data.error) == 1) and ('old_format_score' in epoch_data.error)):
            return 'old_format_score'
        # Candidate keys, in priority order: configured key(s) (plus "_output"
        # variants), then dev scores, then any dev key, then train scores.
        keys = []
        if isinstance(self.error_measure_key, list):
            for key in self.error_measure_key:
                keys += [key, (key + '_output')]
        elif isinstance(self.error_measure_key, str):
            keys += [self.error_measure_key, (self.error_measure_key + '_output')]
        else:
            assert (self.error_measure_key is None)
            keys += ['dev_score', 'dev_score_output']
        for key in keys:
            if (key in epoch_data.error):
                return key
        for key in sorted(epoch_data.error.keys()):
            if ((key == 'dev_score_output/output') or key.startswith('dev_score_output/output_')):
                return key
        for key in sorted(epoch_data.error.keys()):
            if key.startswith('dev_score_output/'):
                return key
        for key in sorted(epoch_data.error.keys()):
            if key.startswith('dev_'):
                return key
        for key in ['train_score', 'train_score_output']:
            if (key in epoch_data.error):
                return key
        # Last resort: the alphabetically first key.
        return min(epoch_data.error.keys())
    def get_epoch_error_dict(self, epoch):
        """
        :param int epoch:
        :rtype: dict[str,float]
        """
        if (epoch not in self.epoch_data):
            return {}
        return self.epoch_data[epoch].error
    def get_epoch_error_value(self, epoch):
        """
        :param int epoch:
        :return: error/score for the specific epoch, given the error-key, see :func:`get_error_key`
        :rtype: float
        """
        error = self.get_epoch_error_dict(epoch)
        if (not error):
            return None
        key = self.get_error_key(epoch)
        assert key
        assert (key in error), ('%r not in %r. fix %r in config. set it to %r or so.' % (key, error, 'learning_rate_control_error_measure', 'dev_error'))
        return error[key]
    def get_epoch_error_key_value(self, epoch):
        """
        :param int epoch:
        :return: key, error
        :rtype: (str, float)
        """
        error = self.get_epoch_error_dict(epoch)
        if (not error):
            return (None, None)
        key = self.get_error_key(epoch)
        assert key
        assert (key in error), ('%r not in %r. fix %r in config. set it to %r or so.' % (key, error, 'learning_rate_control_error_measure', 'dev_error'))
        return (key, error[key])
    def get_last_best_epoch(self, last_epoch, first_epoch=1, only_last_epochs=None, filter_score=float('inf'), only_last_n=(- 1), min_score_dist=0.0):
        """
        :param int first_epoch: will check all epochs >= first_epoch
        :param int last_epoch: inclusive. will check all epochs <= last_epoch
        :param int|None only_last_epochs: if set, will only check the last N epochs, inclusive
        :param float filter_score: all epochs which values over this score are not considered
        :param int only_last_n: if set (>=1), *from the resulting list*, we consider only the last only_last_n
        :param float min_score_dist: filter out epochs where the diff to the most recent is not big enough
        :return: the last best epoch. to get the details then, you might want to use getEpochErrorDict.
        :rtype: int|None
        """
        if only_last_epochs:
            first_epoch = max(first_epoch, ((last_epoch - only_last_epochs) + 1))
        if (first_epoch > last_epoch):
            return None
        values = [(self.get_epoch_error_key_value(ep), ep) for ep in range(first_epoch, (last_epoch + 1))]
        # Drop epochs without an error value.
        values = [((key, v), ep) for ((key, v), ep) in values if (v is not None)]
        if (not values):
            return None
        (last_key, latest_score) = values[(- 1)][0]
        # Only compare epochs measured under the same key as the latest epoch.
        values = [(v, ep) for ((key, v), ep) in values if (key == last_key)]
        values = [(v, ep) for (v, ep) in values if (v <= filter_score)]
        if (not values):
            return None
        if (only_last_n >= 1):
            values = values[(- only_last_n):]
        # Require at least min_score_dist improvement over the latest score.
        values = [(v, ep) for (v, ep) in values if ((v + min_score_dist) < latest_score)]
        if (not values):
            return None
        return min(values)[1]
    def save(self):
        """
        Save the current epoch data to file (self.filename).
        """
        if (not self.filename):
            return
        directory = os.path.dirname(self.filename)
        if (directory and (not os.path.exists(directory))):
            os.makedirs(directory, exist_ok=True)
        # Write to a temp file first, then rename, so a crash mid-write
        # cannot corrupt the existing file.
        tmp_filename = (self.filename + '.new_tmp')
        f = open(tmp_filename, 'w')
        f.write(better_repr(self.epoch_data))
        f.write('\n')
        f.close()
        os.rename(tmp_filename, self.filename)
    def load(self):
        """
        Loads the saved epoch data from file (self.filename).
        """
        # NOTE: uses eval() on the file contents; the file is trusted (it is
        # written by save() above). ObjAsDict(self) makes e.g. EpochData resolvable.
        s = open(self.filename).read()
        self.epoch_data = eval(s, {'nan': float('nan'), 'inf': float('inf')}, ObjAsDict(self))
|
class ConstantLearningRate(LearningRateControl):
    """
    Just a constant learning rate.
    """
    need_error_info = False

    def calc_learning_rate_for_epoch(self, epoch):
        """
        Dummy constant learning rate. Returns initial learning rate.
        :type epoch: int
        :returns learning rate
        :rtype: float
        """
        # Walk back through earlier epochs until one has a recorded lr.
        prev_epoch = self.get_last_epoch(epoch)
        while prev_epoch is not None:
            lr = self.epoch_data[prev_epoch].learning_rate
            if lr is not None:
                return lr
            prev_epoch = self.get_last_epoch(prev_epoch)
        return self.default_learning_rate
|
class NewbobRelative(LearningRateControl):
    """
    If relative diff between old and new error is over some threshold, decay learning rate.
    """

    @classmethod
    def load_initial_kwargs_from_config(cls, config):
        """
        :type config: returnn.config.Config
        :rtype: dict[str]
        """
        kwargs = super(NewbobRelative, cls).load_initial_kwargs_from_config(config)
        kwargs['relative_error_threshold'] = config.float('newbob_relative_error_threshold', -0.01)
        return kwargs

    def __init__(self, relative_error_threshold, **kwargs):
        """
        :type relative_error_threshold: float
        """
        super(NewbobRelative, self).__init__(**kwargs)
        self.relative_error_threshold = relative_error_threshold

    def calc_learning_rate_for_epoch(self, epoch):
        """
        Newbob+ on train data.
        :type epoch: int
        :returns learning rate
        :rtype: float
        """
        prev_epoch = self.get_last_epoch(epoch)
        if prev_epoch is None:
            return self.default_learning_rate
        lr = self.epoch_data[prev_epoch].learning_rate
        if lr is None:
            return self.default_learning_rate
        prev2_epoch = self.get_last_epoch(prev_epoch)
        if prev2_epoch is None:
            return lr  # need two epochs of history to compare
        rel_err = self.calc_relative_error(prev2_epoch, prev_epoch)
        if rel_err is None:
            return lr
        # Decay when the error did not improve by more than the threshold.
        return self.calc_learning_rate_decay_or_grow(lr, decay=rel_err > self.relative_error_threshold)
|
class NewbobAbs(LearningRateControl):
    """
    If absolute diff between old and new error is over some threshold, decay learning rate.
    """

    @classmethod
    def load_initial_kwargs_from_config(cls, config):
        """
        :type config: returnn.config.Config
        :rtype: dict[str]
        """
        kwargs = super(NewbobAbs, cls).load_initial_kwargs_from_config(config)
        kwargs['error_threshold'] = config.float('newbob_error_threshold', -0.01)
        return kwargs

    def __init__(self, error_threshold, **kwargs):
        """
        :type error_threshold: float
        """
        super(NewbobAbs, self).__init__(**kwargs)
        self.error_threshold = error_threshold

    def calc_learning_rate_for_epoch(self, epoch):
        """
        Newbob+ on train data.

        :type epoch: int
        :returns learning rate
        :rtype: float
        """
        prev_epoch = self.get_last_epoch(epoch)
        if prev_epoch is None:
            return self.default_learning_rate
        lr = self.epoch_data[prev_epoch].learning_rate
        if lr is None:
            return self.default_learning_rate
        prev2_epoch = self.get_last_epoch(prev_epoch)
        if prev2_epoch is None:
            return lr  # need two epochs of history to compare
        old_key, old_error = self.get_epoch_error_key_value(prev2_epoch)
        new_key, new_error = self.get_epoch_error_key_value(prev_epoch)
        # Only comparable when both epochs have a value under the same key.
        if old_error is None or new_error is None:
            return lr
        if old_key != new_key:
            return lr
        should_decay = (new_error - old_error) > self.error_threshold
        return self.calc_learning_rate_decay_or_grow(lr, decay=should_decay)
|
class NewbobMultiEpoch(LearningRateControl):
    """
    Like :class:`NewbobRelative`, but looks at the average relative error over multiple epochs.
    This is useful together with ``partition_epoch`` from :class:`Dataset`.
    """

    @classmethod
    def load_initial_kwargs_from_config(cls, config):
        """
        :type config: returnn.config.Config
        :rtype: dict[str]
        """
        kwargs = super(NewbobMultiEpoch, cls).load_initial_kwargs_from_config(config)
        # Note: the nested config reads are kept as separate calls on purpose,
        # to preserve the exact config access pattern.
        kwargs.update({
            'num_epochs': config.int('newbob_multi_num_epochs', 5),
            'update_interval': config.int('newbob_multi_update_interval', config.int('newbob_multi_num_epochs', 5)),
            'relative_error_threshold': config.float('newbob_relative_error_threshold', -0.01),
            'relative_error_grow_threshold': config.float('newbob_relative_error_grow_threshold', config.float('newbob_relative_error_threshold', -0.01)),
        })
        return kwargs

    def __init__(self, num_epochs, update_interval, relative_error_threshold, relative_error_grow_threshold, **kwargs):
        """
        :param int num_epochs:
        :param int update_interval:
        :param float relative_error_threshold:
        :param float relative_error_grow_threshold:
        """
        super(NewbobMultiEpoch, self).__init__(**kwargs)
        assert num_epochs >= 1
        self.num_epochs = num_epochs
        assert update_interval >= 1
        self.update_interval = update_interval
        self.relative_error_threshold = relative_error_threshold
        self.relative_error_grow_threshold = relative_error_grow_threshold

    def _calc_mean_relative_error(self, epochs):
        """
        :param list[int] epochs:
        :return: mean of relative errors
        :rtype: float|None
        """
        assert len(epochs) >= 2
        # Relative error of each consecutive epoch pair.
        rel_errors = [self.calc_relative_error(prev, cur) for (prev, cur) in zip(epochs[:-1], epochs[1:])]
        if any(e is None for e in rel_errors):
            return None
        return float(numpy.mean(rel_errors))

    def _calc_recent_mean_relative_error(self, epoch):
        """
        :param int epoch:
        :return: recent mean of relative errors
        :rtype: float|None
        """
        recent = self._last_epochs_for_epoch(epoch, num_epochs=self.num_epochs + 1)
        if len(recent) <= 1:
            # Need at least two epochs to form one relative error.
            return None
        return self._calc_mean_relative_error(recent)

    def calc_learning_rate_for_epoch(self, epoch):
        """
        Newbob+ on train data.
        :type epoch: int
        :returns learning rate
        :rtype: float
        """
        lr = self.get_most_recent_learning_rate(epoch)
        # Only reconsider the lr at the start of each update interval.
        if self.update_interval > 1 and epoch % self.update_interval != 1:
            return lr
        mean_rel_err = self._calc_recent_mean_relative_error(epoch)
        if mean_rel_err is None:
            return lr
        return self.calc_learning_rate_decay_or_grow(
            lr,
            decay=mean_rel_err > self.relative_error_threshold,
            grow=mean_rel_err < self.relative_error_grow_threshold)
|
def learning_rate_control_type(type_name):
    """
    :param str type_name:
    :rtype: type[LearningRateControl]|LearningRateControl
    """
    type_map = {
        'constant': ConstantLearningRate,
        'newbob': NewbobRelative,
        'newbob_rel': NewbobRelative,
        'newbob_relative': NewbobRelative,
        'newbob_abs': NewbobAbs,
        'newbob_multi_epoch': NewbobMultiEpoch,
    }
    assert type_name in type_map, ('unknown learning-rate-control type %s' % type_name)
    return type_map[type_name]
|
def load_learning_rate_control_from_config(config):
    """
    Instantiate the scheduler selected by the ``learning_rate_control`` config value.

    :type config: returnn.config.Config
    :rtype: LearningRateControl
    """
    type_name = config.value('learning_rate_control', 'constant')
    control_cls = learning_rate_control_type(type_name)
    return control_cls.load_initial_from_config(config)
|
def demo():
    '\n Demo run. Given some learning rate file (with scores / existing lrs), will calculate how lrs would have been set,\n given some config.\n '
    from returnn.util import better_exchook
    better_exchook.install()
    import returnn.__main__ as rnn
    import sys
    if (len(sys.argv) <= 1):
        # NOTE: deliberately only prints usage and continues -- options can also be
        # given via "++" args, so init_config below might still succeed.
        print(('usage: python %s [config] [other options] [++check_learning_rates 1]' % __file__))
        print(('example usage: python %s ++learning_rate_control newbob ++learning_rate_file newbob.data ++learning_rate 0.001' % __file__))
    rnn.init_config(command_line_options=sys.argv[1:])
    rnn.config._hack_value_reading_debug()
    # No extra log targets; we print directly to stdout below.
    rnn.config.update({'log': []})
    rnn.init_log()
    rnn.init_backend_engine()
    check_lr = rnn.config.bool('check_learning_rates', False)
    from returnn.pretrain import pretrain_from_config
    pretrain = pretrain_from_config(rnn.config)
    first_non_pretrain_epoch = 1
    pretrain_learning_rate = None
    if pretrain:
        first_non_pretrain_epoch = (pretrain.get_train_num_epochs() + 1)
    # Most verbose logging for the demo output.
    log.initialize(verbosity=[5])
    control = load_learning_rate_control_from_config(rnn.config)
    print(('LearningRateControl: %r' % control))
    if (not control.epoch_data):
        print('No epoch data so far.')
        return
    first_epoch = min(control.epoch_data.keys())
    if (first_epoch != 1):
        print(('Strange, first epoch from epoch data is %i.' % first_epoch))
    print(('Error key: %s from %r' % (control.get_error_key(epoch=first_epoch), control.epoch_data[first_epoch].error)))
    if pretrain:
        pretrain_learning_rate = rnn.config.float('pretrain_learning_rate', control.default_learning_rate)
    max_epoch = max(control.epoch_data.keys())
    # Replay the lr schedule for all stored epochs plus one epoch beyond.
    for epoch in range(1, (max_epoch + 2)):
        old_learning_rate = None
        if (epoch in control.epoch_data):
            old_learning_rate = control.epoch_data[epoch].learning_rate
        if (epoch < first_non_pretrain_epoch):
            # During pretrain, the lr is fixed.
            learning_rate = pretrain_learning_rate
            s = ('Pretrain epoch %i, fixed learning rate: %s (was: %s)' % (epoch, learning_rate, old_learning_rate))
        elif (1 < first_non_pretrain_epoch == epoch):
            # First epoch after pretrain starts from the default lr again.
            learning_rate = control.default_learning_rate
            s = ('First epoch after pretrain, epoch %i, fixed learning rate: %s (was %s)' % (epoch, learning_rate, old_learning_rate))
        else:
            learning_rate = control.calc_new_learning_rate_for_epoch(epoch)
            s = ('Calculated learning rate for epoch %i: %s (was: %s)' % (epoch, learning_rate, old_learning_rate))
        if (learning_rate < control.min_learning_rate):
            learning_rate = control.min_learning_rate
            s += (', clipped to %s' % learning_rate)
        s += (', previous relative error: %s' % control.calc_relative_error((epoch - 2), (epoch - 1)))
        if hasattr(control, '_calc_recent_mean_relative_error'):
            s += (', previous mean relative error: %s' % control._calc_recent_mean_relative_error(epoch))
        print(s)
        if (check_lr and (old_learning_rate is not None)):
            if (old_learning_rate != learning_rate):
                # With ++check_learning_rates 1, any mismatch to the stored lr is fatal.
                print(('Learning rate is different in epoch %i!' % epoch))
                sys.exit(1)
        # Store the (re)calculated lr so that following epochs build on it.
        if (epoch in control.epoch_data):
            control.epoch_data[epoch].learning_rate = learning_rate
        else:
            control.epoch_data[epoch] = control.EpochData(learning_rate=learning_rate)
    print(('Finished, last stored epoch was %i.' % max_epoch))
|
class Stream:
    """
    Simple stream wrapper, which provides :func:`write` and :func:`flush`.
    Collected text is forwarded to the given logger one line at a time.
    """

    def __init__(self, log, lvl):
        """
        :type log: logging.Logger
        :type lvl: int
        """
        self.buf = io.StringIO()
        self.log = log
        self.lvl = lvl
        self.lock = RLock()

    def write(self, msg):
        """
        :param str msg:
        """
        with self.lock:
            if msg != '\n':
                # Accumulate until a bare newline arrives (as emitted by print()).
                self.buf.write(msg)
                return
            self.flush()

    def flush(self):
        """
        Flush, i.e. writes to the log.
        """
        with self.lock:
            self.buf.flush()
            pending = self.buf.getvalue()
            self.log.log(self.lvl, pending)
            # Reset the buffer for the next line.
            self.buf.truncate(0)
            self.buf.seek(0)
|
class Log:
    """
    The main logging class.

    Wraps the "returnn" :class:`logging.Logger` and exposes per-verbosity-level
    :class:`Stream` objects (``v1`` .. ``v5``) after :func:`initialize` was called.
    """

    def __init__(self):
        self.initialized = False  # set True by initialize()
        self.filename = None  # log filename, if a file target was configured
        self.v = None  # list of 6 loggers, indexed by verbosity level 0-5
        self.verbose = None  # list of 6 bools: whether each verbosity level is enabled
        # Stream wrappers per verbosity level, created in initialize().
        self.v1 = None
        self.v2 = None
        self.v3 = None
        self.v4 = None
        self.v5 = None
        self._printed_warning_history = set()  # warning texts already printed, for dedup

    def initialize(self, logs=None, verbosity=None, formatter=None, propagate=False):
        """
        This resets and configures the "returnn" logger.

        :param list[str|logging.Handler] logs: "stdout", "|<pipe-cmd>", "<filename>"|"<filename>$date<ext>".
            "stdout" is always added when propagate=False.
        :param list[int] verbosity: levels 0-5 for the log handlers
        :param list[str] formatter: 'default', 'timed', 'raw' or 'verbose', for the log handlers
        :param bool propagate:
        """
        # Fix: work on copies, so we never mutate the caller's argument lists
        # (we possibly append "stdout" to logs below, and rebind formatter).
        logs = list(logs) if (logs is not None) else []
        verbosity = list(verbosity) if (verbosity is not None) else []
        formatter = list(formatter) if (formatter is not None) else []
        self.initialized = True
        # NOTE(review): datefmt "...%H:%M:%S.%MS" repeats %M (minutes) plus a literal "S";
        # presumably milliseconds were intended. Kept as-is to not change the log output format.
        fmt = {'default': logging.Formatter('%(message)s'), 'timed': logging.Formatter('%(asctime)s %(message)s', datefmt='%Y-%m-%d,%H:%M:%S.%MS'), 'raw': logging.Formatter('%(message)s'), 'verbose': logging.Formatter('%(levelname)s - %(asctime)s %(message)s', datefmt='%Y-%m-%d,%H:%M:%S.%MS')}
        logger = logging.getLogger('returnn')
        logger.propagate = propagate
        logger.handlers = []  # reset handlers from any earlier initialize() call
        # All verbosity levels share the same logger; filtering happens per handler level.
        self.v = [logger] * 6
        if ('stdout' not in logs) and (not propagate):
            logs.append('stdout')
        if len(formatter) == 1:
            # A single formatter applies to all log targets.
            formatter = [formatter[0]] * len(logs)
        # Display our custom in-between levels like the corresponding std levels.
        logging.addLevelName(logging.DEBUG + 2, 'DEBUG')
        logging.addLevelName(logging.DEBUG + 1, 'DEBUG')
        logging.addLevelName(logging.DEBUG + 0, 'DEBUG')
        logging.addLevelName(logging.INFO + 1, 'INFO')
        logging.addLevelName(logging.INFO + 0, 'INFO')
        # Map our verbosity levels (0-5) to std logging levels.
        _VerbosityToLogLevel = {0: logging.ERROR, 1: logging.INFO + 1, 2: logging.INFO, 3: logging.DEBUG + 2, 4: logging.DEBUG + 1, 5: logging.DEBUG}
        self.verbose = [False] * 6
        for i in range(len(logs)):
            t = logs[i]
            v = 3  # default verbosity for this target
            if i < len(verbosity):
                v = verbosity[i]
            elif len(verbosity) == 1:
                # A single verbosity applies to all targets.
                v = verbosity[0]
            assert v <= 5, ('invalid verbosity: ' + str(v))
            # Verbosity level v enables all levels <= v.
            for j in range(v + 1):
                self.verbose[j] = True
            f = fmt['default'] if ((i >= len(formatter)) or (formatter[i] not in fmt)) else fmt[formatter[i]]
            if isinstance(t, logging.Handler):
                handler = t
            elif t == 'stdout':
                handler = StdoutHandler()
            elif t.startswith('|'):
                # Pipe target: spawn the command and log into its stdin.
                proc_cmd = t[1:].strip()
                from subprocess import Popen, PIPE
                proc = Popen(proc_cmd, shell=True, stdin=PIPE)
                handler = logging.StreamHandler(proc.stdin)
            elif os.path.isdir(os.path.dirname(t)):
                # File target. "$date" in the name is substituted by the start time.
                if '$' in t:
                    from returnn.util.basic import get_utc_start_time_filename_part
                    t = string.Template(t).substitute(date=get_utc_start_time_filename_part())
                self.filename = t
                handler = logging.FileHandler(t)
            else:
                assert False, ('invalid log target %r' % t)
            handler.setLevel(_VerbosityToLogLevel[v])
            handler.setFormatter(f)
            logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
        if not logger.handlers:
            # Avoid the "no handlers could be found" warning.
            logger.addHandler(logging.NullHandler())
        self.v1 = Stream(self.v[1], _VerbosityToLogLevel[1])
        self.v2 = Stream(self.v[2], _VerbosityToLogLevel[2])
        self.v3 = Stream(self.v[3], _VerbosityToLogLevel[3])
        self.v4 = Stream(self.v[4], _VerbosityToLogLevel[4])
        self.v5 = Stream(self.v[5], _VerbosityToLogLevel[5])

    def init_by_config(self, config):
        """
        :param returnn.config.Config config:
        """
        logs = config.list('log', [])
        log_verbosity = config.int_list('log_verbosity', [])
        log_format = config.list('log_format', [])
        if config.typed_value('torch_distributed') is not None:
            # Distributed PyTorch: give each rank its own log file.
            import returnn.torch.distributed
            torch_distributed = returnn.torch.distributed.get_ctx(config=config)
            new_logs = []
            for fn in logs:
                (fn_prefix, fn_ext) = os.path.splitext(fn)
                fn_ext = '.torch-distrib-%i-%i%s' % (torch_distributed.rank(), torch_distributed.size(), fn_ext)
                new_logs.append(fn_prefix + fn_ext)
            logs = new_logs
        elif config.is_true('use_horovod'):
            # Horovod is only supported with the TF backend; each rank gets its own log file.
            assert config.bool('use_tensorflow', False) or config.value('backend', '').startswith('tensorflow')
            import returnn.tf.horovod
            hvd = returnn.tf.horovod.get_ctx(config=config)
            new_logs = []
            for fn in logs:
                (fn_prefix, fn_ext) = os.path.splitext(fn)
                fn_ext = '.horovod-%i-%i%s' % (hvd.rank(), hvd.size(), fn_ext)
                new_logs.append(fn_prefix + fn_ext)
            logs = new_logs
        self.initialize(logs=logs, verbosity=log_verbosity, formatter=log_format)

    def print_warning(self, text, prefix_text='WARNING:', extra_text=None):
        """
        Write a warning to log.v2. Does not write repeated warnings.

        :param str text:
        :param str prefix_text:
        :param str|None extra_text:
        """
        if text in self._printed_warning_history:
            return
        self._printed_warning_history.add(text)
        print(prefix_text, text, file=log.v2)
        if extra_text:
            print(extra_text, file=log.v2)

    def print_deprecation_warning(self, text, behavior_version=None):
        """
        Write a deprecation warning to log.v2. Does not write repeated warnings.

        :param str text:
        :param int|None behavior_version: if this deprecation is already covered by a behavior_version check
        """
        if behavior_version:
            behavior_text = 'This will be disallowed with behavior_version %d.' % behavior_version
        else:
            behavior_text = 'This might be disallowed with a future behavior_version.'
        self.print_warning(text, prefix_text='DEPRECATION WARNING:', extra_text=behavior_text)

    def flush(self):
        """
        Flush all streams.
        """
        for stream in [self.v1, self.v2, self.v3, self.v4, self.v5]:
            if stream:
                stream.flush()
|
class StdoutHandler(logging.StreamHandler):
    """
    A StreamHandler that always writes to whatever ``sys.stdout`` currently is,
    instead of capturing the ``sys.stdout`` object at construction time.

    Copied and adopted from logging._StderrHandler.
    """

    def _current_stdout(self):
        # Resolve the stream lazily, on every access.
        return sys.stdout

    def _ignore_set_stream(self, stream):
        # StreamHandler.__init__ assigns self.stream; silently discard that.
        pass

    stream = property(_current_stdout, _ignore_set_stream)
|
class StreamThreadLocal(threading.local):
    """
    A write-only stream which buffers everything per-thread and does not forward
    it to any other stream. The idea is that multiple tasks run in multiple
    threads and you want to catch all the logging/stdout of each one separately,
    without cluttering the common output.
    """

    def __init__(self):
        # threading.local runs __init__ once per accessing thread,
        # so every thread gets its own independent buffer.
        self.buf = io.StringIO()

    def write(self, msg):
        """
        :param str msg:
        """
        self.buf.write(msg)

    def flush(self):
        """
        Ignored.
        """
|
class StreamDummy:
    """
    A sink which silently discards any data written to it.
    """

    def write(self, msg):
        """
        Ignored.

        :param str msg:
        """

    def flush(self):
        """
        Ignored.
        """
|
@contextlib.contextmanager
def wrap_log_streams(alternative_stream, also_sys_stdout=False, tf_log_verbosity=None):
    """
    :param StreamThreadLocal|StreamDummy alternative_stream:
    :param bool also_sys_stdout: wrap sys.stdout as well
    :param int|str|None tf_log_verbosity: e.g. "WARNING"
    :return: context manager which yields (original info stream v1, alternative_stream)
    """
    # NOTE(review): this saves/replaces "v0" and "error" as well -- confirm the Log
    # class defines those attributes (the Log shown in this file only has v1..v5).
    attrib_keys = ['v%i' % i for i in range(6)] + ['error']
    saved_v_list = log.v
    saved_attribs = {key: getattr(log, key) for key in attrib_keys}
    saved_stdout = sys.stdout
    # Redirect everything to the alternative stream.
    log.v = [alternative_stream] * len(saved_v_list)
    for key in attrib_keys:
        setattr(log, key, alternative_stream)
    if also_sys_stdout:
        sys.stdout = alternative_stream
    saved_tf_verbosity = None
    if tf_log_verbosity is not None:
        import returnn.tf.compat as tf_compat
        saved_tf_verbosity = tf_compat.v1.logging.get_verbosity()
        tf_compat.v1.logging.set_verbosity(tf_log_verbosity)
    try:
        yield (saved_attribs['v1'], alternative_stream)
    finally:
        # Restore everything, even if the body raised.
        log.v = saved_v_list
        for key, value in saved_attribs.items():
            setattr(log, key, value)
        if also_sys_stdout:
            sys.stdout = saved_stdout
        if tf_log_verbosity is not None:
            import returnn.tf.compat as tf_compat
            tf_compat.v1.logging.set_verbosity(saved_tf_verbosity)
|
class NativeOpBaseMixin(object):
    '\n The purpose of having this as a separate base class is to make this independent of any Theano specific\n functionality so that we can also use this base for example for TensorFlow.\n '

    def __init__(self, in_info, out_info, c_fw_code, c_bw_code=None, c_extra_support_code=None, code_version=None, cpu_support=True, grad_input_map=None, name=None):
        '\n :param list[dict(str)] in_info: each dict describes one input var.\n attribs in the dict:\n int ndim: the ndim.\n tuple shape: tuple and can contain None for specific dimensions.\n optional attribs:\n str dtype: "float32" by default.\n bool need_contiguous: false by default.\n int want_inplace: -1 by default. try to optimize to destroy input, on output-index.\n "dummy_out" is a special value which will add another output.\n bool is_inplace: false by default. whether the optimization was applied.\n str gradient: can be "disconnected". see grad().\n bool bw_input: True by default. add this param to the bw input.\n other attribs are just ignored.\n :param list[dict(str)] out_info: like in_info.\n slightly different behavior for:\n shape: we also allow refs to the in_info in the form (in-idx,dim). see infer_shape().\n need_contiguous/want_inplace: used for bw, in case for bw_input == True.\n :param str c_fw_code: C code for forward pass\n :param str|dict[str] c_extra_support_code: C support code (for c_support_code)\n :param str|None c_bw_code: C code for backward pass (for gradient)\n :param tuple[int] code_version: will be returned by c_code_cache_version.\n :param bool cpu_support:\n :param tuple[int]|callable grad_input_map: selection of grad inputs.\n by default, we get all inputs + all outputs + all grad outputs.\n :param str name: name\n '
        assert isinstance(in_info, (list, tuple))
        assert isinstance(out_info, (list, tuple))
        # Expand want_inplace="dummy_out" entries into additional real outputs.
        (in_info, out_info, num_dummy_outs) = self._resolve_want_inplace_dummy(in_info, out_info)
        # make_hashable: presumably so the op spec can be used as a cache/dict key -- defined elsewhere in this file.
        self.in_info = make_hashable(in_info)
        self.out_info = make_hashable(out_info)
        self.num_dummy_outs = num_dummy_outs
        self.c_fw_code = c_fw_code
        self.c_bw_code = c_bw_code
        self.c_extra_support_code = self._reduce_c_extra_support_code(c_extra_support_code)
        self.code_version = (code_version or ())
        self.cpu_support = cpu_support
        self.name = (name or '<anonNativeOp>')
        # Grad-op inputs default to: all inputs + all outputs + all output grads
        # (hence len(in_info) + 2 * len(out_info) params).
        self.grad_input_map = self._convert_grad_input_map(grad_input_map, (len(in_info) + (len(out_info) * 2)))

    @classmethod
    def _resolve_want_inplace_dummy(cls, in_info, out_info):
        # For every input with want_inplace="dummy_out", append a dummy output with the
        # same ndim/dtype (shape refs back to that input) and point want_inplace at it.
        in_info = [dict(info) for info in in_info]  # copy the dicts; we modify them below
        out_info = list(out_info)
        num_dummy_outs = 0
        for (in_idx, info) in enumerate(in_info):
            if (info.get('want_inplace', None) == 'dummy_out'):
                num_dummy_outs += 1
                dummy_out_idx = len(out_info)
                dummy_out = {'ndim': info['ndim'], 'shape': [(in_idx, i) for i in range(info['ndim'])], 'dtype': info.get('dtype', 'float32'), 'name': ('dummy_out_%i' % num_dummy_outs)}
                out_info += [dummy_out]
                info['want_inplace'] = dummy_out_idx
        return (in_info, out_info, num_dummy_outs)

    @classmethod
    def _reduce_c_extra_support_code(cls, c):
        # Normalize None / dict / list-of-snippets into one single C source string.
        if (c is None):
            return ''
        if isinstance(c, dict):
            # Sort by key for a deterministic concatenation order.
            c = [v for (k, v) in sorted(c.items())]
        if isinstance(c, (list, tuple)):
            c = '\n'.join([(v + '\n\n') for v in c])
        # NOTE(review): "unicode" must come from a py2/py3 compat import elsewhere in
        # this file; Python 3 itself has no builtin "unicode" -- confirm.
        assert isinstance(c, (str, unicode))
        return c

    @classmethod
    def _convert_grad_input_map(cls, gi_map, num_params):
        '\n :param gi_map: see grad_input_map argument for self.__init__\n :param int num_params:\n :return: tuple of int\n :rtype: tuple[int]\n '
        if (gi_map is None):
            # Identity selection: take all params.
            gi_map = tuple(range(num_params))
        if callable(gi_map):
            # The callable receives all param indices and returns its selection.
            gi_map = gi_map(*range(num_params))
        if isinstance(gi_map, list):
            gi_map = tuple(gi_map)
        assert isinstance(gi_map, tuple)
        return gi_map

    def _filter_grad_inputs(self, inputs):
        '\n :param list[T] inputs: inputs + outputs + output_grads. can be either symbolic tensors or info dicts\n :return: filtered list, via self.grad_input_map\n :rtype: list[T]\n '
        assert (len(inputs) == (len(self.in_info) + (len(self.out_info) * 2)))
        return [inputs[i] for i in self.grad_input_map]

    def infer_shape(self, node, input_shapes):
        '\n :param node:\n :param input_shapes:\n :rtype: list[tuple[int]]\n '
        assert (len(input_shapes) == len(self.in_info))
        out_shapes = []
        for info in self.out_info:
            out_shape = list(info['shape'])
            for (idx, s) in enumerate(out_shape):
                if isinstance(s, tuple):
                    # A (in-idx, dim-idx) reference into the input shapes; resolve it.
                    assert (len(s) == 2), ('dim %r invalid in info %r' % (s, info))
                    assert (0 <= s[0] < len(input_shapes)), ('dim %r invalid in info %r' % (s, info))
                    assert (0 <= s[1] < self.in_info[s[0]]['ndim']), ('dim idx %r invalid in input %i %r, info %r' % (s[1], s[0], self.in_info[s[0]], info))
                    out_shape[idx] = input_shapes[s[0]][s[1]]
            # After resolving, every dim must be known.
            assert (not any([(s is None) for s in out_shape])), ('out_shape %r, out_info %r' % (out_shape, self.out_info))
            out_shapes += [tuple(out_shape)]
        return out_shapes

    @classmethod
    def _bw_in_var_info(cls, info):
        '\n :param dict[str] info:\n :return: updated info dict for the gradient (bwd) as input\n :rtype: dict[str]\n '
        if ('bw_in_var' in info):
            info = dict(info)  # copy before modifying
            info.update(info.pop('bw_in_var'))
        return info

    @classmethod
    def _bw_grad_var_info(cls, info):
        '\n :param dict[str] info: backward gradient input for one of our outputs\n :return: updated info dict for the gradient (bwd) as input\n :rtype: dict[str]\n '
        info = dict(info)  # copy; we modify below
        if ('bw_grad_var' in info):
            info.update(info.pop('bw_grad_var'))
        if ('name' in info):
            # Prefix to mark this var as a gradient input.
            info['name'] = ('D_' + info['name'])
        return info

    def kwargs_for_grad_op(self):
        '\n :returns: the kwargs for creating a NativeOp for the gradient op. e.g. includes in_info, out_info, etc\n :rtype: dict[str]\n\n Note: The inputs of the gradient are by default: fwd_op.inputs + fwd_op.outputs + output_grads.\n We filter them via self._filter_grad_inputs.\n '
        # Grad-op inputs: fwd inputs + fwd outputs + output grads, then filtered.
        in_info = [self._bw_in_var_info(info) for info in self.in_info]
        in_info += [self._bw_in_var_info(info) for info in self.out_info]
        in_info += [self._bw_grad_var_info(info) for info in self.out_info]
        in_info = self._filter_grad_inputs(in_info)
        # Reverse map: original param index -> position in the filtered grad inputs.
        in_idx_rev = {v: k for (k, v) in enumerate(self.grad_input_map)}
        # Grad-op outputs: one gradient per fwd input (disconnected ones dropped below).
        out_info = [info.copy() for info in self.in_info]
        for (idx, info) in enumerate(out_info):
            info.pop('shape')
            if ('bw_out_var' in info):
                info.update(info['bw_out_var'])
            if ('shape' not in info):
                # Default: same shape as the corresponding fwd input, expressed as refs.
                info['shape'] = [(in_idx_rev[idx], i) for i in range(info['ndim'])]
        out_info = [info for info in out_info if (info.get('gradient', '') != 'disconnected')]
        return dict(name=('GradOf%s' % self.name), in_info=in_info, out_info=out_info, c_fw_code=self.c_bw_code, c_extra_support_code=self.c_extra_support_code, code_version=self.code_version, cpu_support=self.cpu_support)

    def make_results_of_gradient(self, grad_op_outputs, disconnected_type=None):
        '\n :param list[T]|tuple[T] grad_op_outputs: this is already with dummy outputs removed\n :param S disconnected_type:\n :return: gradient for each input of our op\n :rtype: list[T|S]\n '
        if (disconnected_type is None):
            # Default placeholder factory: returns None.
            def disconnected_type():
                'Dummy'
        grad_op_outputs = list(grad_op_outputs)
        results = []
        for info in self.in_info:
            if (info.get('gradient', '') == 'disconnected'):
                # This input has no gradient; emit a placeholder.
                results += [disconnected_type()]
            else:
                # Consume the next grad-op output for this input.
                results += grad_op_outputs[:1]
                grad_op_outputs = grad_op_outputs[1:]
        # All grad-op outputs must have been consumed, one per non-disconnected input.
        assert (len(grad_op_outputs) == 0)
        assert (len(results) == len(self.in_info))
        return results
|
class NativeOpGenBase:
    """
    Base interface for op generation.
    See NativeOp.__init__() for attribs.
    """

    # Subclasses fill in these NativeOp attributes; see NativeOp.__init__().
    in_info = None
    out_info = None
    c_fw_code = None
    c_bw_code = None
    c_extra_support_code = None
    code_version = None
    grad_input_map = None
    theano_custom_grad = None
    cpu_support = True

    @classmethod
    def map_layer_inputs_to_op(cls, *inputs):
        """
        :param inputs:
        :return: inputs
        """
        # Identity mapping by default; subclasses may override.
        return inputs

    @classmethod
    def map_layer_output_from_op(cls, *outputs):
        """
        :param outputs:
        :return: outputs[0]
        """
        # By default the first op output is the layer output.
        return outputs[0]
|
class LstmGenericBase(NativeOpGenBase):
    '\n inputs:\n :param Z: {input,output,forget} gate + cell state. 3d (time,batch,dim*4)\n :param V_h: recurrent matrix. 2d (dim,dim*4)\n :param c: initial cell state. 2d (batch,dim)\n :param i: index. 2d (time,batch) -> 0 or 1\n outputs:\n :param Y: output. 3d (time,batch,dim)\n :param H: gates and cell state. 3d (time,batch,dim*4)\n :param d: final cell state. 2d (batch,dim)\n '
    # Input signature. Z is consumed inplace into output index 1 (H); the "i" index
    # mask has no gradient.
    in_info = ({'name': 'Z', 'ndim': 3, 'shape': (None, None, None), 'need_contiguous': True, 'want_inplace': 1, 'bw_out_var': {'shape': ((2, 0), (2, 1), (0, 1))}}, {'name': 'V_h', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True}, {'name': 'c', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True}, {'name': 'i', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True, 'gradient': 'disconnected'})
    # Output signature. Shapes given as (input-idx, dim-idx) refs; see
    # NativeOpBaseMixin.infer_shape().
    out_info = ({'name': 'Y', 'ndim': 3, 'shape': ((0, 0), (0, 1), (1, 0)), 'need_contiguous': True, 'bw_grad_var': {'want_inplace': 'dummy_out'}}, {'name': 'H', 'ndim': 3, 'shape': ((0, 0), (0, 1), (0, 2)), 'need_contiguous': True, 'bw_in_var': {'want_inplace': 0}}, {'name': 'd', 'ndim': 2, 'shape': ((2, 0), (2, 1)), 'need_contiguous': True})

    @classmethod
    def grad_input_map(cls, Z, V_h, c, i, Y, H, d, DY, DH, Dd):
        '\n Map grads.\n '
        # The backward pass does not need Z, d or DH (cf. the input list in c_bw_code).
        return (V_h, c, i, Y, H, DY, Dd)

    # Kernels for the fused pointwise LSTM math (fwd + bwd). DEF_KERNEL / DEF_KERNEL-style
    # macros and the launch helpers are presumably provided by the native-op support code
    # elsewhere -- not visible here.
    c_extra_support_code = {'lstm_kernel': '\n DEF_KERNEL\n void lstm_kernel(float* data, const float* old_state, bool old_state_strided,\n float* output, float* state_out, int n_cells, int n_batch, const float* i) {\n //layout:\n //data[0*n_cells..1*n_cells-1] : cell state\n //data[1*n_cells..2*n_cells-1] : input gate\n //data[2*n_cells..3*n_cells-1] : forget gate\n //data[3*n_cells..4*n_cells-1] : output gate\n //output[0*n_cells..1*n_cells-1]: cell output\n //repeated for every mini-batch\n\n int idx = threadIdx.x + blockDim.x * blockIdx.x;\n while (idx < n_cells * n_batch) {\n int batch_idx = idx / n_cells;\n int start = batch_idx * 4 * n_cells + idx % n_cells;\n float i_batch = i[batch_idx];\n\n //input, forget and output gates\n float inpGate = 1.f / (1.f + expf(-data[start + n_cells]));\n float fgtGate = 1.f / (1.f + expf(-data[start + 2 * n_cells]));\n float outGate = 1.f / (1.f + expf(-data[start + 3 * n_cells]));\n float state = inpGate * tanhf(data[start]);\n float old_state_batch = old_state_strided ? \
old_state[start] : old_state[idx];\n\n state += fgtGate * old_state_batch;\n state = state * i_batch + old_state_batch * (1.f - i_batch);\n\n //cell output\n output[idx] = outGate * tanhf(state) * i_batch;\n\n data[start] = state;\n data[start + n_cells] = inpGate;\n data[start + 2 * n_cells] = fgtGate;\n data[start + 3 * n_cells] = outGate;\n if(state_out)\n state_out[idx] = state;\n\n idx += gridDim.x * blockDim.x;\n }\n }\n ', 'lstm_bwd_kernel': '\n DEF_KERNEL\n void lstm_bwd_kernel(\n float* delta, float* epsilon, const float* next_epsilon, const float* old_state,\n bool old_state_strided, const float* Y, int n_cells, int n_batch, const float* i) {\n //layout:\n //delta[0*n_cells..1*n_cells-1] : input gate\n //delta[1*n_cells..2*n_cells-1] : forget gate\n //delta[2*n_cells..3*n_cells-1] : output gate\n //delta[3*n_cells..4*n_cells-1] : cell state\n //epsilon[0*n_cells..1*n_cells-1]: cell output derivative (later overwritten, see below)\n //next_epsilon[0*n_cells..1*n_cells-1]: cell state derivative * forget_gate (of next timestep)\n //repeated for every mini-batch\n\n int idx = threadIdx.x + blockDim.x * blockIdx.x;\n while (idx < n_cells * n_batch) {\n int batch_idx = idx / n_cells;\n int batch_offset = batch_idx * 4 * n_cells;\n int cell_offset = idx % n_cells;\n int start = batch_offset + cell_offset;\n float i_batch = i[batch_idx];\n\n float inpGate = delta[start + n_cells];\n float fgtGate = delta[start + 2 * n_cells];\n float outGate = delta[start + 3 * n_cells];\n float oldState = old_state_strided ? \
old_state[start] : old_state[idx];\n float state = delta[start];\n float eps = epsilon[idx];\n\n //avoid division by 0\n float gc = tanhf(state); //g(c(t))\n float gzc = (state - fgtGate * oldState) / fmaxf(inpGate, float(1e-16)); //g(z_c(t))\n\n //delta_output\n delta[start + 3 * n_cells] = outGate * (1.f - outGate) * gc * eps * i_batch;\n\n //epsilon_c\n float epsilon_c = (1.f - (gc * gc)) * outGate * eps;\n epsilon_c += next_epsilon[idx];\n epsilon[idx] = epsilon_c * fgtGate * i_batch + next_epsilon[idx] * (1.f - i_batch);\n\n //delta_cell\n delta[start] = inpGate * (1.f - (gzc * gzc)) * epsilon_c * i_batch;\n\n //delta_forget\n delta[start + 2 * n_cells] = fgtGate * (1.f - fgtGate) * oldState * epsilon_c * i_batch;\n\n //delta_input\n delta[start + n_cells] = inpGate * (1.f - inpGate) * gzc * epsilon_c * i_batch;\n\n idx += gridDim.x * blockDim.x;\n }\n }\n '}
    # Forward C code: per timestep, add the recurrent contribution Y[x-1]*V_h into H,
    # then run the fused pointwise kernel.
    c_fw_code = '\n // Z*, V_h, c, i = input_names (*: inplace)\n // Y, H, d = output_names\n assert(n_inputs == 4);\n assert(n_outputs == 3);\n Ndarray* V_h = inputs[1];\n Ndarray* c = inputs[2];\n Ndarray* i = inputs[3];\n Ndarray* Y = *outputs[0];\n Ndarray* H = *outputs[1]; // inplace on Z\n Ndarray* d = *outputs[2];\n\n long T = Ndarray_DIMS(i)[0];\n int n_batch = Ndarray_DIMS(i)[1];\n assert(Ndarray_DIMS(H)[2] %% 4 == 0); // 3 gates + cell\n int n_cells = Ndarray_DIMS(H)[2] / 4;\n\n assert(T > 0);\n for(int x = 0; x < T; ++x) {\n if(x > 0) {\n //H += Y[x-1]*V_h\n affine_y_x(x-1, Y, x, V_h, x, H);\n }\n\n start_dev_kernel(lstm_kernel, (\n data_ptr(H, x),\n x > 0 ? data_ptr(H, x - 1) : Ndarray_DEV_DATA(c),\n x > 0,\n data_ptr(Y, x),\n (x == T - 1) ? Ndarray_DEV_DATA(d) : 0,\n n_cells,\n n_batch,\n Ndarray_DEV_DATA(i) + x * n_batch\n ));\n }\n HANDLE_LAST_ERROR();\n '
    # Backward C code: walk time in reverse, propagate the recurrent grad, run the
    # fused backward kernel, then accumulate DV_h and copy out Dc.
    c_bw_code = '\n // V_h, c, i, Y, H*, DY*, Dd = input_names (*: inplace)\n // DZ, DV_h, Dc, tmpDc = output_names\n assert(n_inputs == 7);\n assert(n_outputs == 4);\n Ndarray* V_h = inputs[0];\n Ndarray* c = inputs[1];\n Ndarray* i = inputs[2];\n Ndarray* Y = inputs[3];\n Ndarray* Dd = inputs[6];\n Ndarray* DZ = *outputs[0]; // inplace on H\n Ndarray* DV_h = *outputs[1];\n Ndarray* Dc = *outputs[2];\n Ndarray* tmpDc = *outputs[3]; // (old DY), inplace buffer\n\n long T = Ndarray_DIMS(i)[0];\n int n_batch = Ndarray_DIMS(i)[1];\n assert(Ndarray_DIMS(DZ)[2] %% 4 == 0); // 3 gates + cell\n int n_cells = Ndarray_DIMS(DZ)[2] / 4;\n\n assert(T > 0);\n for(int x = T - 1; x >= 0; --x) {\n // add recurrent\n bool rightBorder = (x == T - 1);\n if(!rightBorder)\n affine_y_x(x+1, DZ, x, V_h, x, tmpDc, false, true);\n\n start_dev_kernel(lstm_bwd_kernel, (\n data_ptr(DZ, x),\n data_ptr(tmpDc, x),\n rightBorder ? Ndarray_DEV_DATA(Dd) : data_ptr(tmpDc, x + 1),\n x > 0 ? data_ptr(DZ, x - 1) : Ndarray_DEV_DATA(c),\n x > 0,\n data_ptr(Y, x),\n n_cells,\n n_batch,\n Ndarray_DEV_DATA(i) + x * n_batch\n ));\n }\n\n //DV_h = Y[0..end-1]^T * DZ[1..end]\n affine_global(Y, DZ, DV_h, true, false, 1, 0.0f);\n\n Ndarray_DIMS_Type Dc_dim = Ndarray_HOST_DIMS(Dc);\n Ndarray_memcpy(\n Ndarray_DEV_DATA(Dc), Ndarray_DEV_DATA(tmpDc),\n Dc_dim[0] * Dc_dim[1] * sizeof(float));\n HANDLE_LAST_ERROR();\n '
    code_version = ()
|
class LstmLowMem(NativeOpGenBase):
  '\n  This is designed to require minimal memory during training.\n  It only stores the outputs and the cell states,\n  i.e. it requires time * cells * 2 floats for memory in total.\n\n  inputs:\n  :param X: (time,batch,in_dim)\n  :param W: forward+recurrent matrix. 2d (in_dim+dim,dim*4)\n  :param b: bias. 1d (dim*4,)\n  :param y0: initial output|hidden state. 2d (batch,dim)\n  :param c0: initial cell state. 2d (batch,dim)\n  :param i: index. 2d (time,batch) -> 0 or 1\n  :param start: where to start. must be >=0, default is usually 0. dtype int, scalar.\n  :param step: +1 for fwd, -1 for bwd direction. can also be |step|>1 for wider steps. dtype int, scalar.\n    for bwd (<0), will start at T-start-1.\n  outputs:\n  :param Y: output. 3d (time,batch,dim)\n  :param C: cell states. 3d (time,batch,dim). gradient ignored!\n  :param d: final cell state. 2d (batch,dim)\n  '
  # Declarative input specs consumed by the op-generation machinery.
  # All tensors require contiguous memory; `i` (mask), `start` and `step` take
  # no gradient; `start`/`step` are int32 scalars living in host memory.
  in_info = ({'name': 'X', 'ndim': 3, 'shape': (None, None, None), 'need_contiguous': True}, {'name': 'W', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True}, {'name': 'b', 'ndim': 1, 'shape': (None,), 'need_contiguous': True}, {'name': 'y0', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True}, {'name': 'c0', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True}, {'name': 'i', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'start', 'ndim': 0, 'shape': (), 'gradient': 'disconnected', 'dtype': 'int32', 'host_memory': True}, {'name': 'step', 'ndim': 0, 'shape': (), 'gradient': 'disconnected', 'dtype': 'int32', 'host_memory': True})
  # Output shape entries are (input_idx, dim_idx) references, e.g. (0, 0) is
  # X's time dim and (4, 1) is c0's cell dim -- presumably resolved by
  # NativeOpGenBase; confirm against the base class.
  out_info = ({'name': 'Y', 'ndim': 3, 'shape': ((0, 0), (0, 1), (4, 1)), 'need_contiguous': True}, {'name': 'C', 'ndim': 3, 'shape': ((0, 0), (0, 1), (4, 1)), 'need_contiguous': True}, {'name': 'd', 'ndim': 2, 'shape': ((0, 1), (4, 1)), 'need_contiguous': True})
  @classmethod
  def grad_input_map(cls, X, W, b, y0, c0, i, start, step, Y, C, d, DY, DC, Dd):
    '\n    Map args.\n    '
    # The grad op receives the fwd inputs, the fwd outputs Y/C, and the
    # output grads DY/Dd; d and DC are dropped (the class docstring notes
    # the C gradient is ignored).
    return (X, W, b, y0, c0, i, start, step, Y, C, DY, Dd)
  # Device kernels shared by the fwd/bwd C code below:
  #  - lstm_kernel / lstm_bwd_kernel: per-(batch,cell) gate nonlinearities and
  #    their gradients; `mask` blends in the previous state for padded frames;
  #    bias grads are accumulated with elem_atomic_add.
  #  - add_bias_kernel: broadcast-add b over the batch.
  #  - copy_x_h_kernel / inv_copy_x_h_kernel: pack X[t] and Y[t-1] into the
  #    concatenated [x, h] matrix, and scatter its gradient back out.
  c_extra_support_code = {'lstm_kernel': '\n    DEF_KERNEL\n    void lstm_kernel(\n      int n_batch, int n_cells, const float* mask,\n      float* intern,\n      float* prev_c,\n      float* y,\n      float* c)\n    {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while (idx < n_cells * n_batch) {\n        int batch_idx = idx / n_cells;\n        int cell_idx = idx % n_cells;\n        int intern_offset = batch_idx * 4 * n_cells + cell_idx;\n        float prev_c_b = prev_c[idx];\n        float mask_b = mask[batch_idx];\n\n        // cell-in + input, forget and output gates\n        float cellIn = tanhf(intern[intern_offset]);\n        float inpGate = 1.f / (1.f + expf(-intern[intern_offset + n_cells]));\n        float fgtGate = 1.f / (1.f + expf(-intern[intern_offset + 2 * n_cells]));\n        float outGate = 1.f / (1.f + expf(-intern[intern_offset + 3 * n_cells]));\n\n        float c_b = (prev_c_b * fgtGate + cellIn * inpGate) * mask_b\n                    + prev_c_b * (1.f - mask_b);\n        c[idx] = c_b;\n        y[idx] = tanhf(c_b) * outGate * mask_b;\n\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n    ', 'lstm_bwd_kernel': '\n    DEF_KERNEL\n    void lstm_bwd_kernel(\n      int n_batch, int n_in, int n_cells, const float* mask,\n      float* x_h,\n      float* intern,\n      float* prev_c,\n      float* y,\n      float* c,\n      float* d_y,\n      float* d_h,\n      float* d_c,\n      float* d_intern,\n      float* d_b)\n    {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while (idx < n_cells * n_batch) {\n        int batch_idx = idx / n_cells;\n        int cell_idx = idx % n_cells;\n        int intern_offset = batch_idx * 4 * n_cells + cell_idx;\n        float mask_b = mask[batch_idx];\n        float d_y_b = d_y[idx] * mask_b + d_h[idx];\n        float d_c_b = d_c[idx] * mask_b;\n        float prev_c_b = prev_c[idx];\n\n        // cell-in + input, forget and output gates\n        float cellIn = tanhf(intern[intern_offset]);\n        float inpGate = 1.f / (1.f + expf(-intern[intern_offset + n_cells]));\n        float fgtGate = 1.f / (1.f + expf(-intern[intern_offset + 2 * n_cells]));\n        float outGate = 1.f / (1.f + expf(-intern[intern_offset + 3 * n_cells]));\n\n        float c_b = prev_c_b * fgtGate + cellIn * inpGate;\n        float gc = tanhf(c_b);\n        float d_outGate_in = (1.f - outGate) * outGate * gc * d_y_b;\n        float d_c2 = d_c_b + outGate * d_y_b * (1.f - gc * gc);\n        float d_cellIn_in = (1.f - cellIn * cellIn) * inpGate * d_c2;\n        float d_inpGate_in = (1.f - inpGate) * inpGate * cellIn * d_c2;\n        float d_fgtGate_in = (1.f - fgtGate) * fgtGate * prev_c_b * d_c2;\n        d_c[idx] = fgtGate * d_c2 + d_c[idx] * (1.f - mask_b);\n\n        d_intern[intern_offset] = d_cellIn_in;\n        d_intern[intern_offset + n_cells] = d_inpGate_in;\n        d_intern[intern_offset + 2 * n_cells] = d_fgtGate_in;\n        d_intern[intern_offset + 3 * n_cells] = d_outGate_in;\n\n        elem_atomic_add(&d_b[cell_idx], d_cellIn_in);\n        elem_atomic_add(&d_b[cell_idx + n_cells], d_inpGate_in);\n        elem_atomic_add(&d_b[cell_idx + 2 * n_cells], d_fgtGate_in);\n        elem_atomic_add(&d_b[cell_idx + 3 * n_cells], d_outGate_in);\n\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n    ', 'add_bias_kernel': '\n    DEF_KERNEL\n    void add_bias_kernel(int n_batch, int n_dim, float* x, float* b) {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while (idx < n_batch * n_dim) {\n        int dim_idx = idx % n_dim;\n        x[idx] += b[dim_idx];\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n    ', 'copy_x_h_kernel': '\n    DEF_KERNEL\n    void copy_x_h_kernel(\n      int n_batch, int n_in, int n_cells,\n      float* x_h,\n      float* x,\n      float* h)\n    {\n      int n_total_in = n_in + n_cells;\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while (idx < n_batch * n_total_in) {\n        int batch_idx = idx / n_total_in;\n        int in_dim_idx = idx % n_total_in;\n\n        if(in_dim_idx < n_in)\n          x_h[idx] = x[batch_idx * n_in + in_dim_idx];\n        else\n          x_h[idx] = h[batch_idx * n_cells + in_dim_idx - n_in];\n\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n    ', 'inv_copy_x_h_kernel': '\n    DEF_KERNEL\n    void inv_copy_x_h_kernel(\n      int n_batch, int n_in, int n_cells,\n      float* x_h,\n      float* x,\n      float* h)\n    {\n      int n_total_in = n_in + n_cells;\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while (idx < n_batch * n_total_in) {\n        int batch_idx = idx / n_total_in;\n        int in_dim_idx = idx % n_total_in;\n\n        if(in_dim_idx < n_in)\n          x[batch_idx * n_in + in_dim_idx] = x_h[idx];\n        else\n          h[batch_idx * n_cells + in_dim_idx - n_in] = x_h[idx];\n\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n    '}
  # Forward pass: after exhaustive shape checks it loops t = start..end with
  # stride `step` (negative step reverses direction with start mirrored to
  # T-start-1), each iteration packing [X[t], Y[t-1]] into a scratch buffer,
  # doing one GEMM against W plus bias, then running lstm_kernel; finally `d`
  # is copied from the last computed cell state C[t-step].
  c_fw_code = '\n    // X, W, b, y0, c0, i, start, step = input_names\n    // Y, C, d = output_names\n    assert(n_inputs == 8);\n    assert(n_outputs == 3);\n    Ndarray* X = inputs[0];\n    Ndarray* W = inputs[1];\n    Ndarray* b = inputs[2];\n    Ndarray* y0 = inputs[3];\n    Ndarray* c0 = inputs[4];\n    Ndarray* i = inputs[5];\n    assert_cmp(Ndarray_NDIM(inputs[6]), ==, 0);\n    assert_cmp(Ndarray_NDIM(inputs[7]), ==, 0);\n    int start = Ndarray_DEV_DATA_int32_scalar(inputs[6]);\n    int step = Ndarray_DEV_DATA_int32_scalar(inputs[7]);\n    Ndarray* Y = *outputs[0];\n    Ndarray* C = *outputs[1];\n    Ndarray* d = *outputs[2];\n\n    assert_cmp(Ndarray_NDIM(X), ==, 3);\n    assert_cmp(Ndarray_NDIM(W), ==, 2);\n    assert_cmp(Ndarray_NDIM(b), ==, 1);\n    assert_cmp(Ndarray_NDIM(y0), ==, 2);\n    assert_cmp(Ndarray_NDIM(c0), ==, 2);\n    assert_cmp(Ndarray_NDIM(i), ==, 2);\n    assert_cmp(Ndarray_NDIM(Y), ==, 3);\n    assert_cmp(Ndarray_NDIM(C), ==, 3);\n    assert_cmp(Ndarray_NDIM(d), ==, 2);\n    long T = Ndarray_DIMS(i)[0];\n    int n_batch = Ndarray_DIMS(i)[1];\n    int n_cells = Ndarray_DIMS(y0)[1];\n    int n_in = Ndarray_DIMS(X)[2];\n    assert_cmp(Ndarray_DIMS(X)[0], ==, T);\n    assert_cmp(Ndarray_DIMS(X)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(W)[0], ==, n_in + n_cells);\n    assert_cmp(Ndarray_DIMS(W)[1], ==, n_cells * 4);\n    assert_cmp(Ndarray_DIMS(b)[0], ==, n_cells * 4);\n    assert_cmp(Ndarray_DIMS(y0)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(y0)[1], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(c0)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(c0)[1], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(Y)[0], ==, T);\n    assert_cmp(Ndarray_DIMS(Y)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(Y)[2], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(C)[0], ==, T);\n    assert_cmp(Ndarray_DIMS(C)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(C)[2], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(d)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(d)[1], ==, n_cells);\n\n    float* x_h = (float*) device_malloc(n_batch * (n_in + n_cells) * sizeof(float));\n    float* intern = (float*) device_malloc(n_batch * n_cells * 4 * sizeof(float));  // 3 gates + in\n\n    assert_cmp(T, >, 0);\n    assert_cmp(start, >=, 0);\n    assert_cmp(start, <, T);\n    assert_cmp(step, !=, 0);\n    int end = T - 1;\n    if(step < 0) {\n      end = start;\n      start = T - start - 1;\n    }\n    int t = start;\n    for(; (step > 0) ? (t <= end) : (t >= end); t += step) {\n      // x_h = X[t], Y[t-1]\n      start_dev_kernel(copy_x_h_kernel,\n        (n_batch, n_in, n_cells, x_h, data_ptr(X, t), (t != start) ? data_ptr(Y, t-step) : Ndarray_DEV_DATA(y0)));\n      // intern = x_h * W\n      affine_raw(\n        x_h, n_batch, n_in + n_cells,\n        Ndarray_DEV_DATA(W), n_in + n_cells, n_cells * 4,\n        intern, n_batch, n_cells * 4,\n        false, false, 0.0);\n      // intern += b\n      start_dev_kernel(add_bias_kernel, (\n        n_batch, n_cells * 4, intern, Ndarray_DEV_DATA(b)));\n\n      start_dev_kernel(lstm_kernel, (\n        n_batch,\n        n_cells,\n        Ndarray_DEV_DATA(i) + t * n_batch,\n        intern,\n        (t != start) ? data_ptr(C, t-step) : Ndarray_DEV_DATA(c0),\n        data_ptr(Y, t),  // out\n        data_ptr(C, t)  // out\n      ));\n    }\n    HANDLE_LAST_ERROR();\n\n    device_free(x_h);\n    device_free(intern);\n\n    Ndarray_memcpy(Ndarray_DEV_DATA(d), data_ptr(C, t - step), n_batch * n_cells * sizeof(float));\n  '
  # Backward pass: recomputes the gate activations per frame (this is the
  # low-memory trade-off: only Y and C were stored), then runs lstm_bwd_kernel
  # and two GEMMs per frame to accumulate DX/DW/Db while propagating Dh/Dc
  # backwards through time. Dc is seeded with Dd.
  c_bw_code = '\n    // X, W, b, y0, c0, i, start, step,   Y, C,   DY, Dd = input_names\n    // DX, DW, Db, Dh, Dc = output_names\n    assert(n_inputs == 12);\n    assert(n_outputs == 5);\n    Ndarray* X = inputs[0];\n    Ndarray* W = inputs[1];\n    Ndarray* b = inputs[2];\n    Ndarray* y0 = inputs[3];\n    Ndarray* c0 = inputs[4];\n    Ndarray* i = inputs[5];\n    assert_cmp(Ndarray_NDIM(inputs[6]), ==, 0);\n    assert_cmp(Ndarray_NDIM(inputs[7]), ==, 0);\n    int start = Ndarray_DEV_DATA_int32_scalar(inputs[6]);\n    int step = Ndarray_DEV_DATA_int32_scalar(inputs[7]);\n    Ndarray* Y = inputs[8];\n    Ndarray* C = inputs[9];\n    Ndarray* DY = inputs[10];\n    Ndarray* Dd = inputs[11];\n    Ndarray* DX = *outputs[0];\n    Ndarray* DW = *outputs[1];\n    Ndarray* Db = *outputs[2];\n    Ndarray* Dh = *outputs[3];\n    Ndarray* Dc = *outputs[4];\n\n    assert_cmp(Ndarray_NDIM(X), ==, 3);\n    assert_cmp(Ndarray_NDIM(W), ==, 2);\n    assert_cmp(Ndarray_NDIM(b), ==, 1);\n    assert_cmp(Ndarray_NDIM(y0), ==, 2);\n    assert_cmp(Ndarray_NDIM(c0), ==, 2);\n    assert_cmp(Ndarray_NDIM(i), ==, 2);\n    assert_cmp(Ndarray_NDIM(Y), ==, 3);\n    assert_cmp(Ndarray_NDIM(C), ==, 3);\n    assert_cmp(Ndarray_NDIM(DY), ==, 3);\n    assert_cmp(Ndarray_NDIM(Dd), ==, 2);\n    assert_cmp(Ndarray_NDIM(DX), ==, 3);\n    assert_cmp(Ndarray_NDIM(DW), ==, 2);\n    assert_cmp(Ndarray_NDIM(Db), ==, 1);\n    assert_cmp(Ndarray_NDIM(Dh), ==, 2);\n    assert_cmp(Ndarray_NDIM(Dc), ==, 2);\n    long T = Ndarray_DIMS(i)[0];\n    int n_batch = Ndarray_DIMS(i)[1];\n    int n_cells = Ndarray_DIMS(y0)[1];\n    int n_in = Ndarray_DIMS(X)[2];\n    assert_cmp(Ndarray_DIMS(X)[0], ==, T);\n    assert_cmp(Ndarray_DIMS(X)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(W)[0], ==, n_in + n_cells);\n    assert_cmp(Ndarray_DIMS(W)[1], ==, n_cells * 4);\n    assert_cmp(Ndarray_DIMS(b)[0], ==, n_cells * 4);\n    assert_cmp(Ndarray_DIMS(y0)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(y0)[1], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(c0)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(c0)[1], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(Y)[0], ==, T);\n    assert_cmp(Ndarray_DIMS(Y)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(Y)[2], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(C)[0], ==, T);\n    assert_cmp(Ndarray_DIMS(C)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(C)[2], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(DY)[0], ==, T);\n    assert_cmp(Ndarray_DIMS(DY)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(DY)[2], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(Dd)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(Dd)[1], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(DX)[0], ==, T);\n    assert_cmp(Ndarray_DIMS(DX)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(DX)[2], ==, n_in);\n    assert_cmp(Ndarray_DIMS(DW)[0], ==, n_in + n_cells);\n    assert_cmp(Ndarray_DIMS(DW)[1], ==, n_cells * 4);\n    assert_cmp(Ndarray_DIMS(Db)[0], ==, n_cells * 4);\n    assert_cmp(Ndarray_DIMS(Dh)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(Dh)[1], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(Dc)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(Dc)[1], ==, n_cells);\n\n    float* x_h = (float*) device_malloc(n_batch * (n_in + n_cells) * sizeof(float));\n    float* intern = (float*) device_malloc(n_batch * n_cells * 4 * sizeof(float));  // 3 gates + in\n    float* Dx_h = (float*) device_malloc(n_batch * (n_in + n_cells) * sizeof(float));\n    float* Dintern = (float*) device_malloc(n_batch * n_cells * 4 * sizeof(float));  // 3 gates + in\n\n    // We will work inplace on DX/DW/Db.\n    Ndarray_memset(Ndarray_DEV_DATA(DX), 0, T * n_batch * n_in * sizeof(float));\n    Ndarray_memset(Ndarray_DEV_DATA(DW), 0, (n_in + n_cells) * n_cells * 4 * sizeof(float));\n    Ndarray_memset(Ndarray_DEV_DATA(Db), 0, n_cells * 4 * sizeof(float));\n    // We will work inplace on Dh.\n    Ndarray_memset(Ndarray_DEV_DATA(Dh), 0, n_batch * n_cells * sizeof(float));\n    // We will work inplace on Dc, and init it with Dd.\n    Ndarray_memcpy(Ndarray_DEV_DATA(Dc), Ndarray_DEV_DATA(Dd), n_batch * n_cells * sizeof(float));\n\n    assert_cmp(T, >, 0);\n    assert_cmp(start, >=, 0);\n    assert_cmp(start, <, T);\n    assert_cmp(step, !=, 0);\n    int end = T - 1;\n    if(step < 0) {\n      end = start;\n      start = T - start - 1;\n    }\n    int t = end;  // go backwards\n    for(; (step > 0) ? (t >= start) : (t <= start); t -= step) {\n      bool right = (step > 0) ? (t - step >= start) : (t - step <= start);\n\n      // TODO: correct handling of mask in grad, fwd, initial cell,hidden, etc\n      // x_h = X[t], Y[t-1]\n      start_dev_kernel(copy_x_h_kernel,\n        (n_batch, n_in, n_cells,\n         x_h, data_ptr(X, t), right ? data_ptr(Y, t-step) : Ndarray_DEV_DATA(y0)));\n\n      // intern = x_h * W\n      affine_raw(\n        x_h, n_batch, n_in + n_cells,\n        Ndarray_DEV_DATA(W), n_in + n_cells, n_cells * 4,\n        intern, n_batch, n_cells * 4,\n        false, false, 0.0);\n      // intern += b\n      start_dev_kernel(add_bias_kernel, (\n        n_batch, n_cells * 4, intern, Ndarray_DEV_DATA(b)));\n\n      start_dev_kernel(lstm_bwd_kernel, (\n        n_batch,\n        n_in,\n        n_cells,\n        Ndarray_DEV_DATA(i) + t * n_batch,\n        x_h,\n        intern,\n        right ? data_ptr(C, t-step) : Ndarray_DEV_DATA(c0),\n        data_ptr(Y, t),\n        data_ptr(C, t),\n        data_ptr(DY, t),\n        Ndarray_DEV_DATA(Dh),  // error from prev frame, excluding DY. updated below\n        Ndarray_DEV_DATA(Dc),  // in+out, working inplace. also error from prev frame, initially Dd\n        Dintern,  // out\n        Ndarray_DEV_DATA(Db)  // out\n      ));\n\n      // Dx_h = Dintern * W^T\n      affine_raw(\n        Dintern, n_batch, n_cells * 4,\n        Ndarray_DEV_DATA(W), n_in + n_cells, n_cells * 4,\n        Dx_h, n_batch, n_in + n_cells,\n        false, true, 0.0);\n\n      // DW += x_h^T * Dintern\n      affine_raw(\n        x_h, n_batch, n_in + n_cells,\n        Dintern, n_batch, n_cells * 4,\n        Ndarray_DEV_DATA(DW), n_in + n_cells, n_cells * 4,\n        true, false);\n\n      // DX[t], Dh = Dx_h\n      start_dev_kernel(inv_copy_x_h_kernel,\n        (n_batch, n_in, n_cells, Dx_h, data_ptr(DX, t), Ndarray_DEV_DATA(Dh)));\n    }\n    HANDLE_LAST_ERROR();\n\n    device_free(x_h);\n    device_free(intern);\n    device_free(Dx_h);\n    device_free(Dintern);\n  '
|
class NativeLstm2(NativeOpGenBase):
  '\n  Yet another LSTM kernel.\n  This kernel is about 27% faster than NativeLstm,\n  and also has some more options (like the direction).\n  But it requires time * batch * cells more memory,\n  thus time * batch * cells * 6 in total.\n\n  inputs:\n  :param X: (time,batch,dim*4)\n  :param W: recurrent matrix. 2d (dim,dim*4)\n  :param y0: initial output|hidden state. 2d (batch,dim)\n  :param c0: initial cell state. 2d (batch,dim)\n  :param i: index. 2d (time,batch) -> 0 or 1\n  :param start: where to start. must be >=0, default is usually 0. dtype int, scalar.\n  :param step: +1 for fwd, -1 for bwd direction. can also be |step|>1 for wider steps. dtype int, scalar.\n    for bwd (<0), will start at T-start-1.\n  outputs:\n  :param Y: output. 3d (time,batch,dim)\n  :param C: cell states. 3d (time,batch,dim). gradient ignored!\n  :param H: cell-in + gates. 3d (time,batch,dim*4). gradient ignored!\n  :param d: final cell state. 2d (batch,dim)\n  '
  # Unlike LstmLowMem, X already carries the input transformation
  # (time,batch,dim*4); only the recurrent matrix W is applied here.
  # `i`, `start`, `step` take no gradient; start/step are host int32 scalars.
  in_info = ({'name': 'X', 'ndim': 3, 'shape': (None, None, None), 'need_contiguous': True}, {'name': 'W', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True}, {'name': 'y0', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True}, {'name': 'c0', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True}, {'name': 'i', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'start', 'ndim': 0, 'shape': (), 'gradient': 'disconnected', 'dtype': 'int32', 'host_memory': True}, {'name': 'step', 'ndim': 0, 'shape': (), 'gradient': 'disconnected', 'dtype': 'int32', 'host_memory': True})
  # Output shape entries are (input_idx, dim_idx) references, e.g. (1, 0) is
  # W's first dim (n_cells) and (1, 1) is W's second dim (n_cells*4).
  out_info = ({'name': 'Y', 'ndim': 3, 'shape': ((0, 0), (0, 1), (1, 0)), 'need_contiguous': True}, {'name': 'C', 'ndim': 3, 'shape': ((0, 0), (0, 1), (1, 0)), 'need_contiguous': True}, {'name': 'H', 'ndim': 3, 'shape': ((0, 0), (0, 1), (1, 1)), 'need_contiguous': True}, {'name': 'd', 'ndim': 2, 'shape': ((0, 1), (1, 0)), 'need_contiguous': True})
  @classmethod
  def grad_input_map(cls, X, W, y0, c0, i, start, step, Y, C, H, d, DY, DC, DH, Dd):
    """Map fwd inputs/outputs and output grads to the bwd-op inputs (d, DC, DH dropped)."""
    return (X, W, y0, c0, i, start, step, Y, C, H, DY, Dd)
  # Device kernels: lstm_kernel overwrites H[t] in place with the activated
  # gates (so the bwd pass can reuse them without recomputation) and tracks a
  # masked previous output y_prev_out; lstm_bwd_kernel consumes the stored
  # activations, writes gate grads into d_x, mirrors them into the separate
  # d_x0 buffer for the logical first frame, and resets d_h for used frames.
  c_extra_support_code = {'lstm_kernel': '\n    DEF_KERNEL\n    void lstm_kernel(\n      int n_batch, int n_cells, const float* mask,\n      float* h,\n      float* prev_y,\n      float* prev_c,\n      float* y,\n      float* c,\n      float* y_prev_out)\n    {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while (idx < n_cells * n_batch) {\n        int batch_idx = idx / n_cells;\n        int cell_idx = idx % n_cells;\n        int intern_offset = batch_idx * 4 * n_cells + cell_idx;\n        float prev_c_b = prev_c[idx];\n        float mask_b = mask[batch_idx];\n\n        // cell-in + input, forget and output gates\n        float cellIn = tanhf(h[intern_offset]);\n        float inpGate = 1.f / (1.f + expf(-h[intern_offset + n_cells]));\n        float fgtGate = 1.f / (1.f + expf(-h[intern_offset + 2 * n_cells]));\n        float outGate = 1.f / (1.f + expf(-h[intern_offset + 3 * n_cells]));\n\n        h[intern_offset] = cellIn;\n        h[intern_offset + n_cells] = inpGate;\n        h[intern_offset + 2 * n_cells] = fgtGate;\n        h[intern_offset + 3 * n_cells] = outGate;\n\n        float c_b = (prev_c_b * fgtGate + cellIn * inpGate) * mask_b\n                    + prev_c_b * (1.f - mask_b);\n        c[idx] = c_b;\n        float y_b = tanhf(c_b) * outGate * mask_b;\n        y[idx] = y_b;\n        y_prev_out[idx] = y_b + prev_y[idx] * (1.f - mask_b);\n\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n    ', 'lstm_bwd_kernel': '\n    DEF_KERNEL\n    void lstm_bwd_kernel(\n      int n_batch, int n_cells, const float* mask,\n      float* h,\n      float* prev_c,\n      float* y,\n      float* c,\n      float* d_y,\n      float* d_h,\n      float* d_c,\n      float* d_x,\n      float* d_x0)\n    {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while (idx < n_cells * n_batch) {\n        int batch_idx = idx / n_cells;\n        int cell_idx = idx % n_cells;\n        int intern_offset = batch_idx * 4 * n_cells + cell_idx;\n        float mask_b = mask[batch_idx];\n        float d_y_b = (d_y[idx] + d_h[idx]) * mask_b;\n        float d_c_b = d_c[idx] * mask_b;\n        float prev_c_b = prev_c[idx];\n\n        // cell-in + input, forget and output gates\n        float cellIn = h[intern_offset];\n        float inpGate = h[intern_offset + n_cells];\n        float fgtGate = h[intern_offset + 2 * n_cells];\n        float outGate = h[intern_offset + 3 * n_cells];\n\n        float c_b = prev_c_b * fgtGate + cellIn * inpGate;\n        float gc = tanhf(c_b);\n        float d_outGate_in = (1.f - outGate) * outGate * gc * d_y_b;\n        float d_c2 = d_c_b + outGate * d_y_b * (1.f - gc * gc);\n        float d_cellIn_in = (1.f - cellIn * cellIn) * inpGate * d_c2;\n        float d_inpGate_in = (1.f - inpGate) * inpGate * cellIn * d_c2;\n        float d_fgtGate_in = (1.f - fgtGate) * fgtGate * prev_c_b * d_c2;\n        d_c[idx] = fgtGate * d_c2 + d_c[idx] * (1.f - mask_b);\n\n        d_x[intern_offset] = d_cellIn_in;\n        d_x[intern_offset + n_cells] = d_inpGate_in;\n        d_x[intern_offset + 2 * n_cells] = d_fgtGate_in;\n        d_x[intern_offset + 3 * n_cells] = d_outGate_in;\n\n        #define set_x0(off) { d_x0[off] = d_x[off] + d_x0[off] * (1.f - mask_b); }\n        set_x0(intern_offset);\n        set_x0(intern_offset + n_cells);\n        set_x0(intern_offset + 2 * n_cells);\n        set_x0(intern_offset + 3 * n_cells);\n        #undef set_x0\n\n        // Reset if used frame, otherwise leave as-is.\n        d_h[idx] *= (1.f - mask_b);\n\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n    '}
  # Forward pass: H is first initialized to X, then per frame one GEMM adds
  # Y[t-1] * W into H[t] before lstm_kernel consumes/overwrites it in place.
  # T == 0 is handled by just copying c0 into d.
  c_fw_code = '\n    // X, W, y0, c0, i, start, step = input_names\n    // Y, C, H, d = output_names\n    assert(n_inputs == 7);\n    assert(n_outputs == 4);\n    Ndarray* X = inputs[0];\n    Ndarray* W = inputs[1];\n    Ndarray* y0 = inputs[2];\n    Ndarray* c0 = inputs[3];\n    Ndarray* i = inputs[4];\n    assert_cmp(Ndarray_NDIM(inputs[5]), ==, 0);\n    assert_cmp(Ndarray_NDIM(inputs[6]), ==, 0);\n    int start = Ndarray_DEV_DATA_int32_scalar(inputs[5]);\n    int step = Ndarray_DEV_DATA_int32_scalar(inputs[6]);\n    Ndarray* Y = *outputs[0];\n    Ndarray* C = *outputs[1];\n    Ndarray* H = *outputs[2];\n    Ndarray* d = *outputs[3];\n\n    assert_cmp(Ndarray_NDIM(X), ==, 3);\n    assert_cmp(Ndarray_NDIM(W), ==, 2);\n    assert_cmp(Ndarray_NDIM(y0), ==, 2);\n    assert_cmp(Ndarray_NDIM(c0), ==, 2);\n    assert_cmp(Ndarray_NDIM(i), ==, 2);\n    assert_cmp(Ndarray_NDIM(Y), ==, 3);\n    assert_cmp(Ndarray_NDIM(C), ==, 3);\n    assert_cmp(Ndarray_NDIM(H), ==, 3);\n    assert_cmp(Ndarray_NDIM(d), ==, 2);\n    long T = Ndarray_DIMS(i)[0];\n    int n_batch = Ndarray_DIMS(i)[1];\n    int n_cells = Ndarray_DIMS(y0)[1];\n    assert_cmp(Ndarray_DIMS(X)[0], ==, T);\n    assert_cmp(Ndarray_DIMS(X)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(X)[2], ==, n_cells * 4);\n    assert_cmp(Ndarray_DIMS(W)[0], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(W)[1], ==, n_cells * 4);\n    assert_cmp(Ndarray_DIMS(y0)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(y0)[1], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(c0)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(c0)[1], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(Y)[0], ==, T);\n    assert_cmp(Ndarray_DIMS(Y)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(Y)[2], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(C)[0], ==, T);\n    assert_cmp(Ndarray_DIMS(C)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(C)[2], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(H)[0], ==, T);\n    assert_cmp(Ndarray_DIMS(H)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(H)[2], ==, n_cells * 4);\n    assert_cmp(Ndarray_DIMS(d)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(d)[1], ==, n_cells);\n\n    if(T == 0) {\n      Ndarray_memcpy(Ndarray_DEV_DATA(d), Ndarray_DEV_DATA(c0), n_batch * n_cells * sizeof(float));\n\n    } else {  // T > 0\n      // It makes the backprop with step<0 easier to implement,\n      // esp. the DW = Y[0..T-2]^T * DX[1..T-1] calculation,\n      // if we can have Y[t] = 0 where mask[t] = 0.\n      // That is why we need to keep track of Y[t-1] explicitly.\n      float* y_prev = (float*) device_malloc(n_batch * n_cells * sizeof(float));\n\n      // H = X\n      Ndarray_memcpy(Ndarray_DEV_DATA(H), Ndarray_DEV_DATA(X), T * n_batch * n_cells * 4 * sizeof(float));\n\n      assert_cmp(T, >, 0);\n      assert_cmp(start, >=, 0);\n      assert_cmp(start, <, T);\n      assert_cmp(step, !=, 0);\n      int end = T - 1;\n      if(step < 0) {\n        end = 0;\n        start = T - start - 1;\n      }\n      int t = start;\n      for(; (step > 0) ? (t <= end) : (t >= end); t += step) {\n        // H[t] += Y[t-1] * W\n        affine_raw(\n          (t != start) ? y_prev : Ndarray_DEV_DATA(y0), n_batch, n_cells,\n          Ndarray_DEV_DATA(W), n_cells, n_cells * 4,\n          data_ptr(H, t), n_batch, n_cells * 4,\n          false, false);\n\n        start_dev_kernel(lstm_kernel, (\n          n_batch,\n          n_cells,\n          Ndarray_DEV_DATA(i) + t * n_batch,\n          data_ptr(H, t),  // inplace\n          (t != start) ? y_prev : Ndarray_DEV_DATA(y0),\n          (t != start) ? data_ptr(C, t-step) : Ndarray_DEV_DATA(c0),\n          data_ptr(Y, t),  // out\n          data_ptr(C, t),  // out\n          y_prev  // out\n        ));\n      }\n      HANDLE_LAST_ERROR();\n\n      Ndarray_memcpy(Ndarray_DEV_DATA(d), data_ptr(C, t - step), n_batch * n_cells * sizeof(float));\n\n      device_free(y_prev);\n    }\n  '
  # Backward pass: uses the gate activations stored in H (no recomputation),
  # walks the frames in reverse, accumulates (Dy0) DY[t-1] += DX[t] * W^T per
  # frame, and computes DW afterwards in one big GEMM when abs_step == 1 (or a
  # per-frame loop otherwise), plus the dx0 contribution for the first frame.
  c_bw_code = '\n    // X, W, y0, c0, i, start, step,   Y, C, H,   DY, Dd = input_names\n    // DX, DW, Dy0, Dc0 = output_names\n    assert(n_inputs == 12);\n    assert(n_outputs == 4);\n    Ndarray* X = inputs[0];\n    Ndarray* W = inputs[1];\n    Ndarray* y0 = inputs[2];\n    Ndarray* c0 = inputs[3];\n    Ndarray* i = inputs[4];\n    assert_cmp(Ndarray_NDIM(inputs[5]), ==, 0);\n    assert_cmp(Ndarray_NDIM(inputs[6]), ==, 0);\n    int start = Ndarray_DEV_DATA_int32_scalar(inputs[5]);\n    int step = Ndarray_DEV_DATA_int32_scalar(inputs[6]);\n    Ndarray* Y = inputs[7];\n    Ndarray* C = inputs[8];\n    Ndarray* H = inputs[9];\n    Ndarray* DY = inputs[10];\n    Ndarray* Dd = inputs[11];\n    Ndarray* DX = *outputs[0];\n    Ndarray* DW = *outputs[1];\n    Ndarray* Dy0 = *outputs[2];\n    Ndarray* Dc0 = *outputs[3];\n\n    assert_cmp(Ndarray_NDIM(X), ==, 3);\n    assert_cmp(Ndarray_NDIM(W), ==, 2);\n    assert_cmp(Ndarray_NDIM(y0), ==, 2);\n    assert_cmp(Ndarray_NDIM(c0), ==, 2);\n    assert_cmp(Ndarray_NDIM(i), ==, 2);\n    assert_cmp(Ndarray_NDIM(Y), ==, 3);\n    assert_cmp(Ndarray_NDIM(C), ==, 3);\n    assert_cmp(Ndarray_NDIM(H), ==, 3);\n    assert_cmp(Ndarray_NDIM(DY), ==, 3);\n    assert_cmp(Ndarray_NDIM(Dd), ==, 2);\n    assert_cmp(Ndarray_NDIM(DX), ==, 3);\n    assert_cmp(Ndarray_NDIM(DW), ==, 2);\n    assert_cmp(Ndarray_NDIM(Dy0), ==, 2);\n    assert_cmp(Ndarray_NDIM(Dc0), ==, 2);\n    long T = Ndarray_DIMS(i)[0];\n    int n_batch = Ndarray_DIMS(i)[1];\n    int n_cells = Ndarray_DIMS(y0)[1];\n    assert_cmp(Ndarray_DIMS(X)[0], ==, T);\n    assert_cmp(Ndarray_DIMS(X)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(X)[2], ==, n_cells * 4);\n    assert_cmp(Ndarray_DIMS(W)[0], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(W)[1], ==, n_cells * 4);\n    assert_cmp(Ndarray_DIMS(y0)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(y0)[1], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(c0)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(c0)[1], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(Y)[0], ==, T);\n    assert_cmp(Ndarray_DIMS(Y)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(Y)[2], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(C)[0], ==, T);\n    assert_cmp(Ndarray_DIMS(C)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(C)[2], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(H)[0], ==, T);\n    assert_cmp(Ndarray_DIMS(H)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(H)[2], ==, n_cells * 4);\n    assert_cmp(Ndarray_DIMS(DY)[0], ==, T);\n    assert_cmp(Ndarray_DIMS(DY)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(DY)[2], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(Dd)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(Dd)[1], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(DX)[0], ==, T);\n    assert_cmp(Ndarray_DIMS(DX)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(DX)[2], ==, n_cells * 4);\n    assert_cmp(Ndarray_DIMS(DW)[0], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(DW)[1], ==, n_cells * 4);\n    assert_cmp(Ndarray_DIMS(Dy0)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(Dy0)[1], ==, n_cells);\n    assert_cmp(Ndarray_DIMS(Dc0)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(Dc0)[1], ==, n_cells);\n\n    // We will work inplace on DW.\n    Ndarray_memset(Ndarray_DEV_DATA(DW), 0, n_cells * n_cells * 4 * sizeof(float));\n    // We will work inplace on (Dy0) DY[t], initially 0.\n    Ndarray_memset(Ndarray_DEV_DATA(Dy0), 0, n_batch * n_cells * sizeof(float));\n    // We will work inplace on (Dc0) DC[t], and init it with Dd.\n    Ndarray_memcpy(Ndarray_DEV_DATA(Dc0), Ndarray_DEV_DATA(Dd), n_batch * n_cells * sizeof(float));\n\n    if(T == 0) {\n      // just do nothing. at least do not crash\n\n    } else {\n      // Need to keep track of (logical) DX[0], which in practice (masking, step<0)\n      // can be different from data_ptr(DX, start).\n      float* dx0 = (float*) device_malloc(n_batch * n_cells * 4 * sizeof(float));\n      Ndarray_memset(dx0, 0, n_batch * n_cells * 4 * sizeof(float));\n\n      assert_cmp(T, >, 0);\n      assert_cmp(start, >=, 0);\n      assert_cmp(start, <, T);\n      assert_cmp(step, !=, 0);\n      int abs_step = std::abs(step);\n\n      if(abs_step > 1 || start > 0)\n        // Normally the kernel would visit and reset all DX.\n        // But with abs_step>1 or start>0, we will not visit all. Reset now.\n        Ndarray_memset(Ndarray_DEV_DATA(DX), 0, T * n_batch * n_cells * 4 * sizeof(float));\n\n      // e.g.:\n      // step=1, start=0, T=10 -> num_steps=10=T\n      // step=5, start=0, T=10 -> num_steps=2=T/step\n      // step=5, start=0, T=9 -> num_steps=2=(T+step-1)/step\n      // step=5, start=0, T=6 -> num_steps=2=(T+step-1)/step\n      // step=5, start=0, T=5 -> num_steps=1=(T+step-1)/step\n      // step=5, start=4, T=10 -> num_steps=2=(T-start+step-1)/step\n      // step=-5, start=0, T=10 -> num_steps=2=T/abs_step\n      // step=-5, start=0, T=9 -> num_steps=2=(T+abs_step-1)/abs_step\n      // step=-5, start=4, T=10 -> num_steps=2=(T-start+abs_step-1)/abs_step\n      int num_steps = (T - start + abs_step - 1) / abs_step;\n      assert_cmp(num_steps, >, 0);\n      if(step < 0)\n        start = T - start - 1;\n      int end = start + (num_steps - 1) * step;  // inclusive\n      assert_cmp(end, >=, 0);\n      assert_cmp(end, <, T);\n      int t = end;  // go backwards\n      for(; (step > 0) ? (t >= start) : (t <= start); t -= step) {\n        bool right = (step > 0) ? (t - step >= start) : (t - step <= start);\n\n        start_dev_kernel(lstm_bwd_kernel, (\n          n_batch,\n          n_cells,\n          Ndarray_DEV_DATA(i) + t * n_batch,\n          data_ptr(H, t),\n          right ? data_ptr(C, t-step) : Ndarray_DEV_DATA(c0),\n          data_ptr(Y, t),\n          data_ptr(C, t),\n          data_ptr(DY, t),\n          Ndarray_DEV_DATA(Dy0),  // in+out, error from prev frame, excluding DY. reset here, updated below\n          Ndarray_DEV_DATA(Dc0),  // in+out, working inplace. also error from prev frame, initially Dd\n          data_ptr(DX, t),  // out\n          dx0  // out\n        ));\n\n        // (Dy0) DY[t-1] += DX[t] * W^T\n        affine_raw(\n          data_ptr(DX, t), n_batch, n_cells * 4,\n          Ndarray_DEV_DATA(W), n_cells, n_cells * 4,\n          Ndarray_DEV_DATA(Dy0), n_batch, n_cells,\n          false, true);\n      }\n\n      //DW = Y[0..T-2]^T * DX[1..T-1]  (if step==1)\n      if(num_steps > 1) {\n        if(abs_step == 1) {\n          affine_raw(\n            data_ptr(Y, std::min(start, end) + std::max(0, -step)), (num_steps - 1) * n_batch, n_cells,\n            data_ptr(DX, std::min(start, end) + std::max(0, step)), (num_steps - 1) * n_batch, n_cells * 4,\n            Ndarray_DEV_DATA(DW), n_cells, n_cells * 4,\n            true, false, 0.0f, 1.0f);\n        } else {\n          // Unfortunately we cannot do efficient striding. Thus loop again.\n          t = end - step;  // one before\n          for(; (step > 0) ? (t >= start) : (t <= start); t -= step) {\n            affine_raw(\n              data_ptr(Y, t), n_batch, n_cells,\n              data_ptr(DX, t + step), n_batch, n_cells * 4,\n              Ndarray_DEV_DATA(DW), n_cells, n_cells * 4,\n              true, false);\n          }\n        }\n      }\n      HANDLE_LAST_ERROR();\n\n      //DW += y0^T * DX[0]\n      affine_raw(\n        Ndarray_DEV_DATA(y0), n_batch, n_cells,\n        dx0, n_batch, n_cells * 4,\n        Ndarray_DEV_DATA(DW), n_cells, n_cells * 4,\n        true, false);\n\n      device_free(dx0);\n    }\n  '
|
class TwoDLSTM(NativeOpGenBase):
'\n inputs:\n :param X: {input,output,forget,lambda} gate + cell state. 3d (timeT,timeS,batch,dim*5) // dim*5 or dim*1 ?\n :param V_h: recurrent matrix. 2d (dim,dim*5)\n :param V_v: recurrent matrix. 2d (dim,dim*5)\n :param W: recurrent matrix. 2d (dim,dim*5)\n :param b: bias. 2d (batch,dim)\n :param ptr_storage: ptr_storage. 1d (1 * 5 * max_diag_size * sizeof(float*) / sizeof(float))\n :param valid: used internally to store which cells are valid (have to be computed).\n 1d (1 * max_diag_size * n_minibatch)\n :param workmem2: used internally. 3d (H[0], H[2], H[3])\n :param sizes: height (target) x width (source) of the unpadded sentences. 2d (batch, 2)\n outputs:\n :param CompleteY: output. 4d (timeS,timeT,batch,dim)\n :param H: gates and cell state. 4d (timeS,timeT,batch,dim*5) ?\n :param d: final cell state. 3d (timeT,batch,dim)\n '
in_info = ({'name': 'X', 'ndim': 4, 'shape': (None, None, None, None), 'need_contiguous': True, 'bw_out_var': {'shape': ((0, 0), (0, 1), (0, 2), (0, 3))}}, {'name': 'V_h', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True}, {'name': 'V_v', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True}, {'name': 'W', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True}, {'name': 'b', 'ndim': 1, 'shape': (None,), 'need_contiguous': True}, {'name': 'ptr_storage_fwd', 'ndim': 1, 'shape': (None,), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'ptr_storage_bwd', 'ndim': 1, 'shape': (None,), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'valid', 'ndim': 1, 'shape': (None,), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'workmem', 'ndim': 5, 'shape': (None, None, None, None, None), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'workmem2', 'ndim': 3, 'shape': (None, None, None), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'sizes', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'DYDummy', 'ndim': 4, 'shape': (None, None, None, None), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'initialState', 'ndim': 4, 'shape': (None, None, None, None), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'initialOutput', 'ndim': 4, 'shape': (None, None, None, None), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'iteration', 'ndim': 1, 'shape': (None,), 'need_contiguous': True, 'gradient': 'disconnected'})
out_info = ({'name': 'CompleteY', 'ndim': 4, 'shape': ((0, 0), (0, 1), (0, 2), (1, 0)), 'need_contiguous': True}, {'name': 'H', 'ndim': 4, 'shape': ((0, 0), (0, 1), (0, 2), (3, 1)), 'need_contiguous': True})
@classmethod
def grad_input_map(cls, X, V_h, V_v, W, b, ptr_storage_fwd, ptr_storage_bwd, valid, workmem, workmem2, sizes, DYDummy, initialState, initialOutput, iteration, CompleteY, H, DCompleteY, DH):
return (X, V_h, V_v, W, b, ptr_storage_fwd, ptr_storage_bwd, valid, workmem, workmem2, sizes, DYDummy, initialState, initialOutput, iteration, CompleteY, H, DCompleteY, DH)
@classmethod
def map_layer_inputs_to_op(cls, Zs, Zt, V_h, V_v, W, b, ptr_storage):
    """
    Intentionally unsupported for this op: always fails with AssertionError.
    NOTE(review): the signature presumably mirrors the layer-style input
    interface of sibling LSTM op classes -- confirm before implementing.
    """
    assert False
# CUDA/C++ helper code for the 2D-LSTM op. Keys are numbered so the snippets
# are emitted in a fixed order (helpers defined before their users).
# NOTE(review): these strings are runtime data compiled by the native-op build
# machinery -- their content is deliberately left unchanged (only the Python
# dict layout differs; whitespace inside the C code is not significant).
c_extra_support_code = {
    # Broadcasts vector v into dest, repeating it nCopies times (used to init H with the bias b).
    '01_repvec': '\n    DEF_KERNEL\n    void repvec(const float * v, int vlen, int nCopies, float * dest)\n    {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while (idx < vlen * nCopies)\n      {\n        dest[idx] = v[idx % vlen];\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n    ',
    # Fills dst with repeated copies of the bias vector b via the repvec kernel.
    '02_fillmat': '\n    void fillmat(OpKernelContext* context, const Ndarray * b, Ndarray * dst)\n    {\n      const float * data_b = Ndarray_DEV_DATA(b);\n      float * data_dst = Ndarray_DEV_DATA(dst);\n      Ndarray_DIMS_Type dims_b = Ndarray_HOST_DIMS(b);\n      int dims_dst[2];\n      lastTwoDims(dst, dims_dst);\n      assert(dims_b[0] == dims_dst[1]);\n      start_dev_kernel(repvec, (\n        data_b,\n        dims_dst[1],\n        Ndarray_SIZE(dst)/dims_dst[1],\n        data_dst\n      ));\n    }\n    ',
    # Returns a device pointer into a 2d/4d/5d tensor at position (y, x[, outer_dim]).
    '03_data_ptr': '\n    // if nd is 2 then assume a weight matrix and just return beginning of data\n    // else nd should be 3 and we pick the x part\n    float* data_ptr(const Ndarray* a, int y, int x, int outer_dim=0) {\n      assert(Ndarray_NDIM(a) == 2 || Ndarray_NDIM(a) == 4 || Ndarray_NDIM(a) == 5);\n      if(Ndarray_NDIM(a) == 2)\n        return Ndarray_DEV_DATA(a);\n      else if(Ndarray_NDIM(a) == 4) {\n        Ndarray_DIMS_Type dims = Ndarray_HOST_DIMS(a);\n        return Ndarray_DEV_DATA(a)\n          + y * dims[1] * dims[2] * dims[3]\n          + x * dims[2] * dims[3]; // row-major or minor?\n      }\n      else {\n        Ndarray_DIMS_Type dims = Ndarray_HOST_DIMS(a);\n        return Ndarray_DEV_DATA(a)\n          + outer_dim * dims[1] * dims[2] * dims[3] * dims[4]\n          + y * dims[2] * dims[3] * dims[4]\n          + x * dims[3] * dims[4];\n      }\n    }\n\n    float * data_ptr(Ndarray * a, int y, int x, int outer_dim=0)\n    {\n      const Ndarray * ca = a;\n      return const_cast<float *>(data_ptr(ca, y, x, outer_dim));\n    }\n    ',
    # Batched affine update C += A*B over a list of (y, x) positions (one diagonal), via batched sgemm.
    '04_affine_y_x_batched_onedir': "\n    // ys and xs: base indices, offset by y_A, x_A (-1,0,1)\n    void affine_y_x_batched_onedir(OpKernelContext* context, int y_A, int x_A,\n      const Ndarray * A1,\n      const Ndarray * B1,\n      Ndarray * C1,\n      const std::vector<int>& ys, const std::vector<int>& xs, Ndarray * ptr_storage, int height, int width,\n      cudaStream_t stream = 0, bool transpose_A=false, bool transpose_B=false)\n    {\n      const int batch_size = ys.size();\n      if(batch_size == 0)\n      {\n        return;\n      }\n      std::vector<const float*> ABC_ptrs(3 * 1 * batch_size); //content layout: 3x1xbatch_size (3: A,B,C, 1: dirs)\n\n      for(int i = 0; i < batch_size; ++i)\n      {\n        //A\n        //y not flipped, x not flipped\n        ABC_ptrs[0 * 1 * batch_size + 0 * batch_size + i] = data_ptr(A1, y_A + ys[i], x_A + xs[i]);\n\n        //B\n        //index doesent matter here, as B is only 2dimensional\n        ABC_ptrs[1 * 1 * batch_size + 0 * batch_size + i] = data_ptr(B1, 0, 0);\n\n        //we write the result (C) in the same destination (y,x) as the source (A), so we don't need to flip later\n        //C\n        //y not flipped, x not flipped\n        ABC_ptrs[2 * 1 * batch_size + 0 * batch_size + i] = data_ptr(C1, ys[i], xs[i]);\n      }\n      const float ** ptr_storage_data = reinterpret_cast<const float**>(&(ABC_ptrs[0]));\n      const float ** A_ptrs_data = (const float**) ptr_storage_data + 0 * 1 * batch_size;\n      const float ** B_ptrs_data = (const float**) ptr_storage_data + 1 * 1 * batch_size;\n      const float ** C_ptrs_data = ptr_storage_data + 2 * 1 * batch_size;\n\n      int A_dim[2], B_dim[2];\n      lastTwoDims(A1, A_dim);\n      lastTwoDims(B1, B_dim);\n      int ldB = B_dim[1];\n      int ldA = A_dim[1];\n      char transA = transpose_A ? 'T' : 'N';\n      char transB = transpose_B ? 'T' : 'N';\n      if (transpose_A)\n      {\n        std::swap(A_dim[0], A_dim[1]);\n      }\n      if (transpose_B)\n      {\n        std::swap(B_dim[0], B_dim[1]);\n      }\n\n      const float alpha = 1;\n      const float beta = 1;\n\n      Ndarray_sgemm_batched(\n        transB, transA, B_dim[1], A_dim[0], A_dim[1], &alpha,\n        B_ptrs_data, ldB, A_ptrs_data, ldA, &beta,\n        C_ptrs_data, B_dim[1], 1 * batch_size, batch_size == 1);\n    }\n    ",
    # Forward cell kernel: applies gate nonlinearities and the 2D cell update (y- and x-predecessor states, mixed by the lambda gate).
    '05_lstm_stable_cell_kernel_batched': '\n    DEF_KERNEL\n    void lstm_stable_cell_kernel_batched(float ** datas, const float ** old_state_ys, const float ** old_state_xs,\n      float ** outputs, const float ** valids, int n_outer_batch, int n_cells, int n_minibatch)\n    {\n      //layout (for every outer batch):\n      //data[0*n_cells..1*n_cells-1] : input gate\n      //data[1*n_cells..2*n_cells-1] : forget gate\n      //data[2*n_cells..3*n_cells-1] : lambda gate\n      //data[3*n_cells..4*n_cells-1] : output gate\n      //data[5*n_cells..6*n_cells-1] : cell state\n      //output[0*n_cells..1*n_cells-1]: cell output\n      //valids: either 1.0 or 0.0, indicating if the current (y,x) position\n      //  is still inside the image in this minibatch\n      //repeated for every mini-batch\n\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while (idx < n_outer_batch * n_cells * n_minibatch)\n      {\n        int size_per_outer_batch = n_cells * n_minibatch;\n        int outer_batch_idx = idx / size_per_outer_batch;\n        float * data = datas[outer_batch_idx];\n        const float * old_state_y = old_state_ys[outer_batch_idx];\n        const float * old_state_x = old_state_xs[outer_batch_idx];\n        float * output = outputs[outer_batch_idx];\n        const float * valid = valids[outer_batch_idx];\n\n        int inner_idx = idx % size_per_outer_batch;\n        int minibatch_idx = inner_idx / n_cells;\n        int batch_offset = minibatch_idx * 5 * n_cells;\n        int cell_offset = inner_idx % n_cells;\n        int start = batch_offset + cell_offset;\n\n        float valid_batch = valid[minibatch_idx];\n\n        //input, forget and output gates\n        float inpGate = 1.f / (1.f + expf(-data[start]));\n        float fgtGate = 1.f / (1.f + expf(-data[start + n_cells]));\n        float lambdaGate = 1.f / (1.f + expf(-data[start + 2 * n_cells]));\n        float outGate = 1.f / (1.f + expf(-data[start + 3 * n_cells]));\n        float state = inpGate * tanhf(data[start + 4 * n_cells]);\n        if (old_state_y)\n        {\n          state += fgtGate * lambdaGate * old_state_y[start];\n        }\n        if (old_state_x)\n        {\n          state += fgtGate * (1.0f - lambdaGate) * old_state_x[start];\n        }\n        state *= valid_batch;\n\n        //cell output\n        output[inner_idx] = outGate * tanhf(state) * valid_batch;\n\n        data[start] = inpGate;\n        data[start + n_cells] = fgtGate;\n        data[start + 2 * n_cells] = lambdaGate;\n        data[start + 3 * n_cells] = outGate;\n        data[start + 4 * n_cells] = state;\n\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n    ',
    # Host-side driver for one diagonal of the forward pass: builds per-position pointer/valid tables and launches the fwd cell kernel.
    '06_do_lstm_batched_onedir': '\n    // H, CompleteY, ys, xs, ptr_storage\n    void do_lstm_batched_onedir(\n      OpKernelContext* context, Ndarray* H, Ndarray* initialState, float iteration, Ndarray* completeOut,\n      const std::vector<int>& ys, const std::vector<int>& xs,\n      Ndarray* ptr_storage, Ndarray* valid_storage, Ndarray* sizes)\n    {\n      int n_outer_batch = ys.size();\n      Ndarray_DIMS_Type H_dims = Ndarray_HOST_DIMS(H);\n      int height = H_dims[0];\n      int width = H_dims[1];\n      int n_minibatch = H_dims[2];\n      int n_cells = H_dims[3] / 5;\n      assert(H_dims[3] % 5 == 0); //4 gates + cell\n\n      std::vector<float*> ptrs(1 * 5 * n_outer_batch); //1 dirs * 5 arrays\n      std::vector<float> valid(1 * n_minibatch * n_outer_batch, 1.0f);\n\n      float* h_sizes; // the sizes array is stored on the GPU, we have to copy it to the CPU\n      int dsize =\n        (n_outer_batch) * (n_minibatch) * sizeof(float) * 2; // (*2), because we have 2 (height, width) numbers\n      h_sizes = (float*)malloc(dsize);\n      HANDLE_ERROR(cudaMemcpy(h_sizes, Ndarray_DEV_DATA(sizes), dsize, cudaMemcpyDeviceToHost));\n\n      for(int i = 0; i < n_outer_batch; ++i)\n      {\n        int y = ys[i];\n        int x = xs[i];\n\n        //fill valid\n        for(int n = 0; n < n_minibatch; ++n) // iterates through all examples in the current batch\n        {\n          float img_height = *(h_sizes + 2*n);\n          float img_width = *(h_sizes + 2*n +1);\n\n          valid[i * 1 * n_minibatch + 0 * n_minibatch + n] = float(y < img_height && x < img_width);\n        }\n\n        //y not flipped, x not flipped\n        float * data_H = data_ptr(H, y, x);\n\n        //y not flipped, x not flipped\n        float * data_old_state_y;\n        data_old_state_y = y > 0 ? data_ptr(H, y - 1, x) + 4 * n_cells : data_ptr(initialState, 0, x) + 4 * n_cells;\n\n        //y not flipped, x not flipped\n        float * data_old_state_x = x > 0 ? data_ptr(H, y, x - 1) + 4 * n_cells : 0;\n\n        //y not flipped, x not flipped\n        float * data_out = data_ptr(completeOut, y, x);\n\n        float * valid = Ndarray_DEV_DATA(valid_storage) + i * 1 * n_minibatch + 0 * n_minibatch;\n\n        ptrs[0 * 1 * n_outer_batch + 0 * n_outer_batch + i] = data_H;\n        ptrs[1 * 1 * n_outer_batch + 0 * n_outer_batch + i] = data_old_state_y;\n        ptrs[2 * 1 * n_outer_batch + 0 * n_outer_batch + i] = data_old_state_x;\n        ptrs[3 * 1 * n_outer_batch + 0 * n_outer_batch + i] = data_out;\n        ptrs[4 * 1 * n_outer_batch + 0 * n_outer_batch + i] = valid;\n      }\n\n      free(h_sizes);\n\n      HANDLE_ERROR(cudaMemcpy(Ndarray_DEV_DATA(valid_storage), valid.data(),\n        valid.size() * sizeof(float), cudaMemcpyHostToDevice));\n      HANDLE_ERROR(cudaMemcpy(Ndarray_DEV_DATA(ptr_storage), ptrs.data(),\n        ptrs.size() * sizeof(float*), cudaMemcpyHostToDevice));\n      float ** ptr_storage_data = reinterpret_cast<float**>(Ndarray_DEV_DATA(ptr_storage));\n      float ** data_Hs = ptr_storage_data + 0 * 1 * n_outer_batch;\n      const float ** data_old_state_ys = (const float**) ptr_storage_data + 1 * 1 * n_outer_batch;\n      const float ** data_old_state_xs = (const float**) ptr_storage_data + 2 * 1 * n_outer_batch;\n      float ** data_outs = ptr_storage_data + 3 * 1 * n_outer_batch;\n      const float ** data_valids = (const float**) (ptr_storage_data + 4 * 1 * n_outer_batch);\n\n      start_dev_kernel(lstm_stable_cell_kernel_batched, (\n        data_Hs,\n        data_old_state_ys,\n        data_old_state_xs,\n        data_outs,\n        data_valids,\n        1 * n_outer_batch,\n        n_cells,\n        n_minibatch\n      ));\n    }\n    ',
    # Backward cell kernel: recomputes gate deltas and state epsilons from the stored activations.
    '07_lstm_bwd_stable_cell_kernel_batched': '\n    DEF_KERNEL\n    void lstm_bwd_stable_cell_kernel_batched(float ** deltas, const float ** epsilons,\n      const float ** next_epsilon_ys, const float ** next_epsilon_xs, float ** epsilon_ys, float ** epsilon_xs,\n      const float ** last_state_ys, const float ** last_state_xs, const float ** Ys, const float ** valids,\n      int n_outer_batch, int n_cells, int n_minibatch)\n    {\n      //layout (for every outer batch):\n      //delta[0*n_cells..1*n_cells-1] : input gate\n      //delta[1*n_cells..2*n_cells-1] : forget gate\n      //delta[2*n_cells..3*n_cells-1] : lambda gate\n      //delta[3*n_cells..4*n_cells-1] : output gate\n      //delta[4*n_cells..5*n_cells-1] : cell state\n      //epsilon[0*n_cells..1*n_cells-1]: cell output derivative\n      //next_epsilon_y[0*n_cells..1*n_cells-1]: cell state derivative * forget_gate * lambda_gate (of next timestep)\n      //next_epsilon_x[0*n_cells..1*n_cells-1]:\n      //  cell state derivative * forget_gate * (-1*)lambda_gate (of next timestep)\n      //epsilon_y[0*n_cells..1*n_cells-1]:\n      //  cell state derivative * forget_gate * lambda_gate (of current timestep, as output)\n      //epsilon_x[0*n_cells..1*n_cells-1]:\n      //  cell state derivative * forget_gate * (1-lambda_gate) (of current timestep, as output)\n      //valids: either 1.0 or 0.0, indicating if the current (y,x) position\n      //  is still inside the image in this minibatch\n      //repeated for every mini-batch\n\n      float near_zero = 0.00000000001f;\n\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while (idx < n_outer_batch * n_cells * n_minibatch)\n      {\n        int size_per_outer_batch = n_cells * n_minibatch;\n        int outer_batch_idx = idx / size_per_outer_batch;\n        const float * valid = valids[outer_batch_idx];\n\n        float * delta = deltas[outer_batch_idx];\n        const float * epsilon = epsilons[outer_batch_idx];\n        const float * next_epsilon_y = next_epsilon_ys[outer_batch_idx];\n        const float * next_epsilon_x = next_epsilon_xs[outer_batch_idx];\n        float * epsilon_y = epsilon_ys[outer_batch_idx];\n        float * epsilon_x = epsilon_xs[outer_batch_idx];\n        const float * last_state_y = last_state_ys[outer_batch_idx];\n        const float * last_state_x = last_state_xs[outer_batch_idx];\n        const float * Y = Ys[outer_batch_idx];\n\n        int inner_idx = idx % size_per_outer_batch;\n        int minibatch_idx = inner_idx / n_cells;\n        int batch_offset = minibatch_idx * 5 * n_cells;\n        int cell_offset = inner_idx % n_cells;\n        int start = batch_offset + cell_offset;\n        float valid_batch = valid[minibatch_idx];\n\n        float inpGate = delta[start];\n        float fgtGate = delta[start + n_cells];\n        float lambdaGate = delta[start + 2 * n_cells];\n        float outGate = delta[start + 3 * n_cells];\n        float state = delta[start + 4 * n_cells];\n        float lastState_y = last_state_y ? last_state_y[start] : 0.f;\n        float lastState_x = last_state_x ? last_state_x[start] : 0.f;\n        float eps = epsilon[inner_idx];\n\n        //avoid division by 0\n        float gc = 0.f; //g(c(t))\n        float gzc = 0.f; //g(z_c(t))\n        if (outGate < -near_zero || outGate > near_zero)\n        {\n          gc = Y[inner_idx] / outGate;\n        }\n\n        if (inpGate < -near_zero || inpGate > near_zero)\n        {\n          gzc = (state - fgtGate * lambdaGate * lastState_y - fgtGate * (1.0f - lambdaGate) * lastState_x) / inpGate;\n        }\n\n        //delta_output\n        delta[start + 3 * n_cells] = outGate * (1.f - outGate) * gc * eps * valid_batch;\n\n        //epsilon_c\n        float epsilon_c = (1.f - (gc * gc)) * outGate * eps;\n        if (next_epsilon_y)\n        {\n          epsilon_c += next_epsilon_y[inner_idx];\n        }\n        if (next_epsilon_x)\n        {\n          epsilon_c += next_epsilon_x[inner_idx];\n        }\n\n        //TODO: clip epsilon_c?\n        //epsilon_c = max(epsilon_c, -10.f);\n        //epsilon_c = min(epsilon_c, 10.f);\n\n        epsilon_y[inner_idx] = epsilon_c * fgtGate * lambdaGate * valid_batch;\n        epsilon_x[inner_idx] = epsilon_c * fgtGate * (1.0f - lambdaGate) * valid_batch;\n\n        //delta_cell\n        delta[start + 4 * n_cells] = inpGate * (1.f - (gzc * gzc)) * epsilon_c * valid_batch;\n\n        //delta_forget\n        delta[start + n_cells] = fgtGate * (1.f - fgtGate) * epsilon_c *\n                                 (lastState_y * lambdaGate + lastState_x * (1.0f - lambdaGate)) * valid_batch;\n\n        //delta_lambda\n        delta[start + 2 * n_cells] = fgtGate * lambdaGate * (1.f - lambdaGate) * epsilon_c\n                                     * (lastState_y - lastState_x) * valid_batch;\n\n        //delta_input\n        delta[start] = inpGate * (1.f - inpGate) * gzc * epsilon_c * valid_batch;\n\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n    ',
    # Host-side driver for one diagonal of the backward pass: pointer/valid tables + bwd cell kernel launch.
    '08_do_lstm_bwd_batched_onedir': '\n    //epsilon are the derivates w.r.t. Z, delta stores the gate and cell activations\n    // and will store the derivatives later\n    void do_lstm_bwd_batched_onedir(OpKernelContext* context, Ndarray * delta1, Ndarray * epsilon1,\n      const Ndarray* CompleteY, Ndarray * workmem1,\n      int height, int width, const std::vector<int>& ys, const std::vector<int>& xs,\n      Ndarray * ptr_storage, Ndarray * valid_storage, Ndarray* sizes, int iteration, cudaStream_t stream=0)\n    {\n      int n_outer_batch = ys.size();\n      int dims[2];\n      lastTwoDims(delta1, dims);\n      assert(dims[1] % 5 == 0); //4 gates + cell\n      int n_cells = dims[1] / 5;\n      int n_minibatch = dims[0];\n\n      std::vector<const float*> ptrs(1 * 10 * n_outer_batch); //1 dirs * 10 arrays\n      std::vector<float> valid(1 * n_minibatch * n_outer_batch, 1.0f);\n\n      float* h_sizes; // the sizes array is stored on the GPU, we have to copy it to the CPU\n      int dsize =\n        (n_outer_batch) * (n_minibatch) * sizeof(float) * 2; // (*2), because we have 2 (height, width) numbers\n      h_sizes = (float*)malloc(dsize);\n      HANDLE_ERROR(cudaMemcpy(h_sizes, Ndarray_DEV_DATA(sizes), dsize, cudaMemcpyDeviceToHost));\n\n      for(int i = 0; i < n_outer_batch; ++i)\n      {\n        int y = ys[i];\n        int x = xs[i];\n        //fill valid\n        for(int n = 0; n < n_minibatch; ++n)\n        {\n          //these are the sizes of a single image in the batch, while height/width are the maximum sizes in the batch\n          float img_height = *(h_sizes + 2*n);\n          float img_width = *(h_sizes + 2*n +1);\n          valid[i * 1 * n_minibatch + 0 * n_minibatch + n] = float(y < img_height && x < img_width);\n        }\n\n        bool botBorder = (y == height-1);\n        bool rightBorder = (x == width-1);\n        int yp1 = y + 1;\n        int xp1 = x + 1;\n        int ym1 = y - 1;\n        int xm1 = x - 1;\n\n        float * data_delta1 = data_ptr(delta1, y, x);\n        const float * data_epsilon1 = data_ptr(epsilon1, y, x);\n        const float * data_next_epsilon_y1 = botBorder ? 0 : data_ptr(workmem1, (iteration-1)%2, x, 0);\n        const float * data_next_epsilon_x1 = rightBorder ? 0 : data_ptr(workmem1, (iteration-1)%2, xp1, 1);\n        float * data_epsilon_y1 = data_ptr(workmem1, iteration%2, x, 0);\n        float * data_epsilon_x1 = data_ptr(workmem1, iteration%2, x, 1);\n        const float * data_last_state_y1 = y > 0 ? data_ptr(delta1, ym1, x) + 4 * n_cells : 0;\n        const float * data_last_state_x1 = x > 0 ? data_ptr(delta1, y, xm1) + 4 * n_cells : 0;\n        const float * data_Y1 = data_ptr(CompleteY, y, x);\n        float * valid1 = Ndarray_DEV_DATA(valid_storage) + i * 1 * n_minibatch + 0 * n_minibatch;\n\n        ptrs[0 * 1 * n_outer_batch + 0 * n_outer_batch + i] = data_delta1;\n        ptrs[1 * 1 * n_outer_batch + 0 * n_outer_batch + i] = data_epsilon1;\n        ptrs[2 * 1 * n_outer_batch + 0 * n_outer_batch + i] = data_next_epsilon_y1;\n        ptrs[3 * 1 * n_outer_batch + 0 * n_outer_batch + i] = data_next_epsilon_x1;\n        ptrs[4 * 1 * n_outer_batch + 0 * n_outer_batch + i] = data_epsilon_y1;\n        ptrs[5 * 1 * n_outer_batch + 0 * n_outer_batch + i] = data_epsilon_x1;\n        ptrs[6 * 1 * n_outer_batch + 0 * n_outer_batch + i] = data_last_state_y1;\n        ptrs[7 * 1 * n_outer_batch + 0 * n_outer_batch + i] = data_last_state_x1;\n        ptrs[8 * 1 * n_outer_batch + 0 * n_outer_batch + i] = data_Y1;\n        ptrs[9 * 1 * n_outer_batch + 0 * n_outer_batch + i] = valid1;\n      }\n\n      free(h_sizes);\n\n      HANDLE_ERROR(cudaMemcpy(Ndarray_DEV_DATA(valid_storage), valid.data(),\n        valid.size() * sizeof(float), cudaMemcpyHostToDevice));\n      HANDLE_ERROR(cudaMemcpy(Ndarray_DEV_DATA(ptr_storage), ptrs.data(),\n        ptrs.size() * sizeof(float*), cudaMemcpyHostToDevice));\n      float ** ptr_storage_data = reinterpret_cast<float**>(Ndarray_DEV_DATA(ptr_storage));\n      float ** data_deltas = ptr_storage_data + 0 * 1 * n_outer_batch;\n      const float ** data_epsilons = (const float**) ptr_storage_data + 1 * 1 * n_outer_batch;\n      const float ** data_next_epsilon_ys = (const float**) ptr_storage_data + 2 * 1 * n_outer_batch;\n      const float ** data_next_epsilon_xs = (const float**) ptr_storage_data + 3 * 1 * n_outer_batch;\n      float ** data_epsilon_ys = ptr_storage_data + 4 * 1 * n_outer_batch;\n      float ** data_epsilon_xs = ptr_storage_data + 5 * 1 * n_outer_batch;\n      const float ** data_last_state_ys = (const float**) ptr_storage_data + 6 * 1 * n_outer_batch;\n      const float ** data_last_state_xs = (const float**) ptr_storage_data + 7 * 1 * n_outer_batch;\n      const float ** data_Ys = (const float**) ptr_storage_data + 8 * 1 * n_outer_batch;\n      const float ** data_valids = (const float**) (ptr_storage_data + 9 * 1 * n_outer_batch);\n\n      start_dev_kernel(lstm_bwd_stable_cell_kernel_batched, (\n        data_deltas,\n        data_epsilons,\n        data_next_epsilon_ys,\n        data_next_epsilon_xs,\n        data_epsilon_ys,\n        data_epsilon_xs,\n        data_last_state_ys,\n        data_last_state_xs,\n        data_Ys,\n        data_valids,\n        1 * n_outer_batch,\n        n_cells,\n        n_minibatch\n      ));\n    }\n    '}
# Forward-pass C/CUDA code: computes H = X*W (+ b via fillmat), then walks the
# image anti-diagonals, adding the recurrent V_h/V_v contributions and running
# the batched cell kernel per diagonal.
# NOTE(review): runtime data compiled by the native-op machinery; content kept
# unchanged. The '%%' suggests this string goes through an outer %-format step.
c_fw_code = '\n    // X*, V_h, V_v, W, b, ptr_storage_fwd, ptr_storage_bwd, valid, workmem, sizes, DYDummy,\n    // initialState, initialOutput, iteration = input_names (*: inplace)\n    // CompleteY, H = output_names\n\n    assert(n_inputs == 15);\n    assert(n_outputs == 2);\n\n    Ndarray* X = inputs[0];\n    Ndarray* V_h = inputs[1];\n    Ndarray* V_v = inputs[2];\n    Ndarray* W = inputs[3];\n    Ndarray* b = inputs[4];\n    Ndarray* ptr_storage_fwd = inputs[5];\n    Ndarray* ptr_storage_bwd = inputs[6]; // not used in fwd\n    Ndarray* valid = inputs[7];\n    Ndarray* workmem = inputs[8]; // not used in fwd\n    Ndarray* workmem2 = inputs[9]; // not used in fwd\n    Ndarray* sizes = inputs[10];\n    Ndarray* DYDummy = inputs[11]; // not used in fwd\n    Ndarray* initialState = inputs[12];\n    Ndarray* initialOutput = inputs[13];\n    Ndarray* iteration = inputs[14];\n\n    assert(sizeof(float) == 4 && "ptr_storage has wrong size if sizeof(float) != 4");\n    assert(sizeof(float*) == 8 && "ptr_storage has wrong size if sizeof(float*) != 8");\n\n    Ndarray* CompleteY = *outputs[0];\n    Ndarray* H = *outputs[1];\n\n    Ndarray_DIMS_Type X_dim = Ndarray_DIMS(X);\n    Ndarray_DIMS_Type W_dim = Ndarray_DIMS(W);\n    Ndarray_DIMS_Type V_dim = Ndarray_DIMS(V_h);\n    assert(W_dim[1] %% 5 == 0 && "W has wrong shape");\n    assert(5 * V_dim[0] == V_dim[1] && "V has wrong shape");\n    assert(W_dim[1] == V_dim[1]);\n    assert(W_dim[0] == X_dim[3]);\n    const long long Y_dim[] = {X_dim[0], X_dim[1], X_dim[2], W_dim[1] / 5};\n    const long long H_dim[] = {X_dim[0], X_dim[1], X_dim[2], W_dim[1]};\n    const long long height = X_dim[0];\n    const long long width = X_dim[1];\n    const long long n_minibatch = X_dim[2];\n    const long long max_diag_size = std::min(height, width);\n    const long long n_diags = width + height - 1;\n\n    //H = XW (+ b, currently always 0)\n    fillmat(context, b, H);\n    affine_global(X, W, H);\n\n    // The iteration is stored on the GPU, but we need it on the CPU to controll the programm flow (use explicitly\n    // provided previous state/output on first iteration). Maybe this could be optimized by storing the tensor\n    // directly on the CPU?\n    // We only look at the first value of the tensor with shape (batch,), as every entry has the same value by design\n    float h_iteration;\n    HANDLE_ERROR(cudaMemcpy(&h_iteration, Ndarray_DEV_DATA(iteration), 1*sizeof(float), cudaMemcpyDeviceToHost));\n\n    for(long long diag = 0; diag < n_diags; ++diag)\n    {\n      int diag_size = min(diag+1, min((long long) abs(n_diags-diag), min(width, height)));\n      int y_high = min(diag, height-1);\n      int x_low = max(diag-height+1,(long long) 0);\n      std::vector<int> ys_h, xs_h, ys_v, xs_v, ys, xs;\n      for(int idx = 0; idx < diag_size; ++idx)\n      {\n        int y = y_high - idx;\n        int x = x_low + idx;\n        if(x > 0)\n        {\n          ys_h.push_back(y);\n          xs_h.push_back(x);\n        }\n        if(y > 0 || h_iteration >= 1) {\n          ys_v.push_back(y);\n          xs_v.push_back(x);\n        }\n        ys.push_back(y);\n        xs.push_back(x);\n      }\n\n      affine_y_x_batched_onedir(context, 0, -1,\n        CompleteY, V_h, H, ys_h, xs_h, ptr_storage_fwd, height, width);\n\n      // If it\'s not the first iteration, we need to use the explicitly provided initial output\n      if(h_iteration >= 1) {\n        assert(ys_v.size() == 1); // Otherwise, the target length would be != 1, we don\'t support that yet.\n        affine_y_x_batched_onedir(context, 0, 0,\n          initialOutput, V_v, H, ys_v, xs_v, ptr_storage_fwd, height, width);\n      }\n      else {\n        affine_y_x_batched_onedir(context, -1, 0,\n          CompleteY, V_v, H, ys_v, xs_v, ptr_storage_fwd, height, width);\n      }\n\n      do_lstm_batched_onedir(context, H, initialState, h_iteration, CompleteY, ys, xs, ptr_storage_fwd, valid, sizes);\n    }\n    '
# Backward-pass C/CUDA code: walks the anti-diagonals in reverse, propagating
# epsilons through V_h/V_v and the bwd cell kernel, then accumulates the
# weight gradients DW, DX, DV_h, DV_v via affine_global (Db not trained).
# NOTE(review): runtime data compiled by the native-op machinery; kept unchanged.
c_bw_code = "\n    // X, V_h, V_v, W, b, ptr_storage_fwd, ptr_storage_bwd, valid, workmem, workmem2, sizes, DYDummy, initialState,\n    // initialOutput, iteration, CompleteY, H, DCompleteY, DH = inputs\n    // DX, DV_h, DV_v, DW, Db = outputs\n\n    assert(n_inputs == 19);\n    assert(n_outputs == 5);\n\n    Ndarray* X = inputs[0];\n    Ndarray* V_h = inputs[1];\n    Ndarray* V_v = inputs[2];\n    Ndarray* W = inputs[3];\n    Ndarray* b = inputs[4];\n    Ndarray* ptr_storage_fwd = inputs[5]; // not used in bwd\n    Ndarray* ptr_storage_bwd = inputs[6];\n    Ndarray* valid_storage = inputs[7];\n    Ndarray* workmem = inputs[8];\n    Ndarray* workmem2 = inputs[9];\n    Ndarray* sizes = inputs[10];\n    Ndarray* DYDummy = inputs[11];\n    Ndarray* initialState = inputs[12];\n    Ndarray* initialOutput = inputs[13];\n    Ndarray* iteration = inputs[14]; // not used in bwd (only for asserting it's == 0)\n    Ndarray* CompleteY = inputs[15];\n    Ndarray* H = inputs[16];\n    Ndarray* DCompleteY = inputs[17];\n    Ndarray* DH = inputs[18];\n\n    Ndarray* DX = *outputs[0];\n    Ndarray* DV_h = *outputs[1];\n    Ndarray* DV_v = *outputs[2];\n    Ndarray* DW = *outputs[3];\n    Ndarray* Db = *outputs[4];\n\n    Ndarray_DIMS_Type X_dim = Ndarray_HOST_DIMS(X);\n    Ndarray_DIMS_Type Y_dim = Ndarray_HOST_DIMS(CompleteY);\n    Ndarray_DIMS_Type Vh_dim = Ndarray_HOST_DIMS(V_h);\n    const int height = X_dim[0];\n    const int width = X_dim[1];\n    const int n_minibatch = X_dim[2];\n    const int n_diags = width + height - 1;\n    const int max_diag_size = std::min(Y_dim[0], Y_dim[1]);\n\n    Ndarray * delta1 = H;\n    Ndarray * epsilon = DYDummy;\n\n    int size = X_dim[0] * X_dim[1] * X_dim[2] * Vh_dim[0] * sizeof(float);\n    HANDLE_ERROR(cudaMemcpy(Ndarray_DEV_DATA(epsilon), Ndarray_DEV_DATA(DCompleteY), size, cudaMemcpyDeviceToDevice));\n\n    for(int diag = n_diags-1; diag >= 0; --diag)\n    {\n      int diag_size = std::min(diag+1, std::min(std::abs(n_diags-diag), std::min(width, height)));\n      int y_high = std::min(diag, height-1);\n      int x_low = std::max(diag-height+1,0);\n      std::vector<int> ys_h, xs_h, ys_v, xs_v, ys, xs;\n      for(int idx = 0; idx < diag_size; ++idx)\n      {\n        int y = y_high - idx;\n        int x = x_low + idx;\n        bool rightBorder = (x == X_dim[1]-1);\n        if(!rightBorder)\n        {\n          ys_h.push_back(y);\n          xs_h.push_back(x);\n        }\n        bool botBorder = (y == X_dim[0]-1);\n        if(!botBorder)\n        {\n          ys_v.push_back(y);\n          xs_v.push_back(x);\n        }\n        ys.push_back(y);\n        xs.push_back(x);\n      }\n\n      affine_y_x_batched_onedir(context, 0, 1, delta1, V_h,\n        epsilon, ys_h, xs_h, ptr_storage_bwd, height, width, 0, false, true);\n      affine_y_x_batched_onedir(context, 1, 0, delta1, V_v,\n        epsilon, ys_v, xs_v, ptr_storage_bwd, height, width, 0, false, true);\n\n      do_lstm_bwd_batched_onedir(\n        context, delta1, epsilon, CompleteY, workmem,\n        X_dim[0], X_dim[2], ys, xs, ptr_storage_bwd, valid_storage, sizes, diag+1);\n    }\n\n    //DW = X^T * delta\n    affine_global(X, delta1, DW, true, false, 0, 0.0f);\n    //important! mind the order, first use X, then update DX, which might be aligned to X\n    //DX = delta * W^T\n    affine_global(delta1, W, DX, false, true, 0, 0.0f);\n\n    // Currently, the bias is not trained\n    //Db = (1 ... 1) * delta\n\n    //copy left/right part to workmem2 and set to 0\n    // (could be done more efficient, but profiling shows, it's not worth it)\n    Ndarray_DIMS_Type H_dim = Ndarray_HOST_DIMS(H);\n    const int block_size = H_dim[2] * H_dim[3];\n    for(int y = 0; y < Y_dim[0]; ++y)\n    {\n      float * workmem2_1_data_ptr = Ndarray_DEV_DATA(workmem2) + y * block_size;\n      float * delta1_data_ptr = data_ptr(delta1, y, 0);\n      HANDLE_ERROR(cudaMemcpy(\n        workmem2_1_data_ptr, delta1_data_ptr, block_size * sizeof(float), cudaMemcpyDeviceToDevice));\n      HANDLE_ERROR(cudaMemset(delta1_data_ptr, 0, sizeof(float) * H_dim[2] * H_dim[3]));\n    }\n\n    //DV_h = Y[0..end-1]^T * delta[1..end]\n    affine_global(CompleteY, delta1, DV_h, true, false, 1, 0.0f);\n\n    //copy left/right part back\n    for(int y = 0; y < Y_dim[0]; ++y)\n    {\n      float * workmem2_1_data_ptr = Ndarray_DEV_DATA(workmem2) + y * block_size;\n      float * delta1_data_ptr = data_ptr(delta1, y, 0);\n      HANDLE_ERROR(cudaMemcpy(\n        delta1_data_ptr, workmem2_1_data_ptr, block_size * sizeof(float), cudaMemcpyDeviceToDevice));\n    }\n\n    //DV_v = Y[0..end-1]^T * delta[1..end]\n    affine_global(CompleteY, delta1, DV_v, true, false, Y_dim[1], 0.0f);\n    "
# No CPU fallback: the support code above uses CUDA (cudaMemcpy, kernels).
cpu_support = False
# Empty code-version tag -- presumably used by the native-op build cache.
code_version = ()
|
class Chunking(NativeOpGenBase):
    """
    Given an input in 3d (n_time,n_batch,n_dim), we chunk up the time dimension
    in chunks of size chunk_size, every chunk_step frames.
    This results in an 3d output (chunk_size, n_batch * n_chunks, n_dim)
    where n_chunks = floor( max(n_time - chunk_size + chunk_step - 1, 0) / chunk_step ) + 1.
    Examples:
      n_time=1, chunk_size=50, chunk_step=10 -> n_chunks=1
      n_time=49, chunk_size=50, chunk_step=10 -> n_chunks=1
      n_time=50, chunk_size=50, chunk_step=10 -> n_chunks=1
      n_time=51, chunk_size=50, chunk_step=10 -> n_chunks=2
      n_time=60, chunk_size=50, chunk_step=10 -> n_chunks=2
      n_time=61, chunk_size=50, chunk_step=10 -> n_chunks=3
      n_time=99, chunk_size=50, chunk_step=10 -> n_chunks=6
      n_time=100, chunk_size=50, chunk_step=10 -> n_chunks=6
      n_time=101, chunk_size=50, chunk_step=10 -> n_chunks=7
    """
    # Inputs: the data + index to chunk, preallocated output buffers (written
    # in-place via 'want_inplace'), and chunk_params = (chunk_size, chunk_step).
    in_info = ({'name': 'input', 'ndim': 3, 'shape': (None, None, None)}, {'name': 'index', 'ndim': 2, 'shape': (None, None), 'gradient': 'disconnected'}, {'name': 'output_buffer', 'ndim': 3, 'shape': (None, None, None), 'want_inplace': 0, 'gradient': 'disconnected'}, {'name': 'oindex_buffer', 'ndim': 2, 'shape': (None, None), 'want_inplace': 1, 'gradient': 'disconnected'}, {'name': 'chunk_params', 'ndim': 1, 'shape': (2,), 'need_contiguous': True, 'gradient': 'disconnected'})
    # Output shapes are taken from the buffer inputs (entries are
    # (input_idx, dim_idx) references, presumably).
    out_info = ({'name': 'output', 'ndim': 3, 'shape': ((2, 0), (2, 1), (2, 2))}, {'name': 'oindex', 'ndim': 2, 'shape': ((3, 0), (3, 1))})
    # CUDA kernel: one grid-stride loop over output (time, batch) cells; copies
    # the corresponding input frame (and sets oindex=1) if it lies inside the
    # sequence per `index`, else zero-fills. Runtime data -- kept unchanged.
    c_extra_support_code = {'copy_kernel': '\n    DEF_KERNEL\n    void copy_kernel(\n      float* chunk_params,\n      float* input, long in_dim0, long in_dim1, long in_dim2, long in_stride0, long in_stride1, long in_stride2,\n      float* index, long idx_stride0, long idx_stride1,\n      float* output, long out_dim0, long out_dim1, long out_stride0, long out_stride1, long out_stride2,\n      float* oindex, long oidx_stride0, long oidx_stride1\n    ) {\n      assert_cmp(out_dim1 % in_dim1, ==, 0);\n      const long n_chunks = out_dim1 / in_dim1;\n      assert_cmp(n_chunks, >, 0);\n      const long chunk_size = out_dim0;\n      assert_cmp(long(chunk_params[0]), ==, chunk_size);\n      const long chunk_step = long(chunk_params[1]);\n      assert_cmp(chunk_step, >, 0);\n      assert_cmp(chunk_step * (n_chunks - 1) + chunk_size, >=, in_dim0);\n      assert_cmp(chunk_step * (n_chunks - 1), <, in_dim0);\n\n      // Iterate over output (chunked) x/y coordinates.\n      // In an inner loop, we will loop over z.\n      const long max_idx = out_dim0 * out_dim1;\n      for(\n        long idx = threadIdx.x + blockDim.x * blockIdx.x;\n        idx < max_idx;\n        idx += gridDim.x * blockDim.x)\n      {\n        long out_x = idx % out_dim0; // time\n        long out_y = idx / out_dim0; // batch\n\n        long chunk_idx = out_y % n_chunks;\n        long in_y = out_y / n_chunks;\n\n        long in_x = chunk_step * chunk_idx + out_x;\n\n        if(in_x < in_dim0 && index[in_x * idx_stride0 + in_y * idx_stride1] > 0.1) {\n          for(long z = 0; z < in_dim2; ++z)\n            output[out_x * out_stride0 + out_y * out_stride1 + z * out_stride2] =\n              input[in_x * in_stride0 + in_y * in_stride1 + z * in_stride2];\n          oindex[out_x * oidx_stride0 + out_y * oidx_stride1] = 1;\n        }\n        else {\n          for(long z = 0; z < in_dim2; ++z)\n            output[out_x * out_stride0 + out_y * out_stride1 + z * out_stride2] = 0;\n          oindex[out_x * oidx_stride0 + out_y * oidx_stride1] = 0;\n        }\n      }\n    }\n    '}
    # Forward code: shape checks, then a single copy_kernel launch.
    c_fw_code = '\n    assert_cmp(n_inputs, ==, 5);\n    assert_cmp(n_outputs, ==, 2);\n    Ndarray* input = inputs[0];\n    Ndarray* index = inputs[1];\n    Ndarray* chunk_params = inputs[4];\n    Ndarray* output = *outputs[0];\n    Ndarray* oindex = *outputs[1];\n\n    assert_cmp(Ndarray_NDIM(input), ==, 3);\n    assert_cmp(Ndarray_NDIM(index), ==, 2);\n    assert_cmp(Ndarray_DIMS(input)[0], ==, Ndarray_DIMS(index)[0]);\n    assert_cmp(Ndarray_DIMS(input)[1], ==, Ndarray_DIMS(index)[1]);\n    assert_cmp(Ndarray_NDIM(chunk_params), ==, 1);\n    assert_cmp(Ndarray_DIMS(chunk_params)[0], ==, 2);\n    assert_cmp(Ndarray_NDIM(output), ==, 3);\n    assert_cmp(Ndarray_NDIM(oindex), ==, 2);\n    assert_cmp(Ndarray_DIMS(output)[0], ==, Ndarray_DIMS(oindex)[0]);\n    assert_cmp(Ndarray_DIMS(output)[1], ==, Ndarray_DIMS(oindex)[1]);\n    assert_cmp(Ndarray_DIMS(output)[2], ==, Ndarray_DIMS(input)[2]);\n\n    start_dev_kernel(copy_kernel, (\n      Ndarray_DEV_DATA(chunk_params),\n      Ndarray_DEV_DATA(input),\n      Ndarray_DIMS(input)[0],\n      Ndarray_DIMS(input)[1],\n      Ndarray_DIMS(input)[2],\n      Ndarray_STRIDE(input, 0),\n      Ndarray_STRIDE(input, 1),\n      Ndarray_STRIDE(input, 2),\n      Ndarray_DEV_DATA(index),\n      Ndarray_STRIDE(index, 0),\n      Ndarray_STRIDE(index, 1),\n      Ndarray_DEV_DATA(output),\n      Ndarray_DIMS(output)[0],\n      Ndarray_DIMS(output)[1],\n      Ndarray_STRIDE(output, 0),\n      Ndarray_STRIDE(output, 1),\n      Ndarray_STRIDE(output, 2),\n      Ndarray_DEV_DATA(oindex),\n      Ndarray_STRIDE(oindex, 0),\n      Ndarray_STRIDE(oindex, 1)\n    ));\n    HANDLE_LAST_ERROR();\n    '
    code_version = ()

    @staticmethod
    def naive_chunk_start_frames(n_time, chunk_size, chunk_step):
        """
        This is just for documentation / demonstration. Also used by testing code.

        Returns the start frame of every chunk: 0, chunk_step, 2*chunk_step, ...
        until one chunk reaches the sequence end. Always returns at least one
        start frame (0), even for n_time == 0.

        :param int n_time:
        :param int chunk_size:
        :param int chunk_step:
        :rtype: list[int]
        """
        t = 0
        chunk_start_frames = []
        while True:
            chunk_start_frames.append(t)
            if ((t + chunk_size) >= n_time):
                break
            t += chunk_step
        return chunk_start_frames
|
class UnChunking(NativeOpGenBase):
    """
    This reverses the output from `Chunking`, i.e. chunking the time dimension.
    We get a 3d input (chunk_size, n_batch * n_chunks, n_dim)
    and return a 3d output (n_time, n_batch, n_dim)
    where the chunks are of size chunk_size, every chunk_step frames.
    Because of overlaps, we have to combine the overlapping chunks somehow.
    We will do that with a uniform distribution, i.e. take the mean of all overlaps per frame.
    """

    # Input specs (name / ndim / shape plus flags). output/oindex/ofactors buffers are
    # passed in and written in-place (want_inplace). chunk_params is a length-2 vector;
    # the kernel reads it as [chunk_size, chunk_step].
    in_info = ({'name': 'input', 'ndim': 3, 'shape': (None, None, None)}, {'name': 'index', 'ndim': 2, 'shape': (None, None), 'gradient': 'disconnected'}, {'name': 'output_buffer', 'ndim': 3, 'shape': (None, None, None), 'want_inplace': 0, 'gradient': 'disconnected'}, {'name': 'oindex_buffer', 'ndim': 2, 'shape': (None, None), 'want_inplace': 1, 'gradient': 'disconnected'}, {'name': 'ofactors_buffer', 'ndim': 2, 'shape': (None, None), 'want_inplace': 2, 'gradient': 'disconnected'}, {'name': 'chunk_params', 'ndim': 1, 'shape': (2,), 'need_contiguous': True, 'gradient': 'disconnected'})
    # Output specs. Shape entries like (2, 0) presumably reference (input index, dim index),
    # i.e. outputs take the shapes of the passed-in buffers -- TODO confirm in NativeOpGenBase.
    out_info = ({'name': 'output', 'ndim': 3, 'shape': ((2, 0), (2, 1), (2, 2))}, {'name': 'oindex', 'ndim': 2, 'shape': ((3, 0), (3, 1))}, {'name': 'ofactors', 'ndim': 2, 'shape': ((4, 0), (4, 1))})
    # CUDA-style kernel: for every output (time, batch) position it iterates over all
    # chunks overlapping that frame, sums the valid (index > 0.1) contributions and
    # divides by the count (uniform mean). Covered frames get oindex=1 and ofactors=1/c;
    # uncovered frames get zeros, oindex=0, ofactors=1.
    c_extra_support_code = {'unchunk_kernel': '\n DEF_KERNEL\n void unchunk_kernel(\n float* chunk_params,\n float* input, long in_dim0, long in_dim1, long in_dim2, long in_stride0, long in_stride1, long in_stride2,\n float* index, long idx_stride0, long idx_stride1,\n float* output, long out_dim0, long out_dim1, long out_stride0, long out_stride1, long out_stride2,\n float* oindex, long oidx_stride0, long oidx_stride1,\n float* ofactors, long ofac_stride0, long ofac_stride1\n ) {\n assert_cmp(in_dim1 % out_dim1, ==, 0);\n const long n_chunks = in_dim1 / out_dim1;\n assert_cmp(n_chunks, >, 0);\n const long chunk_size = in_dim0;\n assert_cmp(long(chunk_params[0]), ==, chunk_size);\n const long chunk_step = long(chunk_params[1]);\n assert_cmp(chunk_step, >, 0);\n assert_cmp(chunk_step * (n_chunks - 1) + chunk_size, >=, out_dim0);\n assert_cmp(chunk_step * (n_chunks - 1), <, out_dim0);\n\n // Iterate over output (unchunked) x/y coordinates.\n // In an inner loop, we will loop over z.\n const long max_idx = out_dim0 * out_dim1;\n for(\n long idx = threadIdx.x + blockDim.x * blockIdx.x;\n idx < max_idx;\n idx += gridDim.x * blockDim.x)\n {\n long out_x = idx % out_dim0; // time\n long out_y = idx / out_dim0; // batch\n\n float c = 0;\n for(long z = 0; z < in_dim2; ++z)\n output[out_x * out_stride0 + out_y * out_stride1 + z * out_stride2] = 0;\n\n // in_x = out_x - chunk_step * chunk_idx,\n // thus in_x < 0 when chunk_idx * chunk_step > out_x,\n // and in_x >= chunk_size when chunk_idx * chunk_step <= out_x - chunk_size,\n // thus we need chunk_idx <= out_x / chunk_step,\n // and chunk_idx > (out_x - chunk_size) / chunk_step.\n // Examples:\n // out_x=0, chunk_size=10, chunk_step=4 -> chunk_idx_start,end=0,1\n // out_x=3, chunk_size=10, chunk_step=4 -> chunk_idx_start,end=0,1\n // out_x=4, chunk_size=10, chunk_step=4 -> chunk_idx_start,end=0,2\n // out_x=7, chunk_size=10, chunk_step=4 -> chunk_idx_start,end=0,2\n // out_x=8, chunk_size=10, chunk_step=4 -> chunk_idx_start,end=0,3\n // out_x=9, chunk_size=10, chunk_step=4 -> chunk_idx_start,end=0,3\n // out_x=10, chunk_size=10, chunk_step=4 -> chunk_idx_start,end=1,3\n // out_x=11, chunk_size=10, chunk_step=4 -> chunk_idx_start,end=1,3\n // out_x=12, chunk_size=10, chunk_step=4 -> chunk_idx_start,end=1,4\n // out_x=13, chunk_size=10, chunk_step=4 -> chunk_idx_start,end=1,4\n // out_x=14, chunk_size=10, chunk_step=4 -> chunk_idx_start,end=2,4\n long chunk_idx_start = (out_x - chunk_size + chunk_step) / chunk_step;\n if(chunk_idx_start < 0) chunk_idx_start = 0;\n long chunk_idx_end = out_x / chunk_step + 1;\n if(chunk_idx_end > n_chunks) chunk_idx_end = n_chunks;\n assert_cmp(chunk_idx_start, <, chunk_idx_end);\n for(long chunk_idx = chunk_idx_start; chunk_idx < chunk_idx_end; ++chunk_idx) {\n long in_y = out_y * n_chunks + chunk_idx;\n long in_x = out_x - chunk_step * chunk_idx;\n assert_cmp(in_x, >=, 0);\n assert_cmp(in_x, <, chunk_size);\n if(index[in_x * idx_stride0 + in_y * idx_stride1] > 0.1) {\n c += 1;\n for(long z = 0; z < in_dim2; ++z)\n output[out_x * out_stride0 + out_y * out_stride1 + z * out_stride2] +=\n input[in_x * in_stride0 + in_y * in_stride1 + z * in_stride2];\n }\n }\n\n if(c > 0.1) {\n for(long z = 0; z < in_dim2; ++z)\n output[out_x * out_stride0 + out_y * out_stride1 + z * out_stride2] /= c;\n oindex[out_x * oidx_stride0 + out_y * oidx_stride1] = 1;\n ofactors[out_x * ofac_stride0 + out_y * ofac_stride1] = 1.0 / c;\n } else {\n oindex[out_x * oidx_stride0 + out_y * oidx_stride1] = 0;\n ofactors[out_x * ofac_stride0 + out_y * ofac_stride1] = 1.0;\n }\n }\n }\n '}
    # C++ forward driver code: validates ndim/shape of the 6 inputs / 3 outputs,
    # then launches unchunk_kernel with data pointers, dims and strides.
    c_fw_code = '\n assert_cmp(n_inputs, ==, 6);\n assert_cmp(n_outputs, ==, 3);\n Ndarray* input = inputs[0];\n Ndarray* index = inputs[1];\n Ndarray* chunk_params = inputs[5];\n Ndarray* output = *outputs[0];\n Ndarray* oindex = *outputs[1];\n Ndarray* ofactors = *outputs[2];\n\n assert_cmp(Ndarray_NDIM(input), ==, 3);\n assert_cmp(Ndarray_NDIM(index), ==, 2);\n assert_cmp(Ndarray_DIMS(input)[0], ==, Ndarray_DIMS(index)[0]);\n assert_cmp(Ndarray_DIMS(input)[1], ==, Ndarray_DIMS(index)[1]);\n assert_cmp(Ndarray_NDIM(chunk_params), ==, 1);\n assert_cmp(Ndarray_DIMS(chunk_params)[0], ==, 2);\n assert_cmp(Ndarray_NDIM(output), ==, 3);\n assert_cmp(Ndarray_NDIM(oindex), ==, 2);\n assert_cmp(Ndarray_NDIM(ofactors), ==, 2);\n assert_cmp(Ndarray_DIMS(output)[0], ==, Ndarray_DIMS(oindex)[0]);\n assert_cmp(Ndarray_DIMS(output)[1], ==, Ndarray_DIMS(oindex)[1]);\n assert_cmp(Ndarray_DIMS(output)[2], ==, Ndarray_DIMS(input)[2]);\n assert_cmp(Ndarray_DIMS(oindex)[0], ==, Ndarray_DIMS(ofactors)[0]);\n assert_cmp(Ndarray_DIMS(oindex)[1], ==, Ndarray_DIMS(ofactors)[1]);\n\n start_dev_kernel(unchunk_kernel, (\n Ndarray_DEV_DATA(chunk_params),\n Ndarray_DEV_DATA(input),\n Ndarray_DIMS(input)[0],\n Ndarray_DIMS(input)[1],\n Ndarray_DIMS(input)[2],\n Ndarray_STRIDE(input, 0),\n Ndarray_STRIDE(input, 1),\n Ndarray_STRIDE(input, 2),\n Ndarray_DEV_DATA(index),\n Ndarray_STRIDE(index, 0),\n Ndarray_STRIDE(index, 1),\n Ndarray_DEV_DATA(output),\n Ndarray_DIMS(output)[0],\n Ndarray_DIMS(output)[1],\n Ndarray_STRIDE(output, 0),\n Ndarray_STRIDE(output, 1),\n Ndarray_STRIDE(output, 2),\n Ndarray_DEV_DATA(oindex),\n Ndarray_STRIDE(oindex, 0),\n Ndarray_STRIDE(oindex, 1),\n Ndarray_DEV_DATA(ofactors),\n Ndarray_STRIDE(ofactors, 0),\n Ndarray_STRIDE(ofactors, 1)\n ));\n HANDLE_LAST_ERROR();\n '
    # Empty code-version marker for this op.
    code_version = ()
|
class SubtensorBatchedIndex(NativeOpGenBase):
    """
    Consider you have:
      idx: 2d (n_time, n_batch) -> idx (in [0..n_dim-1])
      x: 3d (n_time, n_batch, n_dim)
    Then, this op will calculate:
      x[..., idx[...]]: 2d (n_time, n_batch)
    """

    # Input specs. x receives the gradient; its bw_in_var/want_inplace setup lets the
    # backward pass write Dx in-place. idx carries no gradient.
    in_info = ({'name': 'x', 'ndim': 3, 'shape': (None, None, None), 'bw_in_var': {'want_inplace': 0}}, {'name': 'idx', 'ndim': 2, 'shape': (None, None), 'gradient': 'disconnected'})
    # Output spec: y is 2d; shape entries (0, 0)/(0, 1) presumably reference dims 0/1
    # of input 0 (x) -- TODO confirm in NativeOpGenBase.
    out_info = ({'name': 'y', 'ndim': 2, 'shape': ((0, 0), (0, 1))},)

    @classmethod
    def grad_input_map(cls, x, idx, y, DY):
        """
        Map forward inputs/outputs and output gradients to the inputs of the
        backward op (see `c_bw_code`, which reads x, idx, Dy in this order).
        """
        return (x, idx, DY)

    # CUDA-style kernels. select_kernel gathers y[t,b] = x[t,b,idx[t,b]] with idx
    # clipped into [0, x_dim2). select_bw_kernel scatters Dy back into Dx at the same
    # (clipped) positions; Dx is zeroed by the driver code before the launch.
    c_extra_support_code = {'select_kernel': '\n DEF_KERNEL\n void select_kernel(\n float* x, long x_dim0, long x_dim1, long x_dim2, long x_stride0, long x_stride1, long x_stride2,\n float* index, long idx_stride0, long idx_stride1,\n float* y, long y_stride0, long y_stride1\n ) {\n const long max_idx = x_dim0 * x_dim1;\n for(\n long idx = threadIdx.x + blockDim.x * blockIdx.x;\n idx < max_idx;\n idx += gridDim.x * blockDim.x)\n {\n long d0 = idx % x_dim0;\n long d1 = idx / x_dim0;\n long d2 = long(index[d0 * idx_stride0 + d1 * idx_stride1]);\n if(d2 < 0) d2 = 0;\n if(d2 >= x_dim2) d2 = x_dim2 - 1;\n y[d0 * y_stride0 + d1 * y_stride1] = x[d0 * x_stride0 + d1 * x_stride1 + d2 * x_stride2];\n }\n }\n ', 'select_bw_kernel': '\n DEF_KERNEL\n void select_bw_kernel(\n float* Dx, long Dx_dim0, long Dx_dim1, long Dx_dim2, long Dx_stride0, long Dx_stride1, long Dx_stride2,\n float* index, long idx_stride0, long idx_stride1,\n float* Dy, long Dy_stride0, long Dy_stride1\n ) {\n const long max_idx = Dx_dim0 * Dx_dim1;\n for(\n long idx = threadIdx.x + blockDim.x * blockIdx.x;\n idx < max_idx;\n idx += gridDim.x * blockDim.x)\n {\n long d0 = idx % Dx_dim0;\n long d1 = idx / Dx_dim0;\n long d2 = long(index[d0 * idx_stride0 + d1 * idx_stride1]);\n if(d2 < 0) d2 = 0;\n if(d2 >= Dx_dim2) d2 = Dx_dim2 - 1;\n Dx[d0 * Dx_stride0 + d1 * Dx_stride1 + d2 * Dx_stride2] = Dy[d0 * Dy_stride0 + d1 * Dy_stride1];\n }\n }\n '}
    # C++ forward driver code: shape checks, then launches select_kernel.
    c_fw_code = '\n assert_cmp(n_inputs, ==, 2);\n assert_cmp(n_outputs, ==, 1);\n Ndarray* x = inputs[0];\n Ndarray* idx = inputs[1];\n Ndarray* y = *outputs[0];\n\n assert_cmp(Ndarray_NDIM(x), ==, 3);\n assert_cmp(Ndarray_NDIM(idx), ==, 2);\n assert_cmp(Ndarray_DIMS(x)[0], ==, Ndarray_DIMS(idx)[0]);\n assert_cmp(Ndarray_DIMS(x)[1], ==, Ndarray_DIMS(idx)[1]);\n assert_cmp(Ndarray_NDIM(y), ==, 2);\n assert_cmp(Ndarray_DIMS(y)[0], ==, Ndarray_DIMS(idx)[0]);\n assert_cmp(Ndarray_DIMS(y)[1], ==, Ndarray_DIMS(idx)[1]);\n\n start_dev_kernel(select_kernel, (\n Ndarray_DEV_DATA(x),\n Ndarray_DIMS(x)[0],\n Ndarray_DIMS(x)[1],\n Ndarray_DIMS(x)[2],\n Ndarray_STRIDE(x, 0),\n Ndarray_STRIDE(x, 1),\n Ndarray_STRIDE(x, 2),\n Ndarray_DEV_DATA(idx),\n Ndarray_STRIDE(idx, 0),\n Ndarray_STRIDE(idx, 1),\n Ndarray_DEV_DATA(y),\n Ndarray_STRIDE(y, 0),\n Ndarray_STRIDE(y, 1)\n ));\n HANDLE_LAST_ERROR();\n '
    # C++ backward driver code: zeroes Dx, then scatters Dy via select_bw_kernel.
    c_bw_code = '\n assert_cmp(n_inputs, ==, 3);\n assert_cmp(n_outputs, ==, 1);\n Ndarray* x = inputs[0];\n Ndarray* idx = inputs[1];\n Ndarray* Dy = inputs[2];\n Ndarray* Dx = *outputs[0]; // inplace on x\n\n assert_cmp(Ndarray_NDIM(x), ==, 3);\n assert_cmp(Ndarray_NDIM(idx), ==, 2);\n assert_cmp(Ndarray_DIMS(x)[0], ==, Ndarray_DIMS(idx)[0]);\n assert_cmp(Ndarray_DIMS(x)[1], ==, Ndarray_DIMS(idx)[1]);\n assert_cmp(Ndarray_NDIM(Dy), ==, 2);\n assert_cmp(Ndarray_DIMS(Dy)[0], ==, Ndarray_DIMS(idx)[0]);\n assert_cmp(Ndarray_DIMS(Dy)[1], ==, Ndarray_DIMS(idx)[1]);\n assert_cmp(Ndarray_NDIM(Dx), ==, 3);\n assert_cmp(Ndarray_DIMS(Dx)[0], ==, Ndarray_DIMS(x)[0]);\n assert_cmp(Ndarray_DIMS(Dx)[1], ==, Ndarray_DIMS(x)[1]);\n assert_cmp(Ndarray_DIMS(Dx)[2], ==, Ndarray_DIMS(x)[2]);\n\n Ndarray_set_zero(Dx);\n start_dev_kernel(select_bw_kernel, (\n Ndarray_DEV_DATA(Dx),\n Ndarray_DIMS(Dx)[0],\n Ndarray_DIMS(Dx)[1],\n Ndarray_DIMS(Dx)[2],\n Ndarray_STRIDE(Dx, 0),\n Ndarray_STRIDE(Dx, 1),\n Ndarray_STRIDE(Dx, 2),\n Ndarray_DEV_DATA(idx),\n Ndarray_STRIDE(idx, 0),\n Ndarray_STRIDE(idx, 1),\n Ndarray_DEV_DATA(Dy),\n Ndarray_STRIDE(Dy, 0),\n Ndarray_STRIDE(Dy, 1)\n ));\n HANDLE_LAST_ERROR();\n '
|
class SparseToDense(NativeOpGenBase):
    """
    Expects a sparse matrix in COOrdinate format,
    where W[s0[i,b],b,s1[i]] = weight[i,b] for all i, and all batches b.
    Will return W (time,batch,dim).
    """

    # Input specs. _initial_W is the dense output buffer, taken in-place
    # (want_inplace=0); all inputs must be contiguous (kernel indexes them flat).
    in_info = ({'name': '_initial_W', 'ndim': 3, 'shape': (None, None, None), 'need_contiguous': True, 'want_inplace': 0}, {'name': 's0', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True}, {'name': 's1', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True}, {'name': 'weight', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True}, {'name': 'mask', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True})
    # Output spec: W takes the shape of input 0 (_initial_W).
    out_info = ({'name': 'W', 'ndim': 3, 'shape': ((0, 0), (0, 1), (0, 2))},)
    # CUDA-style kernel: accumulates weight[i,b] into out[t,b,j] with t = s0[i,b],
    # j = s1[i,b], skipping masked (< 0.1) and out-of-range entries.
    # NOTE(review): the '+=' is a plain read-modify-write, not an atomic add -- if two
    # threads hit the same (t,b,j) the updates could race; confirm that duplicate
    # coordinates cannot occur, or that this is acceptable. Left as-is.
    c_extra_support_code = {'assign_kernel': '\n DEF_KERNEL\n void assign_kernel(\n float* out, float* s0, float* s1, float* w, float* mask,\n long n_sparse_idx, long n_time, long n_batch, long n_dim)\n {\n long max_idx = n_batch * n_sparse_idx;\n for(\n long idx = threadIdx.x + blockDim.x * blockIdx.x;\n idx < max_idx;\n idx += gridDim.x * blockDim.x)\n {\n if(mask[idx] < 0.1) continue;\n long batch = idx % n_batch;\n long t = (long) s0[idx];\n long j = (long) s1[idx];\n float y = w[idx];\n if(t < 0 || t >= n_time) continue; // error somehow?\n if(j < 0 || j >= n_dim) continue; // error somehow?\n long out_idx = t * n_batch * n_dim + batch * n_dim + j;\n out[out_idx] += y;\n }\n }\n '}
    # C++ forward driver code: shape checks for the 5 inputs / 1 output, then launches
    # assign_kernel (note: out_W is the in-place _initial_W buffer; it is not zeroed here).
    c_fw_code = '\n assert(n_inputs == 5);\n assert(n_outputs == 1);\n Ndarray* s0 = inputs[1];\n Ndarray* s1 = inputs[2];\n Ndarray* weight = inputs[3];\n Ndarray* mask = inputs[4];\n Ndarray* out_W = *outputs[0];\n\n assert(Ndarray_NDIM(s0) == 2);\n assert(Ndarray_NDIM(s1) == 2);\n assert(Ndarray_NDIM(weight) == 2);\n assert(Ndarray_NDIM(mask) == 2);\n assert(Ndarray_NDIM(out_W) == 3);\n int n_sparse_idx = Ndarray_DIMS(s0)[0];\n assert(n_sparse_idx == Ndarray_DIMS(s1)[0]);\n assert(n_sparse_idx == Ndarray_DIMS(weight)[0]);\n assert(n_sparse_idx == Ndarray_DIMS(mask)[0]);\n int n_batch = Ndarray_DIMS(s0)[1];\n assert(n_batch == Ndarray_DIMS(s1)[1]);\n assert(n_batch == Ndarray_DIMS(weight)[1]);\n assert(n_batch == Ndarray_DIMS(mask)[1]);\n assert(n_batch == Ndarray_DIMS(out_W)[1]);\n int n_time = Ndarray_DIMS(out_W)[0];\n int n_dim = Ndarray_DIMS(out_W)[2];\n\n start_dev_kernel(assign_kernel, (\n Ndarray_DEV_DATA(out_W),\n Ndarray_DEV_DATA(s0),\n Ndarray_DEV_DATA(s1),\n Ndarray_DEV_DATA(weight),\n Ndarray_DEV_DATA(mask),\n n_sparse_idx, n_time, n_batch, n_dim\n ));\n HANDLE_LAST_ERROR();\n '
|
class MaxAndArgmaxSparse(NativeOpGenBase):
    """
    Expects a sparse matrix in COOrdinate format,
    where W[s0[i,b],s1[i],b] = weight[i,b] for all i, and all batches b.
    It will return the max and argmax for all W[:,:,b]
    over the second axis.
    """

    # Input specs. _out_max/_out_arg are result buffers taken in-place; their initial
    # contents act as the starting maximum (they are not reset by the kernel).
    in_info = ({'name': 's0', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 's1', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'weight', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True}, {'name': 'mask', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': '_out_max', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True, 'want_inplace': 0, 'gradient': 'disconnected'}, {'name': '_out_arg', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True, 'want_inplace': 1, 'gradient': 'disconnected'})
    # Output specs: shapes of the passed-in buffers (inputs 4 and 5).
    out_info = ({'name': 'out_max', 'ndim': 2, 'shape': ((4, 0), (4, 1))}, {'name': 'out_arg', 'ndim': 2, 'shape': ((5, 0), (5, 1))})
    # CUDA-style kernel: one thread per batch entry; scans all sparse entries i
    # sequentially and keeps a running max (and its s1 index) per output frame t=s0[i,b].
    # Masked (< 0.1) and out-of-range entries are skipped.
    c_extra_support_code = {'doit_kernel': '\n DEF_KERNEL\n void doit_kernel(\n long n_batch, long n_in_time, long n_out_time,\n float* s0, float* s1, float* weight, float* mask,\n float* out_max, float* out_arg) {\n long batch_idx = threadIdx.x + blockDim.x * blockIdx.x;\n while(batch_idx < n_batch) {\n for(long i = 0; i < n_in_time; ++i) {\n long idx = i * n_batch + batch_idx;\n if(mask[idx] < 0.1) continue;\n long t = (long) s0[idx];\n long j = (long) s1[idx];\n float w = weight[idx];\n if(t < 0 || t >= n_out_time) continue; // error somehow?\n long out_idx = t * n_batch + batch_idx;\n if(w > out_max[out_idx]) {\n out_max[out_idx] = w;\n out_arg[out_idx] = (float) j;\n }\n }\n batch_idx += gridDim.x * blockDim.x;\n }\n }\n '}
    # C++ forward driver code: shape checks for the 6 inputs / 2 outputs (incl. a guard
    # that out_max and out_arg are distinct buffers), then launches doit_kernel.
    c_fw_code = '\n assert(n_inputs == 6);\n assert(n_outputs == 2);\n Ndarray* s0 = inputs[0];\n Ndarray* s1 = inputs[1];\n Ndarray* weight = inputs[2];\n Ndarray* mask = inputs[3];\n Ndarray* out_max = *outputs[0];\n Ndarray* out_arg = *outputs[1];\n\n assert(Ndarray_NDIM(s0) == 2);\n assert(Ndarray_NDIM(s1) == 2);\n assert(Ndarray_NDIM(weight) == 2);\n assert(Ndarray_NDIM(mask) == 2);\n assert(Ndarray_NDIM(out_max) == 2);\n assert(Ndarray_NDIM(out_arg) == 2);\n int n_in_time = Ndarray_DIMS(s0)[0];\n assert(n_in_time == Ndarray_DIMS(s1)[0]);\n assert(n_in_time == Ndarray_DIMS(weight)[0]);\n assert(n_in_time == Ndarray_DIMS(mask)[0]);\n int n_batch = Ndarray_DIMS(s0)[1];\n assert(n_batch == Ndarray_DIMS(s1)[1]);\n assert(n_batch == Ndarray_DIMS(weight)[1]);\n assert(n_batch == Ndarray_DIMS(mask)[1]);\n assert(n_batch == Ndarray_DIMS(out_arg)[1]);\n assert(n_batch == Ndarray_DIMS(out_max)[1]);\n int n_out_time = Ndarray_DIMS(out_arg)[0];\n assert(n_out_time == Ndarray_DIMS(out_max)[0]);\n assert(out_max != out_arg); // earlier bug in NativeOp\n\n start_dev_kernel(doit_kernel, (\n n_batch, n_in_time, n_out_time,\n Ndarray_DEV_DATA(s0),\n Ndarray_DEV_DATA(s1),\n Ndarray_DEV_DATA(weight),\n Ndarray_DEV_DATA(mask),\n Ndarray_DEV_DATA(out_max),\n Ndarray_DEV_DATA(out_arg)\n ));\n HANDLE_LAST_ERROR();\n '
    # Empty code-version marker for this op.
    code_version = ()
|
class CrossEntropySoftmaxAndGradientZSparse(NativeOpGenBase):
    r"""
    y_target is given in sparse COOrdinate format.
    We will calculate CE[t,b] = \sum_i y_target[t,b,i] * log(softmax(z[t,b])[i]),
    for any timeframe t and batch b,
    and grad(CE[t,b], z[t,b]) = softmax(z[t,b]) - y_target[t,b].
    We also support an index-mask for z, i.e. for the possible [t,b].
    """

    # Input specs: dense logits z with its 2d mask, plus the sparse targets as
    # (t, i, weight, mask) 2d arrays; everything contiguous.
    in_info = ({'name': 'z', 'ndim': 3, 'shape': (None, None, None), 'need_contiguous': True}, {'name': 'z_mask', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True}, {'name': 'y_target_t', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True}, {'name': 'y_target_i', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True}, {'name': 'y_target_w', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True}, {'name': 'y_target_mask', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True})
    # Output specs: CE per (t,b), gradient w.r.t. z, and the per-(t,b) max of z
    # (kept as an intermediate for the softmax).
    out_info = ({'name': 'out_ce', 'ndim': 2, 'shape': ((0, 0), (0, 1))}, {'name': 'out_grad_z', 'ndim': 3, 'shape': ((0, 0), (0, 1), (0, 2))}, {'name': '_out_max_z', 'ndim': 2, 'shape': ((0, 0), (0, 1))})
    # CUDA-style kernels, run in sequence by c_fw_code:
    #   max_kernel     -- per (t,b) max of z over the dim axis (only where mask > 0.1);
    #   softmax_kernel -- writes softmax(z - max_z) into out_grad_z (zeroed where mask <= 0.5);
    #   ce_sm_grad_kernel -- per sparse target entry, accumulates the CE term and
    #                        subtracts y_target from out_grad_z (which still holds softmax(z)).
    c_extra_support_code = {'max_kernel': '\n DEF_KERNEL\n void max_kernel(float* out, float* v, float* mask, long stride, long max_idx) {\n for(\n long idx = threadIdx.x + blockDim.x * blockIdx.x;\n idx < max_idx;\n idx += gridDim.x * blockDim.x)\n {\n if(mask[idx] < 0.1)\n continue;\n long start = idx * stride;\n float last_max = v[start];\n out[idx] = last_max;\n for(long i = 1; i < stride; ++i) {\n float cur = v[start + i];\n if(cur > last_max) {\n last_max = cur;\n out[idx] = cur;\n }\n }\n }\n }\n ', 'softmax_kernel': '\n DEF_KERNEL\n void softmax_kernel(\n float* out_softmax,\n float* z, float* max_z, float* mask,\n long stride, long max_idx)\n {\n for(\n long idx = threadIdx.x + blockDim.x * blockIdx.x;\n idx < max_idx;\n idx += gridDim.x * blockDim.x)\n {\n long start = idx * stride;\n float s = 0;\n for(long i = 0; i < stride; ++i) {\n s += exp(z[start + i] - max_z[idx]);\n }\n if(s < 1e-16) s = 1e-16;\n for(long i = 0; i < stride; ++i) {\n float y = exp(z[start + i] - max_z[idx]) / s;\n out_softmax[start + i] = (mask[idx] > 0.5) ? y : 0;\n }\n }\n }\n ', 'ce_sm_grad_kernel': '\n DEF_KERNEL\n void ce_sm_grad_kernel(\n float* out_ce, float* out_grad_z,\n float* z, float* max_z, float* z_mask,\n float* s0, float* s1, float* w, float* s_mask,\n long n_time, long n_batch, long n_dim, long n_sparse_index)\n {\n long max_idx = n_batch * n_sparse_index;\n for(\n long idx = threadIdx.x + blockDim.x * blockIdx.x;\n idx < max_idx;\n idx += gridDim.x * blockDim.x)\n {\n if(s_mask[idx] < 0.1) continue;\n long batch = idx % n_batch;\n long t = (long) s0[idx];\n long j = (long) s1[idx];\n float y_target = w[idx];\n if(t < 0 || t >= n_time) continue; // error somehow?\n if(j < 0 || j >= n_dim) continue; // error somehow?\n long out_ce_idx = t * n_batch + batch;\n long out_y_idx = t * n_batch * n_dim + batch * n_dim + j;\n // This assumes that out_grad_z is still softmax(z).\n // This also assumes that every [t,j] is only represented once in the sparse data.\n out_ce[out_ce_idx] -= y_target * log(fmax(out_grad_z[out_y_idx], 1e-30f));\n out_grad_z[out_y_idx] -= y_target;\n }\n }\n '}
    # C++ forward driver code: validates all shapes, then runs the three kernels in
    # order (max -> softmax -> ce_sm_grad); out_ce is zeroed before accumulation.
    c_fw_code = '\n assert(n_inputs == 6);\n assert(n_outputs == 3);\n Ndarray* z = inputs[0];\n Ndarray* z_mask = inputs[1];\n Ndarray* s0 = inputs[2];\n Ndarray* s1 = inputs[3];\n Ndarray* w = inputs[4];\n Ndarray* s_mask = inputs[5];\n Ndarray* out_ce = *outputs[0];\n Ndarray* out_grad_z = *outputs[1];\n Ndarray* out_max_z = *outputs[2];\n\n assert(Ndarray_NDIM(z) == 3);\n assert(Ndarray_NDIM(z_mask) == 2);\n assert(Ndarray_NDIM(out_ce) == 2);\n assert(Ndarray_NDIM(out_grad_z) == 3);\n assert(Ndarray_NDIM(out_max_z) == 2);\n assert(Ndarray_NDIM(s0) == 2);\n assert(Ndarray_NDIM(s1) == 2);\n assert(Ndarray_NDIM(w) == 2);\n assert(Ndarray_NDIM(out_ce) == 2);\n int n_time = Ndarray_DIMS(z)[0];\n int n_batch = Ndarray_DIMS(z)[1];\n int n_dim = Ndarray_DIMS(z)[2];\n assert(n_time == Ndarray_DIMS(z_mask)[0]);\n assert(n_time == Ndarray_DIMS(out_ce)[0]);\n assert(n_time == Ndarray_DIMS(out_grad_z)[0]);\n assert(n_time == Ndarray_DIMS(out_max_z)[0]);\n assert(n_batch == Ndarray_DIMS(z_mask)[1]);\n assert(n_batch == Ndarray_DIMS(out_ce)[1]);\n assert(n_batch == Ndarray_DIMS(out_grad_z)[1]);\n assert(n_batch == Ndarray_DIMS(out_max_z)[1]);\n assert(n_batch == Ndarray_DIMS(s0)[1]);\n assert(n_batch == Ndarray_DIMS(s1)[1]);\n assert(n_batch == Ndarray_DIMS(w)[1]);\n assert(n_batch == Ndarray_DIMS(s_mask)[1]);\n assert(n_dim == Ndarray_DIMS(out_grad_z)[2]);\n int n_sparse_index = Ndarray_DIMS(s0)[0];\n assert(n_sparse_index == Ndarray_DIMS(s1)[0]);\n assert(n_sparse_index == Ndarray_DIMS(w)[0]);\n assert(n_sparse_index == Ndarray_DIMS(s_mask)[0]);\n\n start_dev_kernel(max_kernel, (\n Ndarray_DEV_DATA(out_max_z), Ndarray_DEV_DATA(z), Ndarray_DEV_DATA(z_mask),\n n_dim, n_time * n_batch\n ));\n HANDLE_LAST_ERROR();\n Ndarray_set_zero(out_ce);\n start_dev_kernel(softmax_kernel, (\n Ndarray_DEV_DATA(out_grad_z),\n Ndarray_DEV_DATA(z), Ndarray_DEV_DATA(out_max_z), Ndarray_DEV_DATA(z_mask),\n n_dim, n_time * n_batch\n ));\n HANDLE_LAST_ERROR();\n start_dev_kernel(ce_sm_grad_kernel, (\n Ndarray_DEV_DATA(out_ce), Ndarray_DEV_DATA(out_grad_z),\n Ndarray_DEV_DATA(z), Ndarray_DEV_DATA(out_max_z), Ndarray_DEV_DATA(z_mask),\n Ndarray_DEV_DATA(s0), Ndarray_DEV_DATA(s1), Ndarray_DEV_DATA(w), Ndarray_DEV_DATA(s_mask),\n n_time, n_batch, n_dim, n_sparse_index\n ));\n HANDLE_LAST_ERROR();\n '
|
class FastBaumWelchOp(NativeOpGenBase):
'\n inputs:\n :param am_scores: scores in -log space. 3d (time,batch,dim)\n :param edges: edges of the graph (from,to,emission_idx,sequence_idx)\n :param weights: weights of the edges\n outputs:\n :param output: Baum-Welch alignment, scores in -log space. 3d (time,batch,dim), like am_scores\n '
in_info = ({'name': 'am_scores', 'ndim': 3, 'shape': (None, None, None), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'edges', 'ndim': 2, 'shape': (None, None), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'weights', 'ndim': 1, 'shape': (None,), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'start_end_states', 'ndim': 2, 'shape': (2, None), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'index', 'ndim': 2, 'shape': ((0, 0), (0, 1)), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'state_buffer', 'ndim': 2, 'shape': (2, None), 'need_contiguous': True, 'gradient': 'disconnected'})
out_info = ({'name': 'output', 'ndim': 3, 'shape': ((0, 0), (0, 1), (0, 2)), 'need_contiguous': True}, {'name': 'sums', 'ndim': 2, 'shape': ((0, 0), (0, 1)), 'need_contiguous': True})
c_extra_support_code = copy.copy(common_fast_bw_kernels)
c_extra_support_code.update({'100_init_bwd_state_buffer': '\n DEF_KERNEL\n void init_bwd_state_buffer(\n float* states, unsigned* end_states, unsigned t, unsigned max_t, float* index, unsigned index_stride) {\n unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;\n if (index[t * index_stride + idx] == 1.0 && (t == max_t || index[(t + 1) * index_stride + idx] == 0.0)) {\n unsigned state_idx = end_states[idx];\n states[state_idx] = 0.0;\n }\n }\n ', '101_next_frame': '\n DEF_KERNEL\n void next_frame(bool fwd, unsigned num_edges, unsigned num_emissions,\n unsigned* sequence_idxs, unsigned* from_buffer, unsigned* to_buffer, float* weight_buffer,\n unsigned* emission_idxs,\n float* prev_frame, float* next_frame, float* am_scores, float* edge_buffer) {\n unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;\n if (idx >= num_edges) {\n return;\n }\n\n unsigned from = from_buffer [idx];\n float prev_val = prev_frame[from];\n if (isinf(prev_val)) {\n edge_buffer[idx] = INF_F;\n return;\n }\n\n unsigned to = to_buffer [idx];\n unsigned emission_idx = emission_idxs[idx];\n float edge_weight = weight_buffer[idx];\n unsigned sequence_idx = sequence_idxs[idx];\n\n float val = prev_val + edge_weight + am_scores[sequence_idx * num_emissions + emission_idx];\n\n if (fwd) {\n edge_buffer[idx] += val;\n }\n else {\n edge_buffer[idx] += prev_val;\n }\n atomic_prob_add(next_frame + to, val);\n }\n ', '102_normalize': '\n DEF_KERNEL\n void normalize(float* buffer, unsigned* sequence_idxs, unsigned num_edges, unsigned num_seqs, float* sum_output) {\n DEF_SHARED(float, sum);\n\n buffer += blockIdx.x * num_edges;\n\n for (unsigned s = 0u; s < num_seqs; s++) {\n sum[s] = INF_F;\n }\n\n for (unsigned e = 0u; e < num_edges; e++) {\n unsigned s = sequence_idxs[e];\n sum[s] = prob_add(sum[s], buffer[e]);\n }\n\n for (unsigned s = 0ul; s < num_seqs; s++) {\n if (isinf(sum[s])) {\n // if the frame is empty (happens due to batching of seqs with unequal length), set it to 0\n sum_output[blockIdx.x 
* num_seqs + s] = 0.0;\n }\n else {\n sum_output[blockIdx.x * num_seqs + s] = sum[s];\n }\n }\n\n for (unsigned e = 0u; e < num_edges; e++) {\n unsigned s = sequence_idxs[e];\n buffer[e] -= sum[s];\n }\n }\n ', '103_compute_result': '\n DEF_KERNEL\n void compute_result(float* edge_buffer, float* out, unsigned* emission_idxs, unsigned* sequence_idxs,\n unsigned frame_stride, unsigned seq_stride,\n unsigned num_frames, unsigned num_seqs, unsigned num_edges) {\n unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;\n if (idx >= num_frames * num_edges) {\n return;\n }\n\n unsigned e_idx = idx % num_edges;\n unsigned frame = idx / num_edges;\n unsigned emission_idx = emission_idxs[e_idx];\n unsigned seq_idx = sequence_idxs[e_idx];\n float score = edge_buffer[idx];\n\n atomic_prob_add(out + frame * frame_stride + seq_idx * seq_stride + emission_idx, score);\n }\n ', '110_write_alignment_to_file': '\n void write_alignment_to_file(float* d_state_buffer, float* d_index, unsigned index_stride,\n unsigned* d_start_states, unsigned* d_end_states,\n float pruning, unsigned n_frames, unsigned n_seqs, unsigned n_states,\n unsigned batch_idx) {\n std::vector<float> state_buffer((n_frames + 1u) * n_states);\n std::vector<float> index (n_frames * index_stride);\n std::vector<unsigned> start_states(n_seqs);\n std::vector<unsigned> end_states (n_seqs);\n\n //HANDLE_ERROR(cudaMemcpy(\n // state_buffer.data(), d_state_buffer, state_buffer.size() * sizeof(float), cudaMemcpyDeviceToHost));\n //HANDLE_ERROR(cudaMemcpy(\n // index.data(), d_index, index.size() * sizeof(float), cudaMemcpyDeviceToHost));\n //HANDLE_ERROR(cudaMemcpy(\n // start_states.data(), d_start_states, start_states.size() * sizeof(float), cudaMemcpyDeviceToHost));\n //HANDLE_ERROR(cudaMemcpy(\n // end_states.data(), d_end_states, end_states.size() * sizeof(float), cudaMemcpyDeviceToHost));\n\n for (unsigned seq = 0u; seq < n_seqs; seq++) {\n std::stringstream filename;\n filename << "alignment.dump." 
<< batch_idx << \'.\' << seq;\n std::ofstream out(filename.str().c_str(), std::ios::out | std::ios::trunc);\n for (unsigned t = 0u; t < n_frames; t++) {\n if (t > 0u && index[seq * index_stride + t] <= 0.0) {\n break;\n }\n float sum = std::numeric_limits<float>::infinity();\n for (unsigned s = start_states[seq]; s <= end_states[seq]; s++) {\n const float val = state_buffer[t * n_states + s];\n float diff = val - sum;\n if (!isnan(diff)) {\n sum = -log1p(exp(-abs(diff))) + fminf(sum, val);\n }\n }\n for (unsigned s = start_states[seq]; s <= end_states[seq]; s++) {\n const float val = state_buffer[t * n_states + s] - sum;\n if (val <= pruning) {\n out << t << \' \' << (s - start_states[seq]) << \' \' << val << \'\\n\';\n }\n }\n }\n }\n }\n ', '111_write_output_to_file': '\n void write_output_to_file(float* d_out, float* d_index, unsigned index_stride,\n float pruning, unsigned n_frames, unsigned n_seqs, unsigned n_emissions,\n unsigned batch_idx) {\n std::vector<float> buffer(n_frames * n_seqs * n_emissions);\n std::vector<float> index (n_frames * index_stride);\n\n //HANDLE_ERROR(cudaMemcpy(buffer.data(), d_out, buffer.size() * sizeof(float), cudaMemcpyDeviceToHost));\n //HANDLE_ERROR(cudaMemcpy(index.data(), d_index, index.size() * sizeof(float), cudaMemcpyDeviceToHost));\n\n for (unsigned seq = 0u; seq < n_seqs; seq++) {\n std::stringstream filename;\n filename << "target.dump." << batch_idx << \'.\' << seq;\n std::ofstream out(filename.str().c_str(), std::ios::out | std::ios::trunc);\n for (unsigned t = 0u; t < n_frames; t++) {\n if (t > 0u && index[seq * index_stride + t] <= 0.0) {\n break;\n }\n for (unsigned e = 0u; e < n_emissions; e++) {\n const float val = buffer[t * n_seqs * n_emissions + seq * n_emissions + e];\n if (val <= pruning) {\n out << t << \' \' << e << \' \' << val << \'\\n\';\n }\n }\n }\n }\n }\n '})
c_fw_code = '\n // am_scores, edges, weights, start_end_states, index, state_buffer* = input_names (*: inplace)\n // output = output_names\n assert(n_inputs == 6);\n assert(n_outputs == 2);\n Ndarray* am_scores = inputs[0];\n Ndarray* edges = inputs[1];\n Ndarray* weights = inputs[2];\n Ndarray* start_end_states = inputs[3];\n Ndarray* index = inputs[4];\n Ndarray* state_buffer = inputs[5];\n Ndarray* out = *outputs[0];\n Ndarray* sum_output = *outputs[1];\n\n /*\n debug_print(context, am_scores, "am_scores");\n debug_print(context, edges, "edges");\n debug_print(context, weights, "weights");\n debug_print(context, start_end_states, "start_end_states");\n debug_print(context, index, "index");\n debug_print(context, state_buffer, "state_buffer");\n */\n\n assert_cmp(Ndarray_DIMS(am_scores)[0], ==, Ndarray_DIMS(out)[0]);\n assert_cmp(Ndarray_DIMS(am_scores)[1], ==, Ndarray_DIMS(out)[1]);\n assert_cmp(Ndarray_DIMS(am_scores)[2], ==, Ndarray_DIMS(out)[2]);\n assert_cmp(Ndarray_DIMS(am_scores)[1], ==, Ndarray_DIMS(start_end_states)[1]);\n\n assert_cmp(Ndarray_DIMS(sum_output)[0], ==, Ndarray_DIMS(am_scores)[0]);\n assert_cmp(Ndarray_DIMS(sum_output)[1], ==, Ndarray_DIMS(am_scores)[1]);\n\n bool dump_alignment = false;\n bool dump_output = false;\n unsigned dump_every = 40u;\n static unsigned batch_idx = 0u;\n float pruning = 10.f;\n\n unsigned* d_from = reinterpret_cast<unsigned*>(Ndarray_DEV_DATA_int32(edges)\n + 0 * Ndarray_STRIDE(edges, 0));\n unsigned* d_to = reinterpret_cast<unsigned*>(Ndarray_DEV_DATA_int32(edges)\n + 1 * Ndarray_STRIDE(edges, 0));\n unsigned* d_emission_idxs = reinterpret_cast<unsigned*>(Ndarray_DEV_DATA_int32(edges)\n + 2 * Ndarray_STRIDE(edges, 0));\n unsigned* d_sequence_idxs = reinterpret_cast<unsigned*>(Ndarray_DEV_DATA_int32(edges)\n + 3 * Ndarray_STRIDE(edges, 0));\n float* d_weights = Ndarray_DEV_DATA(weights);\n float* d_am_scores = Ndarray_DEV_DATA(am_scores);\n unsigned* d_start_states = 
reinterpret_cast<unsigned*>(Ndarray_DEV_DATA_int32(start_end_states)\n + 0 * Ndarray_STRIDE(start_end_states, 0));\n unsigned* d_end_states = reinterpret_cast<unsigned*>(Ndarray_DEV_DATA_int32(start_end_states)\n + 1 * Ndarray_STRIDE(start_end_states, 0));\n float* d_index = Ndarray_DEV_DATA(index);\n float* d_state_buffer_prev = Ndarray_DEV_DATA(state_buffer) + 0 * Ndarray_STRIDE(state_buffer, 0);\n float* d_state_buffer_next = Ndarray_DEV_DATA(state_buffer) + 1 * Ndarray_STRIDE(state_buffer, 0);\n float* d_out = Ndarray_DEV_DATA(out);\n float* d_sum_output = Ndarray_DEV_DATA(sum_output);\n\n unsigned n_frames = Ndarray_DIMS(am_scores)[0];\n unsigned n_seqs = Ndarray_DIMS(am_scores)[1];\n unsigned n_emissions = Ndarray_DIMS(am_scores)[2];\n unsigned n_states = Ndarray_DIMS(state_buffer)[1];\n unsigned n_edges = Ndarray_DIMS(edges)[1];\n unsigned n_threads = 1024u;\n unsigned n_blocks = (n_edges + n_threads - 1) / n_threads;\n\n unsigned frame_stride = Ndarray_STRIDE(am_scores, 0);\n unsigned sequence_stride = Ndarray_STRIDE(am_scores, 1);\n unsigned index_stride = Ndarray_STRIDE(index, 0);\n\n assert_cmp(n_frames, >, 0);\n assert_cmp(n_states, >, 0);\n //std::cerr << "n_frames: " << n_frames << std::endl;\n //std::cerr << "n_seqs: " << n_seqs << std::endl;\n //std::cerr << "n_emissions: " << n_emissions << std::endl;\n //std::cerr << "n_states: " << n_states << std::endl;\n //std::cerr << "n_edges: " << n_edges << std::endl;\n //std::cerr << "n_threads: " << n_threads << std::endl;\n //std::cerr << "n_blocks: " << n_blocks << std::endl;\n\n //std::cerr << "frame_stride: " << frame_stride << std::endl;\n //std::cerr << "sequnence_stride: " << sequence_stride << std::endl;\n //std::cerr << "index_stride: " << index_stride << std::endl;\n\n // initialize edge buffer\n float* d_edge_buffer = reinterpret_cast<float*>(device_malloc(n_edges * n_frames * sizeof(float)));\n if(!d_edge_buffer) { HANDLE_LAST_ERROR(); abort(); } // error should have been set in 
device_malloc\n unsigned n_fill_blocks = (n_edges * n_frames + n_threads - 1u) / n_threads;\n start_dev_kernel2(fill_array, n_fill_blocks, n_threads, 0, (d_edge_buffer, 0.0, n_edges * n_frames));\n HANDLE_LAST_ERROR();\n\n // initialize the state buffer\n n_fill_blocks = (n_states + n_threads - 1u) / n_threads;\n start_dev_kernel2(\n fill_array, n_fill_blocks, n_threads, 0,\n (d_state_buffer_prev, std::numeric_limits<float>::infinity(), n_states));\n HANDLE_LAST_ERROR();\n start_dev_kernel2(set_start_states, 1, n_seqs, 0, (d_state_buffer_prev, d_start_states));\n HANDLE_LAST_ERROR();\n\n // initialize full state buffer (only used to dump the alignment)\n float* d_state_buffer_all = NULL;\n if (dump_alignment && batch_idx %% dump_every == 0) {\n d_state_buffer_all = reinterpret_cast<float*>(device_malloc(n_states * (n_frames + 1u) * sizeof(float)));\n if(!d_state_buffer_all) { HANDLE_LAST_ERROR(); abort(); } // error should have been set in device_malloc\n Ndarray_memcpy(d_state_buffer_all, d_state_buffer_prev, n_states * sizeof(float));\n HANDLE_LAST_ERROR();\n }\n\n // fwd pass\n for (unsigned t = 0u; t < n_frames; t++) {\n start_dev_kernel2(\n fill_array, n_fill_blocks, n_threads, 0,\n (d_state_buffer_next, std::numeric_limits<float>::infinity(), n_states));\n HANDLE_LAST_ERROR();\n start_dev_kernel2(next_frame, n_blocks, n_threads, 0,\n (true, n_edges, sequence_stride,\n d_sequence_idxs, d_from, d_to, d_weights, d_emission_idxs,\n d_state_buffer_prev, d_state_buffer_next, d_am_scores + t * frame_stride, d_edge_buffer + t * n_edges));\n HANDLE_LAST_ERROR();\n if (dump_alignment && batch_idx %% dump_every == 0) {\n Ndarray_memcpy(d_state_buffer_all + (t + 1u) * n_states, d_state_buffer_next, n_states * sizeof(float));\n HANDLE_LAST_ERROR();\n }\n std::swap(d_state_buffer_prev, d_state_buffer_next);\n }\n\n // bwd pass\n start_dev_kernel2(\n fill_array, n_fill_blocks, n_threads, 0,\n (d_state_buffer_prev, std::numeric_limits<float>::infinity(), n_states));\n 
HANDLE_LAST_ERROR();\n for (unsigned t = n_frames; t > 0; t--) {\n start_dev_kernel2(init_bwd_state_buffer, 1, n_seqs, 0,\n (d_state_buffer_prev, d_end_states, t - 1, n_frames - 1, d_index, index_stride));\n HANDLE_LAST_ERROR();\n if (dump_alignment && batch_idx %% dump_every == 0) {\n float alpha = 1.0f;\n //HANDLE_ERROR(cublasSaxpy(\n // handle, n_states, &alpha, d_state_buffer_prev, 1, d_state_buffer_all + t * n_states, 1));\n }\n start_dev_kernel2(\n fill_array, n_fill_blocks, n_threads, 0,\n (d_state_buffer_next, std::numeric_limits<float>::infinity(), n_states));\n HANDLE_LAST_ERROR();\n start_dev_kernel2(next_frame, n_blocks, n_threads, 0,\n (false, n_edges, sequence_stride,\n d_sequence_idxs, d_to, d_from, d_weights, d_emission_idxs,\n d_state_buffer_prev, d_state_buffer_next, d_am_scores + (t - 1) * frame_stride,\n d_edge_buffer + (t - 1) * n_edges));\n HANDLE_LAST_ERROR();\n std::swap(d_state_buffer_prev, d_state_buffer_next);\n }\n if (dump_alignment && batch_idx %% dump_every == 0) {\n float alpha = 1.0f;\n //HANDLE_ERROR(cublasSaxpy(handle, n_states, &alpha, d_state_buffer_prev, 1, d_state_buffer_all, 1));\n }\n\n // normalize at each time frame\n start_dev_kernel2(normalize, n_frames, 1, n_seqs * sizeof(float),\n (d_edge_buffer, d_sequence_idxs, n_edges, n_seqs, d_sum_output));\n HANDLE_LAST_ERROR();\n\n // dump alignment\n if (dump_alignment && batch_idx %% dump_every == 0) {\n write_alignment_to_file(d_state_buffer_all, d_index, index_stride, d_start_states, d_end_states,\n pruning, n_frames, n_seqs, n_states, batch_idx);\n }\n\n n_fill_blocks = (n_frames * n_seqs * n_emissions + n_threads - 1u) / n_threads;\n start_dev_kernel2(\n fill_array, n_fill_blocks, n_threads, 0,\n (d_out, std::numeric_limits<float>::infinity(), n_frames * n_seqs * n_emissions));\n HANDLE_LAST_ERROR();\n\n frame_stride = Ndarray_STRIDE(out, 0);\n sequence_stride = Ndarray_STRIDE(out, 1);\n n_blocks = (n_frames * n_edges + n_threads - 1u) / n_threads;\n 
start_dev_kernel2(compute_result, n_blocks, n_threads, 0,\n (d_edge_buffer, d_out, d_emission_idxs, d_sequence_idxs,\n frame_stride, sequence_stride, n_frames, n_seqs, n_edges));\n HANDLE_LAST_ERROR();\n\n #if TENSORFLOW\n // Certain TensorFlow code doesn\'t like inf, even if it is just the CheckNumerics,\n // which is helpful for debugging.\n // We replace it by a very high number, so that tf.exp(-out) will still result in 0.0.\n n_blocks = (n_frames * n_seqs * n_emissions + n_threads - 1u) / n_threads;\n start_dev_kernel2(remove_inf, n_blocks, n_threads, 0, (d_out, n_frames * n_seqs * n_emissions));\n //debug_print(context, out, "out");\n #endif\n if (dump_output && batch_idx %% dump_every == 0) {\n write_output_to_file(d_out, d_index, index_stride, pruning, n_frames, n_seqs, n_emissions, batch_idx);\n }\n\n device_free(d_edge_buffer);\n if (d_state_buffer_all != NULL) {\n device_free(d_state_buffer_all);\n }\n batch_idx++;\n '
c_bw_code = None
|
class MultiEndFastBaumWelchOp(NativeOpGenBase):
  """
  Baum-Welch forward-backward alignment, variant with explicit start states and
  weighted (sequence_idx, state_idx) end-state pairs instead of a single
  start/end state per sequence.

  inputs:
    :param am_scores: scores in -log space. 3d (time,batch,dim)
    :param edges: edges of the graph (from,to,emission_idx,sequence_idx)
    :param weights: weights of the edges
    :param start_states: 1d (batch,), int32, start state per sequence
    :param end_states: 2d (n_end_states,2), int32, (sequence_idx, state_idx) pairs
    :param end_state_weights: 1d (n_end_states,), initial score written into the
      bwd state buffer for the corresponding end state
    :param index: 2d (time,batch), sequence mask (compared against 1.0 / 0.0)
    :param state_buffer: 2d (2,n_states), scratch buffer, used inplace
      (rows 0/1 serve as prev/next state buffers)
  outputs:
    :param output: Baum-Welch alignment, scores in -log space. 3d (time,batch,dim), like am_scores
    :param sums: per-frame sums written by the normalize kernel, 2d (time,batch)
  """
  # Input tensor specs. Graph/state index tensors are int32; a shape entry that
  # is a tuple (i, j) presumably ties that dim to dim j of input i (matches the
  # specs elsewhere in this file) -- TODO confirm against NativeOpGenBase.
  in_info = ({'name': 'am_scores', 'ndim': 3, 'shape': (None, None, None), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'edges', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True, 'gradient': 'disconnected', 'dtype': 'int32'}, {'name': 'weights', 'ndim': 1, 'shape': (None,), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'start_states', 'ndim': 1, 'shape': (None,), 'need_contiguous': True, 'gradient': 'disconnected', 'dtype': 'int32'}, {'name': 'end_states', 'ndim': 2, 'shape': (None, 2), 'need_contiguous': True, 'gradient': 'disconnected', 'dtype': 'int32'}, {'name': 'end_state_weights', 'ndim': 1, 'shape': ((4, 0),), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'index', 'ndim': 2, 'shape': ((0, 0), (0, 1)), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'state_buffer', 'ndim': 2, 'shape': (2, None), 'need_contiguous': True, 'gradient': 'disconnected'})
  # Output tensor specs: output mirrors am_scores; sums mirrors (time,batch).
  out_info = ({'name': 'output', 'ndim': 3, 'shape': ((0, 0), (0, 1), (0, 2)), 'need_contiguous': True}, {'name': 'sums', 'ndim': 2, 'shape': ((0, 0), (0, 1)), 'need_contiguous': True})
  # Reuse FastBaumWelchOp's support kernels...
  c_extra_support_code = copy.copy(FastBaumWelchOp.c_extra_support_code)
  # ...but override init_bwd_state_buffer: here each (sequence_idx, state_idx)
  # end-state pair carries its own weight, which seeds states[state_idx] at the
  # last valid frame of that sequence (index goes 1.0 -> 0.0, or t == max_t).
  c_extra_support_code.update({'100_init_bwd_state_buffer': '\n __global__\n void init_bwd_state_buffer(unsigned t, unsigned max_t, unsigned num_endstates, unsigned index_stride,\n float* states, unsigned const* end_states, float const* end_state_weights,\n float const* index) {\n unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;\n if (idx >= num_endstates) {\n return;\n }\n\n unsigned seq_idx = end_states[idx * 2u + 0u];\n if (index[t * index_stride + seq_idx] == 1.0\n && (t == max_t || index[(t + 1) * index_stride + seq_idx] == 0.0)) {\n unsigned state_idx = end_states[idx * 2u + 1u];\n float weight = end_state_weights[idx];\n states[state_idx] = weight;\n }\n }\n '})
  # Native forward-pass code: fwd sweep, bwd sweep (from/to swapped), per-frame
  # normalization into sum_output, then compute_result writes `out`. Takes 8
  # inputs (adds start_states, end_states, end_state_weights relative to the
  # 6-input variant above). '%%' is an escaped '%' (string goes through
  # %-formatting); kernels are launched with raw <<<...>>> syntax here.
  c_fw_code = '\n // am_scores, edges, weights, start_states, end_states, end_state_weights,\n // index, state_buffer* = input_names (*: inplace)\n // output = output_names\n assert(n_inputs == 8);\n assert(n_outputs == 2);\n Ndarray* am_scores = inputs[0];\n Ndarray* edges = inputs[1];\n Ndarray* weights = inputs[2];\n Ndarray* start_states = inputs[3];\n Ndarray* end_states = inputs[4];\n Ndarray* end_state_weights = inputs[5];\n Ndarray* index = inputs[6];\n Ndarray* state_buffer = inputs[7];\n Ndarray* out = *outputs[0];\n Ndarray* sum_output = *outputs[1];\n\n assert(Ndarray_DIMS(am_scores)[0] == Ndarray_DIMS(out)[0]);\n assert(Ndarray_DIMS(am_scores)[1] == Ndarray_DIMS(out)[1]);\n assert(Ndarray_DIMS(am_scores)[2] == Ndarray_DIMS(out)[2]);\n// assert(Ndarray_DIMS(am_scores)[1] == Ndarray_DIMS(end_states)[0]);\n\n assert(Ndarray_DIMS(sum_output)[0] == Ndarray_DIMS(am_scores)[0]);\n assert(Ndarray_DIMS(sum_output)[1] == Ndarray_DIMS(am_scores)[1]);\n\n bool dump_alignment = false;\n bool dump_output = false;\n unsigned dump_every = 40u;\n static unsigned batch_idx = 0u;\n float pruning = 10.f;\n\n unsigned* d_from = reinterpret_cast<unsigned*>(Ndarray_DEV_DATA_int32(edges)\n + 0 * Ndarray_STRIDE(edges, 0));\n unsigned* d_to = reinterpret_cast<unsigned*>(Ndarray_DEV_DATA_int32(edges)\n + 1 * Ndarray_STRIDE(edges, 0));\n unsigned* d_emission_idxs = reinterpret_cast<unsigned*>(Ndarray_DEV_DATA_int32(edges)\n + 2 * Ndarray_STRIDE(edges, 0));\n unsigned* d_sequence_idxs = reinterpret_cast<unsigned*>(Ndarray_DEV_DATA_int32(edges)\n + 3 * Ndarray_STRIDE(edges, 0));\n float* d_weights = Ndarray_DEV_DATA(weights);\n float* d_am_scores = Ndarray_DEV_DATA(am_scores);\n unsigned* d_start_states = reinterpret_cast<unsigned*>(Ndarray_DEV_DATA_int32(start_states));\n unsigned* d_end_states = reinterpret_cast<unsigned*>(Ndarray_DEV_DATA_int32(end_states));\n float* d_end_state_weights = Ndarray_DEV_DATA(end_state_weights);\n float* d_index = Ndarray_DEV_DATA(index);\n float* d_state_buffer_prev = Ndarray_DEV_DATA(state_buffer) + 0 * Ndarray_STRIDE(state_buffer, 0);\n float* d_state_buffer_next = Ndarray_DEV_DATA(state_buffer) + 1 * Ndarray_STRIDE(state_buffer, 0);\n float* d_out = Ndarray_DEV_DATA(out);\n float* d_sum_output = Ndarray_DEV_DATA(sum_output);\n\n unsigned n_frames = Ndarray_DIMS(am_scores)[0];\n unsigned n_seqs = Ndarray_DIMS(am_scores)[1];\n unsigned n_emissions = Ndarray_DIMS(am_scores)[2];\n unsigned n_states = Ndarray_DIMS(state_buffer)[1];\n unsigned n_edges = Ndarray_DIMS(edges)[1];\n unsigned n_start_states = Ndarray_DIMS(start_states)[0];\n unsigned n_end_states = Ndarray_DIMS(end_states)[0];\n unsigned n_threads = 1024u;\n unsigned n_blocks = (n_edges + n_threads - 1) / n_threads;\n\n unsigned frame_stride = Ndarray_STRIDE(am_scores, 0);\n unsigned sequence_stride = Ndarray_STRIDE(am_scores, 1);\n unsigned index_stride = Ndarray_STRIDE(index, 0);\n\n assert(n_frames > 0);\n\n// std::cerr << "n_frames: " << n_frames << std::endl;\n// std::cerr << "n_seqs: " << n_seqs << std::endl;\n// std::cerr << "n_emissions: " << n_emissions << std::endl;\n// std::cerr << "n_states: " << n_states << std::endl;\n// std::cerr << "n_edges: " << n_edges << std::endl;\n// std::cerr << "n_start_states: " << n_start_states << std::endl;\n// std::cerr << "n_end_states: " << n_end_states << std::endl;\n// std::cerr << "n_threads: " << n_threads << std::endl;\n// std::cerr << "n_blocks: " << n_blocks << std::endl;\n\n// std::cerr << "frame_stride: " << frame_stride << std::endl;\n// std::cerr << "sequence_stride: " << sequence_stride << std::endl;\n// std::cerr << "index_stride: " << index_stride << std::endl;\n\n // initialize edge buffer\n float* d_edge_buffer = reinterpret_cast<float*>(device_malloc(n_edges * n_frames * sizeof(float)));\n// cudaDeviceSynchronize();\n// HANDLE_LAST_ERROR();\n unsigned n_fill_blocks = (n_edges * n_frames + n_threads - 1u) / n_threads;\n fill_array<<<n_fill_blocks, n_threads>>>(d_edge_buffer, 0.0, n_edges * n_frames);\n// cudaDeviceSynchronize();\n// HANDLE_LAST_ERROR();\n\n // initialize the state buffer\n n_fill_blocks = (n_states + n_threads - 1u) / n_threads;\n fill_array<<<n_fill_blocks, n_threads>>>(d_state_buffer_prev, std::numeric_limits<float>::infinity(), n_states);\n// cudaDeviceSynchronize();\n// HANDLE_LAST_ERROR();\n set_start_states<<<1, n_start_states>>>(d_state_buffer_prev, d_start_states);\n// cudaDeviceSynchronize();\n// HANDLE_LAST_ERROR();\n\n // initialize full state buffer (only used to dump the alignment)\n float* d_state_buffer_all = NULL;\n if (dump_alignment and batch_idx %% dump_every == 0) {\n d_state_buffer_all = reinterpret_cast<float*>(device_malloc(n_states * (n_frames + 1u) * sizeof(float)));\n// cudaDeviceSynchronize();\n// HANDLE_LAST_ERROR();\n cudaMemcpy(d_state_buffer_all, d_state_buffer_prev, n_states * sizeof(float), cudaMemcpyDeviceToDevice);\n// HANDLE_LAST_ERROR();\n }\n\n // fwd pass\n for (unsigned t = 0u; t < n_frames; t++) {\n fill_array<<<n_fill_blocks, n_threads>>>(d_state_buffer_next, std::numeric_limits<float>::infinity(), n_states);\n// cudaDeviceSynchronize();\n// HANDLE_LAST_ERROR();\n// std::cerr << "frame " << t << std::endl;\n next_frame<<<n_blocks, n_threads>>>(true, n_edges, sequence_stride,\n d_sequence_idxs, d_from, d_to, d_weights, d_emission_idxs,\n d_state_buffer_prev, d_state_buffer_next, d_am_scores + t * frame_stride,\n d_edge_buffer + t * n_edges);\n// cudaDeviceSynchronize();\n// HANDLE_LAST_ERROR();\n if (dump_alignment and batch_idx %% dump_every == 0) {\n cudaMemcpy(\n d_state_buffer_all + (t + 1u) * n_states, d_state_buffer_next, n_states * sizeof(float),\n cudaMemcpyDeviceToDevice);\n }\n std::swap(d_state_buffer_prev, d_state_buffer_next);\n }\n\n // bwd pass\n const unsigned n_end_state_blocks = (n_end_states + n_threads - 1u) / n_threads;\n const unsigned n_end_state_threads = min(n_threads, n_end_states);\n fill_array<<<n_fill_blocks, n_threads>>>(d_state_buffer_prev, std::numeric_limits<float>::infinity(), n_states);\n// cudaDeviceSynchronize();\n// HANDLE_LAST_ERROR();\n for (unsigned t = n_frames; t > 0; t--) {\n init_bwd_state_buffer<<<n_end_state_blocks, n_end_state_threads>>>(\n t - 1, n_frames - 1, n_end_states, index_stride,\n d_state_buffer_prev, d_end_states, d_end_state_weights, d_index);\n// cudaDeviceSynchronize();\n// HANDLE_LAST_ERROR();\n if (dump_alignment and batch_idx %% dump_every == 0) {\n float alpha = 1.0f;\n// HANDLE_ERROR(cublasSaxpy(\n// handle, n_states, &alpha, d_state_buffer_prev, 1, d_state_buffer_all + t * n_states, 1));\n }\n fill_array<<<n_fill_blocks, n_threads>>>(d_state_buffer_next, std::numeric_limits<float>::infinity(), n_states);\n// cudaDeviceSynchronize();\n// HANDLE_LAST_ERROR();\n next_frame<<<n_blocks, n_threads>>>(false, n_edges, sequence_stride,\n d_sequence_idxs, d_to, d_from, d_weights, d_emission_idxs,\n d_state_buffer_prev, d_state_buffer_next,\n d_am_scores + (t - 1) * frame_stride,\n d_edge_buffer + (t - 1) * n_edges);\n// cudaDeviceSynchronize();\n// HANDLE_LAST_ERROR();\n std::swap(d_state_buffer_prev, d_state_buffer_next);\n }\n if (dump_alignment and batch_idx %% dump_every == 0) {\n float alpha = 1.0f;\n// HANDLE_ERROR(cublasSaxpy(handle, n_states, &alpha, d_state_buffer_prev, 1, d_state_buffer_all, 1));\n }\n\n // normalize at each time frame\n normalize<<<n_frames, 1, n_seqs * sizeof(float)>>>(d_edge_buffer, d_sequence_idxs, n_edges, n_seqs, d_sum_output);\n// cudaDeviceSynchronize();\n// HANDLE_LAST_ERROR();\n\n // dump alignment\n if (dump_alignment and batch_idx %% dump_every == 0) {\n write_alignment_to_file(d_state_buffer_all, d_index, index_stride, d_start_states, d_end_states,\n pruning, n_frames, n_seqs, n_states, batch_idx);\n }\n\n n_fill_blocks = (n_frames * n_seqs * n_emissions + n_threads - 1u) / n_threads;\n fill_array<<<n_fill_blocks, n_threads>>>(\n d_out, std::numeric_limits<float>::infinity(), n_frames * n_seqs * n_emissions);\n// cudaDeviceSynchronize();\n// HANDLE_LAST_ERROR();\n\n frame_stride = Ndarray_STRIDE(out, 0);\n sequence_stride = Ndarray_STRIDE(out, 1);\n n_blocks = (n_frames * n_edges + n_threads - 1u) / n_threads;\n compute_result<<<n_blocks, n_threads>>>(d_edge_buffer, d_out, d_emission_idxs, d_sequence_idxs,\n frame_stride, sequence_stride, n_frames, n_seqs, n_edges);\n// cudaDeviceSynchronize();\n// HANDLE_LAST_ERROR();\n\n #if TENSORFLOW\n // Certain TensorFlow code doesn\'t like inf, even if it is just the CheckNumerics,\n // which is helpful for debugging.\n // We replace it by a very high number, so that tf.exp(-out) will still result in 0.0.\n n_blocks = (n_frames * n_seqs * n_emissions + n_threads - 1u) / n_threads;\n remove_inf<<<n_blocks, n_threads>>>(d_out, n_frames * n_seqs * n_emissions);\n //debug_print(context, out, "out");\n #endif\n if (dump_output and batch_idx %% dump_every == 0) {\n write_output_to_file(d_out, d_index, index_stride, pruning, n_frames, n_seqs, n_emissions, batch_idx);\n }\n\n device_free(d_edge_buffer);\n if (d_state_buffer_all != NULL) {\n device_free(d_state_buffer_all);\n }\n batch_idx++;\n '
  # No native backward-pass code is provided for this op.
  c_bw_code = None
  # GPU-only: the forward code launches kernels with raw CUDA <<<...>>> syntax.
  cpu_support = False
|
class SegmentFastBaumWelchOp(NativeOpGenBase):
'\n Segmental Baum-Welch...\n '
in_info = ({'name': 'am_scores', 'ndim': 3, 'shape': (None, None, None), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'batch_idxs', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'edges', 'ndim': 2, 'shape': (None, None), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'weights', 'ndim': 1, 'shape': ((2, 1),), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'length_models', 'ndim': 2, 'shape': (None, (0, 0)), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'start_end_states', 'ndim': 2, 'shape': (2, None), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'index', 'ndim': 2, 'shape': ((0, 0), (0, 1)), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'am_score_scales', 'ndim': 1, 'shape': (None,), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'epoch', 'ndim': 0, 'shape': (), 'need_contiguous': True, 'gradient': 'disconnected'})
out_info = ({'name': 'output', 'ndim': 3, 'shape': ((0, 0), (0, 1), (0, 2)), 'need_contiguous': True}, {'name': 'normalization_factors', 'ndim': 2, 'shape': ((0, 0), (0, 1)), 'need_contiguous': True}, {'name': 'posterior_weigths', 'ndim': 2, 'shape': ((0, 0), (0, 1)), 'need_contiguous': True})
c_extra_support_code = copy.copy(common_fast_bw_kernels)
c_extra_support_code.update({'100_get_batch_idx': '\n __device__\n int get_batch_idx(int const* batch_idxs, unsigned num_seqs, unsigned t, unsigned seq_idx) {\n if (NEW_BATCH_IDX_FORMAT) {\n int res = batch_idxs[seq_idx] + t;\n if (res >= batch_idxs[seq_idx + 1]) {\n return -1;\n }\n return res;\n }\n else {\n return batch_idxs[t * num_seqs + seq_idx];\n }\n }\n ', '101_init_bwd_state_buffer': '\n __global__\n void init_bwd_state_buffer(unsigned t, unsigned num_batches, unsigned num_seqs,\n int* batch_idxs, float* index, float* states, unsigned* end_states) {\n unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;\n int batch_idx = get_batch_idx(batch_idxs, num_seqs, t, idx);\n if (batch_idx < 0) {\n return;\n }\n float* batch_first_frame = index + batch_idx;\n //if (*batch_first_frame != 0.0 && (t == max_t || *(batch_first_frame + 1) == 0.0)) {\n if (batch_first_frame[0] != 0.0 && batch_first_frame[num_batches] == 0.0) {\n unsigned state_idx = end_states[idx];\n states[state_idx] = 0.0;\n }\n }\n ', '102_next_frame_fwd': '\n __global__\n void next_frame_fwd(unsigned time, unsigned num_states, unsigned num_edges, unsigned num_emissions,\n unsigned num_seg_frames,\n unsigned num_tot_frames, unsigned num_seqs, unsigned num_am_score_scales,\n unsigned const* sequence_idxs, unsigned const* from_buffer, unsigned const* to_buffer,\n float const* weight_buffer,\n unsigned const* emission_idxs, unsigned const* lenmod_idxs, int const* batch_idxs,\n float const* am_scores, float const* length_models, float const* am_score_scales,\n float const* epoch,\n float* state_buffer, float* edge_buffer) {\n const unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;\n if (idx >= num_edges) {\n return;\n }\n\n const unsigned num_ringbuffer_frames = num_seg_frames + 1;\n const unsigned max_seg_frames = min(num_seg_frames, num_tot_frames - time);\n\n const unsigned prev_frame_idx = time % num_ringbuffer_frames;\n const unsigned prev_frame_start = prev_frame_idx * num_states;\n\n const 
unsigned from = from_buffer [idx];\n const float prev_val = state_buffer[prev_frame_start + from];\n if (isinf(prev_val)) {\n return;\n }\n\n const unsigned sequence_idx = sequence_idxs[idx];\n const int batch_idx = get_batch_idx(batch_idxs, num_seqs, time, sequence_idx);\n if (batch_idx == -1) {\n return;\n }\n\n const unsigned amss_idx = min(static_cast<unsigned>(*epoch), num_am_score_scales - 1);\n const float am_score_scale = am_score_scales[amss_idx];\n\n const unsigned to = to_buffer [idx];\n const unsigned emission_idx = emission_idxs[idx];\n const unsigned lenmod_idx = lenmod_idxs [idx];\n const float edge_weight = weight_buffer[idx];\n const float prev_plus_edge = prev_val + edge_weight;\n\n float const* am_buffer_in = am_scores + batch_idx * num_seg_frames * num_emissions + emission_idx;\n float const* length_scores = length_models + lenmod_idx * num_seg_frames;\n float* edge_buffer_out = edge_buffer + idx;\n\n for (unsigned i = 0u; i < max_seg_frames; i++) {\n const float val = prev_plus_edge + am_score_scale * am_buffer_in[i * num_emissions] + length_scores[i];\n edge_buffer_out[i * num_edges] = val;\n const unsigned next_frame = (prev_frame_idx + 1 + i) % num_ringbuffer_frames;\n atomic_prob_add(state_buffer + (next_frame * num_states + to), val);\n }\n }\n ', '103_next_frame_bwd': '\n __global__\n void next_frame_bwd(unsigned time, unsigned num_states, unsigned num_edges, unsigned num_emissions,\n unsigned num_seg_frames,\n unsigned num_tot_frames, unsigned num_seqs, unsigned num_am_score_scales,\n unsigned const* sequence_idxs, unsigned const* from_buffer, unsigned const* to_buffer,\n float const* weight_buffer,\n unsigned const* emission_idxs, unsigned const* lenmod_idxs, int const* batch_idxs,\n float const* am_scores, float const* length_models, float const* am_score_scales,\n float const* epoch,\n float* state_buffer, float* edge_buffer) {\n const unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;\n if (idx >= num_edges) {\n return;\n }\n\n 
const unsigned num_ringbuffer_frames = num_seg_frames + 1;\n const unsigned max_seg_frames = min(num_seg_frames, num_tot_frames - time);\n\n const unsigned sequence_idx = sequence_idxs[idx];\n const int batch_idx = get_batch_idx(batch_idxs, num_seqs, time, sequence_idx);\n if (batch_idx == -1) {\n return;\n }\n\n const unsigned amss_idx = min(static_cast<unsigned>(*epoch), num_am_score_scales - 1);\n const float am_score_scale = am_score_scales[amss_idx];\n\n const unsigned from = from_buffer [idx];\n const unsigned to = to_buffer [idx];\n const unsigned emission_idx = emission_idxs[idx];\n const unsigned lenmod_idx = lenmod_idxs [idx];\n const float edge_weight = weight_buffer[idx];\n const unsigned next_frame_idx = time % num_ringbuffer_frames;\n\n float const* am_buffer_in = am_scores + batch_idx * num_seg_frames * num_emissions + emission_idx;\n float const* length_scores = length_models + lenmod_idx * num_seg_frames;\n float* edge_buffer_out = edge_buffer + idx;\n\n float acc_val = CUDART_INF_F;\n\n for (unsigned i = 0u; i < max_seg_frames; i++) {\n const unsigned prev_frame_idx = (next_frame_idx + i + 1) % num_ringbuffer_frames;\n const float prev_val = state_buffer[prev_frame_idx * num_states + from];\n if (isinf(prev_val)) {\n edge_buffer_out[i * num_edges] = CUDART_INF_F;\n }\n else {\n const float val =\n prev_val + edge_weight + am_score_scale * am_buffer_in[i * num_emissions] + length_scores[i];\n edge_buffer_out[i * num_edges] += prev_val;\n acc_val = prob_add(acc_val, val);\n }\n }\n\n atomic_prob_add(state_buffer + next_frame_idx * num_states + to, acc_val);\n }\n ', '104_compute_framewise_sum': '\n __global__\n void compute_framewise_sum(unsigned num_tot_frames, unsigned num_seqs, unsigned num_seg_frames,\n unsigned num_batches, unsigned num_edges,\n unsigned const* sequence_idxs, int const* batch_idxs, float const* index,\n float const* edge_buffer,\n float* output_buffer) {\n extern __shared__ float sum[];\n\n const unsigned idx = blockIdx.x * 
blockDim.x + threadIdx.x;\n if (idx >= num_tot_frames * num_seg_frames) {\n return;\n }\n\n float* sum_buffer = sum + threadIdx.x * num_seqs;\n edge_buffer += idx * num_edges;\n\n for (unsigned s = 0u; s < num_seqs; s++) {\n sum_buffer[s] = CUDART_INF_F;\n }\n\n for (unsigned i = 0; i < num_edges; i++) {\n const unsigned seq_idx = sequence_idxs[i];\n sum_buffer[seq_idx] = prob_add(sum_buffer[seq_idx], edge_buffer[i]);\n }\n\n const unsigned time = idx / num_seg_frames;\n const unsigned seg_size = idx % num_seg_frames;\n for (unsigned s = 0u; s < num_seqs; s++) {\n const int batch_idx = get_batch_idx(batch_idxs, num_seqs, time, s);\n if (batch_idx >= 0) {\n const unsigned output_idx = seg_size * num_batches + batch_idx;\n if (isinf(sum_buffer[s]) or index[output_idx] == 0.0) {\n output_buffer[output_idx] = 0.0;\n }\n else {\n output_buffer[output_idx] = sum_buffer[s];\n }\n }\n }\n }\n ', '105_merge_framewise_sums': '\n __global__\n void merge_framewise_sum(unsigned num_seg_frames, unsigned num_batches, float const* index, float* sum_buffer) {\n const unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;\n if (idx >= num_batches) {\n return;\n }\n\n sum_buffer += idx;\n index += idx;\n\n float sum = sum_buffer[0];\n for (unsigned s = 1; s < num_seg_frames; s++) {\n if (index[s * num_batches] != 0.0f) {\n sum = prob_add(sum, sum_buffer[s * num_batches]);\n }\n }\n\n for (unsigned s = 0; s < num_seg_frames; s++) {\n if (index[s * num_batches] != 0.0f) {\n sum_buffer[s * num_batches] = sum;\n }\n }\n }\n ', '106_compute_targets': '\n __global__\n void compute_targets(unsigned num_tot_frames, unsigned num_seg_frames, unsigned num_edges, unsigned num_batches,\n unsigned num_seqs, unsigned num_emissions,\n unsigned const* sequence_idxs, unsigned const* emission_idxs, int const* batch_idxs,\n float const* index,\n float const* edge_buffer, float const* normalization_buffer, float* output_buffer) {\n const unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;\n if (idx >= 
num_tot_frames * num_seg_frames * num_edges) {\n return;\n }\n\n const unsigned edge_idx = idx % num_edges;\n const unsigned time = idx / (num_edges * num_seg_frames);\n const unsigned seq_idx = sequence_idxs[edge_idx];\n const int batch_idx = get_batch_idx(batch_idxs, num_seqs, time, seq_idx);\n\n if (batch_idx < 0) {\n return;\n }\n\n const unsigned seg_length = (idx / num_edges) % num_seg_frames;\n\n if (index[seg_length * num_batches + batch_idx] == 0.0) {\n return;\n }\n\n const unsigned emission_idx = emission_idxs[edge_idx];\n const float normalization = normalization_buffer[seg_length * num_batches + batch_idx];\n\n atomic_prob_add(\n output_buffer + seg_length * num_batches * num_emissions + batch_idx * num_emissions + emission_idx,\n edge_buffer[idx] - normalization);\n }\n ', '107_compute_posterior_weights': '\n __global__\n void compute_posterior_weights(unsigned num_tot_frames, unsigned num_seg_frames, unsigned num_seqs,\n unsigned num_batches,\n float const* state_buffer, unsigned const* start_states, int const* batch_idxs,\n float const* index, float const* normalization_factors, float* posterior_weigths) {\n const unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;\n if (idx >= num_tot_frames * num_seqs) {\n return;\n }\n\n const unsigned time = idx / num_seqs;\n const unsigned seq_idx = idx % num_seqs;\n\n const int batch_idx = get_batch_idx(batch_idxs, num_seqs, time, seq_idx);\n if (batch_idx < 0) {\n return;\n }\n\n const float seq_sum = state_buffer[start_states[seq_idx]];\n for (unsigned s = 0u; s < num_seg_frames; s++) {\n const unsigned i = s * num_batches + batch_idx;\n if (index[i] == 0.0) {\n return;\n }\n posterior_weigths[i] = exp(-(normalization_factors[i] - seq_sum));\n }\n }\n '})
c_fw_code = '\n // inputs: am_scores, batch_idxs, edges, weights, length_models, start_end_states, index, am_score_scales, epoch\n // outputs: output, normalization_factors, posterior_weigths\n assert(n_inputs == 9);\n assert(n_outputs == 3);\n Ndarray* ary_am_scores = inputs[0];\n Ndarray* ary_batch_idxs = inputs[1];\n Ndarray* ary_edges = inputs[2];\n Ndarray* ary_weights = inputs[3];\n Ndarray* ary_start_end_states = inputs[4];\n Ndarray* ary_length_models = inputs[5];\n Ndarray* ary_index = inputs[6];\n Ndarray* ary_am_score_scales = inputs[7];\n Ndarray* ary_epoch = inputs[8];\n Ndarray* ary_out = *outputs[0];\n Ndarray* ary_norm_factors = *outputs[1];\n Ndarray* ary_posterior_weights = *outputs[2];\n\n assert(Ndarray_DIMS(ary_edges)[1] == Ndarray_DIMS(ary_weights)[0]);\n\n static unsigned iter = 0u; // used for debug output\n\n float* d_am_scores = Ndarray_DEV_DATA(ary_am_scores);\n int* d_batch_idxs = reinterpret_cast<int*>(Ndarray_DEV_DATA(ary_batch_idxs));\n unsigned* d_from =\n reinterpret_cast<unsigned*>(Ndarray_DEV_DATA(ary_edges) + 0 * Ndarray_STRIDE(ary_edges, 0));\n unsigned* d_to =\n reinterpret_cast<unsigned*>(Ndarray_DEV_DATA(ary_edges) + 1 * Ndarray_STRIDE(ary_edges, 0));\n unsigned* d_emission_idxs =\n reinterpret_cast<unsigned*>(Ndarray_DEV_DATA(ary_edges) + 2 * Ndarray_STRIDE(ary_edges, 0));\n unsigned* d_lenmod_idxs =\n reinterpret_cast<unsigned*>(Ndarray_DEV_DATA(ary_edges) + 3 * Ndarray_STRIDE(ary_edges, 0));\n unsigned* d_sequence_idxs =\n reinterpret_cast<unsigned*>(Ndarray_DEV_DATA(ary_edges) + 4 * Ndarray_STRIDE(ary_edges, 0));\n float* d_weights = Ndarray_DEV_DATA(ary_weights);\n float* d_length_models = Ndarray_DEV_DATA(ary_length_models);\n unsigned* d_start_states =\n reinterpret_cast<unsigned*>(Ndarray_DEV_DATA(ary_start_end_states) + 0 * Ndarray_STRIDE(ary_start_end_states, 0));\n unsigned* d_end_states =\n reinterpret_cast<unsigned*>(Ndarray_DEV_DATA(ary_start_end_states) + 1 * Ndarray_STRIDE(ary_start_end_states, 0));\n float* 
d_index = Ndarray_DEV_DATA(ary_index);\n float* d_am_score_scales = Ndarray_DEV_DATA(ary_am_score_scales);\n float* d_epoch = Ndarray_DEV_DATA(ary_epoch);\n float* d_out = Ndarray_DEV_DATA(ary_out);\n float* d_norm_factors = Ndarray_DEV_DATA(ary_norm_factors);\n float* d_posterior_weights = Ndarray_DEV_DATA(ary_posterior_weights);\n\n std::vector<int> seq_lengths;\n if (NEW_BATCH_IDX_FORMAT) {\n seq_lengths.resize(Ndarray_DIMS(ary_batch_idxs)[0]);\n HANDLE_ERROR(cudaMemcpy(\n seq_lengths.data(), d_batch_idxs, seq_lengths.size() * sizeof(int), cudaMemcpyDeviceToHost));\n }\n\n const unsigned n_seg_frames = Ndarray_DIMS(ary_am_scores)[0];\n const unsigned n_batches = Ndarray_DIMS(ary_am_scores)[1];\n const unsigned n_emissions = Ndarray_DIMS(ary_am_scores)[2];\n const unsigned n_seqs =\n NEW_BATCH_IDX_FORMAT ? (Ndarray_DIMS(ary_batch_idxs)[0] - 1) : Ndarray_DIMS(ary_batch_idxs)[1];\n const unsigned n_tot_frames =\n NEW_BATCH_IDX_FORMAT ? seq_lengths.back() : Ndarray_DIMS(ary_batch_idxs)[0];\n const unsigned n_edges = Ndarray_DIMS(ary_edges)[1];\n const unsigned n_length_models = Ndarray_DIMS(ary_length_models)[1];\n const unsigned n_am_score_scales = Ndarray_DIMS(ary_am_score_scales)[0];\n const unsigned n_threads = 1024u;\n unsigned n_blocks = (n_edges + n_threads - 1) / n_threads;\n\n unsigned tmp;\n HANDLE_ERROR(cudaMemcpy(&tmp, d_end_states + n_seqs - 1, sizeof(float), cudaMemcpyDeviceToHost));\n\n const unsigned n_states = tmp + 1;\n\n /*std::cerr << "seg frames: " << n_seg_frames << std::endl;\n std::cerr << "batches: " << n_batches << std::endl;\n std::cerr << "emissions: " << n_emissions << std::endl;\n std::cerr << "tot frames: " << n_tot_frames << std::endl;\n std::cerr << "seqs: " << n_seqs << std::endl;\n std::cerr << "edges: " << n_edges << std::endl;\n std::cerr << "length models: " << n_length_models << std::endl;\n std::cerr << "threads: " << n_threads << std::endl;\n std::cerr << "blocks: " << n_blocks << std::endl;\n std::cerr << "num states: " << 
n_states << std::endl;*/\n\n // initialize edge buffer\n const unsigned edge_buffer_size = n_tot_frames * n_seg_frames * n_edges;\n float* d_edge_buffer = reinterpret_cast<float*>(device_malloc(edge_buffer_size * sizeof(float)));\n HANDLE_LAST_ERROR();\n unsigned n_fill_blocks = (edge_buffer_size + n_threads - 1u) / n_threads;\n fill_array<<<n_fill_blocks, n_threads>>>(d_edge_buffer, std::numeric_limits<float>::infinity(), edge_buffer_size);\n HANDLE_LAST_ERROR();\n\n // initialize the state buffer\n const unsigned n_ringbuffer_frames = n_seg_frames + 1;\n float* d_state_buffer = reinterpret_cast<float*>(device_malloc(n_states * n_ringbuffer_frames * sizeof(float)));\n HANDLE_LAST_ERROR();\n n_fill_blocks = (n_states * n_ringbuffer_frames + n_threads - 1u) / n_threads;\n fill_array<<<n_fill_blocks, n_threads>>>(\n d_state_buffer, std::numeric_limits<float>::infinity(), n_states * n_ringbuffer_frames);\n HANDLE_LAST_ERROR();\n\n // initialize sum buffer and posterior weigths\n n_fill_blocks = (n_batches * n_seg_frames + n_threads - 1u) / n_threads;\n fill_array<<<n_fill_blocks, n_threads>>>(d_norm_factors, 0.0f, n_batches * n_seg_frames);\n HANDLE_LAST_ERROR();\n fill_array<<<n_fill_blocks, n_threads>>>(d_posterior_weights, 0.0f, n_batches * n_seg_frames);\n HANDLE_LAST_ERROR();\n\n set_start_states<<<1, n_seqs>>>(d_state_buffer, d_start_states);\n HANDLE_LAST_ERROR();\n\n // fwd pass\n for (unsigned t = 0u; t < n_tot_frames; t++) {\n //std::cerr << "fwd t: " << t << " " << n_tot_frames << std::endl;\n float* d_state_buffer_prev = d_state_buffer + ((t - 1) %% n_ringbuffer_frames) * n_states;\n fill_array<<<n_fill_blocks, n_threads>>>(d_state_buffer_prev, std::numeric_limits<float>::infinity(), n_states);\n HANDLE_LAST_ERROR();\n next_frame_fwd<<<n_blocks, n_threads>>>(t, n_states, n_edges, n_emissions, n_seg_frames, n_tot_frames, n_seqs,\n n_am_score_scales,\n d_sequence_idxs, d_from, d_to, d_weights, d_emission_idxs, d_lenmod_idxs,\n d_batch_idxs,\n d_am_scores, 
d_length_models, d_am_score_scales, d_epoch,\n d_state_buffer, d_edge_buffer + t * n_seg_frames * n_edges);\n HANDLE_LAST_ERROR();\n\n //std::stringstream ss;\n //ss << "dump/fwd_state_buffer." << t << ".dump";\n //dump_to_file_2d(d_state_buffer, n_ringbuffer_frames, n_states, ss.str());\n }\n\n //dump_to_file_3d(d_edge_buffer, n_tot_frames, n_seg_frames, n_edges, "dump/fwd_edges.dump");\n\n // bwd pass\n n_fill_blocks = (n_states * n_ringbuffer_frames + n_threads - 1u) / n_threads;\n fill_array<<<n_fill_blocks, n_threads>>>(\n d_state_buffer, std::numeric_limits<float>::infinity(), n_states * n_ringbuffer_frames);\n HANDLE_LAST_ERROR();\n n_fill_blocks = (n_states + n_threads - 1u) / n_threads;\n for (unsigned t = n_tot_frames; t > 0; t--) {\n //std::cerr <<\n //"bwd t: " << t << " " << n_tot_frames << " buffer next: " << ((t-1) %% n_ringbuffer_frames) << std::endl;\n float* d_state_buffer_next = d_state_buffer + ((t - 1) %% n_ringbuffer_frames) * n_states;\n float* d_state_buffer_prev = d_state_buffer + ( t %% n_ringbuffer_frames) * n_states;\n fill_array<<<n_fill_blocks, n_threads>>>(d_state_buffer_next, std::numeric_limits<float>::infinity(), n_states);\n HANDLE_LAST_ERROR();\n init_bwd_state_buffer<<<1, n_seqs>>>(\n t - 1, n_batches, n_seqs, d_batch_idxs, d_index, d_state_buffer_prev, d_end_states);\n HANDLE_LAST_ERROR();\n next_frame_bwd<<<n_blocks, n_threads>>>(\n t - 1, n_states, n_edges, n_emissions, n_seg_frames, n_tot_frames, n_seqs, n_am_score_scales,\n d_sequence_idxs, d_to, d_from, d_weights, d_emission_idxs, d_lenmod_idxs, d_batch_idxs,\n d_am_scores, d_length_models, d_am_score_scales, d_epoch,\n d_state_buffer, d_edge_buffer + (t - 1) * n_seg_frames * n_edges);\n HANDLE_LAST_ERROR();\n\n //std::stringstream ss;\n //ss << "dump/bwd_state_buffer." 
<< t << ".dump";\n //dump_to_file_2d(d_state_buffer, n_ringbuffer_frames, n_states, ss.str());\n }\n\n n_blocks = (n_tot_frames * n_seg_frames + n_threads - 1) / n_threads;\n compute_framewise_sum<<<n_blocks, n_threads, n_threads * n_seqs * sizeof(float)>>>(\n n_tot_frames, n_seqs, n_seg_frames, n_batches, n_edges,\n d_sequence_idxs, d_batch_idxs,\n d_index, d_edge_buffer, d_norm_factors);\n HANDLE_LAST_ERROR();\n\n //dump_to_file_2d(d_norm_factors, n_seg_frames, n_batches, "dump/norm_factors_1.dump");\n\n if (segmentwise_normalization) {\n n_blocks = (n_batches + n_threads - 1) / n_threads;\n merge_framewise_sum<<<n_blocks, n_threads>>>(n_seg_frames, n_batches, d_index, d_norm_factors);\n HANDLE_LAST_ERROR();\n }\n\n //dump_to_file_2d(d_norm_factors, n_seg_frames, n_batches, "dump/norm_factors_2.dump");\n\n n_blocks = (n_tot_frames * n_seqs + n_threads - 1) / n_threads;\n compute_posterior_weights<<<n_blocks, n_threads>>>(n_tot_frames, n_seg_frames, n_seqs, n_batches, d_state_buffer,\n d_start_states, d_batch_idxs, d_index, d_norm_factors,\n d_posterior_weights);\n HANDLE_LAST_ERROR();\n\n n_fill_blocks = (n_batches * n_seg_frames * n_emissions + n_threads - 1u) / n_threads;\n fill_array<<<n_fill_blocks, n_threads>>>(\n d_out, std::numeric_limits<float>::infinity(), n_batches * n_seg_frames * n_emissions);\n HANDLE_LAST_ERROR();\n\n n_blocks = (n_tot_frames * n_seg_frames * n_edges + n_threads - 1) / n_threads;\n compute_targets<<<n_blocks, n_threads>>>(n_tot_frames, n_seg_frames, n_edges, n_batches, n_seqs, n_emissions,\n d_sequence_idxs, d_emission_idxs, d_batch_idxs, d_index, d_edge_buffer,\n d_norm_factors, d_out);\n HANDLE_LAST_ERROR();\n\n //dump_to_file_1d(d_weights, n_edges, "dump/edge_weights.dump");\n //dump_to_file_1d(d_sequence_idxs, n_edges, "dump/sequence_idxs.dump");\n //dump_to_file_2d(d_state_buffer, n_ringbuffer_frames, n_states, "dump/state_buffer.dump");\n //dump_to_file_2d(d_batch_idxs, n_tot_frames, n_seqs, "dump/batch_idxs.dump");\n 
//dump_to_file_2d(d_index, n_seg_frames, n_batches, "dump/index.dump");\n //dump_to_file_3d(d_edge_buffer, n_tot_frames, n_seg_frames, n_edges, "dump/edges.dump");\n //dump_to_file_3d(d_am_scores, n_seg_frames, n_batches, n_emissions, "dump/am_scores.dump");\n //dump_to_file_3d(d_out, n_seg_frames, n_batches, n_emissions, "dump/targets.dump");\n\n if (dump_targets and iter %% dump_targets_interval == 0) {\n std::stringstream ss;\n ss << "dump/targets_" << iter << ".dump";\n dump_to_file_3d(d_out, n_seg_frames, n_batches, n_emissions, ss.str());\n ss.str("");\n ss.clear();\n ss << "dump/norm_factors_" << iter << ".dump";\n dump_to_file_2d(d_norm_factors, n_seg_frames, n_batches, ss.str());\n ss.str("");\n ss.clear();\n ss << "dump/posterior_weights_" << iter << ".dump";\n dump_to_file_2d(d_posterior_weights, n_seg_frames, n_batches, ss.str());\n }\n\n iter += 1;\n\n device_free(d_state_buffer);\n device_free(d_edge_buffer);\n '
cpu_support = False  # GPU-only: the forward code consists of CUDA kernels.

def __init__(self, segmentwise_normalization=False, dump_targets_interval=None, new_batch_idxs_format=False):
    """
    Specializes the generated C++/CUDA code for this op instance by compiling
    the given options in as C++ constants / defines.

    :param bool segmentwise_normalization: compiled in as the C++ constant
      ``segmentwise_normalization``.
    :param int|None dump_targets_interval: if not None, enables target dumping
      (C++ ``dump_targets``) with this interval; None disables it
      (``dump_targets_interval`` is then compiled as 0 but unused).
    :param bool new_batch_idxs_format: if True, defines ``NEW_BATCH_IDX_FORMAT``
      and switches the ``batch_idxs`` input (input 1) to a 1D tensor
      (see c_fw_code: its length is then n_seqs + 1, and its last entry is n_tot_frames).
    """

    def _to_cpp_bool(v):
        """:param bool v: :return: C++ bool literal, "true" or "false" """
        return 'true' if v else 'false'

    extra_lines = [
        'const bool segmentwise_normalization = %s;' % _to_cpp_bool(segmentwise_normalization),
        'const bool dump_targets = %s;' % _to_cpp_bool(dump_targets_interval is not None),
        'const unsigned dump_targets_interval = %d;' % (0 if dump_targets_interval is None else dump_targets_interval),
    ]
    # Copy the class-level dict so that per-instance additions below
    # do not mutate the shared class attribute.
    self.c_extra_support_code = dict(self.c_extra_support_code)
    self.c_extra_support_code['000_batch_format'] = (
        '#define NEW_BATCH_IDX_FORMAT %s\n' % _to_cpp_bool(new_batch_idxs_format))
    if new_batch_idxs_format:
        # Replace the declaration of input 1 (batch_idxs) by the 1D variant.
        in_info = list(self.in_info)
        in_info[1] = {
            'name': 'batch_idxs', 'ndim': 1, 'shape': (None,),
            'need_contiguous': True, 'gradient': 'disconnected'}
        self.in_info = tuple(in_info)
    # Prepend the config constants to the existing forward code.
    self.c_fw_code = '\n'.join(extra_lines) + '\n' + self.c_fw_code
|
class FastViterbiOp(NativeOpGenBase):
    '\n  inputs:\n    :param am_scores: scores in +log space. 3d (time,batch,dim)\n    :param am_seq_len: (batch,)\n    :param edges: edges of the graph (from,to,emission_idx,sequence_idx), i.e. (4, n_edges)\n    :param weights: weights of the edges (n_edges,)\n    :param start_end_states: (2, batch)\n    :param n_states: scalar, int32\n  outputs:\n    :param output: Viterbi (hard) alignment, scores in +log space. 2d (time,batch)\n    :param scores: (batch,)\n  '
    # Input declarations; c_fw_code asserts exactly these 6 inputs in this order.
    # n_states is a host-memory scalar, read on the host side to size the state buffer.
    in_info = ({'name': 'am_scores', 'ndim': 3, 'shape': (None, None, None), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'am_seq_len', 'ndim': 1, 'shape': ((0, 0),), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'edges', 'ndim': 2, 'shape': (4, None), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'weights', 'ndim': 1, 'shape': ((3, 1),), 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'start_end_states', 'ndim': 2, 'shape': (2, (0, 0)), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'n_states', 'ndim': 0, 'shape': (), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected', 'host_memory': True})
    # Outputs: the int32 hard alignment (time,batch), and one float score per batch entry.
    out_info = ({'name': 'output', 'ndim': 2, 'shape': ((0, 0), (0, 1)), 'dtype': 'int32', 'need_contiguous': True}, {'name': 'scores', 'ndim': 1, 'shape': ((0, 1),), 'need_contiguous': True})
    # CUDA device code. select_max does an atomic max over packed (idx, val) pairs via a
    # double-wide (64-bit) compare-and-swap; next_frame relaxes all edges of one time frame,
    # and select_best_path backtracks using the stored best incoming edge index per state.
    c_extra_support_code = {'01_IdxAndVal': '\n    struct __attribute__((__packed__)) IdxAndVal {\n      int idx;\n      float val;\n    };\n  ', '04_select_max': '\n    DEV_FUNC\n    void select_max(IdxAndVal* a, IdxAndVal b) {\n      // fast path\n      if(b.val < a->val)\n        return;\n      // Maybe we could use double compare-and-swap (https://stackoverflow.com/questions/55941382/).\n      // But not sure how.\n      // So instead, we use double-wide compare-and-swap.\n      union U {\n        IdxAndVal s;\n        unsigned long long int v64;\n      };\n      while(true) {\n        U prev;\n        prev.s = *a;\n        if(b.val < prev.s.val)\n          return;\n        if(b.val == prev.s.val && b.idx >= prev.s.idx)\n          return;\n        U updated;\n        updated.s = b;\n\n        U old;\n        old.v64 = elem_atomic_cas((unsigned long long int*) a, prev.v64, updated.v64);\n        if(old.v64 == prev.v64)\n          return;\n        // Not the same, so repeat.\n      }\n    }\n  ', '05_init_buffer': '\n    DEF_KERNEL\n    void init_buffer\n    (\n      int n_time,\n      int n_states, // for the whole batch\n      IdxAndVal* buffer // (time+1,n_states), states for the whole batch\n    )\n    {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while(idx < (n_time + 1) * n_states) {\n        buffer[idx].val = -INF_F;\n        buffer[idx].idx = -1;\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n  ', '06_init_first_frame': '\n    DEF_KERNEL\n    void init_first_frame\n    (\n      int n_batch,\n      int n_states, // for the whole batch\n      IdxAndVal* frame, // (n_states,), states for the whole batch\n      const int32_t* d_start_states // (n_batch,)\n    )\n    {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while(idx < n_batch) {\n        int state_idx = d_start_states[idx];\n        frame[state_idx].val = 0;\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n  ', '08_next_frame': '\n    DEF_KERNEL\n    void next_frame\n    (\n      int n_time,\n      int n_states,\n      int n_edges,\n      int n_classes,\n      int t,\n      const float* d_am_scores,\n      const int32_t* d_am_seq_len,\n      const IdxAndVal* prev_frame,\n      IdxAndVal* frame,\n      const int32_t* d_edge_from,\n      const int32_t* d_edge_to,\n      const int32_t* d_edge_emission_idx,\n      const int32_t* d_edge_seq_idx,\n      const float* d_edge_weights,\n      const int32_t* d_end_states // (n_batch,)\n    )\n    {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while(idx < n_edges) {\n        int from_idx = d_edge_from[idx];\n        //assert_cmp(0, <=, from_idx); assert_cmp(from_idx, <, n_states);\n\n        int seq_idx = d_edge_seq_idx[idx];\n        if(t < d_am_seq_len[seq_idx]) {\n          float prev_val = prev_frame[from_idx].val;\n          int emission_idx = d_edge_emission_idx[idx];\n          //assert_cmp(0, <=, emission_idx); assert_cmp(emission_idx, <, n_classes);\n          int to_idx = d_edge_to[idx];\n          //assert_cmp(0, <=, to_idx); assert_cmp(to_idx, <, n_states);\n          IdxAndVal candidate;\n          candidate.val = prev_val + d_edge_weights[idx] + d_am_scores[seq_idx * n_classes + emission_idx];\n          candidate.idx = idx;\n          select_max(&frame[to_idx], candidate);\n        }\n\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n  ', '11_select_scores': '\n    DEF_KERNEL\n    void select_scores\n    (\n      int n_batch,\n      int n_states,\n      int buffer_stride,\n      const IdxAndVal* buffer,\n      const int32_t* d_am_seq_len, // (n_batch,)\n      const int32_t* d_end_states, // (n_batch,)\n      float* d_score // (n_batch,)\n    )\n    {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while(idx < n_batch) {\n        const IdxAndVal* last_frame = buffer + d_am_seq_len[idx] * buffer_stride;\n        int end_state_idx = d_end_states[idx];\n        d_score[idx] = last_frame[end_state_idx].val;\n\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n  ', '13_select_best_path': '\n    DEF_KERNEL\n    void select_best_path\n    (\n      int n_batch,\n      int n_states,\n      int n_edges,\n      int t,\n      int32* cur_state, // (n_batch,)\n      const IdxAndVal* frame,\n      const int32_t* d_am_seq_len,\n      const int32_t* d_edge_from,\n      const int32_t* d_edge_to,\n      const int32_t* d_edge_emission_idx,\n      int32_t* output\n    )\n    {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while(idx < n_batch) {\n        if(t < d_am_seq_len[idx]) {\n          int state_idx = cur_state[idx];\n          //assert_cmp(0, <=, state_idx); assert_cmp(state_idx, <, n_states);\n          int edge_idx = frame[state_idx].idx;\n          if(edge_idx >= 0) {\n            //assert_cmp(0, <=, edge_idx); assert_cmp(edge_idx, <, n_edges);\n            //assert_cmp(state_idx, ==, d_edge_to[edge_idx]);\n            cur_state[idx] = d_edge_from[edge_idx];\n            output[idx] = d_edge_emission_idx[edge_idx];\n          }\n          else // no path found\n            output[idx] = 0;\n        }\n        else {\n          output[idx] = 0;\n        }\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n  '}
    # Host-side forward code: validates all shapes, allocates the (time+1, n_states) DP buffer,
    # runs the forward Viterbi DP frame by frame, reads the end-state scores, then backtracks
    # the best path from the end states (t iterating backwards) into the output alignment.
    c_fw_code = '\n    using namespace std;\n    // am_scores, am_seq_len, edges, weights, start_end_states, n_states = input_names\n    // output, scores = output_names\n    assert(n_inputs == 6);\n    assert(n_outputs == 2);\n    Ndarray* am_scores = inputs[0];\n    Ndarray* am_seq_len = inputs[1];\n    Ndarray* edges = inputs[2];\n    Ndarray* weights = inputs[3];\n    Ndarray* start_end_states = inputs[4];\n    Ndarray* n_states_ref = inputs[5];\n    Ndarray* output = *outputs[0];\n    Ndarray* score = *outputs[1];\n\n    assert_cmp(Ndarray_NDIM(am_scores), ==, 3);\n    assert_cmp(Ndarray_NDIM(am_seq_len), ==, 1);\n    assert_cmp(Ndarray_NDIM(edges), ==, 2);\n    assert_cmp(Ndarray_NDIM(weights), ==, 1);\n    assert_cmp(Ndarray_NDIM(start_end_states), ==, 2);\n    assert_cmp(Ndarray_NDIM(n_states_ref), ==, 0);\n    assert_cmp(Ndarray_NDIM(output), ==, 2);\n    assert_cmp(Ndarray_NDIM(score), ==, 1);\n    int n_time = Ndarray_DIMS(am_scores)[0];\n    int n_batch = Ndarray_DIMS(am_scores)[1];\n    int n_classes = Ndarray_DIMS(am_scores)[2];\n    assert_cmp(Ndarray_DIMS(am_scores)[0], ==, n_time);\n    assert_cmp(Ndarray_DIMS(am_scores)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(am_scores)[2], ==, n_classes);\n    assert_cmp(Ndarray_DIMS(am_seq_len)[0], ==, n_batch);\n    int n_edges = Ndarray_DIMS(edges)[1];\n    assert_cmp(Ndarray_DIMS(edges)[0], ==, 4);\n    assert_cmp(Ndarray_DIMS(edges)[1], ==, n_edges);\n    assert_cmp(Ndarray_DIMS(weights)[0], ==, n_edges);\n    assert_cmp(Ndarray_DIMS(start_end_states)[0], ==, 2);\n    assert_cmp(Ndarray_DIMS(start_end_states)[1], ==, n_batch);\n    int n_states = Ndarray_DEV_DATA_int32_scalar(n_states_ref);\n    assert_cmp(Ndarray_DIMS(output)[0], ==, n_time);\n    assert_cmp(Ndarray_DIMS(output)[1], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(score)[0], ==, n_batch);\n\n    int32_t* d_edge_from = Ndarray_DEV_DATA_int32(edges) + 0 * Ndarray_STRIDE(edges, 0);\n    int32_t* d_edge_to = Ndarray_DEV_DATA_int32(edges) + 1 * Ndarray_STRIDE(edges, 0);\n    int32_t* d_edge_emission_idx = Ndarray_DEV_DATA_int32(edges) + 2 * Ndarray_STRIDE(edges, 0);\n    int32_t* d_edge_seq_idx = Ndarray_DEV_DATA_int32(edges) + 3 * Ndarray_STRIDE(edges, 0);\n    float* d_edge_weights = Ndarray_DEV_DATA(weights);\n    float* d_am_scores = Ndarray_DEV_DATA(am_scores);\n    int am_scores_stride = Ndarray_STRIDE(am_scores, 0);\n    int32_t* d_am_seq_len = Ndarray_DEV_DATA_int32(am_seq_len);\n    int32_t* d_start_states = Ndarray_DEV_DATA_int32(start_end_states) + 0 * Ndarray_STRIDE(start_end_states, 0);\n    int32_t* d_end_states = Ndarray_DEV_DATA_int32(start_end_states) + 1 * Ndarray_STRIDE(start_end_states, 0);\n    int32_t* d_output = Ndarray_DEV_DATA_int32(output);\n    int output_stride = Ndarray_STRIDE(output, 0);\n    float* d_score = Ndarray_DEV_DATA(score);\n\n    IdxAndVal* d_buffer = (IdxAndVal*) device_malloc((n_time + 1) * n_states * sizeof(IdxAndVal));\n    int buffer_stride = n_states;\n    start_dev_kernel(init_buffer, (n_time, n_states, d_buffer));\n    start_dev_kernel(init_first_frame, (n_batch, n_states, d_buffer, d_start_states));\n    HANDLE_LAST_ERROR();\n\n    for(int t = 0; t < n_time; ++t) {\n      start_dev_kernel(next_frame, (\n        n_time,\n        n_states,\n        n_edges,\n        n_classes,\n        t,\n        d_am_scores + t * am_scores_stride,\n        d_am_seq_len,\n        d_buffer + t * buffer_stride,\n        d_buffer + (t + 1) * buffer_stride,\n        d_edge_from,\n        d_edge_to,\n        d_edge_emission_idx,\n        d_edge_seq_idx,\n        d_edge_weights,\n        d_end_states\n      ));\n    }\n    HANDLE_LAST_ERROR();\n\n    start_dev_kernel(select_scores, (\n      n_batch,\n      n_states,\n      buffer_stride,\n      d_buffer,\n      d_am_seq_len,\n      d_end_states,\n      d_score // out\n    ));\n\n    int32_t* d_cur_state = (int32_t*) device_malloc(n_batch * sizeof(int32_t));\n    Ndarray_memcpy(d_cur_state, d_end_states, n_batch * sizeof(int32_t));\n\n    for(int t = n_time - 1; t >= 0; --t) {\n      start_dev_kernel(select_best_path, (\n        n_batch,\n        n_states,\n        n_edges,\n        t,\n        d_cur_state,\n        d_buffer + (t + 1) * buffer_stride,\n        d_am_seq_len,\n        d_edge_from,\n        d_edge_to,\n        d_edge_emission_idx,\n        d_output + t * output_stride // out\n      ));\n    }\n    HANDLE_LAST_ERROR();\n\n    device_free(d_cur_state);\n    device_free(d_buffer);\n  '
    # No backward code: all inputs are declared with gradient 'disconnected'.
    c_bw_code = None
|
class GetCtcFsaFastBwOp(NativeOpGenBase):
    '\n  This implements :func:`Fsa.get_ctc_fsa_fast_bw` as a native op.\n  This is for constructing a FSA with a CTC topology.\n  The output format is compatible to the FastBaumWelch native op.\n\n  inputs:\n    :param targets: shape (batch,time), int32\n    :param seq_lens: shape (batch), int32\n    :param blank_idx: scalar, int32\n    :param weights: shape (num_edges,), float32 (not used, except for target shape)\n    :param label_loop: scalar, int32 (casted from bool). True -> normal CTC; False -> RNA-like\n  outputs:\n    :param edges: (4,num_edges), int32, edges of the graph (from,to,emission_idx,sequence_idx)\n    :param start_end_states: (2,batch), int32, (start,end) state idx in FSA\n\n  To construct `weights` (for FastBaumWelch), `weights` should be just `tf.zeros((num_edges,))`.\n  `num_edges` should be `n_batch * (5 * (n_time - 1) + 10)`\n  (see construction in kernel why that number).\n  '
    # 5 inputs, asserted in c_fw_code. blank_idx and label_loop are host-memory scalars,
    # read on the host to parametrize the kernel; weights is only used for its length (num_edges).
    in_info = ({'name': 'targets', 'ndim': 2, 'shape': (None, None), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'seq_lens', 'ndim': 1, 'shape': (None,), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'blank_idx', 'ndim': 0, 'shape': (), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected', 'host_memory': True}, {'name': 'weights', 'ndim': 1, 'shape': (None,), 'dtype': 'float32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'label_loop', 'ndim': 0, 'shape': (), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected', 'host_memory': True})
    # edges is (4, num_edges) with rows (from, to, emission_idx, sequence_idx);
    # start_end_states is (2, n_batch); c_fw_code asserts these against weights/seq_lens.
    out_info = ({'name': 'edges', 'ndim': 2, 'shape': (4, (3, 0)), 'dtype': 'int32', 'need_contiguous': True}, {'name': 'start_end_states', 'ndim': 2, 'shape': (2, (1, 0)), 'dtype': 'int32', 'need_contiguous': True})
    # Single templated CUDA kernel (template parameter = label_loop) that writes all FSA edges
    # in parallel; each thread derives (batch_idx, rel_edge_idx) from its flat edge index and
    # emits one edge. Per sequence: 5 edges per (blank,label) pair up to the prev-last label,
    # then the special-cased final blank/label edges; unused slots go to a dummy state.
    c_extra_support_code = {'01_kernel': '\n    template<bool label_loop>\n    DEF_KERNEL\n    void construct_kernel\n      (\n      int n_batch, int n_time, int n_edges,\n      const int32_t* targets, const int32_t* seq_lens,\n      int32_t blank_idx,\n      int32_t* edges, int32_t* start_end_states\n      )\n    {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      // n_edges should be n_batch * (5 * (n_time - 1) + 10).\n      assert(n_edges % n_batch == 0);\n      while(idx < n_edges) {\n        int batch_idx = idx / (n_edges / n_batch);\n        int rel_edge_idx = idx % (n_edges / n_batch);\n        int32_t seq_len = seq_lens[batch_idx];\n        // state_idx: 0 b, 1 l, 2 b, 3 l, ..., (T-1)*2 b, T*2-1 l, T*2 b, T*2+1 dummy, T*2+2 end\n        // i.e. T*2+3 states per seq.\n        int state_idx_offset = (n_time * 2 + 3) * batch_idx;\n        int t = -1; // pos in targets\n        int srel_edge_idx = -1; // state relative edge\n        // (seq_len * 2) - 1 is last label state idx. seq_len * 2 is last blank state idx.\n        int32_t dummy_state_idx = seq_len * 2 + 1;\n        int32_t end_state_idx = seq_len * 2 + 2;\n        int32_t state_idx = dummy_state_idx;\n        int32_t to_state_idx = dummy_state_idx;\n        if(rel_edge_idx == 0) {\n          start_end_states[0 * n_batch + batch_idx] = state_idx_offset; // start\n          start_end_states[1 * n_batch + batch_idx] = state_idx_offset + end_state_idx; // end\n        }\n        int32_t emission_idx = blank_idx;\n        int32_t label_idx = -1, next_label_idx = -1;\n        if(seq_len == 0) {\n          t = -1;\n          emission_idx = blank_idx;\n          // 1 single blank loop\n          if(rel_edge_idx == 0) {\n            state_idx = 0;\n            to_state_idx = 0;\n            srel_edge_idx = 0;\n          }\n          else if(rel_edge_idx == 1) {\n            state_idx = 0;\n            to_state_idx = end_state_idx;\n            srel_edge_idx = 1;\n          }\n          else {\n            state_idx = dummy_state_idx;\n            srel_edge_idx = -1;\n          }\n        }\n        else if(seq_len == 1) {\n          label_idx = targets[batch_idx * n_time + 0];\n          // 3 edges for first / prev last blank\n          if(rel_edge_idx < 3) {\n            t = 0;\n            state_idx = 0;\n            srel_edge_idx = rel_edge_idx;\n            if(srel_edge_idx == 0) {\n              to_state_idx = state_idx;\n              emission_idx = blank_idx;\n            }\n            else if(srel_edge_idx == 1) {\n              to_state_idx = state_idx + 1;\n              emission_idx = label_idx;\n            }\n            else if(srel_edge_idx == 2) {\n              to_state_idx = end_state_idx;\n              emission_idx = label_idx;\n            }\n          }\n          // 4 edges for first / last label\n          else if(rel_edge_idx < 7) {\n            t = 0;\n            state_idx = 1;\n            srel_edge_idx = rel_edge_idx - 3;\n            if(srel_edge_idx == 0) {\n              to_state_idx = label_loop ? state_idx : dummy_state_idx;\n              emission_idx = label_idx;\n            }\n            else if(srel_edge_idx == 1) {\n              to_state_idx = state_idx + 1;\n              emission_idx = blank_idx;\n            }\n            else if(srel_edge_idx == 2) {\n              to_state_idx = label_loop ? end_state_idx : dummy_state_idx;\n              emission_idx = label_idx;\n            }\n            else if(srel_edge_idx == 3) {\n              to_state_idx = end_state_idx;\n              emission_idx = blank_idx;\n            }\n          }\n          // 2 edges for last blank\n          else if(rel_edge_idx < 9) {\n            t = -1;\n            emission_idx = blank_idx;\n            state_idx = 2;\n            srel_edge_idx = rel_edge_idx - 7;\n            if(srel_edge_idx == 0)\n              to_state_idx = state_idx;\n            else\n              to_state_idx = end_state_idx;\n          }\n          else {\n            t = -1;\n            state_idx = dummy_state_idx;\n            srel_edge_idx = -1;\n          }\n        }\n        else { // seq_len >= 2\n          // 2 edges for each blank, 3 for each label. up to prev last.\n          if(rel_edge_idx < 5 * (seq_len - 1)) {\n            t = rel_edge_idx / 5;\n            label_idx = targets[batch_idx * n_time + t];\n            next_label_idx = targets[batch_idx * n_time + t + 1];\n            state_idx = 2 * (rel_edge_idx / 5);\n            srel_edge_idx = rel_edge_idx % 5;\n            if(srel_edge_idx >= 2) {\n              srel_edge_idx -= 2;\n              state_idx += 1;\n            }\n            if(state_idx % 2 == 0) { // blank loop state\n              if(srel_edge_idx == 0) {\n                to_state_idx = state_idx;\n                emission_idx = blank_idx;\n              }\n              else if(srel_edge_idx == 1) {\n                to_state_idx = state_idx + 1;\n                emission_idx = label_idx;\n              }\n            }\n            else { // label loop state\n              if(srel_edge_idx == 0) {\n                to_state_idx = label_loop ? state_idx : dummy_state_idx;\n                emission_idx = label_idx;\n              }\n              else if(srel_edge_idx == 1) {\n                to_state_idx = state_idx + 1;\n                emission_idx = blank_idx;\n              }\n              else if(srel_edge_idx == 2) {\n                // skip over blank to next label (if allowed <=> next label is different)\n                if(label_idx != next_label_idx || !label_loop) {\n                  to_state_idx = state_idx + 2;\n                  emission_idx = next_label_idx;\n                }\n              }\n            }\n          }\n          // 1 more edge for prev last label\n          else if(rel_edge_idx == 5 * (seq_len - 1)) {\n            t = seq_len - 2;\n            label_idx = targets[batch_idx * n_time + t];\n            next_label_idx = targets[batch_idx * n_time + t + 1];\n            state_idx = (seq_len - 2) * 2 + 1;\n            srel_edge_idx = 3;\n            // skip over blank to next label / end state (if allowed <=> next label is different)\n            if(label_idx != next_label_idx || !label_loop) {\n              to_state_idx = end_state_idx;\n              emission_idx = next_label_idx;\n            }\n          }\n          // 3 edges for prev last blank\n          else if(rel_edge_idx <= 5 * (seq_len - 1) + 3) {\n            t = seq_len - 1;\n            label_idx = targets[batch_idx * n_time + t];\n            state_idx = (seq_len - 1) * 2;\n            srel_edge_idx = rel_edge_idx - (5 * (seq_len - 1) + 1);\n            if(srel_edge_idx == 0) {\n              to_state_idx = state_idx;\n              emission_idx = blank_idx;\n            }\n            else if(srel_edge_idx == 1) {\n              to_state_idx = state_idx + 1;\n              emission_idx = label_idx;\n            }\n            else if(srel_edge_idx == 2) {\n              to_state_idx = end_state_idx;\n              emission_idx = label_idx;\n            }\n          }\n          // 4 edges for last label\n          else if(rel_edge_idx <= 5 * (seq_len - 1) + 7) {\n            t = seq_len - 1;\n            label_idx = targets[batch_idx * n_time + t];\n            state_idx = (seq_len - 1) * 2 + 1;\n            srel_edge_idx = rel_edge_idx - (5 * (seq_len - 1) + 4);\n            if(srel_edge_idx == 0) {\n              to_state_idx = label_loop ? state_idx : dummy_state_idx;\n              emission_idx = label_idx;\n            }\n            else if(srel_edge_idx == 1) {\n              to_state_idx = state_idx + 1;\n              emission_idx = blank_idx;\n            }\n            else if(srel_edge_idx == 2) {\n              to_state_idx = label_loop ? end_state_idx : dummy_state_idx;\n              emission_idx = label_idx;\n            }\n            else if(srel_edge_idx == 3) {\n              to_state_idx = end_state_idx;\n              emission_idx = blank_idx;\n            }\n          }\n          // 2 edges for last blank\n          else if(rel_edge_idx <= 5 * (seq_len - 1) + 9) {\n            t = -1;\n            emission_idx = blank_idx;\n            state_idx = (seq_len - 1) * 2 + 2;\n            srel_edge_idx = rel_edge_idx - (5 * (seq_len - 1) + 8);\n            if(srel_edge_idx == 0)\n              to_state_idx = state_idx;\n            else\n              to_state_idx = end_state_idx;\n          }\n          else {\n            t = -1;\n            state_idx = dummy_state_idx;\n            srel_edge_idx = -1;\n          }\n        }\n\n        edges[0 * n_edges + idx] = state_idx_offset + state_idx; // from\n        edges[1 * n_edges + idx] = state_idx_offset + to_state_idx; // to\n        edges[2 * n_edges + idx] = emission_idx; // emission\n        edges[3 * n_edges + idx] = batch_idx; // batch\n\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n  '}
    # Host code: validates shapes, requires n_edges == n_batch * (5 * (n_time - 1) + 10),
    # memsets both outputs to 0xFF bytes (i.e. -1 per int32), then dispatches the kernel
    # instantiated for label_loop true/false.
    c_fw_code = '\n    assert(n_inputs == 5);\n    assert(n_outputs == 2);\n    Ndarray* targets = inputs[0];\n    Ndarray* seq_lens = inputs[1];\n    Ndarray* blank_idx_ref = inputs[2];\n    Ndarray* weights = inputs[3];\n    bool label_loop = (bool) Ndarray_DEV_DATA_int32_scalar(inputs[4]);\n    Ndarray* edges = *outputs[0];\n    Ndarray* start_end_states = *outputs[1];\n    assert_cmp(Ndarray_NDIM(targets), ==, 2);\n    assert_cmp(Ndarray_NDIM(seq_lens), ==, 1);\n    assert_cmp(Ndarray_NDIM(blank_idx_ref), ==, 0);\n    assert_cmp(Ndarray_NDIM(weights), ==, 1);\n    assert_cmp(Ndarray_NDIM(edges), ==, 2);\n    assert_cmp(Ndarray_NDIM(start_end_states), ==, 2);\n    int n_batch = Ndarray_DIMS(seq_lens)[0];\n    assert_cmp(Ndarray_DIMS(targets)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(seq_lens)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(start_end_states)[1], ==, n_batch);\n    int n_time = Ndarray_DIMS(targets)[1];\n    int n_edges = Ndarray_DIMS(weights)[0];\n    assert_cmp(Ndarray_DIMS(start_end_states)[0], ==, 2);\n    assert_cmp(Ndarray_DIMS(edges)[0], ==, 4);\n    assert_cmp(Ndarray_DIMS(edges)[1], ==, n_edges);\n\n    assert_cmp(n_edges, ==, n_batch * (5 * (n_time - 1) + 10));\n\n    Ndarray_memset(Ndarray_DEV_DATA_int32(edges), 255, 4 * n_edges * sizeof(int32_t));\n    Ndarray_memset(Ndarray_DEV_DATA_int32(start_end_states), 255, 2 * n_batch * sizeof(int32_t));\n    int32_t blank_idx = Ndarray_DEV_DATA_int32_scalar(blank_idx_ref);\n\n    if(label_loop) {\n      start_dev_kernel(construct_kernel<true>, (\n        n_batch, n_time, n_edges,\n        Ndarray_DEV_DATA_int32(targets), Ndarray_DEV_DATA_int32(seq_lens),\n        blank_idx,\n        Ndarray_DEV_DATA_int32(edges), Ndarray_DEV_DATA_int32(start_end_states)\n      ));\n    } else {\n      start_dev_kernel(construct_kernel<false>, (\n        n_batch, n_time, n_edges,\n        Ndarray_DEV_DATA_int32(targets), Ndarray_DEV_DATA_int32(seq_lens),\n        blank_idx,\n        Ndarray_DEV_DATA_int32(edges), Ndarray_DEV_DATA_int32(start_end_states)\n      ));\n    }\n    HANDLE_LAST_ERROR();\n  '
|
class EditDistanceOp(NativeOpGenBase):
    '\n  Similar to :func:`tf.edit_distance`.\n  Calculates the `edit distance / Levenshtein distance <https://en.wikipedia.org/wiki/Levenshtein_distance>`__.\n\n  The naive implementation either goes over ``a`` and then ``b``, thus results in O(|a|*|b|) time complexity.\n  To calculate a new entry in the table (over then length of ``a`` and ``b``),\n  it depends on the prev symbol in ``a`` (left) (deletion error),\n  the prev symbol in ``b`` (up) (insertion error),\n  and the left-up diagonal (substitution error, or no error).\n\n  To take advantage of the parallelism of the GPU, we follow a diagonal iteration scheme, such that\n  in every iteration, all entries on the diagonal can be computed in parallel, as they do not depend on each other.\n  After implementing this, we found that this algorithm is described here::\n\n    Using GPUs to Speed-Up Levenshtein Edit Distance Computation, Balhaf et al, 2016,\n    https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7476090&tag=1\n\n  inputs:\n    :param a: symbols. 2d (batch,time), int32\n    :param a_len: 1d (batch,), int32\n    :param b: symbols. 2d (batch,time), int32\n    :param b_len: 1d (batch,), int32\n  outputs:\n    :param output: 1d (batch,), int32, unnormalized edit distance\n  '
    # 4 inputs (a, a_len, b, b_len), all int32 and contiguous, gradients disconnected.
    in_info = ({'name': 'a', 'ndim': 2, 'shape': (None, None), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'a_len', 'ndim': 1, 'shape': (None,), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'b', 'ndim': 2, 'shape': (None, None), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'b_len', 'ndim': 1, 'shape': (None,), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'})
    # One int32 distance per batch entry.
    out_info = ({'name': 'output', 'ndim': 1, 'shape': ((0, 0),), 'dtype': 'int32', 'need_contiguous': True},)
    # Kernel computing one anti-diagonal of the DP table per launch. Only the last two
    # diagonals are kept: last1 (diag - 2) for the substitution cost, last2 (diag - 1) for
    # the insertion/deletion costs. The per-batch result is written as soon as the cell
    # (t_a, t_b) == (a_len, b_len) is reached.
    c_extra_support_code = {'001_next_step': '\n    DEF_KERNEL\n    void next_step_kernel(\n          int n_batch, int n_a_max_len, int n_b_max_len,\n          int diag_idx,\n          const int32_t* a, const int32_t* b,\n          const int32_t* a_len, const int32_t* b_len,\n          const int32_t* last1_dist, const int32_t* last2_dist, int32_t* cur_dist,\n          int32_t* result) {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      // We are going diagonal!\n      int num_entries;\n      if(diag_idx <= n_a_max_len) {\n        num_entries = diag_idx + 1;\n        if(num_entries > n_b_max_len + 1)\n          num_entries = n_b_max_len + 1;\n      } else {\n        num_entries = n_b_max_len + 1 - (diag_idx - n_a_max_len);\n        if(num_entries > n_a_max_len + 1)\n          num_entries = n_a_max_len + 1;\n      }\n      int max_num_entries = n_a_max_len + 1;\n      if(max_num_entries > n_b_max_len + 1)\n        max_num_entries = n_b_max_len + 1;\n      while(idx < n_batch * num_entries) {\n        int batch_idx = idx / num_entries;\n        int entry_idx = idx % num_entries;\n        int dist_idx = batch_idx * max_num_entries + entry_idx;\n\n        int t_a, t_b;\n        if(diag_idx <= n_a_max_len) {\n          t_a = diag_idx - entry_idx;\n          t_b = entry_idx;\n        } else {\n          t_a = n_a_max_len - entry_idx;\n          t_b = diag_idx - n_a_max_len + entry_idx;\n        }\n\n        if(t_a == 0)\n          cur_dist[dist_idx] = t_b; // distance == how much to delete from b\n        else if(t_b == 0)\n          cur_dist[dist_idx] = t_a; // distance == how much to delete from a\n        else {\n          // last1 is with diag_idx - 2. Needed for substitution cost.\n          // last2 is with diag_idx - 1. Needed for insertion or deletion cost.\n          // last2 refers to the first, for deletion. last2_idx + 1 is for insertion.\n          int last1_idx, last2_idx;\n          if(diag_idx - 1 < n_a_max_len)\n            last1_idx = dist_idx - 1;\n          else if(diag_idx - 1 == n_a_max_len)\n            last1_idx = dist_idx;\n          else\n            last1_idx = dist_idx + 1;\n          if(diag_idx <= n_a_max_len)\n            last2_idx = dist_idx - 1;\n          else\n            last2_idx = dist_idx;\n\n          int del_cost, ins_cost, sub_cost;\n          del_cost = last2_dist[last2_idx] + 1;\n          ins_cost = last2_dist[last2_idx + 1] + 1;\n          sub_cost = last1_dist[last1_idx];\n          if(a[batch_idx * n_a_max_len + t_a - 1] != b[batch_idx * n_b_max_len + t_b - 1])\n            ++sub_cost;\n          //printf("t_a %i, t_b %i, del %i, ins %i, sub %i\\n", t_a, t_b, del_cost, ins_cost, sub_cost);\n          int min_cost = del_cost;\n          if(min_cost > ins_cost) min_cost = ins_cost;\n          if(min_cost > sub_cost) min_cost = sub_cost;\n          cur_dist[dist_idx] = min_cost;\n        }\n        //printf("t_a %i, t_b %i, dist %i\\n", t_a, t_b, cur_dist[dist_idx]);\n\n        if(t_a == a_len[batch_idx] && t_b == b_len[batch_idx])\n          result[batch_idx] = cur_dist[dist_idx];\n\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n  '}
    # Host code: allocates 3 rotating diagonal buffers (last1, last2, cur) in one device
    # allocation, launches next_step_kernel once per anti-diagonal, and rotates the buffers
    # after every launch. The output is pre-filled with 0xFF bytes (-1) before the loop.
    c_fw_code = '\n    assert(n_inputs == 4);\n    assert(n_outputs == 1);\n    Ndarray* a = inputs[0];\n    Ndarray* a_len = inputs[1];\n    Ndarray* b = inputs[2];\n    Ndarray* b_len = inputs[3];\n    Ndarray* out = *outputs[0];\n    assert_cmp(Ndarray_NDIM(a), ==, 2);\n    assert_cmp(Ndarray_NDIM(a_len), ==, 1);\n    assert_cmp(Ndarray_NDIM(b), ==, 2);\n    assert_cmp(Ndarray_NDIM(b_len), ==, 1);\n    assert_cmp(Ndarray_NDIM(out), ==, 1);\n    int n_batch = Ndarray_DIMS(out)[0];\n    assert_cmp(Ndarray_DIMS(a)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(a_len)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(b)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(b_len)[0], ==, n_batch);\n    int n_a_max_len = Ndarray_DIMS(a)[1];\n    int n_b_max_len = Ndarray_DIMS(b)[1];\n    Ndarray_memset(Ndarray_DEV_DATA_int32(out), 255, n_batch * sizeof(int32_t));\n\n    // Working buffer.\n    int max_num_entries = std::min(n_a_max_len + 1, n_b_max_len + 1);\n    int32_t* buffer = (int32_t*) device_malloc(3 * n_batch * max_num_entries * sizeof(int32_t));\n    int32_t* last1_dist = buffer;\n    int32_t* last2_dist = buffer + n_batch * max_num_entries;\n    int32_t* cur_dist = buffer + 2 * n_batch * max_num_entries;\n\n    int num_diag = n_a_max_len + n_b_max_len + 1;\n    for(int diag_idx = 0; diag_idx < num_diag; ++diag_idx) {\n      start_dev_kernel(next_step_kernel, (\n        n_batch, n_a_max_len, n_b_max_len,\n        diag_idx,\n        Ndarray_DEV_DATA_int32(a), Ndarray_DEV_DATA_int32(b),\n        Ndarray_DEV_DATA_int32(a_len), Ndarray_DEV_DATA_int32(b_len),\n        last1_dist, last2_dist, cur_dist,\n        Ndarray_DEV_DATA_int32(out)));\n      // Rotate. last1_dist not needed anymore.\n      int32_t* tmp = last1_dist;\n      last1_dist = last2_dist;\n      last2_dist = cur_dist;\n      cur_dist = tmp;\n    }\n    HANDLE_LAST_ERROR();\n\n    device_free(buffer);\n  '
    # No backward code (all inputs are int32 with gradient 'disconnected').
    c_bw_code = None
|
class OptimalCompletionEditDistanceOp(NativeOpGenBase):
    '\n  Given some prefix ``a``, what is the minimum possible edit distance to ``b`` with any possible suffix on ``a`` ?\n  This is described in `Optimal Completion Distillation (OCD) <https://arxiv.org/abs/1810.01398>`__.\n  The implementation is derived from :class:`EditDistanceOp`.\n\n  inputs:\n    :param a: symbols. 2d (batch,time), int32. prefix.\n    :param a_len: 1d (batch,), int32\n    :param b: symbols. 2d (batch,time), int32\n    :param b_len: 1d (batch,), int32\n  outputs:\n    :param output: 1d (batch,), int32, unnormalized edit distance\n  '
    # Inputs a/a_len/b/b_len: all int32, contiguous, and gradient-disconnected
    # (integer symbol sequences carry no gradient).
    in_info = ({'name': 'a', 'ndim': 2, 'shape': (None, None), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'a_len', 'ndim': 1, 'shape': (None,), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'b', 'ndim': 2, 'shape': (None, None), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'b_len', 'ndim': 1, 'shape': (None,), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'})
    # Output shape ((0, 0),): presumably "dim 0 of input 0", i.e. (batch,) -- confirm
    # against the NativeOpGenBase shape convention.
    out_info = ({'name': 'output', 'ndim': 1, 'shape': ((0, 0),), 'dtype': 'int32', 'need_contiguous': True},)
    # Kernels (C source strings):
    #  - init_result_kernel: fills the result with 2147483647 (INT32_MAX) so it can
    #    later be min-reduced.
    #  - next_step_kernel: one anti-diagonal wavefront step of the Levenshtein DP over
    #    (t_a, t_b); whenever t_a == a_len and t_b <= b_len, it folds cur_dist into the
    #    result via elem_atomic_min (the "minimum over any suffix" reduction).
    c_extra_support_code = {'001_init_result': '\n    DEF_KERNEL\n    void init_result_kernel(int n_batch, int32_t* result) {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while(idx < n_batch) {\n        result[idx] = 2147483647; // biggest int32\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n    ', '002_next_step': '\n    DEF_KERNEL\n    void next_step_kernel(\n        int n_batch, int n_a_max_len, int n_b_max_len,\n        int diag_idx,\n        const int32_t* a, const int32_t* b,\n        const int32_t* a_len, const int32_t* b_len,\n        const int32_t* last1_dist, const int32_t* last2_dist, int32_t* cur_dist,\n        int32_t* result) {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      // We are going diagonal!\n      int num_entries;\n      if(diag_idx <= n_a_max_len) {\n        num_entries = diag_idx + 1;\n        if(num_entries > n_b_max_len + 1)\n          num_entries = n_b_max_len + 1;\n      } else {\n        num_entries = n_b_max_len + 1 - (diag_idx - n_a_max_len);\n        if(num_entries > n_a_max_len + 1)\n          num_entries = n_a_max_len + 1;\n      }\n      int max_num_entries = n_a_max_len + 1;\n      if(max_num_entries > n_b_max_len + 1)\n        max_num_entries = n_b_max_len + 1;\n      while(idx < n_batch * num_entries) {\n        int batch_idx = idx / num_entries;\n        int entry_idx = idx % num_entries;\n        int dist_idx = batch_idx * max_num_entries + entry_idx;\n\n        int t_a, t_b;\n        if(diag_idx <= n_a_max_len) {\n          t_a = diag_idx - entry_idx;\n          t_b = entry_idx;\n        } else {\n          t_a = n_a_max_len - entry_idx;\n          t_b = diag_idx - n_a_max_len + entry_idx;\n        }\n\n        if(t_a == 0)\n          cur_dist[dist_idx] = t_b; // distance == how much to delete from b\n        else if(t_b == 0)\n          cur_dist[dist_idx] = t_a; // distance == how much to delete from a\n        else {\n          // last1 is with diag_idx - 2. Needed for substitution cost.\n          // last2 is with diag_idx - 1. Needed for insertion or deletion cost.\n          // last2 refers to the first, for deletion. 
last2_idx + 1 is for insertion.\n          int last1_idx, last2_idx;\n          if(diag_idx - 1 < n_a_max_len)\n            last1_idx = dist_idx - 1;\n          else if(diag_idx - 1 == n_a_max_len)\n            last1_idx = dist_idx;\n          else\n            last1_idx = dist_idx + 1;\n          if(diag_idx <= n_a_max_len)\n            last2_idx = dist_idx - 1;\n          else\n            last2_idx = dist_idx;\n\n          int del_cost, ins_cost, sub_cost;\n          del_cost = last2_dist[last2_idx] + 1;\n          ins_cost = last2_dist[last2_idx + 1] + 1;\n          sub_cost = last1_dist[last1_idx];\n          if(a[batch_idx * n_a_max_len + t_a - 1] != b[batch_idx * n_b_max_len + t_b - 1])\n            ++sub_cost;\n          int min_cost = del_cost;\n          if(min_cost > ins_cost) min_cost = ins_cost;\n          if(min_cost > sub_cost) min_cost = sub_cost;\n          cur_dist[dist_idx] = min_cost;\n        }\n\n        if(t_a == a_len[batch_idx] && t_b <= b_len[batch_idx])\n          elem_atomic_min(&result[batch_idx], cur_dist[dist_idx]);\n\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n    '}
    # Forward: validates all shapes against (batch,), initializes the result via
    # init_result_kernel, allocates three rotating diagonal buffers, and launches
    # one next_step_kernel per anti-diagonal (n_a_max_len + n_b_max_len + 1 total),
    # rotating the buffers between launches.
    c_fw_code = '\n    assert(n_inputs == 4);\n    assert(n_outputs == 1);\n    Ndarray* a = inputs[0];\n    Ndarray* a_len = inputs[1];\n    Ndarray* b = inputs[2];\n    Ndarray* b_len = inputs[3];\n    Ndarray* out = *outputs[0];\n    assert_cmp(Ndarray_NDIM(a), ==, 2);\n    assert_cmp(Ndarray_NDIM(a_len), ==, 1);\n    assert_cmp(Ndarray_NDIM(b), ==, 2);\n    assert_cmp(Ndarray_NDIM(b_len), ==, 1);\n    assert_cmp(Ndarray_NDIM(out), ==, 1);\n    int n_batch = Ndarray_DIMS(out)[0];\n    assert_cmp(Ndarray_DIMS(a)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(a_len)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(b)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(b_len)[0], ==, n_batch);\n    int n_a_max_len = Ndarray_DIMS(a)[1];\n    int n_b_max_len = Ndarray_DIMS(b)[1];\n    start_dev_kernel(init_result_kernel, (n_batch, Ndarray_DEV_DATA_int32(out)));\n\n    // Working buffer.\n    int max_num_entries = std::min(n_a_max_len + 1, n_b_max_len + 1);\n    int32_t* buffer = (int32_t*) device_malloc(3 * n_batch * max_num_entries * sizeof(int32_t));\n    int32_t* last1_dist = buffer;\n    int32_t* last2_dist = buffer + n_batch * max_num_entries;\n    int32_t* cur_dist = buffer + 2 * n_batch * max_num_entries;\n\n    int num_diag = n_a_max_len + n_b_max_len + 1;\n    for(int diag_idx = 0; diag_idx < num_diag; ++diag_idx) {\n      start_dev_kernel(next_step_kernel, (\n        n_batch, n_a_max_len, n_b_max_len,\n        diag_idx,\n        Ndarray_DEV_DATA_int32(a), Ndarray_DEV_DATA_int32(b),\n        Ndarray_DEV_DATA_int32(a_len), Ndarray_DEV_DATA_int32(b_len),\n        last1_dist, last2_dist, cur_dist,\n        Ndarray_DEV_DATA_int32(out)));\n      // Rotate. last1_dist not needed anymore.\n      int32_t* tmp = last1_dist;\n      last1_dist = last2_dist;\n      last2_dist = cur_dist;\n      cur_dist = tmp;\n    }\n    HANDLE_LAST_ERROR();\n\n    device_free(buffer);\n  '
    # No backward pass: all inputs are gradient-disconnected integer symbols.
    c_bw_code = None
|
class OptimalCompletionEditDistancePerSuccessorOp(NativeOpGenBase):
    '\n  Given some prefix ``a`` + successor,\n  what is the minimum possible edit distance to ``b`` with any possible suffix on ``a`` + successor,\n  for successor in ``successors``.\n  This is described in `Optimal Completion Distillation (OCD) <https://arxiv.org/abs/1810.01398>`__.\n  The implementation is derived from :class:`OptimalCompletionEditDistanceOp`.\n\n  inputs:\n    :param a: symbols. 2d (batch,time), int32. prefix.\n    :param a_len: 1d (batch,), int32\n    :param b: symbols. 2d (batch,time), int32\n    :param b_len: 1d (batch,), int32\n    :param successors: 1d (num_labels,), int32\n  outputs:\n    :param output: 2d (batch,num_labels), int32, unnormalized edit distance\n  '
    # Same symbol inputs as OptimalCompletionEditDistanceOp, plus the candidate
    # successor labels; all int32, contiguous, gradient-disconnected.
    in_info = ({'name': 'a', 'ndim': 2, 'shape': (None, None), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'a_len', 'ndim': 1, 'shape': (None,), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'b', 'ndim': 2, 'shape': (None, None), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'b_len', 'ndim': 1, 'shape': (None,), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'successors', 'ndim': 1, 'shape': (None,), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'})
    # Output shape ((0, 0), (4, 0)): presumably (batch from input 0, num_labels from
    # input 4) -- confirm against the NativeOpGenBase shape convention.
    out_info = ({'name': 'output', 'ndim': 2, 'shape': ((0, 0), (4, 0)), 'dtype': 'int32', 'need_contiguous': True},)
    # Kernels (C source strings):
    #  - next_step_kernel: same anti-diagonal Levenshtein DP as in
    #    OptimalCompletionEditDistanceOp, but instead of reducing into the result it
    #    stores the DP row at t_a == a_len into a_last_row (one entry per t_b).
    #  - init_result_kernel: seeds each (batch, successor) result with
    #    min(initial-insertion cost a_len+1, last-deletion cost from a_last_row[b_len]+1).
    #  - expand_kernel: for each successor label and each t_b < b_len, min-reduces the
    #    substitution cost a_last_row[t_b] (+1 on mismatch) into the result via
    #    elem_atomic_min.
    c_extra_support_code = {'001_next_step': '\n    DEF_KERNEL\n    void next_step_kernel(\n        int n_batch, int n_a_max_len, int n_b_max_len,\n        int diag_idx,\n        const int32_t* a, const int32_t* b,\n        const int32_t* a_len, const int32_t* b_len,\n        const int32_t* last1_dist, const int32_t* last2_dist, int32_t* cur_dist, int32_t* a_last_row) {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      // We are going diagonal!\n      int num_entries;\n      if(diag_idx <= n_a_max_len) {\n        num_entries = diag_idx + 1;\n        if(num_entries > n_b_max_len + 1)\n          num_entries = n_b_max_len + 1;\n      } else {\n        num_entries = n_b_max_len + 1 - (diag_idx - n_a_max_len);\n        if(num_entries > n_a_max_len + 1)\n          num_entries = n_a_max_len + 1;\n      }\n      int max_num_entries = n_a_max_len + 1;\n      if(max_num_entries > n_b_max_len + 1)\n        max_num_entries = n_b_max_len + 1;\n      while(idx < n_batch * num_entries) {\n        int batch_idx = idx / num_entries;\n        int entry_idx = idx % num_entries;\n        int dist_idx = batch_idx * max_num_entries + entry_idx;\n\n        int t_a, t_b;\n        if(diag_idx <= n_a_max_len) {\n          t_a = diag_idx - entry_idx;\n          t_b = entry_idx;\n        } else {\n          t_a = n_a_max_len - entry_idx;\n          t_b = diag_idx - n_a_max_len + entry_idx;\n        }\n\n        if(t_a == 0)\n          cur_dist[dist_idx] = t_b; // distance == how much to delete from b\n        else if(t_b == 0)\n          cur_dist[dist_idx] = t_a; // distance == how much to delete from a\n        else {\n          // last1 is with diag_idx - 2. Needed for substitution cost.\n          // last2 is with diag_idx - 1. Needed for insertion or deletion cost.\n          // last2 refers to the first, for deletion. 
last2_idx + 1 is for insertion.\n          int last1_idx, last2_idx;\n          if(diag_idx - 1 < n_a_max_len)\n            last1_idx = dist_idx - 1;\n          else if(diag_idx - 1 == n_a_max_len)\n            last1_idx = dist_idx;\n          else\n            last1_idx = dist_idx + 1;\n          if(diag_idx <= n_a_max_len)\n            last2_idx = dist_idx - 1;\n          else\n            last2_idx = dist_idx;\n\n          int del_cost, ins_cost, sub_cost;\n          del_cost = last2_dist[last2_idx] + 1;\n          ins_cost = last2_dist[last2_idx + 1] + 1;\n          sub_cost = last1_dist[last1_idx];\n          if(a[batch_idx * n_a_max_len + t_a - 1] != b[batch_idx * n_b_max_len + t_b - 1])\n            ++sub_cost;\n          int min_cost = del_cost;\n          if(min_cost > ins_cost) min_cost = ins_cost;\n          if(min_cost > sub_cost) min_cost = sub_cost;\n          cur_dist[dist_idx] = min_cost;\n        }\n\n        if(t_a == a_len[batch_idx] && t_b <= b_len[batch_idx])\n          a_last_row[batch_idx * (n_b_max_len + 1) + t_b] = cur_dist[dist_idx];\n\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n    ', '002_init_result': '\n    DEF_KERNEL\n    void init_result_kernel(\n        int n_batch, int n_b_max_len, int n_labels,\n        const int32_t* a_len, const int32_t* b_len,\n        const int32_t* a_last_row,\n        int32_t* result\n    ) {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while(idx < n_batch * n_labels) {\n        int batch_idx = idx / n_labels;\n        int successor_idx = idx % n_labels;\n\n        // Initial insertion, last deletion.\n        int t_a = a_len[batch_idx] + 1;\n        int min_cost = t_a;\n        int last_del_cost = a_last_row[batch_idx * (n_b_max_len + 1) + b_len[batch_idx]] + 1;\n        if(min_cost > last_del_cost) min_cost = last_del_cost;\n        result[batch_idx * n_labels + successor_idx] = min_cost;\n\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n    ', '003_expand': '\n    DEF_KERNEL\n    void expand_kernel(\n        int n_batch, int n_b_max_len, int n_labels,\n        const int32_t* b,\n        const int32_t* b_len,\n        const int32_t* a_last_row,\n        const int32_t* successors,\n        int32_t* result\n    ) {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while(idx < n_batch * n_labels * n_b_max_len) {\n        int batch_idx = idx / n_b_max_len / n_labels;\n        int 
successor_idx = (idx / n_b_max_len) % n_labels;\n        int t_b = idx % n_b_max_len;\n        int successor = successors[successor_idx];\n\n        if(t_b < b_len[batch_idx]) {\n          // We can ignore insertion/deletion\n          // (except initial insertion / last deletion, see init_result_kernel).\n          int sub_cost = a_last_row[batch_idx * (n_b_max_len + 1) + t_b];\n          if(successor != b[batch_idx * n_b_max_len + t_b])\n            ++sub_cost;\n          elem_atomic_min(&result[batch_idx * n_labels + successor_idx], sub_cost);\n        }\n\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n    '}
    # Forward: shape checks, DP over all anti-diagonals (filling a_last_row), then
    # init_result_kernel + expand_kernel produce the per-successor minima.
    c_fw_code = '\n    assert(n_inputs == 5);\n    assert(n_outputs == 1);\n    Ndarray* a = inputs[0];\n    Ndarray* a_len = inputs[1];\n    Ndarray* b = inputs[2];\n    Ndarray* b_len = inputs[3];\n    Ndarray* successors = inputs[4];\n    Ndarray* out = *outputs[0];\n    assert_cmp(Ndarray_NDIM(a), ==, 2);\n    assert_cmp(Ndarray_NDIM(a_len), ==, 1);\n    assert_cmp(Ndarray_NDIM(b), ==, 2);\n    assert_cmp(Ndarray_NDIM(b_len), ==, 1);\n    assert_cmp(Ndarray_NDIM(successors), ==, 1);\n    assert_cmp(Ndarray_NDIM(out), ==, 2);\n    int n_batch = Ndarray_DIMS(out)[0];\n    int n_labels = Ndarray_DIMS(successors)[0];\n    assert_cmp(Ndarray_DIMS(out)[1], ==, n_labels);\n    assert_cmp(Ndarray_DIMS(a)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(a_len)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(b)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(b_len)[0], ==, n_batch);\n    int n_a_max_len = Ndarray_DIMS(a)[1];\n    int n_b_max_len = Ndarray_DIMS(b)[1];\n    Ndarray_memset(Ndarray_DEV_DATA_int32(out), 255, n_batch * n_labels * sizeof(int32_t));\n\n    // Working buffer.\n    int max_num_entries = std::min(n_a_max_len + 1, n_b_max_len + 1);\n    int32_t* buffer = (int32_t*) device_malloc(3 * n_batch * max_num_entries * sizeof(int32_t));\n    int32_t* last1_dist = buffer;\n    int32_t* last2_dist = buffer + n_batch * max_num_entries;\n    int32_t* cur_dist = buffer + 2 * n_batch * max_num_entries;\n    int32_t* a_last_row = (int32_t*) device_malloc(n_batch * (n_b_max_len + 1) * sizeof(int32_t));\n\n    int num_diag = n_a_max_len + n_b_max_len + 1;\n    for(int diag_idx = 0; diag_idx < num_diag; ++diag_idx) {\n      start_dev_kernel(next_step_kernel, (\n        n_batch, n_a_max_len, n_b_max_len,\n        diag_idx,\n        Ndarray_DEV_DATA_int32(a), Ndarray_DEV_DATA_int32(b),\n        Ndarray_DEV_DATA_int32(a_len), Ndarray_DEV_DATA_int32(b_len),\n        last1_dist, last2_dist, cur_dist, a_last_row\n      ));\n      // Rotate. 
last1_dist not needed anymore.\n      int32_t* tmp = last1_dist;\n      last1_dist = last2_dist;\n      last2_dist = cur_dist;\n      cur_dist = tmp;\n    }\n    HANDLE_LAST_ERROR();\n\n    start_dev_kernel(init_result_kernel, (\n      n_batch, n_b_max_len, n_labels,\n      Ndarray_DEV_DATA_int32(a_len), Ndarray_DEV_DATA_int32(b_len),\n      a_last_row,\n      Ndarray_DEV_DATA_int32(out)\n    ));\n    HANDLE_LAST_ERROR();\n\n    start_dev_kernel(expand_kernel, (\n      n_batch, n_b_max_len, n_labels,\n      Ndarray_DEV_DATA_int32(b),\n      Ndarray_DEV_DATA_int32(b_len),\n      a_last_row,\n      Ndarray_DEV_DATA_int32(successors),\n      Ndarray_DEV_DATA_int32(out)\n    ));\n    HANDLE_LAST_ERROR();\n\n    device_free(buffer);\n    device_free(a_last_row);\n  '
    # No backward pass: all inputs are gradient-disconnected integer symbols.
    c_bw_code = None
|
class NextEditDistanceRowOp(NativeOpGenBase):
    '\n  This does a single step in calculating the edit distance table, going over the symbols in ``a``.\n  Note that when you have the full sequence ``a`` in advance, :class:`EditDistanceOp` should be faster.\n  However, this iterative op is useful when ``a`` is constructed step by step.\n\n  inputs:\n    :param last_row: 2d (batch,b_time + 1), int32. last edit distances\n    :param a: symbols. 1d (batch,), int32. current.\n    :param a_n: (batch,), int32. current position\n    :param a_ended: 1d (batch,), int32 (casted from bool, because int32 easier to handle)\n    :param b: symbols. 2d (batch,b_time), int32\n    :param b_len: 1d (batch,), int32\n  outputs:\n    :param output: 2d (batch,b_time + 1), int32, next (unnormalized) edit distance row\n  '
    # Inputs: previous DP row plus the current a-symbol, its position, the ended flag,
    # and b/b_len; all int32, contiguous, gradient-disconnected.
    in_info = ({'name': 'last_row', 'ndim': 2, 'shape': (None, None), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'a', 'ndim': 1, 'shape': (None,), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'a_n', 'ndim': 1, 'shape': (None,), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'a_ended', 'ndim': 1, 'shape': (None,), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'b', 'ndim': 2, 'shape': (None, None), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'b_len', 'ndim': 1, 'shape': (None,), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'})
    # Output shape ((0, 0), (0, 1)): same shape as last_row, i.e. (batch, b_time + 1) --
    # presumably (input_idx, dim_idx) references; confirm against NativeOpGenBase convention.
    out_info = ({'name': 'output', 'ndim': 2, 'shape': ((0, 0), (0, 1)), 'dtype': 'int32', 'need_contiguous': True},)
    # next_row_kernel: one thread handles one batch entry and sweeps the whole row.
    # If a has not ended: standard Levenshtein row update (ins/del/sub) against b.
    # If a has ended: the previous row is copied through unchanged.
    # Positions past b_len just repeat the last computed value.
    c_extra_support_code = {'001_next_row': '\n    DEF_KERNEL\n    void next_row_kernel(\n        int n_batch, int n_b_max_len,\n        const int32_t* last_row,\n        const int32_t* a, const int32_t* a_n, const int32_t* a_ended,\n        const int32_t* b, const int32_t* b_len,\n        int32_t* next_row\n    ) {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while(idx < n_batch) {\n        int batch_idx = idx;\n\n        int last_dist;\n        if(!a_ended[batch_idx]) {\n          last_dist = a_n[batch_idx] + 1; // Initial deletion error.\n          next_row[batch_idx * (n_b_max_len + 1)] = last_dist;\n          for(int t_b = 1; t_b <= b_len[batch_idx]; ++t_b) {\n            int ins_error = last_row[batch_idx * (n_b_max_len + 1) + t_b] + 1;\n            int del_error = last_dist + 1;\n            int sub_error = last_row[batch_idx * (n_b_max_len + 1) + t_b - 1];\n            if(a[batch_idx] != b[batch_idx * n_b_max_len + t_b - 1])\n              ++sub_error;\n            last_dist = ins_error;\n            if(last_dist > del_error) last_dist = del_error;\n            if(last_dist > sub_error) last_dist = sub_error;\n            next_row[batch_idx * (n_b_max_len + 1) + t_b] = last_dist;\n          }\n        }\n        else { // a ended\n          // Just copy over.\n          for(int t_b = 0; t_b <= b_len[batch_idx]; ++t_b) {\n            last_dist = last_row[batch_idx * (n_b_max_len + 1) + t_b];\n            next_row[batch_idx * (n_b_max_len + 1) + t_b] = last_dist;\n          }\n        }\n        // Repeat last entry.\n        for(int t_b = b_len[batch_idx] + 1; t_b < n_b_max_len + 1; ++t_b)\n          next_row[batch_idx * (n_b_max_len + 1) + t_b] = last_dist;\n\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n    '}
    # Forward: shape checks only, then a single next_row_kernel launch.
    c_fw_code = '\n    assert(n_inputs == 6);\n    assert(n_outputs == 1);\n    Ndarray* last_row = inputs[0];\n    Ndarray* a = inputs[1];\n    Ndarray* a_n = inputs[2];\n    Ndarray* a_ended = inputs[3];\n    Ndarray* b = inputs[4];\n    Ndarray* b_len = inputs[5];\n    Ndarray* out = *outputs[0];\n    assert_cmp(Ndarray_NDIM(last_row), ==, 2);\n    assert_cmp(Ndarray_NDIM(a), ==, 1);\n    assert_cmp(Ndarray_NDIM(a_n), ==, 1);\n    assert_cmp(Ndarray_NDIM(a_ended), ==, 1);\n    assert_cmp(Ndarray_NDIM(b), ==, 2);\n    assert_cmp(Ndarray_NDIM(b_len), ==, 1);\n    assert_cmp(Ndarray_NDIM(out), ==, 2);\n    int n_batch = Ndarray_DIMS(out)[0];\n    int n_b_max_len = Ndarray_DIMS(b)[1];\n    assert_cmp(Ndarray_DIMS(out)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(out)[1], ==, n_b_max_len + 1);\n    assert_cmp(Ndarray_DIMS(last_row)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(last_row)[1], ==, n_b_max_len + 1);\n    assert_cmp(Ndarray_DIMS(a)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(a_n)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(a_ended)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(b)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(b)[1], ==, n_b_max_len);\n    assert_cmp(Ndarray_DIMS(b_len)[0], ==, n_batch);\n\n    start_dev_kernel(next_row_kernel, (\n      n_batch, n_b_max_len,\n      Ndarray_DEV_DATA_int32(last_row),\n      Ndarray_DEV_DATA_int32(a), Ndarray_DEV_DATA_int32(a_n), Ndarray_DEV_DATA_int32(a_ended),\n      Ndarray_DEV_DATA_int32(b), Ndarray_DEV_DATA_int32(b_len),\n      Ndarray_DEV_DATA_int32(out)\n    ));\n    HANDLE_LAST_ERROR();\n  '
    # No backward pass: all inputs are gradient-disconnected integer symbols.
    c_bw_code = None
|
class NextEditDistanceReduceOp(NativeOpGenBase):
    '\n  Code derived from :class:`NextEditDistanceRowOp`.\n\n  inputs:\n    :param last_row: 2d (batch,b_time + 1), int32. last edit distances\n    :param a: symbols. 2d (batch|1,n_labels), int32. current.\n    :param a_n: 1d (batch,), int32. current position\n    :param a_ended: 1d (batch,), int32 (casted from bool, because int32 easier to handle)\n    :param b: symbols. 2d (batch,b_time), int32\n    :param b_len: 1d (batch,), int32\n    :param optimal_completion: scalar, int32 (casted from bool). True -> reduce_min over row; False -> last of row\n    :param a_blank_idx: scalar, int32. use -1 to not use\n  outputs:\n    :param output: 2d (batch,n_labels), int32, next (unnormalized) (maybe optional) edit distance\n  '
    # Inputs as in NextEditDistanceRowOp, but `a` is a whole set of candidate labels
    # per batch entry (optionally broadcast over batch), plus two host-memory scalars
    # controlling the reduction mode and the blank label.
    in_info = ({'name': 'last_row', 'ndim': 2, 'shape': (None, None), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'a', 'ndim': 2, 'shape': (None, None), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'a_n', 'ndim': 1, 'shape': (None,), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'a_ended', 'ndim': 1, 'shape': (None,), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'b', 'ndim': 2, 'shape': (None, None), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'b_len', 'ndim': 1, 'shape': (None,), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'optimal_completion', 'ndim': 0, 'shape': (), 'dtype': 'int32', 'gradient': 'disconnected', 'host_memory': True}, {'name': 'a_blank_idx', 'ndim': 0, 'shape': (), 'dtype': 'int32', 'gradient': 'disconnected', 'host_memory': True})
    # Output shape ((0, 0), (1, 1)): presumably (batch from input 0, n_labels from
    # input 1) -- confirm against the NativeOpGenBase shape convention.
    out_info = ({'name': 'output', 'ndim': 2, 'shape': ((0, 0), (1, 1)), 'dtype': 'int32', 'need_contiguous': True},)
    # calc_result_kernel: one thread per (batch, label). For a non-blank label while a
    # has not ended, it runs the Levenshtein row update as in NextEditDistanceRowOp and
    # tracks the row minimum; for an ended a or the blank label, the previous row is
    # scanned unchanged. The result is either the row minimum (optimal_completion)
    # or the last row entry.
    c_extra_support_code = {'001_calc_result': '\n    DEF_KERNEL\n    void calc_result_kernel(\n        int n_batch, int n_b_max_len, int n_labels,\n        const int32_t* last_row,\n        const int32_t* a, const int32_t* a_n, const int32_t* a_ended,\n        const int32_t* b, const int32_t* b_len,\n        int32_t* result,\n        bool optimal_completion,\n        bool a_broadcast_batch,\n        int32_t a_blank_idx\n    ) {\n      int idx = threadIdx.x + blockDim.x * blockIdx.x;\n      while(idx < n_batch * n_labels) {\n        int batch_idx = idx / n_labels;\n        int label_idx = idx % n_labels;\n        int a_label = a[(a_broadcast_batch ? 0 : batch_idx) * n_labels + label_idx];\n\n        int total_min_error;\n        int last_dist;\n        if(!a_ended[batch_idx] && a_label != a_blank_idx) {\n          last_dist = a_n[batch_idx] + 1; // Initial deletion error.\n          total_min_error = last_dist;\n          for(int t_b = 1; t_b <= b_len[batch_idx]; ++t_b) {\n            int ins_error = last_row[batch_idx * (n_b_max_len + 1) + t_b] + 1;\n            int del_error = last_dist + 1;\n            int sub_error = last_row[batch_idx * (n_b_max_len + 1) + t_b - 1];\n            if(a_label != b[batch_idx * n_b_max_len + t_b - 1])\n              ++sub_error;\n            int min_error = ins_error;\n            if(min_error > del_error) min_error = del_error;\n            if(min_error > sub_error) min_error = sub_error;\n            last_dist = min_error;\n            if(total_min_error > last_dist) total_min_error = last_dist;\n          }\n        }\n        else { // a ended or blank\n          // Just copy over.\n          total_min_error = last_row[batch_idx * (n_b_max_len + 1)];\n          for(int t_b = 0; t_b <= b_len[batch_idx]; ++t_b) {\n            last_dist = last_row[batch_idx * (n_b_max_len + 1) + t_b];\n            if(total_min_error > last_dist) total_min_error = last_dist;\n          }\n        }\n\n        result[batch_idx * n_labels + label_idx] = optimal_completion ? total_min_error : last_dist;\n\n        idx += gridDim.x * blockDim.x;\n      }\n    }\n    '}
    # Forward: reads the two host scalars, detects batch-broadcasting of `a`
    # (first dim == 1), validates all shapes, then launches calc_result_kernel once.
    c_fw_code = '\n    assert(n_inputs == 8);\n    assert(n_outputs == 1);\n    Ndarray* last_row = inputs[0];\n    Ndarray* a = inputs[1];\n    Ndarray* a_n = inputs[2];\n    Ndarray* a_ended = inputs[3];\n    Ndarray* b = inputs[4];\n    Ndarray* b_len = inputs[5];\n    bool optimal_completion = (bool) Ndarray_DEV_DATA_int32_scalar(inputs[6]);\n    int32_t a_blank_idx = Ndarray_DEV_DATA_int32_scalar(inputs[7]);\n    Ndarray* out = *outputs[0];\n    assert_cmp(Ndarray_NDIM(last_row), ==, 2);\n    assert_cmp(Ndarray_NDIM(a), ==, 2);\n    assert_cmp(Ndarray_NDIM(a_n), ==, 1);\n    assert_cmp(Ndarray_NDIM(a_ended), ==, 1);\n    assert_cmp(Ndarray_NDIM(b), ==, 2);\n    assert_cmp(Ndarray_NDIM(b_len), ==, 1);\n    assert_cmp(Ndarray_NDIM(out), ==, 2);\n    int n_batch = Ndarray_DIMS(out)[0];\n    int n_labels = Ndarray_DIMS(out)[1];\n    int n_b_max_len = Ndarray_DIMS(b)[1];\n    assert_cmp(Ndarray_DIMS(out)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(out)[1], ==, n_labels);\n    assert_cmp(Ndarray_DIMS(last_row)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(last_row)[1], ==, n_b_max_len + 1);\n    bool a_broadcast_batch = Ndarray_DIMS(a)[0] == 1;\n    if(!a_broadcast_batch)\n      assert_cmp(Ndarray_DIMS(a)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(a)[1], ==, n_labels);\n    assert_cmp(Ndarray_DIMS(a_n)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(a_ended)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(b)[0], ==, n_batch);\n    assert_cmp(Ndarray_DIMS(b)[1], ==, n_b_max_len);\n    assert_cmp(Ndarray_DIMS(b_len)[0], ==, n_batch);\n\n    start_dev_kernel(calc_result_kernel, (\n      n_batch, n_b_max_len, n_labels,\n      Ndarray_DEV_DATA_int32(last_row),\n      Ndarray_DEV_DATA_int32(a), Ndarray_DEV_DATA_int32(a_n), Ndarray_DEV_DATA_int32(a_ended),\n      Ndarray_DEV_DATA_int32(b), Ndarray_DEV_DATA_int32(b_len),\n      Ndarray_DEV_DATA_int32(out),\n      optimal_completion,\n      a_broadcast_batch, a_blank_idx\n    ));\n    HANDLE_LAST_ERROR();\n  '
    # No backward pass: all inputs are gradient-disconnected integer symbols.
    c_bw_code = None
|
def sparse_splice_offset_numpy(s0, idx):
    """
    Like sparse_slice_offset().
    Counts how many entries of ``s0`` are (strictly) below ``idx``.

    :param numpy.ndarray s0: sorted or unsorted indices
    :param int idx: threshold index
    :return: number of entries in ``s0`` which are < ``idx``
    """
    below = numpy.less(s0, idx)
    return numpy.sum(below)
|
class WrapEpochValue:
    """
    Use this wrapper if you want to define some value in your network
    which depends on the pretrain epoch.
    This is going to be part in your network description dict.
    """

    def __init__(self, func):
        """
        :param ((epoch: int) -> object) func: function which should accept one kwd-arg 'epoch'
        """
        self.func = func

    def get_value(self, epoch):
        """
        :param int epoch:
        :return: anything, whatever self.func returns for this epoch
        :rtype: object
        """
        return self.func(epoch=epoch)
|
def find_pretrain_wrap_values(net_json):
    """
    See also :func:`Pretrain._resolve_wrapped_values`.
    Recursively goes through dicts, tuples and lists.
    This is a simple check to see if this is needed,
    i.e. if there are any :class:`WrapEpochValue` used.

    :param dict[str] net_json: network dict
    :return: whether there is some :class:`WrapEpochValue` in it
    :rtype: bool
    """
    assert isinstance(net_json, dict)

    def _contains_wrap(obj):
        # Depth-first search over nested dicts/tuples/lists.
        if isinstance(obj, WrapEpochValue):
            return True
        if isinstance(obj, dict):
            return any(_contains_wrap(value) for _, value in sorted(obj.items()))
        if isinstance(obj, (tuple, list)):
            return any(_contains_wrap(item) for item in obj)
        return False

    return _contains_wrap(net_json)
|
class Pretrain():
'\n Start with 1 hidden layers up to N hidden layers -> N pretrain steps -> N epochs (with repetitions == 1).\n The first hidden layer is the input layer.\n This works for generic network constructions. See _construct_epoch().\n '
    def __init__(self, original_network_json, network_init_args=None, copy_param_mode=None, copy_output_layer=None, greedy=None, repetitions=None, construction_algo='from_output', output_layers=('output',), input_layers=('data',)):
        '\n    :type original_network_json: dict[str]\n    :param dict[str]|None network_init_args: additional args we use for LayerNetwork.from_json().\n      must have n_in, n_out. (for Theano only, thus optional now)\n    :param str copy_param_mode:\n    :param bool|str copy_output_layer: whether to copy the output layer params from last epoch or reinit\n    :param bool greedy: if True, only train output+last layer, otherwise train all\n    :param None | int | list[int] | dict repetitions: how often to repeat certain pretrain steps.\n      default is one epoch.\n      It can also be a dict, with keys like \'default\' and \'final\'. See code below.\n    :param str|callable construction_algo: e.g. "from_output"\n    :param list[str]|tuple[str] output_layers: used for construction\n    :param list[str]|tuple[str] input_layers: used for construction\n    '
        # Normalize copy_output_layer: default from copy_param_mode, then 'ifpossible';
        # 'reset' means do not copy at all.
        assert (copy_param_mode in [None, 'ifpossible', 'subset', 'reset'])
        if (copy_output_layer is None):
            copy_output_layer = copy_param_mode
        if (copy_output_layer is None):
            copy_output_layer = 'ifpossible'
        if (copy_output_layer == 'reset'):
            copy_output_layer = False
        if copy_output_layer:
            assert ((copy_output_layer is True) or (copy_output_layer in ['ifpossible', 'subset']))
        self.copy_param_mode = copy_param_mode
        self.copy_output_layer = copy_output_layer
        if (greedy is None):
            greedy = False
        self.greedy = greedy
        self.network_init_args = network_init_args
        self._original_network_json = original_network_json
        self._construction_algo = construction_algo
        self._input_layers = input_layers
        self._output_layers = output_layers
        # Build self._step_net_jsons (one network dict per pretrain step)
        # using the selected construction algorithm.
        if (construction_algo == 'from_input'):
            self._construct_epochs_from_input()
        elif (construction_algo == 'from_output'):
            self._construct_epochs_from_output()
        elif callable(construction_algo):
            self._construct_epochs_custom(construction_algo)
        elif (construction_algo == 'no_network_modifications'):
            self._construct_epochs_no_network_modifications()
        else:
            raise Exception(('invalid construction_algo %r' % construction_algo))
        if (not callable(construction_algo)):
            # Drop steps which only added non-trainable layers (nothing new to train in them).
            self._remove_non_trainable_added_only()
        # Normalize `repetitions` into a list with one entry per constructed step.
        if (not repetitions):
            repetitions = 1
        if isinstance(repetitions, dict):
            rep_dict = repetitions.copy()
            default_rep = rep_dict.pop('default', 1)
            repetitions = ([default_rep] * len(self._step_net_jsons))
            for (k, v) in sorted(rep_dict.items()):
                if (k == 'final'):
                    # 'final' refers to the last constructed step.
                    k = (len(self._step_net_jsons) - 1)
                repetitions[k] = v
        else:
            if (not isinstance(repetitions, list)):
                # NOTE(review): `long` is a Python-2 name; presumably aliased to int
                # for Py3 elsewhere in this module -- confirm.
                assert isinstance(repetitions, (int, long))
                repetitions = [repetitions]
            assert isinstance(repetitions, list)
            assert (0 < len(repetitions) <= len(self._step_net_jsons))
            if (len(repetitions) < len(self._step_net_jsons)):
                # Pad by repeating the last given count for all remaining steps.
                repetitions = (repetitions + ([repetitions[(- 1)]] * (len(self._step_net_jsons) - len(repetitions))))
        assert (len(repetitions) == len(self._step_net_jsons))
        # A step net dict can override its own repetition count via the special key '#repetition'.
        for (i, net_dict) in enumerate(self._step_net_jsons):
            if ('#repetition' in net_dict):
                repetitions[i] = net_dict.pop('#repetition')
        self.repetitions = repetitions
        # Duplicate each step net per its repetition count, then resolve any
        # WrapEpochValue instances against the (1-based) epoch numbers.
        self._make_repetitions()
        self._resolve_wrapped_values()
    def _remove_non_trainable_added_only(self):
        '\n    If from one epoch to the next, only non-trainable layers were added, remove this pretrain epoch.\n    Output layers are ignored.\n    Also handles first epoch.\n    '
        assert self._step_net_jsons
        old_net_jsons = self._step_net_jsons
        self._step_net_jsons = []
        # Compare consecutive step nets. i == -1 pairs an empty net with the first step,
        # so the first step is also checked for adding something trainable.
        # The loop deliberately stops before the last net: the final net is always
        # kept (appended unconditionally below).
        for i in range((- 1), (len(old_net_jsons) - 2)):
            if (i == (- 1)):
                (net1, net2) = ({}, old_net_jsons[0])
            else:
                (net1, net2) = old_net_jsons[i:(i + 2)]
            assert isinstance(net1, dict)
            assert isinstance(net2, dict)
            # Sanity check: layers are only ever added between steps, never removed.
            for layer_name in sorted(net1.keys()):
                assert (layer_name in net2)
            # Keep this step only if it added at least one new trainable, non-output layer.
            have_new_trainable = False
            for layer_name in sorted(net2.keys()):
                if self._is_layer_output(net2, layer_name):
                    continue  # output layers are ignored for this decision
                if (layer_name in net1):
                    continue  # not newly added in this step
                if net2[layer_name].get('trainable', True):
                    have_new_trainable = True
                    break
            if have_new_trainable:
                self._step_net_jsons.append(net2)
        # The final (complete) net is always kept.
        self._step_net_jsons.append(old_net_jsons[(- 1)])
def _make_repetitions(self):
assert (len(self.repetitions) == len(self._step_net_jsons))
from copy import deepcopy
old_net_jsons = self._step_net_jsons
self._step_net_jsons = []
for (n_rep, net_json) in zip(self.repetitions, old_net_jsons):
for _ in range(n_rep):
self._step_net_jsons.append(deepcopy(net_json))
    def _resolve_wrapped_values(self):
        '\n    Resolves any :class:`WrapEpochValue` in the net dicts.\n    Recursively goes through dicts, tuples and lists.\n    See also :func:`find_pretrain_wrap_values`.\n    '
        def _check_dict(d, epoch, depth=0):
            # In-place: replaces every WrapEpochValue value inside `d` by its resolved value.
            for (k, v) in sorted(d.items()):
                if (depth <= 1):
                    # Top-level keys (layer names / layer options) must be strings.
                    # NOTE(review): `unicode` is a Python-2 name; presumably aliased
                    # for Py3 elsewhere in this module -- confirm.
                    assert isinstance(k, (str, unicode))
                d[k] = _check(v, epoch=epoch, depth=(depth + 1))
        def _check(v, epoch, depth):
            '\n      :param WrapEpochValue|tuple|list|dict|T v:\n      :param int epoch:\n      :param int depth:\n      :rtype: T\n      '
            if isinstance(v, WrapEpochValue):
                return v.get_value(epoch=epoch)
            if isinstance(v, (tuple, list)):
                # Only rebuild the sequence if it actually contains a WrapEpochValue.
                if (not any([isinstance(x, WrapEpochValue) for x in v])):
                    return v
                return type(v)([_check(x, epoch=epoch, depth=(depth + 1)) for x in v])
            if isinstance(v, dict):
                _check_dict(v, epoch=epoch, depth=depth)
                return v
            return v
        # Epoch numbers are 1-based: step i corresponds to epoch i + 1.
        for (i, net_json) in enumerate(self._step_net_jsons):
            epoch = (i + 1)
            _check_dict(net_json, epoch=epoch)
def _find_layer_descendants(self, json, sources):
ls = []
for (other_layer_name, other_layer) in sorted(json.items()):
if (other_layer_name in ls):
continue
other_sources = other_layer.get('from', ['data'])
for src in sources:
if (src in other_sources):
ls.append(other_layer_name)
break
return ls
def _is_layer_output(self, json, layer_name):
if (layer_name in self._output_layers):
return True
if (json[layer_name]['class'] == 'softmax'):
return True
if ('target' in json[layer_name]):
return True
return False
    def _find_layer_outputs(self, json, sources):
        # Breadth-first search starting from `sources`, collecting every reachable
        # layer which counts as an output layer (see _is_layer_output).
        # NOTE(review): descendants are looked up in self._original_network_json while
        # _is_layer_output checks the passed `json` -- presumably intentional; confirm.
        outs = []
        visited = set()
        while sources:
            visited.update(sources)
            for src in sources:
                if (src in outs):
                    continue
                if self._is_layer_output(json, src):
                    outs.append(src)
            # Next frontier: direct descendants of the current frontier.
            sources = self._find_layer_descendants(self._original_network_json, sources)
            # Remove already-seen layers so the search terminates on cyclic/diamond graphs.
            for v in visited:
                if (v in sources):
                    sources.remove(v)
        return outs
def _find_existing_inputs(self, json, layer_name, _collected=None, _visited=None):
if (_collected is None):
_collected = []
if (_visited is None):
_visited = {layer_name: None}
sources = self._original_network_json[layer_name].get('from', ['data'])
for src in sources:
if ((src in json) or (src == 'data')):
if (src not in _collected):
_collected.append(src)
elif (src not in _visited):
_visited[src] = layer_name
self._find_existing_inputs(json=json, layer_name=src, _collected=_collected, _visited=_visited)
return _collected
    def _construct_next_epoch_from_input(self, num_steps):
        '\n    First find all layers which have data as input.\n    Then expand from those layers.\n    '
        from copy import deepcopy
        new_net = {}
        sources = ['data']
        # `needed`: layer names referenced via 'from' by layers already in new_net
        # but not themselves in new_net yet.
        needed = set()
        def update_needed(layer_name):
            '\n :param str layer_name:\n '
            needed.update(set(new_net[layer_name].get('from', ['data'])).difference((list(new_net.keys()) + ['data'])))
        # First, greedily pull in all NON-trainable layers reachable from the input;
        # they come for free in every step.
        while True:
            descendants = self._find_layer_descendants(self._original_network_json, sources)
            added_something = False
            for layer_name in descendants:
                if (layer_name in new_net):
                    continue
                if self._original_network_json[layer_name].get('trainable', True):
                    continue  # only non-trainable layers in this phase
                if (layer_name in needed):
                    needed.remove(layer_name)
                added_something = True
                sources.append(layer_name)
                new_net[layer_name] = deepcopy(self._original_network_json[layer_name])
                update_needed(layer_name)
            if (not added_something):
                break
        # Then expand `num_steps` layers deep from the current frontier.
        for _ in range(num_steps):
            descendants = self._find_layer_descendants(self._original_network_json, sources)
            sources = []
            for layer_name in descendants:
                if (layer_name in new_net):
                    continue
                if (layer_name in needed):
                    needed.remove(layer_name)
                sources.append(layer_name)
                new_net[layer_name] = deepcopy(self._original_network_json[layer_name])
                update_needed(layer_name)
            if (not sources):
                # Nothing more to expand: construction is finished.
                return False
        # Always include all output layers.
        for layer_name in sorted(self._original_network_json.keys()):
            if (layer_name in new_net):
                continue
            if (not self._is_layer_output(self._original_network_json, layer_name)):
                continue
            if (layer_name in needed):
                needed.remove(layer_name)
            new_net[layer_name] = deepcopy(self._original_network_json[layer_name])
            update_needed(layer_name)
        if (not needed):
            # All dependencies satisfied, i.e. this would be the complete network;
            # the final net is appended separately by the caller.
            return False
        # Rewire 'from' references to skip over layers which are missing in new_net,
        # connecting each layer to its nearest existing ancestors instead.
        for layer_name in sorted(new_net.keys()):
            sources = new_net[layer_name].get('from', ['data'])
            sources2 = self._find_existing_inputs(new_net, layer_name)
            if (sources != sources2):
                if ('data' in sources2):
                    sources2.remove('data')
                new_net[layer_name]['from'] = sources2
        self._step_net_jsons.append(new_net)
        return True
def _construct_epochs_from_input(self):
    """
    Construct the pretrain networks by expanding from the input,
    one more expansion step per epoch; the full original network comes last.
    """
    self._step_net_jsons = []
    step = 0
    while True:
        step += 1
        if not self._construct_next_epoch_from_input(step):
            break
    self._step_net_jsons.append(self._original_network_json)
def _construct_new_epoch_from_output(self):
    """
    We start from the most simple network which we have constructed so far,
    and try to construct an even simpler network.

    :return: whether a simpler network was prepended to self._step_net_jsons
    :rtype: bool
    """
    from copy import deepcopy
    new_json = deepcopy(self._step_net_jsons[0])
    while True:
        for out_layer_name in self._output_layers:
            assert (out_layer_name in new_json)
        # Remove the direct non-input sources of the output layers and connect
        # the outputs to the sources of the removed layers instead.
        new_sources = set()
        deleted_sources = set()
        for out_layer_name in self._output_layers:
            for source in new_json[out_layer_name]['from']:
                if (source in self._input_layers):
                    new_sources.add(source)
                else:
                    assert (source in new_json), ('error %r, n: %i, last: %s' % (source, len(self._step_net_jsons), self._step_net_jsons[0]))
                    new_sources.update(new_json[source].get('from', ['data']))
                    del new_json[source]
                    deleted_sources.add(source)
        if (list(sorted(new_sources)) == list(sorted(set(sum([new_json[name]['from'] for name in self._output_layers], []))))):
            # The source set did not change, i.e. nothing was simplified -- give up.
            return False
        for out_layer_name in self._output_layers:
            new_json[out_layer_name]['from'] = list(sorted(new_sources))
        if new_sources.intersection(set(self._input_layers)):
            # The outputs now (partly) read from input layers directly;
            # do not keep this step, try to simplify further instead.
            continue
        if all(((not self._original_network_json[del_source].get('trainable', True)) for del_source in deleted_sources)):
            # Only non-trainable layers were removed -- presumably not a useful
            # extra pretrain step, so keep simplifying (NOTE(review): confirm intent).
            continue
        self._step_net_jsons = ([new_json] + self._step_net_jsons)
        return True
def _construct_epochs_from_output(self):
    """
    Construct the pretrain networks by repeatedly simplifying the network
    from the output side; the full original network is the last entry.
    """
    self._step_net_jsons = [self._original_network_json]
    keep_going = True
    while keep_going:
        keep_going = self._construct_new_epoch_from_output()
def _construct_epochs_custom(self, func):
'\n :param ((idx: int, net_dict: dict[str,dict[str]]) -> dict[str,dict[str]]|None) func:\n ``func`` can work inplace on net_dict and should then return it.\n If ``None`` is returned, it will stop with the construction.\n The original network will always be added at the end.\n '
from copy import deepcopy
self._step_net_jsons = []
idx = 0
while True:
d = func(idx=idx, net_dict=deepcopy(self._original_network_json))
if (not d):
break
self._step_net_jsons.append(d)
idx += 1
self._step_net_jsons.append(self._original_network_json)
def _construct_epochs_no_network_modifications(self):
    """
    Pretrain scheme which does not modify the network at all:
    the only step network is the original network itself.
    """
    self._step_net_jsons = [self._original_network_json]
def __str__(self):
parts = [('Pretrain construction algo %r,' % self._construction_algo), ('number of pretrain epochs: %i' % self.get_train_num_epochs())]
rep_set = set(self.repetitions)
if (rep_set != {1}):
if (len(rep_set) == 1):
parts.append(('(repetitions: %i)' % self.repetitions[0]))
else:
parts.append(('(repetitions: %r)' % self.repetitions))
return ' '.join(parts)
def get_train_num_epochs(self):
    """
    :return: number of pretrain epochs, i.e. number of constructed step networks
    :rtype: int
    """
    step_networks = self._step_net_jsons
    return len(step_networks)
def get_final_network_json(self):
    """
    :return: the last step network, i.e. the topology used after pretraining
    :rtype: dict[str,dict[str]]
    """
    step_networks = self._step_net_jsons
    return step_networks[-1]
def get_network_json_for_epoch(self, epoch):
    """
    :param int epoch: starting at 1
    :return: the step network for the epoch; epochs beyond the pretrain phase
        all get the last (final) network
    :rtype: dict[str]
    """
    assert epoch >= 1
    # Clamp to the last constructed network for epochs after the pretrain phase.
    index = min(epoch, len(self._step_net_jsons)) - 1
    return self._step_net_jsons[index]
def get_train_param_args_for_epoch(self, epoch):
    """
    :type epoch: int
    :returns the kwargs for LayerNetwork.set_train_params, i.e. which params to train.
    :rtype: dict[str]
    """
    if not self.greedy or epoch == 1:
        # Empty kwargs, i.e. no restriction on which params to train.
        return {}
    raise NotImplementedError('This feature was removed with dropped Theano support')
|
def pretrain_from_config(config):
    """
    :type config: returnn.config.Config
    :rtype: Pretrain | None
    """
    from returnn.config import network_json_from_config
    pretrain_type = config.bool_or_other('pretrain', None)
    enabled = (
        (pretrain_type == 'default')
        or (pretrain_type is True)
        or (isinstance(pretrain_type, dict) and bool(pretrain_type)))
    if not enabled:
        if not pretrain_type:
            return None
        raise Exception('unknown pretrain type: %s' % pretrain_type)
    original_network_json = network_json_from_config(config)
    opts = config.get_of_type('pretrain', dict, {})
    # Old-style individual config options only fill in what the 'pretrain' dict
    # does not specify itself.
    if config.has('pretrain_copy_output_layer'):
        opts.setdefault('copy_output_layer', config.bool_or_other('pretrain_copy_output_layer', 'ifpossible'))
    if config.has('pretrain_greedy'):
        opts.setdefault('greedy', config.bool('pretrain_greedy', None))
    if config.has('pretrain_repetitions'):
        if config.is_typed('pretrain_repetitions'):
            opts.setdefault('repetitions', config.typed_value('pretrain_repetitions'))
        else:
            opts.setdefault('repetitions', config.int_list('pretrain_repetitions', None))
    if config.has('pretrain_construction_algo'):
        opts.setdefault('construction_algo', config.value('pretrain_construction_algo', None))
    return Pretrain(original_network_json=original_network_json, network_init_args=None, **opts)
|
def demo():
    """
    Will print out the different network topologies of the specified pretraining scheme.
    Standalone entry point; reads a RETURNN config filename from the command line.
    """
    import returnn.util.better_exchook
    returnn.util.better_exchook.install()
    import returnn.__main__ as rnn
    import argparse
    from returnn.util.basic import obj_diff_str
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('config')
    arg_parser.add_argument('--diff', action='store_true', help='show diff only')
    arg_parser.add_argument('other_returnn_args', nargs=argparse.REMAINDER, help='config updates or so')
    args = arg_parser.parse_args()
    # 'log': [] overrides the config's log option -- presumably so that this
    # demo only writes to stdout (TODO confirm).
    rnn.init_config(config_filename=args.config, command_line_options=args.other_returnn_args, extra_updates={'log': []})
    rnn.config._hack_value_reading_debug()
    rnn.init_log()
    if (not rnn.config.value('pretrain', '')):
        print("config option 'pretrain' not set, will set it for this demo to 'default'")
        rnn.config.set('pretrain', 'default')
    pretrain = pretrain_from_config(rnn.config)
    print(('pretrain: %s' % pretrain))
    num_pretrain_epochs = pretrain.get_train_num_epochs()
    last_net_json = None
    from pprint import pprint
    for epoch in range(1, (1 + num_pretrain_epochs)):
        print(('epoch %i (of %i) network json:' % (epoch, num_pretrain_epochs)))
        net_json = pretrain.get_network_json_for_epoch(epoch)
        if args.diff:
            # With --diff, only show what changed relative to the previous epoch.
            if (last_net_json is not None):
                print(obj_diff_str(last_net_json, net_json))
            else:
                print('(initial)')
        else:
            pprint(net_json)
        last_net_json = net_json
    print('done.')
|
def print(*args, **kwargs):
    """
    ``print`` replacement which respects the module-global ``Quiet`` flag.

    :param args: forwarded to the original ``print``
    :param kwargs: forwarded to the original ``print``
    """
    if Quiet:
        return
    _orig_print(*args, **kwargs)
|
def init(name, reference, config, sprint_unit=None, version_number=None, callback=None, **kwargs):
    """
    This will be called by Sprint PythonControl.
    But we also call it ourselves e.g. in getSegmentList() and SprintNnPythonLayer.
    In this specific module, we expect that there is "c2p_fd" and "p2c_fd" in the config string
    to communicate with the parent process, which is usually handled by SprintErrorSignals.

    :param str name: this specifies the caller. e.g. "Sprint.PythonControl"
    :param reference: this is any object to identify the specific instance of the caller, if there are multiple.
    :param str config: this will be passed over from Sprint. you can configure that via --*.pymod-config.
    :param str sprint_unit: if this is called by Sprint PythonControl, this will specify which specific part
        of Sprint is using this PythonControl, because there can be multiple parts.
        E.g. there is "FeedForwardTrainer", "SegmentwiseNnTrainer" and "NnTrainer.pythonControl".
    :param int|None version_number: if this is called by Sprint PythonControl, this will set the version number.
        only newer Sprint versions will set this.
    :param function|None callback: if this is called by Sprint PythonControl, this might provide a callback.
        Only newer Sprint versions will provide this to init().
        This callback can be used for many different actions.
        It's supposed to be called like callback(action, **other_args), where action is a string.
        See Sprint PythonControl code about the possible actions and arguments.
    :param kwargs: all remaining args are specific for each caller.
    """
    # Parse "key1:value1,key2:value2,..." into a dict.
    config = config.split(',')
    config = {key: value for (key, value) in [s.split(':', 1) for s in config if s]}
    # The global Quiet flag silences this module's print() replacement.
    global Quiet
    if to_bool(config.get('quiet', False)):
        Quiet = True
    print(('RETURNN SprintControl[pid %i] init: name=%r, sprint_unit=%r, version_number=%r, callback=%r, ref=%r, config=%r, kwargs=%r' % (os.getpid(), name, sprint_unit, version_number, callback, reference, config, kwargs)))
    InitTypes.add(name)
    global Verbose
    if to_bool(config.get('verbose', False)):
        Verbose = True
    # Optionally enable shared-memory pickling of numpy arrays for the pipe protocol.
    if (to_bool(config.get('EnableAutoNumpySharedMemPickling', False)) and (not task_system.SharedMemNumpyConfig['enabled'])):
        task_system.SharedMemNumpyConfig['enabled'] = True
        print(('RETURNN SprintControl[pid %i] EnableAutoNumpySharedMemPickling = True' % (os.getpid(),)))
    # PythonControl.create() is idempotent: it returns the existing singleton if there is one.
    return PythonControl.create(c2p_fd=int(config['c2p_fd']), p2c_fd=int(config['p2c_fd']), name=name, reference=reference, config=config, sprint_unit=sprint_unit, version_number=version_number, min_version_number=int(config['minPythonControlVersion']), callback=callback, **kwargs)
|
def getSegmentList(corpusName, segmentList, config, **kwargs):
    """
    Sprint will directly call this function.
    Generator over the segment names, decoded to str.
    """
    print(('RETURNN SprintControl[pid %i] getSegmentList: corpus=%r, config=%r' % (os.getpid(), corpusName, config)))
    init(name='RETURNN.PythonSegmentOrder', reference=corpusName, config=config)
    control = PythonControl.instance
    control.check_control_loop_running()
    for segment_name in control.segment_list_iterator():
        yield segment_name.decode('utf-8') if isinstance(segment_name, bytes) else segment_name
|
class SprintNnPythonLayer():
    """
    Sprint will directly call this class, i.e. create an instance of it.
    It implements the Sprint NN PythonLayer interface.
    """

    def __init__(self, config, **kwargs):
        print(('RETURNN SprintControl[pid %i] SprintNnPythonLayer.__init__: %r, %r' % (os.getpid(), config, kwargs)))
        init(name='RETURNN.SprintNnPythonLayer', reference=self, config=config)
        # Dimensions are set later by Sprint via setInputDimension/setOutputDimension.
        self.input_size = None
        self.output_size = None

    def finalize(self):
        """
        Called by Sprint at exit.
        """
        print(('RETURNN SprintControl[pid %i] SprintNnPythonLayer.finalize' % (os.getpid(),)))

    def setInputDimension(self, stream, size):
        """
        :param int stream:
        :param int size:
        """
        print(('RETURNN SprintControl[pid %i] SprintNnPythonLayer.setInputDimension: stream=%r, size=%r' % (os.getpid(), stream, size)))
        assert (stream == 0), 'we only support a single input stream (for now)'
        self.input_size = size

    def setOutputDimension(self, size):
        """
        :param int size:
        """
        print(('RETURNN SprintControl[pid %i] SprintNnPythonLayer.setOutputDimension: %r' % (os.getpid(), size)))
        self.output_size = size

    def initializeNetworkParameters(self):
        """
        Called by Sprint for param init.
        This layer has no own parameters, so there is nothing to initialize.
        """
        print(('RETURNN SprintControl[pid %i] SprintNnPythonLayer.initializeNetworkParameters' % (os.getpid(),)))

    def loadNetworkParameters(self, filename):
        """
        :param str filename:
        """
        print(('RETURNN SprintControl[pid %i] SprintNnPythonLayer.loadNetworkParameters: %r' % (os.getpid(), filename)))

    def saveNetworkParameters(self, filename):
        """
        :param str filename:
        """
        print(('RETURNN SprintControl[pid %i] SprintNnPythonLayer.saveNetworkParameters: %r' % (os.getpid(), filename)))

    def isTrainable(self):
        """
        :rtype: bool
        """
        return True

    def getNumberOfFreeParameters(self):
        """
        :rtype: int
        """
        return 0

    def forward(self, input):
        """
        :param input: tuple of input matrices of format (input_size,time). we ignore them.
        :return: single output matrix of format (output_size,time)
        """
        if Verbose:
            print(('RETURNN SprintControl[pid %i] SprintNnPythonLayer.forward: %s' % (os.getpid(), (input[0].shape if input else repr(input)[:10]))))
        assert (len(input) == 1)
        assert (input[0].ndim == 2)
        assert (input[0].shape[0] == self.input_size)
        seg_len = input[0].shape[1]
        # The actual output comes from RETURNN via the PythonControl pipe protocol.
        posteriors = PythonControl.instance.get_current_seg_posteriors(seg_len=seg_len)
        if PythonControl.instance.posteriors_in_log_space:
            assert PythonControl.instance.sprint_knows_about_log_space_probs
        assert (posteriors.shape == (seg_len, self.output_size))
        # RETURNN side uses (time,label); Sprint expects (label,time) -- transpose.
        return posteriors.T

    def backpropagate(self, errorSignalIn):
        """
        :param numpy.ndarray errorSignalIn: matrix of format (output_size,time)
        :return: tuple of matrices of format (input_size,time)
        :rtype: numpy.ndarray
        """
        if Verbose:
            print(('RETURNN SprintControl[pid %i] SprintNnPythonLayer.backpropagate: %r' % (os.getpid(), errorSignalIn.shape)))
        assert (errorSignalIn.ndim == 2)
        assert (errorSignalIn.shape[0] == self.output_size)
        seg_len = errorSignalIn.shape[1]
        # Hand the error signal (transposed to (time,label)) back to RETURNN.
        PythonControl.instance.set_current_seg_error_signal(seg_len=seg_len, error_signal=errorSignalIn.T)
        # No gradient is propagated to the (ignored) inputs.
        return (numpy.zeros((self.input_size, seg_len), dtype='float32'),)
|
class PythonControl():
    """
    This will send data to RETURNN over a pipe.
    We expect that we are child process and the parent process has spawned us,

    An instance of this class is also the interface for multiple Sprint interfaces, i.e.:
      * PythonControl (standalone via NnTrainer tool)
      * PythonControl (via SegmentwiseNnTrainer)
      * implicitly PythonSegmentOrder (see code above)
    """
    Version = 1  # pipe protocol version, checked in _handle_cmd_init()
    instance = None  # singleton, set in __init__

    @classmethod
    def create(cls, **kwargs):
        """
        :param kwargs: passed to :class:`PythonControl`
        :rtype: PythonControl
        """
        if cls.instance:
            # Idempotent: run only the additional init on the existing singleton.
            cls.instance._additional_init(**kwargs)
            return cls.instance
        print(('RETURNN SprintControl[pid %i] PythonControl create %r' % (os.getpid(), kwargs)))
        return PythonControl(**kwargs)

    def __init__(self, c2p_fd, p2c_fd, **kwargs):
        """
        :param int c2p_fd: child-to-parent file descriptor
        :param int p2c_fd: parent-to-child file descriptor
        """
        print(('RETURNN SprintControl[pid %i] PythonControl init %r' % (os.getpid(), kwargs)))
        assert (not self.__class__.instance), 'only one instance expected'
        self.__class__.instance = self
        self.cond = Condition()  # guards all mutable segment state below
        self.pipe_c2p = os.fdopen(c2p_fd, 'wb')
        self.pipe_p2c = os.fdopen(p2c_fd, 'rb')
        self.sprint_callback = None  # set in _init() when Sprint provides one
        self.sprint_version_number = None
        self.callback = None  # set in run_control_loop()
        self.loss_and_error_signal_via_sprint_callback = False
        self.posteriors_in_log_space = True
        self.sprint_knows_about_log_space_probs = True
        self.control_loop_started = False
        self.control_loop_exited = False
        self.control_thread__have_new_seg = False
        self.control_thread__have_new_error_signal = False
        # State of the currently processed segment:
        self.seg_name = None
        self.seg_len = None
        self.posteriors = None
        self.asked_for_posteriors = False
        self.notified_for_segment = False
        self.error_signal = None
        self.loss = None
        self._init(**kwargs)

    def _additional_init(self, **kwargs):
        # Called via create() when the singleton already exists.
        print(('RETURNN SprintControl[pid %i] PythonControl additional_init %r' % (os.getpid(), kwargs)))
        self._init(**kwargs)

    def _init(self, name, sprint_unit=None, callback=None, version_number=None, min_version_number=None, **kwargs):
        # Remember Sprint version number and callback when called via Sprint.PythonControl.
        if (name == 'Sprint.PythonControl'):
            print(('RETURNN SprintControl[pid %i] init for Sprint.PythonControl %r' % (os.getpid(), kwargs)))
            assert min_version_number
            assert ((version_number or 0) >= min_version_number), 'need new Sprint'
            self.sprint_version_number = version_number
            if callback:
                self.sprint_callback = callback

    def init_processing(self, input_dim=None, output_dim=None, **kwargs):
        """
        This is called via Sprint when we use PythonControl to iterate the corpus,
        i.e. we set --*.action=python-control in Sprint in the NN trainer tool.
        We expect that we use the Sprint callback to calculate loss and error signal.
        This is called on the first segment.
        input_dim/output_dim are set iff we extract features/alignments.

        :param int|None input_dim:
        :param int|None output_dim:
        """
        print(('RETURNN SprintControl[pid %i] init_processing input_dim=%r, output_dim=%r' % (os.getpid(), input_dim, output_dim)))
        print(('RETURNN SprintControl[pid %i] loss_and_error_signal_via_sprint_callback enabled' % (os.getpid(),)))
        self.loss_and_error_signal_via_sprint_callback = True
        assert self.sprint_callback

    def process_segment(self, name, orthography, features=None, alignment=None, soft_alignment=None, **kwargs):
        """
        This is called via Sprint when we use PythonControl to iterate the corpus.

        :param str name: segment name
        :param str orthography: segment orth
        :param numpy.ndarray|None features:
        :param numpy.ndarray|None alignment:
        :param numpy.ndarray|None soft_alignment:
        """
        if Verbose:
            print(('RETURNN SprintControl[pid %i] process_segment name=%r orth=%r' % (os.getpid(), name, (orthography[:10] + '...'))))
        assert self.loss_and_error_signal_via_sprint_callback
        assert (self.seg_name == name)
        assert (self.posteriors.ndim == 2)
        assert (features is None), 'in Sprint, set --*.extract-features=false'
        assert (alignment is None), 'in Sprint, set --*.extract-alignment=false'
        assert (soft_alignment is None), 'in Sprint, set --*.extract-alignment=false'
        (loss, error_signal) = self._get_loss_and_error_signal_via_sprint_callback(seg_name=name, orthography=orthography, posteriors=self.posteriors)
        assert (loss is not None)
        assert (error_signal is not None)
        # Publish the result for the waiting control-loop thread.
        with self.cond:
            self.loss = loss
            self.error_signal = error_signal
            self.cond.notifyAll()

    def _get_loss_and_error_signal_via_sprint_callback(self, seg_name, orthography, posteriors):
        """
        :param str seg_name:
        :param str orthography:
        :param numpy.ndarray posteriors:
        :return: (loss, error_signal)
        :rtype: (float, numpy.ndarray)
        """
        # Decide what kind of error signal Sprint should produce, depending on
        # whether the posteriors are in log-space and whether Sprint can handle that.
        if self.posteriors_in_log_space:
            if self.sprint_knows_about_log_space_probs:
                output_error_type = 'error-signal'
            else:
                posteriors = numpy.exp(posteriors)
                output_error_type = 'error-signal-before-softmax'
        else:
            output_error_type = 'error-signal-before-softmax'
        (loss, error_signal) = self.sprint_callback('calculate_criterion', posteriors=posteriors.T, orthography=orthography, output_error_type=output_error_type, segment_name=seg_name)
        if (loss is None):
            # Sprint skipped this segment; substitute defaults.
            return (self._default_skipped_loss(), self._default_skipped_error_signal(posteriors))
        error_signal = error_signal.T
        assert (error_signal.shape == posteriors.shape)
        return (loss, error_signal)

    def _send(self, data):
        # Pickle `data` to the parent process.
        util.write_pickled_object(self.pipe_c2p, data)

    def _read(self):
        # Read one pickled object from the parent process.
        return util.read_pickled_object(self.pipe_p2c)

    def close(self):
        """
        Close pipe.
        """
        self.pipe_c2p.close()
        self.pipe_p2c.close()

    def _handle_cmd_exit(self):
        # Parent asked us to exit; close the pipes and leave the control loop.
        self.close()
        raise SystemExit

    def _handle_cmd_init(self, name, version):
        # Protocol handshake with the parent.
        assert (version == self.Version)
        return ('SprintControl', self.Version)

    def _handle_cmd_get_loss_and_error_signal(self, seg_name, seg_len, posteriors):
        """
        :param str seg_name: seg name
        :param int seg_len: the segment length in frames
        :param numpy.ndarray posteriors: 2d (time,label) float array

        See SprintErrorSignals.SprintSubprocessInstance.get_loss_and_error_signal().
        """
        # NOTE(review): "long" is a Python-2 name; presumably aliased elsewhere in
        # this module for Py3 -- confirm.
        assert isinstance(seg_len, (int, long, numpy.int32))
        assert (seg_len > 0)
        assert (posteriors.ndim == 2)
        assert (posteriors.shape[0] == seg_len)
        if Verbose:
            print(('RETURNN SprintControl[pid %i] PythonControl handle_cmd_get_loss_and_error_signal: name=%r, len=%r' % (os.getpid(), seg_name, seg_len)))
        with self.cond:
            self.control_thread__have_new_seg = True
            self.control_thread__have_new_error_signal = False
            if isinstance(seg_name, bytes):
                self.seg_name = seg_name.decode('utf-8')
            else:
                self.seg_name = seg_name
            self.seg_len = seg_len
            self.posteriors = posteriors
            self.error_signal = None
            self.loss = None
            self.asked_for_posteriors = False
            self.notified_for_segment = False
            self.cond.notifyAll()
        # With our own threaded callback, this blocks until Sprint provided
        # loss and error signal (see own_tcb_get_loss_and_error_signal).
        (loss, error_signal) = self.callback('get_loss_and_error_signal', seg_name, seg_len, posteriors)
        assert (error_signal.shape == posteriors.shape)
        with self.cond:
            self.control_thread__have_new_error_signal = True
            self.posteriors = None
            self.cond.notifyAll()
        numpy_set_unused(posteriors)
        error_signal = error_signal.astype('float32', copy=False)
        return (loss, error_signal)

    def _handle_cmd_export_allophone_state_fsa_by_segment_name(self, segment_name):
        # Forward the request to the registered callback.
        return self.callback('export_allophone_state_fsa_by_segment_name', segment_name)

    def _handle_cmd(self, cmd, *args):
        """
        :param str cmd:
        :param args:
        :return: some tuple, whatever the func returns
        :rtype: tuple
        """
        # Dispatch by name to one of the _handle_cmd_* methods.
        func = getattr(self, ('_handle_cmd_%s' % cmd))
        return func(*args)

    def handle_next(self):
        """
        Called by self.run_control_loop.
        We catch some message from our parent process, handle it and send back the result.
        """
        import sys
        args = self._read()
        try:
            if (not isinstance(args, tuple)):
                raise TypeError(('expected tuple but got %r' % args))
            if (len(args) < 1):
                raise Exception('need multiple args (cmd, ...)')
            res = self._handle_cmd(*args)
        except Exception as e:
            print(('RETURNN SprintControl[pid %i] PythonControl handle_next exception' % (os.getpid(),)))
            sys.excepthook(*sys.exc_info())
            # Report the error to the parent instead of crashing here.
            self._send(('exception', str(e)))
        else:
            assert isinstance(res, tuple)
            self._send((('ok',) + res))

    def run_control_loop(self, callback, **kwargs):
        """
        Called by Sprint when we are in PythonControl run_control_loop mode.
        Also called by us via self.run_threaded_control_loop().
        """
        print(('RETURNN SprintControl[pid %i] PythonControl run_control_loop: %r, %r' % (os.getpid(), callback, kwargs)))
        print(('RETURNN SprintControl[pid %i] PythonControl run_control_loop control: %r' % (os.getpid(), callback('version'))))
        self.callback = callback
        with self.cond:
            assert (not self.control_loop_started)
            self.control_loop_started = True
            self.cond.notifyAll()
        try:
            # Serve commands from the parent until exit (see _handle_cmd_exit).
            while True:
                self.handle_next()
        finally:
            with self.cond:
                self.control_loop_exited = True
                self.cond.notifyAll()

    def exit(self, **kwargs):
        """
        Called by Sprint.
        """
        print(('RETURNN SprintControl[pid %i] PythonControl exit: %r' % (os.getpid(), kwargs)))

    def check_control_loop_running(self):
        """
        Called by Sprint.
        Starts the threaded control loop if it is not running yet.
        """
        if self.control_loop_started:
            print(('RETURNN SprintControl[pid %i] PythonControl check_control_loop_running: already running' % (os.getpid(),)))
            return
        self.run_threaded_control_loop()

    def run_threaded_control_loop(self):
        """
        Called by Sprint.
        Runs run_control_loop() with our own callback in a daemon thread
        and blocks until the loop has started.
        """
        print(('RETURNN SprintControl[pid %i] PythonControl run_threaded_control_loop' % (os.getpid(),)))
        from threading import Thread

        def control_loop():
            """
            Control loop.
            """
            rnn.init_better_exchook()
            self.run_control_loop(self.own_threaded_callback)
        t = Thread(target=control_loop, name='SprintControl.PythonControl.threaded_control_loop')
        t.daemon = True
        t.start()
        while True:
            with self.cond:
                if self.control_loop_started:
                    return
                # If the thread died before it could start the loop, fail loudly.
                assert t.is_alive()
                self.cond.wait(timeout=1)

    def own_threaded_callback(self, cmd, *args):
        """
        This is used if we run our own control loop via run_threaded_control_loop.
        Dispatches by name to one of the own_tcb_* methods.
        """
        func = getattr(self, ('own_tcb_%s' % cmd))
        return func(*args)

    def own_tcb_version(self):
        """
        :return: version string
        :rtype: str
        """
        return '<version>RETURNN.own_threaded_callback</version>'

    def own_tcb_get_loss_and_error_signal(self, seg_name, seg_len, posteriors):
        """
        :param seg_name:
        :param seg_len:
        :param posteriors:
        :return: (loss, error_signal), once both have been provided
            (see process_segment / set_current_seg_loss / set_current_seg_error_signal)
        """
        while True:
            with self.cond:
                if ((self.loss is not None) and (self.error_signal is not None)):
                    return (self.loss, self.error_signal)
                self.cond.wait(timeout=1)

    def init_segment(self, segment_name):
        """
        Called by Sprint PythonControl in FeedForwardTrainer/SegmentwiseNnTrainer.
        """
        if Verbose:
            print(('RETURNN SprintControl[pid %i] init_segment %s' % (os.getpid(), segment_name)))
        with self.cond:
            assert (self.seg_name == segment_name)
            self.notified_for_segment = True
            self.cond.notifyAll()

    def notify_segment_loss(self, segment_name, loss):
        """
        Called by Sprint PythonControl in FeedForwardTrainer/SegmentwiseNnTrainer.
        """
        if Verbose:
            print(('RETURNN SprintControl[pid %i] notify_segment_loss %s %s' % (os.getpid(), segment_name, loss)))
        self.set_current_seg_loss(seg_name=segment_name, loss=loss)

    def get_current_seg_posteriors(self, seg_len):
        """
        :param int seg_len: just for double checking, the length of the current segment
        :return: matrix (time,label)
        """
        with self.cond:
            assert (self.seg_len == seg_len)
            assert (self.posteriors.shape[0] == seg_len)
            self.asked_for_posteriors = True
            self.cond.notifyAll()
            return self.posteriors

    def set_current_seg_error_signal(self, seg_len, error_signal):
        """
        :param int seg_len: just for double checking, the length of the current segment
        :param error_signal: matrix (time,label)
        """
        with self.cond:
            assert (self.seg_len == seg_len)
            assert (error_signal.ndim == 2)
            assert (error_signal.shape[0] == seg_len)
            self.error_signal = error_signal
            self.cond.notifyAll()

    def set_current_seg_loss(self, seg_name, loss):
        """
        :param str|None seg_name: just for double checking, the name of the current segment. might be None
        :param float loss: the loss of the current seg
        """
        with self.cond:
            if seg_name:
                assert (self.seg_name == seg_name)
            self.loss = loss
            self.cond.notifyAll()

    def _default_skipped_loss(self):
        """
        :return: loss value used for skipped segments
        :rtype: float
        """
        return float('inf')

    def _default_skipped_error_signal(self, posteriors):
        """
        :param numpy.ndarray posteriors:
        :return: zero error signal of the same shape, used for skipped segments
        :rtype: numpy.ndarray
        """
        return numpy.zeros_like(posteriors)

    def _skip_segment_loss_and_error(self):
        # Fill in default loss/error signal so waiting threads can proceed.
        with self.cond:
            assert (self.posteriors is not None)
            if (self.loss is None):
                self.loss = self._default_skipped_loss()
            if (self.error_signal is None):
                self.error_signal = self._default_skipped_error_signal(self.posteriors)
            self.cond.notifyAll()

    def _wait_for_control_loop_error_signal(self):
        # Wait until the control loop consumed the error signal (or moved on / exited).
        while True:
            with self.cond:
                if (self.control_thread__have_new_error_signal or self.control_thread__have_new_seg):
                    break
                if self.control_loop_exited:
                    break
                if ((self.loss is None) or (self.error_signal is None)):
                    break
                if Verbose:
                    print(('RETURNN SprintControl[pid %i] getSegmentList: wait for control loop to handle error signal' % (os.getpid(),)))
                self.cond.wait(timeout=1)

    def segment_list_iterator(self):
        """
        :return: yields segment names
        :rtype: typing.Iterator[str]
        """
        with self.cond:
            assert self.control_loop_started
        while True:
            # Wait for the next segment from the control loop thread.
            while True:
                with self.cond:
                    if self.control_thread__have_new_seg:
                        assert self.seg_name
                        seg_name = self.seg_name
                        self.control_thread__have_new_seg = False
                        break
                    if self.control_loop_exited:
                        return  # no more segments
                    self.cond.wait(timeout=1)
            if Verbose:
                print(('RETURNN SprintControl[pid %i] getSegmentList, yield %r' % (os.getpid(), seg_name)))
            (yield seg_name)
            # We might need to wait for the control loop thread to handle the
            # error signal before we can safely continue with the next segment.
            self._wait_for_control_loop_error_signal()
            with self.cond:
                if (not (self.control_thread__have_new_error_signal or self.control_thread__have_new_seg)):
                    # Sprint never delivered loss/error signal for this segment.
                    print(('RETURNN SprintControl[pid %i] getSegmentList, no error signal, skip segment: %s' % (os.getpid(), seg_name)))
                    if Verbose:
                        # Trigger a stack trace dump for debugging.
                        import signal
                        os.kill(os.getpid(), signal.SIGUSR1)
                    if (not self.notified_for_segment):
                        print(('RETURNN SprintControl[pid %i] getSegmentList: Do you use PythonControl in the Sprint trainer? Got no segment notification.' % (os.getpid(),)))
                    if (not self.asked_for_posteriors):
                        print(('RETURNN SprintControl[pid %i] getSegmentList: Do you use PythonLayer in Sprint? Did not get asked for posteriors.' % (os.getpid(),)))
                    self._skip_segment_loss_and_error()
                    self._wait_for_control_loop_error_signal()
|
def getSegmentList(corpusName, segmentList, **kwargs):
    """
    Called by Sprint PythonSegmentOrder.
    Set python-segment-order = true in Sprint to use this.

    If this is used, this gets called really early.
    If it is used together with the Sprint PythonTrainer,
    it will get called way earlier before the init() below.
    It might also get called multiple times, e.g. if
    Sprint is in interactive mode to calc the seg count.
    This is optional. You can use the SprintInterface
    only for the PythonTrainer.

    :type corpusName: str
    :type segmentList: list[str]
    :rtype: list[str]
    :returns segment list. Can also be an iterator.
    """
    # Remember the segment list globally, so the dataset knows the segment count.
    global segmentOrderList
    segmentOrderList = segmentList
    print(('SprintExternInterface: getSegmentList(%r), num segments: %i' % (corpusName, len(segmentList))))
    return segmentOrderList
|
def exchook(exc_type, exc_obj, exc_tb):
    """
    Replacement for sys.excepthook: nicer tracebacks via better_exchook,
    and a clean exit on KeyboardInterrupt.
    """
    if exc_type is not KeyboardInterrupt:
        better_exchook.better_exchook(exc_type, exc_obj, exc_tb)
        return
    print(('SprintExternInterface[pid %i]: KeyboardInterrupt' % (os.getpid(),)))
    sys.exit(1)
|
def init(**kwargs):
    """
    Called by Sprint when it initializes the PythonTrainer,
    or also for PythonControl.
    Set trainer = python-trainer in Sprint to enable PythonTrainer for the Sprint nn-trainer tool.
    Note that Sprint will call this, i.e. the trainer init lazily quite late,
    only once it sees the first data.

    :param kwargs: all passed to :func:`_init_python_trainer` or :func:`PythonControl.init`
    :rtype: None|PythonControl
    """
    sys.excepthook = exchook
    if kwargs.get('name') == 'Sprint.PythonControl':
        return PythonControl.init(**kwargs)
    return _init_python_trainer(**kwargs)
|
def _parse_config_str(config_str):
    """
    :param str config_str: ","-separated list of "key:value" entries
    :return: parsed key -> value mapping
    :rtype: dict[str,str]
    """
    assert isinstance(config_str, (str, unicode))
    # Values may themselves contain ":", thus split each entry only once.
    entries = [s.split(':', 1) for s in config_str.split(',') if s]
    return dict(entries)
|
def _common_init(config):
    """
    :param dict[str,str] config: parsed config, see :func:`_parse_config_str`
    """
    if not to_bool(config.get('EnableAutoNumpySharedMemPickling', False)):
        return
    if task_system.SharedMemNumpyConfig['enabled']:
        return
    task_system.SharedMemNumpyConfig['enabled'] = True
    print(('SprintExternInterface[pid %i] EnableAutoNumpySharedMemPickling = True' % (os.getpid(),)))
|
def _init_python_trainer(inputDim, outputDim, config, targetMode, **kwargs):
    """
    :type inputDim: int
    :type outputDim: int
    :param str config: config string, passed by Sprint. assumed to be ","-separated
    :param str targetMode: "target-alignment" or "criterion-by-sprint" or so
    """
    print(('SprintExternInterface[pid %i]: PythonTrainer init_PythonTrainer()' % (os.getpid(),)))
    print('inputDim:', inputDim)
    print('outputDim:', outputDim)
    print('config:', config)
    print('targetMode:', targetMode)
    print('other args:', kwargs)
    # Remember the dims globally; feedInputAndTarget() checks against InputDim.
    global InputDim, OutputDim, isInitialized
    InputDim = inputDim
    OutputDim = outputDim
    isInitialized = True
    # "criterion-by-sprint" is not supported by this interface.
    assert (targetMode != 'criterion-by-sprint')
    config = _parse_config_str(config)
    assert (config['action'] == 'ExternSprintDataset')
    _common_init(config)
    _init_global_sprint_dataset(input_dim=inputDim, output_dim=outputDim, config=config)
|
def _init_global_sprint_dataset(input_dim, output_dim, config):
    """
    Create the global ExternSprintDatasetSource once (idempotent).

    :param int input_dim:
    :param int output_dim:
    :param dict[str,str] config: must contain "c2p_fd" and "p2c_fd" pipe descriptors
    """
    global sprintDataset
    if sprintDataset:
        return
    # If getSegmentList() was called before, we know the number of segments.
    num_segments = (len(segmentOrderList) if (segmentOrderList is not None) else None)
    sprintDataset = ExternSprintDatasetSource(c2p_fd=int(config['c2p_fd']), p2c_fd=int(config['p2c_fd']), input_dim=input_dim, output_dim=output_dim, num_segments=num_segments)
|
def exit():
    """
    Called by Sprint, to signal that it is exiting.
    Closes the global dataset pipe connection.
    """
    print('SprintExternInterface: PythonTrainer exit()')
    assert isInitialized
    sprintDataset.close()
|
def feedInput(features, weights=None, segmentName=None):
 """
 Called by Sprint.
 Unsupervised case (no target given); delegates to :func:`feedInputAndTarget`.

 :param numpy.ndarray features:
 :param numpy.ndarray|None weights:
 :param str|None segmentName:
 """
 feedInputAndTarget(segmentName=segmentName, weights=weights, features=features)
|
def feedInputAndTargetAlignment(features, targetAlignment, weights=None, segmentName=None):
 """
 Called by Sprint.
 Supervised case with a target alignment; delegates to :func:`feedInputAndTarget`.

 :param numpy.ndarray features:
 :param numpy.ndarray targetAlignment:
 :param numpy.ndarray|None weights:
 :param str|None segmentName:
 """
 feedInputAndTarget(segmentName=segmentName, weights=weights, alignment=targetAlignment, features=features)
|
def feedInputAndTargetSegmentOrth(features, targetSegmentOrth, weights=None, segmentName=None):
 """
 Called by Sprint.
 Supervised case with the target orthography (transcription); delegates to :func:`feedInputAndTarget`.

 :param numpy.ndarray features:
 :param str targetSegmentOrth:
 :param numpy.ndarray|None weights:
 :param str|None segmentName:
 """
 feedInputAndTarget(segmentName=segmentName, weights=weights, orthography=targetSegmentOrth, features=features)
|
def feedInputAndTarget(features, weights=None, segmentName=None, orthography=None, alignment=None, speaker_name=None, speaker_gender=None, **kwargs):
 """
 Called by Sprint (directly or via the other feed* wrappers).
 Forwards one segment (features + optional targets) to the dataset pipe.

 :param numpy.ndarray features: (feature,time), first axis must match InputDim
 :param numpy.ndarray|None weights: currently unused
 :param str|None segmentName:
 :param str|None orthography:
 :param numpy.ndarray|None alignment:
 :param str|None speaker_name: currently unused
 :param str|None speaker_gender: currently unused
 """
 assert (features.shape[0] == InputDim)
 # Only alignment and orthography are forwarded as targets.
 targets = {key: value for (key, value) in (('classes', alignment), ('orth', orthography)) if value is not None}
 sprintDataset.add_new_data(segment_name=segmentName, features=features, targets=targets)
|
class PythonControl():
 """
 PythonControl, interface for Sprint.
 """

 instance = None  # singleton instance, created by init()

 @classmethod
 def init(cls, **kwargs):
  """
  Called by global init(). Creates the singleton on first call.

  :rtype: PythonControl
  """
  print(('SprintExternInterface[pid %i]: PythonControl %s init %r' % (os.getpid(), __file__, kwargs)))
  if cls.instance is None:
   cls.instance = cls(**kwargs)
  return cls.instance

 def __init__(self, config, **kwargs):
  """
  :param str config: config string from Sprint, see :func:`_parse_config_str`
  """
  parsed = _parse_config_str(config)
  self.config = parsed
  _common_init(parsed)

 def init_processing(self, input_dim, output_dim, **kwargs):
  """
  Called by Sprint.

  :param int input_dim:
  :param int output_dim:
  :param kwargs: maybe others
  """
  print(('SprintExternInterface: PythonControl init_processing inputDim=%i, outputDim=%i, other:%r' % (input_dim, output_dim, kwargs)))
  _init_global_sprint_dataset(input_dim=input_dim, output_dim=output_dim, config=self.config)

 def process_segment(self, name, orthography, features, alignment, soft_alignment, speaker_name=None, **kwargs):
  """
  Called by Sprint. Forwards one segment to the dataset pipe.

  :param str name:
  :param str|None orthography:
  :param numpy.ndarray features:
  :param numpy.ndarray|None alignment: hard alignment; takes precedence over soft_alignment
  :param numpy.ndarray|None soft_alignment: sparse COO triple (used only if no hard alignment)
  :param str|None speaker_name:
  :param kwargs: maybe others
  """
  assert sprintDataset
  targets = {}
  for key, value in (('orth', orthography), ('speaker_name', speaker_name)):
   if value is not None:
    targets[key] = value
  if alignment is not None:
   targets['classes'] = alignment
  elif soft_alignment is not None:
   assert isinstance(soft_alignment, tuple)
   assert (len(soft_alignment) == 3)
   (targets['classes[sparse:coo:2:0]'], targets['classes[sparse:coo:2:1]'], targets['classes[sparse:coo:2:2]']) = soft_alignment
  sprintDataset.add_new_data(segment_name=name, features=features, targets=targets)

 def exit(self, **kwargs):
  """
  Called by Sprint.

  :param kwargs:
  """
  print(('SprintExternInterface: PythonControl exit %r' % kwargs))
  if sprintDataset:
   sprintDataset.close()
|
class ExternSprintDatasetSource():
 """
 This will send data to ExternSprintDataset over a pipe.
 We expect that we are a child process and the parent process has spawned us via ExternSprintDataset
 and is waiting for our data.
 """

 def __init__(self, c2p_fd, p2c_fd, input_dim, output_dim, num_segments):
  """
  :param int c2p_fd: child-to-parent file descriptor
  :param int p2c_fd: parent-to-child file descriptor
  :type input_dim: int
  :type output_dim: int
  :type num_segments: int | None
  :param num_segments: can be None if not known in advance
  """
  self.pipe_c2p = os.fdopen(c2p_fd, 'wb')
  self.pipe_p2c = os.fdopen(p2c_fd, 'rb')
  self._send('init', (input_dim, output_dim, num_segments))

 def _send(self, data_type, args=None):
  """
  Pickle one (data_type, args) packet onto the child-to-parent pipe.

  :param str data_type:
  :param object args:
  """
  assert (data_type is not None)
  packet = (data_type, args)
  util.write_pickled_object(self.pipe_c2p, packet)

 def add_new_data(self, segment_name, features, targets):
  """
  :param str segment_name:
  :param numpy.ndarray features: 2D array, (feature,time)
  :param dict[str,numpy.ndarray] targets: each target is either 1D (time->idx) or 2D (time,class)
  """
  self._send('data', (segment_name, features, targets))

 def close(self):
  """
  Send "exit" to the parent, then close both pipe fds.
  """
  self._send('exit')
  for pipe in (self.pipe_c2p, self.pipe_p2c):
   pipe.close()
|
class DimTypes():
 """
 Defines possible values for ``kind``.
 """

 # No specific kind set.
 Unspecified = None
 # The batch dimension.
 Batch = Entity('batch')
 # A spatial (usually dynamic, e.g. time) dimension.
 Spatial = Entity('spatial')
 # Alias for Spatial.
 Time = Spatial
 # The feature dimension.
 Feature = Entity('feature')
 # All distinct kinds (Time is just an alias of Spatial and thus not listed).
 Types = (Batch, Spatial, Feature)
|
class _DimExtra():
 """
 Rarely-used extra attributes of a :class:`Dim`,
 kept in a separate object to keep the plain :class:`Dim` instances small.
 """

 def __init__(self, *, dim: Dim, kind=DimTypes.Unspecified, vocab=None, undefined=False, special=False, auto_generated=False, match_priority=0, derived_from_tag=None, derived_from_op=None, batch=None, control_flow_ctx=None, src_data: Optional[_t.Tensor]=None, src_axis: Optional[int]=None):
  """
  :param dim: the :class:`Dim` instance this extra object belongs to
  :param Entity|None kind: one of :class:`DimTypes`
  :param returnn.datasets.util.vocabulary.Vocabulary|None vocab:
  :param bool undefined: When this is specified as `None` by the user via `shape`.
  :param bool special: this can not be a dim tag of :class:`Tensor`.
    But this dim tag also does not match anything except itself.
    So it can be used to represent special placeholders with special meanings like ``single_step``.
  :param bool auto_generated:
    This is auto-generated by RETURNN because it was not explicitly specified by the user.
    E.g. for ConvLayer and others.
    This implies certain behavior on equality, such as comparing the description,
    to allow for several independent creations of the dim tag during template construction.
  :param Dim|None derived_from_tag:
    Whether this new tag is reduced, down/up sampled, padded etc from this given other tag.
    In situations where dim tags are being matched (Data.get_common_data),
    the behavior is to consider them as equal,
    and assume that the chain of operations (e.g. padding + valid conv) results in the same dim.
  :param Op|None derived_from_op:
  :param int match_priority: when there is ambiguity between multiple dim tags, this value defines the order
    in which the dimension are assigned to their matching counterparts.
    A dimension tag with a higher priority value is assigned first.
    E.g. for a square matrix used for a linear transformation,
    the reduce dim tag should have a higher priority.
  :param BatchInfo|None batch: for batch-dim, or dynamic dims per batch
  :param ControlFlowContext|None control_flow_ctx:
  :param src_data:
  :param src_axis:
  """
  self.dim = dim
  assert ((kind is None) or (isinstance(kind, Entity) and (kind in DimTypes.Types)))
  self.kind = kind
  self.vocab = vocab
  self.same_as = None
  self.copy_same_as = None
  self.derived_from_tag = derived_from_tag
  self.derived_from_op = derived_from_op
  # Back-link: register this dim as the op output, if not already set.
  if (derived_from_op and (not derived_from_op.output)):
   derived_from_op.output = dim
  self.match_priority = match_priority
  if src_data:
   assert (isinstance(src_data, _t.Tensor) and isinstance(src_axis, int))
  # Inherit batch/ctx from the dyn sizes when not given explicitly...
  if ((not batch) and dim.dyn_size_ext):
   batch = dim.dyn_size_ext.batch
   if (not control_flow_ctx):
    control_flow_ctx = dim.dyn_size_ext.control_flow_ctx
  # ...or else from the tag this dim was derived from.
  if ((not batch) and derived_from_tag):
   batch = derived_from_tag.batch
   if (not control_flow_ctx):
    control_flow_ctx = derived_from_tag.control_flow_ctx
  self.batch = batch
  self.control_flow_ctx = control_flow_ctx
  self.src_data = src_data
  self.src_axis = src_axis
  # Set of other raw size tensors (wrapped via util.RefIdEq) treated as equal to our dyn size.
  self.dyn_size_same = set()
  self.undefined = undefined
  self.special = special
  # Derived tags inherit the auto_generated flag from their base tag.
  if derived_from_tag:
   auto_generated = derived_from_tag.auto_generated
  self.auto_generated = auto_generated
  # (batch, ctx) -> Dim: variants of the same dim for other batch infos / control flow contexts.
  self.same_for_batch_ctx = {}
  # device -> dyn_size_ext copied to that device (see get_dyn_size_ext_for_device).
  self.cache_dyn_size_ext_dev = {}
  # (device, dim_order) -> cached sequence mask (see get_mask).
  self.cache_seq_mask: Dict[(Tuple[(str, Optional[Tuple[(Dim, ...)]])], _t.Tensor)] = {}
  # Cache for dim arithmetic results.
  self.cache_dim_math: Dict[(Tuple[(str, Union[(Dim, int)])], Dim)] = {}

 def __getstate__(self):
  """
  For pickling: drop batch info and all caches (graph/session-specific state),
  and serialize the ``kind`` Entity by its name.
  """
  d = vars(self).copy()
  d['batch'] = None
  d['same_for_batch_ctx'] = {}
  d['cache_dyn_size_ext_dev'] = {}
  d['cache_seq_mask'] = {}
  d['cache_dim_math'] = {}
  d['kind'] = (self.kind.name if self.kind else None)
  return d

 def __setstate__(self, state):
  """
  Inverse of :func:`__getstate__`: restore the ``kind`` Entity from its name.
  """
  self.__dict__.update(state)
  if (self.kind is not None):
   self.kind = {v.name: v for v in DimTypes.Types}[self.kind]
|
class _DimMixin():
 """
 Mixin providing most of the :class:`Dim` logic.
 The attributes declared below are expected to exist on the concrete class.
 """

 # Name / description of the dim.
 name: Optional[str]
 # Static capacity (max size), if known.
 capacity: Optional[int]
 # Static size, or None if dynamic.
 size: Optional[int]
 # Dynamic sizes (e.g. seq lens of shape [B]), or None.
 dyn_size_ext: Optional[_t.Tensor]
 # Cached max of dyn sizes; reset together with raw data.
 _dyn_size_max_value: Optional[_t.Tensor]
 # Rarely-used extra attributes, created on demand (see _DimExtra).
 _extra: Optional[_DimExtra]
 def _handle_extra_kwargs(self: Dim, *, dyn_size: Optional[_t.RawTensorType]=None, **kwargs):
  """
  Handle rarely-used ``Dim`` kwargs: forward them into :class:`_DimExtra`,
  and assign ``dyn_size`` via its property setter (which couples tag and tensor).

  :param dyn_size: raw dyn size tensor, if given
  :param kwargs: forwarded to :class:`_DimExtra`
  """
  if kwargs:
   self._extra = _DimExtra(dim=self, **kwargs)
  if (dyn_size is not None):
   self.dyn_size = dyn_size
  # For dims derived via an op, the dyn size might be computable right away.
  if (self.derived_from_op and self.is_dynamic()):
   self.complete_dyn_size()
@property
def description(self) -> Optional[str]:
'\n :return: description, alias for name\n '
return self.name
@property
def dimension(self) -> Optional[int]:
'\n :return: alias for static size, or None if dynamic\n In __init__, it is more flexible, but we require this API for the attrib (property)\n for compatibility to old code.\n '
return self.size
@property
def kind(self) -> Optional[Entity]:
'\n :return: one in DimTypes (deprecated)\n '
if (not self._extra):
return None
return self._extra.kind
@property
def match_priority(self) -> int:
'\n :return: match priority\n '
if (not self._extra):
return 0
return self._extra.match_priority
@property
def batch(self) -> Optional[BatchInfo]:
'\n :return: batch info (deprecated)\n '
if (not self._extra):
if self.dyn_size_ext:
return self.dyn_size_ext.batch
return None
return self._extra.batch
@batch.setter
def batch(self: Dim, value: Optional[BatchInfo]):
if (self.batch is value):
return
self._make_extra().batch = value
@property
def control_flow_ctx(self) -> Optional[ControlFlowContext]:
'\n :return: control flow context (deprecated)\n '
if (not self._extra):
if self.dyn_size_ext:
return self.dyn_size_ext.control_flow_ctx
return None
return self._extra.control_flow_ctx
@control_flow_ctx.setter
def control_flow_ctx(self: Dim, value: Optional[ControlFlowContext]):
if (self.control_flow_ctx is value):
return
self._make_extra().control_flow_ctx = value
@property
def auto_generated(self) -> bool:
'\n :return: see _DimExtra\n '
if (not self._extra):
return False
return self._extra.auto_generated
@property
def same_as(self) -> Optional[Dim]:
'\n :return: same as other dim\n '
if (not self._extra):
return None
return self._extra.same_as
@same_as.setter
def same_as(self: Dim, value: Optional[_d.Dim]):
if (self.same_as is value):
return
self._make_extra().same_as = value
@property
def special(self) -> bool:
'\n :return: see _DimExtra\n '
if (not self._extra):
return False
return self._extra.special
@property
def derived_from_op(self) -> Optional[Op]:
'\n :return: op\n '
if (not self._extra):
return None
return self._extra.derived_from_op
@property
def derived_from_tag(self) -> Optional[Dim]:
'\n :return: dim\n '
if (not self._extra):
return None
return self._extra.derived_from_tag
def short_repr(self):
'\n :return: some short repr\n :rtype: str\n '
if self.is_batch_dim():
return 'B'
desc = ('%s%r' % (('F' if self.is_feature_dim() else ''), self.get_same_base().description))
if self.special:
desc += '!'
elif (self.dimension is not None):
desc += f'({self.dimension})'
else:
if self.dyn_size_ext:
desc += ('[%s]' % ','.join(self.dyn_size_ext.get_batch_axes_short_description(special_axes=False)))
else:
desc += '[?]'
if self.control_flow_ctx:
desc += ('{ctx=%s}' % self.control_flow_ctx.repr_inner())
return desc
def __copy__(self):
'\n Normally we would not want to get a new tag with ``tag != copy(tag)``.\n https://github.com/rwth-i6/returnn/issues/860\n\n See :func:`Dim.copy` if you explicitly want a copy.\n\n :return: self\n :rtype: Dim\n '
return self
def __deepcopy__(self, memo=None):
'\n Normally we would not want to get a new tag with ``tag != deepcopy(tag)``.\n https://github.com/rwth-i6/returnn/issues/860\n\n See :func:`Dim.copy` if you explicitly want a copy.\n\n :param memo:\n :return: self\n :rtype: Dim\n '
return self
 def __reduce_ex__(self: _d.Dim, protocol):
  """
  Custom pickling:
  the global singleton dims are pickled by global name,
  and the cached ``_dyn_size_max_value`` is dropped (graph/session-specific).
  """
  # Returning a plain string makes pickle store a reference to the module-level global.
  if (self == _d.batch_dim):
   return 'batch_dim'
  if (self == _d.single_step_dim):
   return 'single_step_dim'
  (func, args, (vs, slots), *more_args) = super().__reduce_ex__(protocol)
  assert (not vs)
  assert (isinstance(slots, dict) and ('_dyn_size_max_value' in slots))
  # Do not pickle the cached max value.
  slots['_dyn_size_max_value'] = None
  return (func, args, (vs, slots), *more_args)
 def copy(self, same_as_self=True, description=None, kind=None, match_priority=None):
  """
  :param bool same_as_self: if True, the new tag gets ``same_as`` set to self
  :param str|None description: new description
  :param Entity|None kind: if set, overwrites self.kind
  :param int|None match_priority:
  :return: copy, maybe as new kind. setting same_as to self
  :rtype: Dim
  """
  assert self.can_be_used_as_dim()
  if (not same_as_self):
   # An independent copy gets its own identity, so it must get a new description.
   assert (description is not None), ('%s copy with not same_as_self should have a new description' % self)
  tag = _d.Dim(kind=(kind or self.kind), description=(description or self.description), match_priority=(match_priority if (match_priority is not None) else self.match_priority), dimension=self.dimension, dyn_size_ext=self.dyn_size_ext, batch=self.batch, src_data=(self._extra.src_data if self._extra else None), src_axis=(self._extra.src_axis if self._extra else None))
  if same_as_self:
   tag.same_as = self
  return tag
def reset_eager(self: Dim):
'\n In an eager-based framework, dyn_size_ext.raw_tensor etc will be different in each step.\n This resets everything related.\n This can also include caches.\n '
self.reset_raw()
 def reset_raw(self: Dim, *, only_self: bool=False, include_parents: bool=False):
  """
  Reset all raw tensors.

  :param only_self: stop after resetting this instance, do not follow related dims
  :param include_parents: also follow the inputs of derived_from_op
  """
  # Graph traversal over all related dims, dedup by object identity.
  visited = set()
  queue = [self]
  while queue:
   dim: Dim = queue.pop()
   if (id(dim) in visited):
    continue
   visited.add(id(dim))
   dim.reset_batch_ctx()
   dim._dyn_size_max_value = None
   if dim.dyn_size_ext:
    dim.dyn_size_ext.reset()
   dim_extra = dim._extra
   if dim_extra:
    dim_extra.cache_dyn_size_ext_dev.clear()
    dim_extra.cache_seq_mask.clear()
   if only_self:
    return
   # Enqueue related dims: dim-math results, same-as links, and per-batch/ctx variants.
   if dim_extra:
    queue += dim_extra.cache_dim_math.values()
    if dim_extra.same_as:
     queue.append(dim_extra.same_as)
    if dim_extra.copy_same_as:
     queue.append(dim_extra.copy_same_as)
    queue += dim_extra.same_for_batch_ctx.values()
    if (include_parents and dim_extra.derived_from_op):
     queue.extend(dim_extra.derived_from_op.inputs)
def reset_batch_and_raw(self: Dim):
'\n Reset batch and raw tensors.\n '
self.reset_raw(include_parents=True)
 def transform_tensors(self: Dim, func: Callable[([_t.Tensor], None)]):
  """
  Transforms all tensors inplace, e.g. Numpy to PyTorch or so.
  Resets all caches.

  :param func: operates inplace
  """
  # Keep copies first; reset_raw() below clears dyn_size_ext / _dyn_size_max_value.
  dyn_size_ext = (self.dyn_size_ext.copy() if self.dyn_size_ext else None)
  # NOTE(review): relies on Tensor truthiness here (same as the line above) -- confirm Tensor.__bool__ semantics.
  dyn_size_ext_max = (self._dyn_size_max_value if self._dyn_size_max_value else None)
  self.reset_raw(only_self=True)
  if dyn_size_ext:
   func(dyn_size_ext)
  if dyn_size_ext_max:
   func(dyn_size_ext_max)
  self.dyn_size_ext = dyn_size_ext
  self._dyn_size_max_value = dyn_size_ext_max
 def _can_use_in_ctx(self, ctx):
  """
  Whether this dim (with its dyn sizes) can be used within control flow context ``ctx``.

  :param ControlFlowContext|None ctx:
  :rtype: bool
  """
  if (self.control_flow_ctx == ctx):
   return True
  from returnn.tf.util.data import ControlFlowContext
  # Our ctx must be an ancestor of (or equal to) ctx, otherwise the sizes are not visible there.
  if (not ControlFlowContext.is_parent_or_same(self.control_flow_ctx, ctx)):
   return False
  assert ctx
  if (not self.dyn_size_ext):
   return False
  # NOTE(review): the sizes must not depend on any dim collected from the ctx stack
  # (presumably e.g. the loop dim) -- confirm against ControlFlowContext.collect_parent_dims.
  parent_dims = ControlFlowContext.collect_parent_dims(ctx)
  for dim in self.dyn_size_ext.dim_tags:
   if (dim in parent_dims):
    return False
  return True
 def _validate_in_current_graph(self: Dim):
  """
  Check whether dyn_size_ext / the cached max value are still valid in the current graph;
  if not, reset the batch ctx info.

  :return: True if valid, False if we just reset
  :rtype: bool
  """
  if ((self.dyn_size_ext and (not self.dyn_size_ext.is_valid_in_current_graph())) or (self._dyn_size_max_value and (not self._dyn_size_max_value.is_valid_in_current_graph()))):
   self.reset_batch_ctx()
   return False
  return True
 def _maybe_update(self: Dim):
  """
  Sync dyn sizes (and the cached max value) in both directions between self
  and the registered variant for the same (batch, ctx) key, preferring
  whichever side has a placeholder set.
  """
  if self.is_batch_dim():
   return
  if isinstance(self.size, int):
   return  # static dim, nothing to sync
  if (not self._extra):
   return
  if (not self.batch):
   # Try to infer the batch from the dyn sizes.
   if (self.dyn_size_ext and self.dyn_size_ext.batch):
    self.batch = self.dyn_size_ext.batch
   else:
    return
  extra = self._get_same_base_extra()
  if (not extra):
   return
  key = (self.batch, self.control_flow_ctx)
  # Register self as the variant for this key if none exists yet.
  if (self.dyn_size_ext and (key not in extra.same_for_batch_ctx)):
   extra.same_for_batch_ctx[key] = self
  if (key in extra.same_for_batch_ctx):
   same = extra.same_for_batch_ctx[key]
   if (same is not self):
    # Copy sizes same -> self, preferring a variant with a placeholder.
    if (same.dyn_size_ext and (not self.dyn_size_ext)):
     self.dyn_size_ext = same.dyn_size_ext
    if (same.dyn_size_ext and (same.dyn_size_ext.placeholder is not None)):
     if (self.dyn_size_ext.placeholder is None):
      self.dyn_size_ext = same.dyn_size_ext
    # And the other direction, self -> same.
    if (self.dyn_size_ext and (not same.dyn_size_ext)):
     same.dyn_size_ext = self.dyn_size_ext
    if (self.dyn_size_ext and (self.dyn_size_ext.placeholder is not None)):
     if ((not same.dyn_size_ext) or (same.dyn_size_ext.placeholder is None)):
      same.dyn_size_ext = self.dyn_size_ext
    # Also sync the cached max value, both ways.
    if ((self._dyn_size_max_value is None) and (same._dyn_size_max_value is not None)):
     self._dyn_size_max_value = same._dyn_size_max_value
    if ((same._dyn_size_max_value is None) and (self._dyn_size_max_value is not None)):
     same._dyn_size_max_value = self._dyn_size_max_value
 def get_for_batch_ctx(self: Dim, batch: BatchInfo, ctx: Optional[ControlFlowContext], *, allow_none: bool=False) -> Optional[Dim]:
  """
  Get (or create) the variant of this dim tag for the given batch info and control flow context.

  Warning: This is only for TensorFlow, and also we might want to remove it.
  https://github.com/rwth-i6/returnn/issues/975

  :param BatchInfo batch:
  :param ControlFlowContext|None ctx:
  :param bool allow_none: if no existing variant is found, return None instead of creating one
  """
  from returnn.tensor import ControlFlowContext
  assert self.can_be_used_as_dim()
  # Fast path: self already matches batch+ctx and has dyn sizes.
  if ((self.batch == batch) and self._can_use_in_ctx(ctx) and self.dyn_size_ext):
   self._validate_in_current_graph()
   self._maybe_update()
   # Re-check: the validation might have reset batch/ctx state.
   if ((self.batch == batch) and self._can_use_in_ctx(ctx) and self.dyn_size_ext):
    return self
  if self.is_batch_dim():
   if (self.batch == batch):
    return self
   return batch.batch_dim_tag
  if self.is_static():
   assert (not self.batch)
   return self
  if batch.is_broadcast():
   return self  # no batch-specific variant needed for broadcasting
  dim_tag = None
  if self._extra:
   same_base = self.get_same_base()
   same_base._validate_in_current_graph()
   if same_base._extra:
    # Look for an existing variant for this batch, walking the ctx hierarchy.
    for ctx_ in ControlFlowContext.abs_ctx_stack_with_root(ctx):
     tag = same_base._extra.same_for_batch_ctx.get((batch, ctx_), None)
     if (tag and tag._can_use_in_ctx(ctx) and tag._validate_in_current_graph()):
      assert (tag.batch == batch)
      if tag.dyn_size_ext:
       return tag
      # Variant exists but has no sizes yet; keep it, try to complete it below.
      dim_tag = tag
      break
   if ((same_base.batch == batch) and same_base._can_use_in_ctx(ctx) and same_base.dyn_size_ext):
    return same_base
  else:
   same_base = self
  same_base_extra = same_base._make_extra()
  if ctx:
   # Reduce ctx to what the derived bases actually need (maybe no ctx-specific variant is needed).
   derived_bases = same_base.get_derived_bases_set()
   derived_bases.remove(same_base)
   if derived_bases:
    derived_ctxs = set()
    for d in derived_bases:
     with util.guard_infinite_recursion(_d.Dim.get_for_batch_ctx, d):
      d = d.get_for_batch_ctx(batch=batch, ctx=ctx)
     if d.control_flow_ctx:
      derived_ctxs.add(d.control_flow_ctx)
    if (not derived_ctxs):
     ctx = None
    elif (len(derived_ctxs) == 1):
     ctx = derived_ctxs.pop()
    else:
     raise NotImplementedError(('not yet implemented: multiple derived ctxs: %r' % (derived_ctxs,)))
  if dim_tag:
   assert (not dim_tag.dyn_size_ext)
  dyn_size_ext = None
  if (batch != batch.get_global_base()):
   # batch is derived (e.g. with a beam); try to derive the sizes from a global-batch variant.
   batch_base = batch.get_global_base()
   base_can_use_in_ctx = None
   if ((same_base.batch == batch_base) and same_base._can_use_in_ctx(ctx) and same_base.dyn_size_ext):
    base_can_use_in_ctx = same_base
   elif same_base._extra:
    from returnn.tf.util.data import ControlFlowContext
    for ctx_ in ControlFlowContext.abs_ctx_stack_with_root(ctx):
     tag = same_base._extra.same_for_batch_ctx.get((batch_base, ctx_), None)
     if (tag and tag._can_use_in_ctx(ctx) and tag._validate_in_current_graph() and tag.dyn_size_ext):
      base_can_use_in_ctx = tag
      break
   if (base_can_use_in_ctx and base_can_use_in_ctx.dyn_size_ext):
    if base_can_use_in_ctx.dyn_size_ext.have_batch_axis():
     dyn_size_ext = base_can_use_in_ctx.dyn_size_ext.copy_extend_batch(batch)
    if batch.beam:
     dyn_size_ext = base_can_use_in_ctx.dyn_size_ext.copy_extend_with_beam(batch.beam)
    assert (dyn_size_ext.batch == batch)
    if (dyn_size_ext.placeholder is not None):
     beam_expanded_base_data = getattr(dyn_size_ext.placeholder, '_RETURNN_beam_expanded_base_data', None)
     if batch.beam:
      assert beam_expanded_base_data
     import tensorflow as tf
     from returnn.tf.util.basic import get_valid_scope_name_from_str, same_control_flow_ctx
     # Wrap in tf.identity so this variant gets its own distinct size tensor.
     with same_control_flow_ctx(dyn_size_ext.placeholder):
      dyn_size_ext.placeholder = tf.identity(dyn_size_ext.placeholder, name=get_valid_scope_name_from_str(('%s_get_for_batch_ctx_%s' % (dyn_size_ext.name, batch.short_repr()))))
     if batch.beam:
      dyn_size_ext.placeholder._RETURNN_dyn_size_beam = batch.beam
      dyn_size_ext.placeholder._RETURNN_beam_expanded_base_data = beam_expanded_base_data
  if (not dyn_size_ext):
   # Fall back to a template copied from any candidate whose ctx is compatible.
   candidates = ([self, same_base] + list(same_base_extra.same_for_batch_ctx.values()))
   for other in candidates:
    if (other.dyn_size_ext and ControlFlowContext.is_parent_or_same(other.control_flow_ctx, ctx)):
     dyn_size_ext = other.dyn_size_ext.copy_template()
     dyn_size_ext.beam = batch.beam
     dyn_size_ext.batch = batch
     break
  if dyn_size_ext:
   ctx = dyn_size_ext.control_flow_ctx
  elif dim_tag:
   ctx = dim_tag.control_flow_ctx
  # Maybe self or the base tag can directly take the role of the variant.
  for candidate in [self, same_base]:
   if (((candidate.batch == batch) or ((not candidate.batch) and batch.is_global_batch())) and (not candidate.control_flow_ctx) and (not ctx)):
    candidate.batch = batch
    if dyn_size_ext:
     if candidate.dyn_size_ext:
      candidate.dyn_size_ext.batch = batch
      assert (candidate.dyn_size_ext.dim_tags == dyn_size_ext.dim_tags)
     else:
      candidate.dyn_size_ext = dyn_size_ext
      assert (not candidate.dyn_size_ext.control_flow_ctx)
    elif candidate.dyn_size_ext:
     candidate.dyn_size_ext.batch = batch
    else:
     candidate.complete_dyn_size(template_only=True)
    if (not dim_tag):
     dim_tag = candidate
  if (not dim_tag):
   if allow_none:
    return None
   # Create a fresh variant tag, linked back to the base.
   dim_tag = _d.Dim(kind=self.kind, description=self.description, dimension=self.dimension, auto_generated=self.auto_generated, batch=batch, control_flow_ctx=ctx, dyn_size_ext=dyn_size_ext)
   dim_tag.same_as = same_base
   if (dyn_size_ext and (dyn_size_ext.placeholder is not None)):
    if (_d.Dim.get_tag_from_size_tensor(dyn_size_ext.placeholder) is None):
     dim_tag.set_tag_on_size_tensor(dyn_size_ext.placeholder, batch=batch)
  # Register the variant, and make sure it ends up with sizes (or at least a template).
  same_base_extra.same_for_batch_ctx[(batch, ctx)] = dim_tag
  if dyn_size_ext:
   if (not dim_tag.dyn_size_ext):
    dim_tag.dyn_size_ext = dyn_size_ext
   else:
    assert (dim_tag.dyn_size_ext.dims == dyn_size_ext.dims)
  elif dim_tag.dyn_size_ext:
   pass  # keep the existing sizes
  else:
   dim_tag.complete_dyn_size(template_only=True)
  return dim_tag
 def reset_batch_ctx(self: Dim):
  """
  For the self instance, reset batch and context (and drop the related caches).
  """
  if self._extra:
   # Unregister this (batch, ctx) variant and clear per-batch caches.
   self._extra.same_for_batch_ctx.pop((self.batch, self.control_flow_ctx), None)
   self._extra.cache_seq_mask.clear()
   self._extra.cache_dyn_size_ext_dev.clear()
  self.batch = None
  self.control_flow_ctx = None
  if (self.dyn_size_ext and self.dyn_size_ext.batch):
   # Keep only a batch-independent template of the dyn sizes.
   self.dyn_size_ext = self.dyn_size_ext.copy_template()
   self.dyn_size_ext.batch = None
   self.dyn_size_ext.control_flow_ctx = None
  self._dyn_size_max_value = None
 def set_dyn_size_ext_for_batch_ctx(self, batch, ctx, dyn_size_ext):
  """
  Set the dyn sizes on the dim-tag variant matching (batch, ctx).

  :param BatchInfo batch:
  :param ControlFlowContext|None ctx:
  :param Data dyn_size_ext: must already have the given batch and ctx
  """
  assert self.can_be_used_as_dim()
  same = self.get_for_batch_ctx(batch, ctx)
  assert ((dyn_size_ext.batch == batch) and (dyn_size_ext.control_flow_ctx == ctx))
  if same.dyn_size_ext:
   # A template already exists: only take over the placeholder.
   assert (same.dyn_size_ext.dim_tags == dyn_size_ext.dim_tags)
   if (dyn_size_ext.placeholder is not None):
    same.dyn_size_ext.placeholder = dyn_size_ext.placeholder
  else:
   same.dyn_size_ext = dyn_size_ext
  self._maybe_update()
 def get_dyn_size_ext_for_batch_ctx(self, batch, ctx, template_only=False):
  """
  Get the dyn sizes of the dim-tag variant matching (batch, ctx), if any.

  :param BatchInfo|None batch:
  :param ControlFlowContext|None ctx:
  :param bool template_only: passed to complete_dyn_size
  :rtype: _t.Tensor|None
  """
  assert self.can_be_used_as_dim()
  if ((not batch) and self.batch):
   # No batch given: fall back to our own global base batch.
   batch = self.batch.get_global_base()
  if (not batch):
   assert ((batch == self.batch) and (ctx == self.control_flow_ctx))
   return self.dyn_size_ext
  same = self.get_for_batch_ctx(batch, ctx, allow_none=True)
  if (not same):
   return None
  same.complete_dyn_size(template_only=template_only)
  return same.dyn_size_ext
@property
def dyn_size(self):
'\n :return: dyn size / seq len (usually of shape [B]), or None\n If the dyn size can potentially be of a different shape, directly access dyn_size_ext.\n :rtype: tf.Tensor|None\n '
if self.dyn_size_ext:
return self.dyn_size_ext.placeholder
return None
 @dyn_size.setter
 def dyn_size(self, dyn_size):
  """
  Couple the raw dyn size tensor with this dim tag.
  Also see :func:`set_dyn_size_ext_for_batch_ctx`.

  :param tf.Tensor dyn_size:
  """
  if (self.dyn_size_ext and (self.dyn_size_ext.placeholder is dyn_size)):
   return  # already set, nothing to do
  assert self.can_be_used_as_dim()
  other = _d.Dim.get_tag_from_size_tensor(dyn_size)
  if other:
   # The size tensor already belongs to another tag: declare us the same and take over its info.
   self.declare_same_as(other)
   if self.batch:
    assert ((self.batch == other.batch) and (self.control_flow_ctx == other.control_flow_ctx))
   else:
    self.batch = other.batch
    self.control_flow_ctx = other.control_flow_ctx
   self.dyn_size_ext = other.dyn_size_ext
   assert (self.dyn_size_ext.placeholder is dyn_size)
   return
  # Fresh size tensor: wrap it and register ourselves as its tag.
  self._init_default_dyn_size_ext(dyn_size)
  self.set_tag_on_size_tensor(dyn_size)
  assert (self.dyn_size_ext.placeholder is dyn_size)
 def _init_default_dyn_size_ext(self, dyn_size):
  """
  Create (or fill) the default dyn_size_ext wrapping the raw ``dyn_size`` tensor.

  :param tf.Tensor dyn_size: per-batch-entry sizes (presumably shape [B] -- not checked here)
  """
  if self.dyn_size_ext:
   if (self.dyn_size_ext.placeholder is not None):
    # Already have a placeholder: it must be this very tensor.
    assert (self.dyn_size_ext.placeholder is dyn_size)
  else:
   # Build the default template: scalar per batch entry.
   beam = getattr(dyn_size, '_RETURNN_dyn_size_beam', None)
   self.dyn_size_ext = _t.Tensor(name=(('%s:dyn_size' % self.description) if self.description else dyn_size.op.name), dtype=_t.Tensor.size_dtype, shape=(), batch_dim_axis=0, batch=self.batch, beam=beam, control_flow_ctx=self.control_flow_ctx)
  self.dyn_size_ext.placeholder = dyn_size
def get_dyn_size_ext_for_device(self: Dim, device: Optional[str]) -> _t.Tensor:
'\n :return: dyn_size_ext on the device\n '
assert self.dyn_size_ext
if (not device):
return self.dyn_size_ext
import returnn.frontend as rf
self._make_extra()
if (device in self._extra.cache_dyn_size_ext_dev):
return self._extra.cache_dyn_size_ext_dev[device]
self._extra.cache_dyn_size_ext_dev[device] = rf.copy_to_device(self.dyn_size_ext, device=device)
return self._extra.cache_dyn_size_ext_dev[device]
 def get_mask(self: Dim, *, dim_order: Optional[Sequence[Dim]]=None, device: Optional[str]=None) -> _t.Tensor:
  """
  :param dim_order: if given, the dims of the mask will be in this order.
    This can be useful if the mask is broadcasted against some other tensor.
  :param str|None device: if given, will move the mask to this device
  :return: if need_masking(), the corresponding mask.
    If this is e.g. the time-dim T of shape [B], then the mask will be of shape [B,T].
    The mask could be used with :func:`masked_select` (``boolean_mask``) or ``where``.
  """
  import returnn.frontend as rf
  assert (self.dyn_size_ext and (self.dyn_size_ext.raw_tensor is not None))
  backend = self.dyn_size_ext._raw_backend
  if (not device):
   device = rf.get_default_device()
  self._make_extra()
  # Default dim order: the dims of the sizes followed by self (e.g. [B,T]).
  dim_order_default = (self.dyn_size_ext.dims + (self,))
  if (dim_order is not None):
   # Keep only the relevant dims, in the requested order.
   dim_order = tuple((d for d in dim_order if (d in dim_order_default)))
  else:
   dim_order = dim_order_default
  cache_key = (device, dim_order)
  if (cache_key in self._extra.cache_seq_mask):
   return self._extra.cache_seq_mask[cache_key]
  if self._extra.copy_same_as:
   # Delegate to the dim we were copied from, then map its dim back to self.
   if dim_order:
    dim_order = tuple(((self._extra.copy_same_as if (d == self) else d) for d in dim_order))
   mask = self._extra.copy_same_as.get_mask(dim_order=dim_order, device=device)
   (mask, _) = rf.replace_dim(mask, in_dim=self._extra.copy_same_as, out_dim=self)
   return mask
  size_ext = self.get_dyn_size_ext_for_device(device)
  max_idx = rf.reduce(size_ext, axis=size_ext.dims, mode='max', use_mask=False)
  size_ext = size_ext.copy_masked(max_idx)
  idx_range = backend.range_over_dim(self, device=device)
  # mask[..., t] = (t < seq_len[...])
  seq_mask = rf.compare(idx_range, '<', size_ext, allow_broadcast_all_sources=True, dim_order=dim_order)
  self._extra.cache_seq_mask[cache_key] = seq_mask
  return seq_mask
def is_batch_dim(self):
'\n :return: whether this dim tag is of kind batch\n :rtype: bool\n '
return (self.kind == DimTypes.Batch)
def is_feature_dim(self):
'\n :return: whether this dim tag is of kind feature\n :rtype: bool\n '
return (self.kind == DimTypes.Feature)
def is_spatial_dim(self):
'\n :return: whether this dim tag is of kind spatial\n :rtype: bool\n '
return (self.kind == DimTypes.Spatial)
def is_dim_known(self):
'\n :return: whether we know the dimension; basically whether this is defined\n (although `not self.undefined` is defined slightly differently)\n :rtype: bool\n '
if self.is_batch_dim():
return True
if ((not self.dyn_size_ext) and (self.dimension is not None)):
return True
if self.dyn_size_ext:
return True
extra = self._get_same_base_extra()
if extra:
for (_, other) in extra.same_for_batch_ctx.items():
if other.dyn_size_ext:
return True
return False
 def is_dim_known_in_batch_ctx(self: Dim, batch: BatchInfo, ctx: Optional[ControlFlowContext]) -> bool:
  """
  :return: whether :func:`get_for_batch_ctx` would return a valid existing dim tag
  """
  from returnn.tensor import ControlFlowContext
  if self.is_batch_dim():
   return True
  if self.is_static():
   return True
  dim = self.get_for_batch_ctx(batch=batch, ctx=ctx, allow_none=True)
  if dim:
   return bool(dim.dyn_size_ext)
  # No existing variant: check self, the base tag, and all known variants for a compatible ctx.
  candidates = [self, self.get_same_base()]
  if self._extra:
   candidates += list(self._extra.same_for_batch_ctx.values())
  for dim in candidates:
   if (ControlFlowContext.is_parent_or_same(dim.control_flow_ctx, ctx) and dim.dyn_size_ext):
    return True
  return False
def is_dynamic_seq_length(self) -> bool:
'\n :return: whether the dim is not static. usually means that it has seq lengths\n '
return ((self.dimension is None) and ((self.dyn_size_ext and self.dyn_size_ext.dims) or ((not self.dyn_size_ext) and (not self.is_batch_dim()))))
def is_dynamic(self) -> bool:
'\n :return: whether the dim is not static. usually means that it has seq lengths\n '
return (self.dimension is None)
def is_static(self) -> bool:
'\n :return: static\n '
return (self.dimension is not None)
 def need_masking(self):
  """
  :return: whether tensors with this dim potentially contain padded entries
    beyond the actual sizes, i.e. masking would be needed.
  :rtype: bool
  """
  if self.is_static():
   # Static with a larger capacity implies padding.
   if (self.capacity is not None):
    return (self.size < self.capacity)
   return False
  if (self.capacity is not None):
   return True
  if (not self.dyn_size_ext):
   # Sizes unknown: the batch dim needs no masking; otherwise assume masking is needed.
   if self.is_batch_dim():
    return False
   return True
  # A scalar dyn size (batch_ndim == 0) means all entries have the same length -> no masking.
  return (self.dyn_size_ext.batch_ndim > 0)
def can_be_used_as_dim(self):
'\n :return: whether this can be used as a dim in :class:`Data`, i.e. it is not special\n :rtype: bool\n '
return (not self.special)
 def is_same_size_tensor(self, x):
  """
  :param tf.Tensor x:
  :return: whether this dim tag for this specific batch (incl beam) is the same as the given size
  :rtype: bool
  """
  # Directly our own size tensor?
  if (self.dyn_size_ext and (x is self.dyn_size_ext.placeholder)):
   return True
  # A size tensor owned by an equal tag?
  tag = _DimMixin.get_tag_from_size_tensor(x)
  if (tag and (tag == self)):
   return True
  if (not self._extra):
   return False
  # A tensor explicitly registered as the same size (identity-based lookup).
  if (util.RefIdEq(x) in self._extra.dyn_size_same):
   return True
  return False
def set_tag_on_size_tensor(self: Dim, x, batch=None, same_as_before=False) -> Dim:
    """
    This function is used
    to couple a tf.Tensor instance representing the dyn size
    with the dim tag.

    This is usually a newly created dim tag,
    which is yet unset.

    It is also used to couple an existing dim tag with other dyn sizes
    which just differ by an expansion of the batch (e.g. search beam).

    See also :func:`get_tag_from_size_tensor`.
    Also see :func:`set_dyn_size_ext_for_batch_ctx`.

    :param x: raw tensor, for example tf.Tensor
    :param BatchInfo|None batch:
    :param bool same_as_before: implies it was set before, and the new size is the same.
        e.g. it could be some identity with added checks, or other change.
    :return: self or new dim tag
    """
    assert self.can_be_used_as_dim()
    if hasattr(x, '_is_size_of_dim_tag'):
        # x must not already be coupled to a different dim tag.
        assert (x._is_size_of_dim_tag in (None, self))
    if (self.batch and batch and (self.batch != batch)):
        # Different batch (e.g. beam expansion): delegate to the per-batch variant of this dim.
        assert (not same_as_before)
        new_dim_tag = self.get_for_batch_ctx(batch=batch, ctx=self.control_flow_ctx)
        new_dim_tag.set_tag_on_size_tensor(x, batch=batch)
        return new_dim_tag
    if ((self.dyn_size is not None) and (self.dyn_size is not x)):
        # We already have a (different) dyn size tensor.
        if (self._extra and (util.RefIdEq(x) in self._extra.dyn_size_same)):
            pass  # already registered as equivalent
        elif same_as_before:
            # Remember x as an equivalent size tensor (by identity).
            self._make_extra().dyn_size_same.add(util.RefIdEq(x))
        else:
            assert (self.batch and batch)
            from returnn.frontend._backend import get_backend_by_raw_tensor_type
            raise Exception('\n'.join([('%r (%r) already has size %r, and another incompatible size %r (batch %r) is being assigned.' % (self, self.description, self.dyn_size, x, batch)), '\nNew size computation graph:', get_backend_by_raw_tensor_type(type(x)).format_graph_output(x, max_depth=3), '\nThis is maybe the result of an incorrect declare_same_as. ', ('same_as = %s' % self.same_as)]))
    if (batch and getattr(x, '_RETURNN_dyn_size_beam', None)):
        # x carries an expected beam; it must match the given batch.
        assert (batch.beam == getattr(x, '_RETURNN_dyn_size_beam')), ('%s: dyn size %s has unexpected batch %s, expected %s' % (self, x, batch, getattr(x, '_RETURNN_dyn_size_beam')))
    if (self.batch and batch):
        assert (self.batch == batch)
    elif (batch and (not self.batch)):
        # Adopt the batch info from the size tensor.
        self.batch = batch
    if ((not self.is_batch_dim()) and self.is_dynamic()):
        if same_as_before:
            assert (self.dyn_size_ext and (self.dyn_size_ext.placeholder is not None))
        else:
            self._init_default_dyn_size_ext(x)
    if (getattr(x, '_is_size_of_dim_tag', None) is None):
        # Attach ourselves to the size tensor, for later get_tag_from_size_tensor lookups.
        setattr(x, '_is_size_of_dim_tag', self)
    return self
@classmethod
def get_tag_from_size_tensor(cls, x) -> Optional[_d.Dim]:
    """
    :param tf.Tensor x: size tensor. has been set before via :func:`set_tag_on_size_tensor`
    :return: the attached dim tag, or None if none was attached
    """
    try:
        return x._is_size_of_dim_tag
    except AttributeError:
        return None
def complete_dyn_size(self, *, template_only=False, _backend=None):
    """
    In case we can calculate the dyn size, do that now.

    This applies when this dim was derived from other dims via some op
    (``derived_from_op``); then the same op is applied to the inputs' dyn sizes.

    :param bool template_only: only set up ``dyn_size_ext`` as a template, without raw tensor
    :param _backend: backend to use for the computation; inferred from the op inputs if None
    """
    if self.is_static():
        return  # static dim, nothing to complete
    self._validate_in_current_graph()
    if (self.dyn_size_ext and ((self.dyn_size_ext.placeholder is not None) or template_only)):
        return  # already complete (enough for what was requested)
    same_base = self.get_same_base()
    op = (self.derived_from_op or same_base.derived_from_op)
    if (not op):
        return  # not derived from other dims, nothing we can calculate
    # First complete all op inputs (for the right batch/ctx).
    for x_dim in op.inputs:
        if self.batch:
            x_dim = x_dim.get_for_batch_ctx(self.batch, self.control_flow_ctx)
        x_dim.complete_dyn_size(template_only=template_only)
    # Infer the backend from the first input which has a raw size tensor.
    backend = _backend
    if (not backend):
        for x_dim in op.inputs:
            if self.batch:
                x_dim = x_dim.get_for_batch_ctx(self.batch, self.control_flow_ctx)
            if (x_dim.dyn_size_ext and (x_dim.dyn_size_ext.raw_tensor is not None)):
                backend = x_dim.dyn_size_ext._raw_backend
                break
    # Infer the size dtype from the first input which has a dyn size.
    size_dtype = None
    for x_dim in op.inputs:
        if self.batch:
            x_dim = x_dim.get_for_batch_ctx(self.batch, self.control_flow_ctx)
        if x_dim.dyn_size_ext:
            size_dtype = x_dim.dyn_size_ext.dtype
            break
    if (not size_dtype):
        size_dtype = _t.Tensor.size_dtype
    import numpy
    import returnn.frontend as rf
    import contextlib
    # TF-specific helpers; only set up for the (graph-mode) TF backend.
    tf = tf_util = tensor_util = None
    if (backend and backend.is_tensorflow):
        import tensorflow as tf
        if (backend.RawTensorType == tf.Tensor):
            from returnn.tf.util import basic as tf_util
            from tensorflow.python.framework import tensor_util
        else:
            tf = None
    # Normalize the op kind by stripping the operand-order suffix.
    kind = op.kind
    if kind.endswith('_right'):
        kind = kind[:(- len('_right'))]
    if kind.endswith('_left'):
        kind = kind[:(- len('_left'))]
    y_name = (self.description + ':seq-length')

    def _is_negative(x__):
        # Whether any entry of x__ (number, numpy array, or constant TF tensor) is negative.
        if isinstance(x__, numpy.ndarray):
            return (x__ < 0).any()
        if isinstance(x__, (int, float, numpy.number)):
            return (x__ < 0)
        if (not tf):
            return False
        assert isinstance(x__, tf.Tensor)
        x__ = tensor_util.constant_value(x__)
        if (x__ is not None):
            return _is_negative(x__)
        return False  # not known at graph construction time; assume non-negative

    if tf:
        _ctx_for_inputs = tf_util.same_control_flow_ctx
    else:

        @contextlib.contextmanager
        def _ctx_for_inputs(_arg):
            # No-op context manager for non-TF backends.
            (yield)

    def _bin_op_tf(a, b):
        # Apply the (normalized) op kind on raw TF tensors / ints. Returns None in template mode.
        if template_only:
            return None
        if ((a is None) or (b is None)):
            return None
        assert (isinstance(a, tf.Tensor) and isinstance(b, (int, tf.Tensor)))
        if (kind == 'add'):
            # Negative operands can produce negative sizes; clip via simplify_non_negative_seq_length.
            use_relu = (_is_negative(a) or _is_negative(b))
            if use_relu:
                return tf.convert_to_tensor(tf_util.simplify_non_negative_seq_length((a + b)))
            return (a + b)
        elif (kind == 'sub'):
            return tf.convert_to_tensor(tf_util.simplify_non_negative_seq_length((a - b)))
        elif (kind == 'mul'):
            return (a * b)
        elif (kind in ('floordiv', 'truediv')):
            if util.is_onnx_export_global():
                return tf_util.onnx_compat_floor_div(a, b)
            return (a // b)
        elif (kind == 'ceildiv'):
            # ceildiv(a, b) == -(-a // b)
            if util.is_onnx_export_global():
                return (- tf_util.onnx_compat_floor_div((- a), b))
            return (- ((- a) // b))
        else:
            raise ValueError(('unknown op kind %r' % op.kind))

    def _bin_op(a, b):
        # Apply the op kind on Tensor/int operands. a is the accumulated result, None initially.
        if (a is None):
            # First operand: just lift b into a Tensor (template or with raw value).
            if isinstance(b, int):
                if ((not template_only) and backend and (not tf)):
                    return backend.convert_to_tensor(b, dims=(), dtype=size_dtype, name=y_name, device='cpu')
                else:
                    y__ = _t.Tensor(name=y_name, dims=(), dtype=size_dtype)
                    if ((not template_only) and tf):
                        with tf.control_dependencies(None):
                            y__.raw_tensor = tf.constant(b)
                    return y__
            elif isinstance(b, _t.Tensor):
                return b.copy(name=y_name)
            else:
                raise TypeError(f'complete_dyn_size: _bin_op: unexpected type {type(b)}')
        assert isinstance(a, _t.Tensor)
        if (template_only or (not backend)):
            # Template mode: only compute the resulting dims.
            if isinstance(b, _t.Tensor):
                return _t.Tensor.get_common_data([a, b], allow_broadcast_all_sources=True)
            assert isinstance(b, int)
            return a.copy_template()
        if tf:
            # TF graph mode: broadcast operands to common dims, then apply on raw tensors.
            if isinstance(b, _t.Tensor):
                res = _t.Tensor.get_common_data([a, b], allow_broadcast_all_sources=True)
                with _ctx_for_inputs(a):
                    a = (a.copy_compatible_to_dims(res.dims) if a.dims else a)
                with _ctx_for_inputs(b):
                    b = (b.copy_compatible_to_dims(res.dims) if b.dims else b)
            else:
                assert isinstance(b, int)
                res = a.copy_template()
            with _ctx_for_inputs([a, b]):
                res.raw_tensor = _bin_op_tf(a.raw_tensor, (b.raw_tensor if isinstance(b, _t.Tensor) else b))
            return res
        # Generic frontend path.
        if (kind == 'add'):
            return _relu(rf.combine_bc(a, 'add', b))
        elif (kind == 'sub'):
            return _relu(rf.combine_bc(a, 'sub', b))
        elif (kind == 'mul'):
            return rf.combine_bc(a, 'mul', b)
        elif (kind in ('floordiv', 'truediv')):
            return rf.combine_bc(a, 'floordiv', b)
        elif (kind == 'ceildiv'):
            return (- rf.combine_bc((- a), 'floordiv', b))
        else:
            raise ValueError(('unknown op kind %r' % op.kind))

    def _relu(a):
        # Clip to non-negative; sizes must never be negative.
        if isinstance(a, _t.Tensor):
            return rf.relu(a)
        elif isinstance(a, int):
            return max(a, 0)
        else:
            raise TypeError(f'complete_dyn_size: _relu: unexpected type {type(a)}')

    y: Optional[_t.Tensor] = None
    y_max_value: Optional[_t.Tensor] = None
    inputs = list(op.inputs)
    assert inputs
    # Fold the op over all inputs, accumulating the dyn size (y)
    # and, when raw tensors are available, the max dim value (y_max_value).
    for x_dim in inputs:
        x_dim: Dim
        if self.batch:
            x_dim = x_dim.get_for_batch_ctx(self.batch, self.control_flow_ctx)
        x_dim.complete_dyn_size(template_only=template_only, _backend=backend)
        if ((not x_dim.dyn_size_ext) and (not x_dim.dimension)):
            return  # this input is unknown; cannot compute our size
        y = _bin_op(y, (x_dim.dimension or x_dim.dyn_size_ext))
        if ((not template_only) and (y.raw_tensor is not None)):
            y_max_value = _bin_op(y_max_value, x_dim.get_dim_value_tensor())
    assert y, f'op {op}?'
    if self.dyn_size_ext:
        # If we had a template before, the computed dims must match it.
        assert (self.dyn_size_ext.dim_tags == y.dim_tags)
    if y.batch:
        if self.batch:
            assert (self.batch == y.batch)
        else:
            self.batch = y.batch
    self.dyn_size_ext = y
    if ((not template_only) and y_max_value):
        assert (y_max_value and (y_max_value.raw_tensor is not None))
        self._dyn_size_max_value = y_max_value
    if (tf and (y.placeholder is not None)):
        self.set_tag_on_size_tensor(y.placeholder)
# Class-level switch: when True, __eq__/__ne__ use the simplified _eq_simple
# comparison (and __hash__ stays consistent with it); when False, the full
# is_equal logic with default options is used.
_SimpleEquality = False
def is_equal(self: Dim, other: Dim, *, ignore_feature_dim=False, allow_same_feature_dim=False, allow_same_spatial_dim=None, treat_feature_as_spatial=False, broadcast_matches=False, unknown_spatial_matches=False, undefined_matches=False, derived_matches=False, allow_old_behavior=False) -> bool:
    """
    Compares self to other for equality.

    Note that the default behavior is very restrictive.
    Use functions such as :func:`get_all_dimension_tags` or :func:`get_existing_tag_from_collection`
    to explicitly specify the behavior for the comparison.

    Also note that the definition is slightly ad-hoc for some cases,
    and might potentially change in the future.
    https://github.com/rwth-i6/returnn/issues/634

    :param Dim other:
    :param bool ignore_feature_dim:
    :param bool allow_same_feature_dim:
    :param bool|None allow_same_spatial_dim: defaults to allow_same_feature_dim
    :param bool treat_feature_as_spatial:
    :param bool broadcast_matches:
    :param bool unknown_spatial_matches:
    :param bool undefined_matches:
    :param bool derived_matches:
    :param bool allow_old_behavior: useful e.g. for find_matching_dim_map
    """
    if (self is other):
        return True
    if (not isinstance(other, _d.Dim)):
        return False
    if (self.special or other.special):
        # Special dims are only ever equal by identity (handled above).
        return False
    if (allow_same_spatial_dim is None):
        allow_same_spatial_dim = allow_same_feature_dim
    # Compare the same_as roots (optionally following derived_from as well).
    self_base = (self.get_same_derived_base(same_dim=True) if derived_matches else self.get_same_base())
    other_base = (other.get_same_derived_base(same_dim=True) if derived_matches else other.get_same_base())
    if (self_base is other_base):
        return True
    self_kind = self.kind
    other_kind = other.kind
    if ((self_kind == other_kind == DimTypes.Feature) and ignore_feature_dim):
        return True
    if treat_feature_as_spatial:
        # Map feature (or unset) kind to spatial for the rest of the comparison.
        if ((self_kind == DimTypes.Feature) or (not self_kind)):
            self_kind = DimTypes.Spatial
        if ((other_kind == DimTypes.Feature) or (not other_kind)):
            other_kind = DimTypes.Spatial
    if (self.dimension != other.dimension):
        # Different static dims only match as broadcast (dim 1, auto-generated).
        if (broadcast_matches and (((self.dimension == 1) and self.auto_generated) or ((other.dimension == 1) and other.auto_generated))):
            pass
        else:
            return False
    if (self_kind != other_kind):
        return False
    if (self_kind == other_kind == DimTypes.Batch):
        # All batch dims are considered equal.
        return True
    if (self._SimpleEquality and (not allow_old_behavior)):
        # Simple-equality mode: non-auto-generated dims only match by identity/base
        # (checked above), except for the broadcast case.
        if ((not self.auto_generated) or (not other.auto_generated)):
            if (broadcast_matches and (((self.dimension == 1) and self.auto_generated) or ((other.dimension == 1) and other.auto_generated))):
                pass
            else:
                return False
    if (self_kind == other_kind == DimTypes.Feature):
        if allow_same_feature_dim:
            return True
    if (self_kind == other_kind == DimTypes.Spatial):
        if allow_same_spatial_dim:
            if (self.dimension is not None):
                return True
            if (broadcast_matches and ((self.dimension == 1) or (other.dimension == 1))):
                return True
        if (unknown_spatial_matches and ((self.dyn_size is None) or (other.dyn_size is None))):
            return True
        if (undefined_matches and (self.undefined or other.undefined)):
            return True
    if (self.auto_generated and other.auto_generated and (self.description == other.description)):
        # Auto-generated dims with identical descriptions are treated as equal.
        return True
    return False
def __eq__(self: Dim, other: Dim) -> bool:
    """
    :param other:
    :return: :func:`is_equal` with default options
    """
    if self is other:
        return True
    if not isinstance(other, _d.Dim):
        return False
    # Dispatch depending on the equality mode.
    compare = self._eq_simple if self._SimpleEquality else self.is_equal
    return compare(other)
def _eq_simple(self: Dim, other: Dim) -> bool:
    # Simplified equality: identity after resolving same_as chains,
    # plus batch-dims-are-all-equal and matching auto-generated descriptions.
    if self is other:
        return True
    if not isinstance(other, _d.Dim):
        return False
    a, b = self, other
    while a._extra and a._extra.same_as:
        a = a._extra.same_as
    while b._extra and b._extra.same_as:
        b = b._extra.same_as
    if a is b:
        return True
    extra_a, extra_b = a._extra, b._extra
    if extra_a and extra_b:
        if extra_a.kind == extra_b.kind == DimTypes.Batch:
            return True
        if extra_a.auto_generated and extra_b.auto_generated and a.description == b.description:
            return True
    return False
def __ne__(self: Dim, other: Dim) -> bool:
    """
    :param other:
    :return: negation of :func:`__eq__`
    """
    if self is other:
        return False
    if not isinstance(other, _d.Dim):
        return True
    compare = self._eq_simple if self._SimpleEquality else self.is_equal
    return not compare(other)
def _ne_simple(self: Dim, other: Dim) -> bool:
    # Negation of :func:`_eq_simple`.
    equal = self._eq_simple(other)
    return not equal
def _ne_generic(self: Dim, other: Dim) -> bool:
    # Negation of :func:`is_equal` with default options.
    equal = self.is_equal(other)
    return not equal
def __hash__(self):
    """
    :rtype: int
    :return: hash, matching to :func:`__eq__`
    """
    # Resolve the same_as chain first, so merged dims hash equally.
    self_extra = self._extra
    while (self_extra and self_extra.same_as):
        self = self_extra.same_as
        self_extra = self._extra
    if self_extra:
        if self_extra.special:
            # Special dims only compare by identity, so hash by identity.
            return hash(id(self))
        if (self_extra.kind == DimTypes.Batch):
            # All batch dims compare equal, so they must share one hash value.
            return hash(())
        if self_extra.auto_generated:
            # Auto-generated dims can compare equal by description; hash by stable fields.
            return hash((self_extra.kind, self.size, self.name))
    # Default: identity hash (matching identity-based equality).
    return hash(id(self))
def __lt__(self: Dim, other: Dim):
    """
    Define some order. This is just such that `sorted` works, or some diff reporting, or so.
    It is on symbolic level, i.e. it does not consider the actual dimension value.
    The defined order somewhat arbitrary, so do not rely on the exact behavior,
    as this might change at some later point.
    Currently, it depends on the creation index.

    :param Dim other:
    :rtype: bool
    """
    if not isinstance(other, (_d.Dim, _m.MarkedDim)):
        raise TypeError('cannot compare %r with %r' % (self, other))
    if self == other:
        return False
    lhs = dim_cmp_value(self)
    rhs = dim_cmp_value(other)
    return lhs < rhs
def __gt__(self, other):
    """
    See :func:`__lt__`.

    :param Dim other:
    :rtype: bool
    """
    # Delegate to the reflected less-than comparison.
    result = other < self
    return result
def __ge__(self, other):
    # See :func:`__lt__`; defined as its negation.
    less = self < other
    return not less
def __le__(self, other):
    # See :func:`__lt__`; defined as the negation of :func:`__gt__`.
    greater = self > other
    return not greater
def get_same_base(self: _d.Dim) -> _d.Dim:
    """
    :return: the root of the same_as chain (self if it was never merged into another dim)
    """
    if not self._extra:
        # Without extra state there cannot be any same_as link.
        return self
    root = self
    while root.same_as:
        root = root.same_as
    return root
def get_same_derived_base(self: _d.Dim, *, same_dim: bool = False) -> _d.Dim:
    """
    :param same_dim: if True, return the last base which has the same dimension.
        The derived base might have a different dimension.
        In case it is dynamic, the dimension is None, so it is always the same.
        In case it is static, there might be a different dimension.
    :return: same base, but also consider derived_from_...
    """
    base = self
    last_with_same_dim = self
    seen = {}  # id -> dim, cycle guard
    while base.same_as or base.derived_from_tag:
        assert id(base) not in seen
        seen[id(base)] = base
        if base.same_as:
            # same_as links take precedence over derived_from links.
            base = base.same_as
            continue
        base = base.derived_from_tag
        assert base
        if base.dimension == self.dimension:
            last_with_same_dim = base
    if same_dim:
        return last_with_same_dim
    return base
def get_derived_bases_set(self):
    """
    :return: transitive closure of all bases this dim is derived from (via ops or tags),
        each resolved one step through same_as
    :rtype: set[Dim]
    """
    result = set()
    pending = [self]
    seen = {}  # id -> dim, to avoid revisiting
    while pending:
        tag = pending.pop()
        if tag.same_as:
            tag = tag.same_as
        if id(tag) in seen:
            continue
        seen[id(tag)] = tag
        result.add(tag)
        if tag.derived_from_op:
            pending.extend(tag.derived_from_op.inputs)
        elif tag.derived_from_tag:
            pending.append(tag.derived_from_tag)
    return result
@property
def undefined(self: _d.Dim) -> bool:
    """
    :return: whether the undefined flag is set, in self, bases, or any derived bases. also see :func:`is_dim_known`
    """
    base = self
    visited = {}  # id -> dim, cycle guard
    while (base.same_as or base.derived_from_tag):
        assert (id(base) not in visited)
        visited[id(base)] = base
        if (base._extra and base._extra.undefined):
            return True
        if base.same_as:
            base = base.same_as
            continue
        base = base.derived_from_tag
        assert base
    # Fix: coerce to bool. The original returned `None` (when _extra is unset)
    # or the raw flag value here, despite the annotated bool return type.
    # Truthiness for all callers is unchanged.
    return bool(base._extra and base._extra.undefined)
def declare_same_as(self: _d.Dim, other: _d.Dim):
    """
    Declares that self is the same dim as other,
    merging the two (self.same_as will point to other's base),
    and transferring/merging sizes, batch-ctx variants, caches and vocab between them.

    :param other:
    """
    assert (self.can_be_used_as_dim() and other.can_be_used_as_dim())
    if (self is other):
        return
    self._maybe_update()
    self._validate_in_current_graph()
    other._validate_in_current_graph()
    other_same_base = other.get_same_base()
    if ((self is other_same_base) or (self.same_as is other_same_base)):
        return  # already linked
    self_same_as = self.get_same_base()
    if (self_same_as is other_same_base):
        return  # already share the same base
    # In several cases it is preferable to link in the other direction
    # (other -> self), e.g. when self is the better-defined dim.
    if (other_same_base.get_same_derived_base() is self_same_as):
        with util.guard_infinite_recursion(_d.Dim.declare_same_as, other, self):
            return other.declare_same_as(self)
    if self.batch:
        other_ = other.get_for_batch_ctx(self.batch, ctx=self.control_flow_ctx)
    else:
        other_ = other
    if ((self.is_dim_known() and (not other_.is_dim_known())) or (self_same_as.derived_from_op and (not other_same_base.derived_from_op) and (other not in self.get_derived_bases_set())) or ((not self.undefined) and other_.undefined)):
        # self is known/derived/defined while other is not -> swap direction.
        with util.guard_infinite_recursion(_d.Dim.declare_same_as, other, self):
            return other.declare_same_as(self)
    other_derived_bases = other.get_derived_bases_set()
    self_derived_bases = self.get_derived_bases_set()
    if ((other_derived_bases != self_derived_bases) and self_derived_bases.issubset(other_derived_bases)):
        # other is derived from a superset of our bases -> swap direction.
        with util.guard_infinite_recursion(_d.Dim.declare_same_as, other, self):
            return other.declare_same_as(self)
    if self._extra:
        # Drop our own derived-from info; the link to other replaces it.
        self._extra.derived_from_op = None
        self._extra.derived_from_tag = None
    if (self_same_as is not self):
        assert (not self_same_as.same_as)
        if (self_same_as is other_same_base):
            return
        # First merge our base into other's base.
        with util.guard_infinite_recursion(_d.Dim.declare_same_as, self_same_as, other_same_base):
            self_same_as.declare_same_as(other_same_base)
        if (((self.dyn_size_ext is None) or (not self._validate_in_current_graph())) and self_same_as.dyn_size_ext):
            self.dyn_size_ext = self_same_as.get_dyn_size_ext_for_batch_ctx(self.batch, self.control_flow_ctx, template_only=True)
    elif self._extra:
        # self is its own base: also clear derived-from info on batch-ctx variants.
        for dim_ in self._extra.same_for_batch_ctx.values():
            if dim_._extra:
                dim_._extra.derived_from_op = None
                dim_._extra.derived_from_tag = None
    if other_same_base.derived_from_op:
        # The other side defines how sizes are derived; drop our raw sizes.
        for dim_ in ([self_same_as, self] + (list(self._extra.same_for_batch_ctx.values()) if self._extra else [])):
            dim_.reset_raw()
    other_same_base._merge_same_for_batch_ctx_dict(self)
    if self_same_as._extra:
        # Move over cached dim-math results (keep existing entries on the target).
        for (k, v) in self_same_as._extra.cache_dim_math.items():
            other_same_base._extra.cache_dim_math.setdefault(k, v)
        self_same_as._extra.cache_dim_math.clear()
    other._maybe_update()
    # The actual link.
    self.same_as = other_same_base
    self._maybe_update()
    if ((self.dyn_size is not None) and (other_same_base.dyn_size is not None)):
        if (self.dyn_size is not other_same_base.dyn_size):
            if ((self.batch == other_same_base.batch) and (self.control_flow_ctx == other_same_base.control_flow_ctx)):
                print(('Warning: assuming dim tags are same with different size placeholders: %r vs %r' % (self.dyn_size, other_same_base.dyn_size)))
    if ((other_same_base.dyn_size is not None) and self._extra and self._extra.src_data):
        # Propagate the size template to the dim tag stored in our source data.
        assert isinstance(self._extra.src_axis, int)
        tag = self._extra.src_data.get_dim_tag(self._extra.src_axis)
        if ((tag.description == self.description) and ((not tag.dyn_size_ext) or (not tag._validate_in_current_graph()))):
            tag.dyn_size_ext = self.get_dyn_size_ext_for_batch_ctx(tag.batch, tag.control_flow_ctx, template_only=True)
            tag._maybe_update()
    if ((self.dyn_size is not None) and (other_same_base.dyn_size is not self.dyn_size)):
        if ((other_same_base.dyn_size is None) or (not other_same_base._validate_in_current_graph())):
            # Our size is the valid one; give the base a matching template.
            other_same_base.dyn_size_ext = self.get_dyn_size_ext_for_batch_ctx(other_same_base.batch, other_same_base.control_flow_ctx, template_only=True)
            other_same_base._maybe_update()
    if ((not self.dyn_size_ext) or (not self._validate_in_current_graph())):
        # Take over the size template from the base.
        self.dyn_size_ext = other_same_base.get_dyn_size_ext_for_batch_ctx(self.batch, self.control_flow_ctx, template_only=True)
        self._maybe_update()
    elif ((other_same_base.dyn_size_ext is None) or (not other_same_base._validate_in_current_graph())):
        other_same_base.dyn_size_ext = self.get_dyn_size_ext_for_batch_ctx(other_same_base.batch, other_same_base.control_flow_ctx, template_only=True)
        other_same_base._maybe_update()
    # Sync static size/capacity in whichever direction is known.
    if (self.is_dim_known() and other.is_dim_known()):
        assert (self.dimension == other.dimension)
    elif (self.is_dim_known() and (not other.is_dim_known())):
        other.capacity = self.capacity
        other.size = self.size
    elif ((not self.is_dim_known()) and other.is_dim_known()):
        self.capacity = other.capacity
        self.size = other.size
    # Sync vocab in whichever direction it is set.
    if (self.vocab and (not other_same_base.vocab)):
        other_same_base.vocab = self.vocab
    elif (other_same_base.vocab and (not self.vocab)):
        self.vocab = other_same_base.vocab
    self._make_extra()
    self_same_as._make_extra()
    self._extra.auto_generated = self_same_as._extra.auto_generated = other_same_base.auto_generated
    if (not self_derived_bases.issuperset(other_derived_bases)):
        # Share derived_from_op in whichever direction it exists.
        if (self.derived_from_op and (not other_same_base.derived_from_op)):
            other_same_base._make_extra().derived_from_op = self.derived_from_op
        elif (other_same_base.derived_from_op and (not self.derived_from_op)):
            self._make_extra().derived_from_op = other_same_base.derived_from_op
    if (self._extra and other_same_base.is_static()):
        # A static dim has no batch/ctx specific state; clear it everywhere.
        self._extra.batch = None
        self._extra.control_flow_ctx = None
        for (key, dim_) in self._extra.same_for_batch_ctx.items():
            dim_extra = dim_._extra
            if dim_extra:
                dim_extra.batch = None
                dim_extra.control_flow_ctx = None
    if self.batch:
        # Re-resolve for our batch/ctx and take over the resulting ctx/size.
        self_ = self.get_for_batch_ctx(batch=self.batch, ctx=self.control_flow_ctx)
        if (self_ is not self):
            self.control_flow_ctx = self_.control_flow_ctx
            self.dyn_size_ext = self_.dyn_size_ext
def _merge_same_for_batch_ctx_dict(self: _d.Dim, other: _d.Dim):
    """
    Merge other's per-(batch,ctx) dim variants into self's dict
    (keeping a self entry if it is at least as complete), and clear other's dict.

    :param other:
    """
    if ((not self._extra) and (not other._extra)):
        return  # neither side has any per-batch-ctx state
    self._validate_in_current_graph()
    if self._extra:
        # Validate our own variants first (may prune invalid graph references).
        for (_, dim) in list(self._extra.same_for_batch_ctx.items()):
            assert isinstance(dim, _d.Dim)
            dim._validate_in_current_graph()
    if other._extra:
        for (key, dim) in other._extra.same_for_batch_ctx.items():
            if (not dim._validate_in_current_graph()):
                continue  # stale entry from another graph
            self_dim = self._make_extra().same_for_batch_ctx.get(key, None)
            if (self_dim and (self_dim.dyn_size_ext or (not dim.dyn_size_ext))):
                continue  # our entry is at least as complete; keep it
            if (not dim.dyn_size_ext):
                continue  # other's entry has nothing to offer
            self._extra.same_for_batch_ctx[key] = dim
        other._extra.same_for_batch_ctx.clear()
def derive_from(self: _d.Dim, base: _d.Dim, *, set_derived_from_flag: bool = True):
    """
    Mark self as derived from `base`,
    taking over batch/ctx info and a dyn size template where applicable.

    :param base: dim
    :param set_derived_from_flag: whether to set derived_from_tag on our base
    """
    self_base = self.get_same_base()
    self_base_extra = self_base._make_extra()
    if set_derived_from_flag:
        if self_base_extra.derived_from_tag:
            # Already derived; must be consistent with the given base.
            assert (self_base_extra.derived_from_tag == base)
        else:
            self_base_extra.derived_from_tag = base
    if (self.is_dynamic() or (not self.is_dim_known())):
        if ((not self.batch) and base.batch):
            # Take over batch and control flow ctx from the base,
            # and register self as the variant for that (batch, ctx).
            self.batch = base.batch
            self.control_flow_ctx = base.control_flow_ctx
            key = (base.batch, base.control_flow_ctx)
            assert (key not in self_base_extra.same_for_batch_ctx)
            self_base_extra.same_for_batch_ctx[key] = self
    if (not self.dyn_size_ext):
        if base.dyn_size_ext:
            if (base.batch and (base.batch == self.batch) and (base.control_flow_ctx == self.control_flow_ctx)):
                # Same batch/ctx: reuse the base's size template.
                self.dyn_size_ext = base.dyn_size_ext.copy_template(name=('%s:size' % self_base.description))
        elif base.is_batch_dim():
            # Derived from the batch dim: size is a scalar int32 template.
            self.dyn_size_ext = _t.Tensor(name=('%s:batch' % self_base.description), shape=(), dtype='int32', batch_dim_axis=None)
def copy_from(self: Dim, other: Dim):
    """
    Take over size, capacity and dyn size from `other`,
    mark self as derived from it, and remember it as copy source.

    :param other:
    """
    for attr in ('size', 'capacity', 'dyn_size_ext'):
        setattr(self, attr, getattr(other, attr))
    self.derive_from(other)
    self._make_extra().copy_same_as = other
@classmethod
def get_existing_tag_from_collection(cls, other, tags, is_equal_opts=None):
    """
    :param Dim other:
    :param list[Dim]|tuple[Dim]|set[Dim] tags:
    :param dict[str]|None is_equal_opts: passed to Dim.is_equal
    :rtype: Dim|None
    """
    if is_equal_opts is None:
        is_equal_opts = {}

    def _rounds():
        # Try strict equality first, then (if relevant) without broadcast_matches,
        # and finally with the full options, so stricter matches win.
        yield {}
        if is_equal_opts:
            if 'broadcast_matches' in is_equal_opts:
                yield {k: v for k, v in is_equal_opts.items() if k != 'broadcast_matches'}
            yield is_equal_opts

    for opts in _rounds():
        for tag in tags:
            if tag.is_equal(other, **opts):
                return tag
    return None
@classmethod
def get_all_dimension_tags(cls, data_list, is_equal_opts=None, unique_separate_axes=True):
    """
    :param list[_t.Tensor] data_list:
    :param dict[str]|None is_equal_opts: passed to Dim.is_equal
    :param bool unique_separate_axes: e.g. data_list=[Data with shape (B,5,5,10)] results in 4 dim tags, not 3.
    :return: list of dimension tags, dict for data -> list of dimension tags (for each axis)
    :rtype: (list[Dim], util.DictRefKeys[_t.Tensor, list[Dim]])
    """
    tags = []
    data_axes_dict = util.DictRefKeys()
    for data in data_list:
        data_axes_dict[data] = []
        # With unique_separate_axes, matches within one data are consumed,
        # so two equal axes of the same data get two separate tags.
        existing_tag_collection_for_data = (list(tags) if unique_separate_axes else tags)
        for axis in range(data.batch_ndim):
            tag = data.get_dim_tag(axis)
            existing_tag = cls.get_existing_tag_from_collection(tag, tags=existing_tag_collection_for_data, is_equal_opts=is_equal_opts)
            if existing_tag:
                if unique_separate_axes:
                    existing_tag_collection_for_data.remove(existing_tag)
                # Prefer a defined tag over a matching undefined one already collected.
                replace_existing = (existing_tag.undefined and (not tag.undefined) and (tag.dimension == existing_tag.dimension))
                if replace_existing:
                    tags[tags.index(existing_tag)] = tag
                    # Also replace it in all per-data axis lists collected so far.
                    for (_, dims_) in data_axes_dict.items():
                        dims_[:] = [(tag if (d == existing_tag) else d) for d in dims_]
                    existing_tag = tag
            else:
                tags.append(tag)
            data_axes_dict[data].append((existing_tag or tag))
    return (tags, data_axes_dict)
@classmethod
def get_uniq_collection(cls, tags, is_equal_opts=None):
    """
    :param list[Dim]|tuple[Dim]|set[Dim] tags:
    :param dict[str]|None is_equal_opts: passed to Dim.is_equal
    :rtype: list[Dim]
    """
    uniq = []
    for tag in tags:
        # Only add tags which do not match anything collected so far.
        match = cls.get_existing_tag_from_collection(tag, uniq, is_equal_opts=is_equal_opts)
        if not match:
            uniq.append(tag)
    return uniq
def get_size_tensor(self) -> _t.Tensor:
    """
    :return: dyn_size_ext if defined, otherwise the static size wrapped as a tensor
    :rtype: _t.Tensor
    """
    dyn = self.dyn_size_ext
    if dyn:
        return dyn
    import returnn.frontend as rf
    assert self.size is not None
    return rf.convert_to_tensor(self.size, name='%s:size' % self.description, device='cpu')
def get_dim_value(self) -> Union[(int, _t.RawTensorType)]:
    """
    Infers the dim this axis should have if unbroadcasted.
    If `self.src_data` has a placeholder, will use the shape from there.
    Otherwise, uses `self.dimension` (if static) or `self.dyn_size` (if dynamic).

    :return: max(size or dyn_size), as plain int or raw (backend) tensor
    """
    value = self.get_dim_value_tensor()
    if isinstance(value, _t.Tensor):
        # Unwrap: must be a scalar with a raw tensor attached.
        assert value.dims == ()
        assert value.raw_tensor is not None
        return value.raw_tensor
    assert isinstance(value, int)
    return value
def get_dim_value_tensor(self: Dim) -> Union[(int, _t.Tensor)]:
    """
    Infers the dim this axis should have if unbroadcasted.
    If `self.src_data` has a placeholder, will use the shape from there.
    Otherwise, uses `self.dimension` (if static) or `self.dyn_size` (if dynamic).

    :return: max(size or dyn_size), as int or scalar tensor; cached in _dyn_size_max_value
    """
    import returnn.frontend as rf
    if (self.dimension is not None):
        return self.dimension  # static dim
    if (self._dyn_size_max_value is not None):
        # Cached from an earlier call.
        assert (self._dyn_size_max_value.raw_tensor is not None)
        return self._dyn_size_max_value
    if (self._extra and self._extra.src_data and (self._extra.src_axis is not None) and (self._extra.src_data.placeholder is not None)):
        # Read the dim directly from the source data's shape.
        res = self._extra.src_data.get_dim(self._extra.src_axis)
        if isinstance(res, int):
            return res
        res = _t.Tensor(f'{self._extra.src_data}:shape[{self._extra.src_axis}]', dims=(), dtype=rf.get_default_array_index_dtype(), raw_tensor=res)
        self._dyn_size_max_value = res
        return res
    # Maybe the dyn size (and max value) can be derived now.
    self.complete_dyn_size()
    if (self._dyn_size_max_value is not None):
        return self._dyn_size_max_value
    if (self.dyn_size_ext and (self.dyn_size_ext.placeholder is not None)):
        # Reduce the per-entry sizes to their maximum.
        if (self.dyn_size_ext.batch_ndim > 0):
            res = rf.reduce_max(self.dyn_size_ext, axis=self.dyn_size_ext.dim_tags, use_mask=False)
        else:
            res = self.dyn_size_ext.copy()
        assert (res.raw_tensor is not None)
        self._dyn_size_max_value = res
        return res
    if self.is_batch_dim():
        # Batch dim: take the batch size from src_data or batch info.
        res = None
        if (self._extra and self._extra.src_data):
            res = self._extra.src_data.get_batch_dim()
        elif self.batch:
            res = self.batch.dim
        if isinstance(res, int):
            return res
        if (res is not None):
            return _t.Tensor('batch', dims=(), dtype=rf.get_default_array_index_dtype(), raw_tensor=res)
    raise Exception(('%s: need placeholder, self.dimension or self.dyn_size for dim value' % self))
def axis_split_info(self):
    """
    :return: axis split info. see :func:`get_param_axes_split_info` and usage (e.g. pretraining)
    :rtype: list[int|None]
    """
    same_base = self.get_same_base()
    op = self.derived_from_op or same_base.derived_from_op
    if not op:
        return [self.dimension]
    if op.kind == 'add':
        # Concatenate the split infos of all summands.
        parts = []
        for x in op.inputs:
            parts.extend(x.axis_split_info())
        return parts
    if op.kind == 'mul':
        # Expand each accumulated factor by each input's split info.
        acc = [1]
        for x in op.inputs:
            expanded = []
            for n in acc:
                expanded.extend((n * x.axis_split_info()) if n is not None else None)
            acc = expanded
        return acc
    return [self.dimension]
def _get_same_base_extra(self) -> Optional[_DimExtra]:
    # The authoritative extra state lives on the same_as root;
    # no extra on self implies there is no chain to follow.
    if self._extra:
        return self.get_same_base()._extra
    return None
def _make_extra(self: _d.Dim) -> _DimExtra:
    # Lazily create the extra-state container on first use.
    extra = self._extra
    if not extra:
        extra = _DimExtra(dim=self)
        self._extra = extra
    return extra
@property
def vocab(self):
    """
    :rtype: returnn.datasets.util.vocabulary.Vocabulary|None
    """
    # Vocab is stored on the same_as root's extra state.
    extra = self._get_same_base_extra()
    return extra.vocab if extra else None
@vocab.setter
def vocab(self, vocab):
    """
    :param returnn.datasets.util.vocabulary.Vocabulary|None vocab:
    """
    if vocab is self.vocab:
        return  # unchanged
    if self.same_as:
        # Always store on the root of the same_as chain.
        self.get_same_base().vocab = vocab
        return
    extra = self._get_same_base_extra()
    if extra:
        extra.vocab = vocab
def __add__(self: Dim, other):
    """
    :param Dim|int other:
    :return: self + other. note that this is not commutative, i.e. different from other + self.
    :rtype: Dim
    """
    key = ('add', other)
    math_cache = self.get_same_base()._make_extra().cache_dim_math
    cached = math_cache.get(key)
    if cached:
        # Make sure the cached result has its dyn size set up.
        cached.complete_dyn_size()
        return cached
    expr = _OpLinearTerm.from_dim(self)
    expr.extend_add_sub_(other, kind='add', right=True)
    result = expr.as_dim()
    math_cache[key] = result
    return result
def __radd__(self: Dim, other):
    """
    :param Dim|int other:
    :return: other + self
    :rtype: Dim
    """
    key = ('add_left', other)
    math_cache = self.get_same_base()._make_extra().cache_dim_math
    cached = math_cache.get(key)
    if cached:
        # Make sure the cached result has its dyn size set up.
        cached.complete_dyn_size()
        return cached
    expr = _OpLinearTerm.from_dim(self)
    expr.extend_add_sub_(other, kind='add', right=False)
    result = expr.as_dim()
    math_cache[key] = result
    return result
def __sub__(self, other):
    """
    :param Dim|int other:
    :rtype: Dim
    """
    # Subtracting on the right, see :func:`sub_right`.
    result = self.sub_right(other)
    return result
def sub_right(self: Dim, other):
    """
    :param Dim|int other:
    :return: self - other
    :rtype: Dim
    """
    key = ('sub', other)
    math_cache = self.get_same_base()._make_extra().cache_dim_math
    cached = math_cache.get(key)
    if cached:
        # Make sure the cached result has its dyn size set up.
        cached.complete_dyn_size()
        return cached
    expr = _OpLinearTerm.from_dim(self)
    expr.extend_add_sub_(other, kind='sub', right=True)
    result = expr.as_dim()
    math_cache[key] = result
    return result
def sub_left(self: Dim, other):
    """
    :param Dim|int other:
    :return: (-other) + self
    :rtype: Dim
    """
    key = ('sub_left', other)
    math_cache = self.get_same_base()._make_extra().cache_dim_math
    cached = math_cache.get(key)
    if cached:
        # Make sure the cached result has its dyn size set up.
        cached.complete_dyn_size()
        return cached
    expr = _OpLinearTerm.from_dim(self)
    expr.extend_add_sub_(other, kind='sub', right=False)
    result = expr.as_dim()
    math_cache[key] = result
    return result
def __mul__(self: Dim, other):
    """
    :param Dim|int other:
    :rtype: Dim
    """
    key = ('mul', other)
    math_cache = self.get_same_base()._make_extra().cache_dim_math
    cached = math_cache.get(key)
    if cached:
        # Make sure the cached result has its dyn size set up.
        cached.complete_dyn_size()
        return cached
    expr = _OpLinearTerm.from_dim(self)
    expr.extend_mul_div_(other, kind='mul', right=True)
    result = expr.as_dim()
    math_cache[key] = result
    return result
def __rmul__(self: Dim, other):
    """
    :param Dim|int other:
    :rtype: Dim
    """
    key = ('mul_left', other)
    math_cache = self.get_same_base()._make_extra().cache_dim_math
    cached = math_cache.get(key)
    if cached:
        # Make sure the cached result has its dyn size set up.
        cached.complete_dyn_size()
        return cached
    expr = _OpLinearTerm.from_dim(self)
    expr.extend_mul_div_(other, kind='mul', right=False)
    result = expr.as_dim()
    math_cache[key] = result
    return result
def __floordiv__(self: Dim, other):
    """
    :param Dim|int other:
    :rtype: Dim
    """
    key = ('floordiv', other)
    math_cache = self.get_same_base()._make_extra().cache_dim_math
    hit = math_cache.get(key, None)
    if hit:
        # refresh the dyn size of the cached dim before handing it out
        hit.complete_dyn_size()
        return hit
    linear_term = _OpLinearTerm.from_dim(self)
    linear_term.extend_mul_div_(other, kind='floordiv', right=True)
    result = linear_term.as_dim()
    math_cache[key] = result
    return result
def __truediv__(self, other):
    """
    True division, delegates to :func:`div_right`. Only valid if the result is an integer dim.

    :param Dim|int other:
    :rtype: Dim
    """
    return self.div_right(other)
def div_left(self: Dim, other):
    """
    :param Dim|int other:
    :rtype: Dim
    """
    key = ('truediv_left', other)
    math_cache = self.get_same_base()._make_extra().cache_dim_math
    hit = math_cache.get(key, None)
    if hit:
        # refresh the dyn size of the cached dim before handing it out
        hit.complete_dyn_size()
        return hit
    linear_term = _OpLinearTerm.from_dim(self)
    linear_term.extend_mul_div_(other, kind='truediv', right=False)
    result = linear_term.as_dim()
    math_cache[key] = result
    return result
def div_right(self: Dim, other):
    """
    :param Dim|int other:
    :rtype: Dim
    """
    key = ('truediv', other)
    math_cache = self.get_same_base()._make_extra().cache_dim_math
    hit = math_cache.get(key, None)
    if hit:
        # refresh the dyn size of the cached dim before handing it out
        hit.complete_dyn_size()
        return hit
    linear_term = _OpLinearTerm.from_dim(self)
    linear_term.extend_mul_div_(other, kind='truediv', right=True)
    result = linear_term.as_dim()
    math_cache[key] = result
    return result
def ceildiv_left(self: Dim, other):
    """
    :param Dim|int other:
    :rtype: Dim
    """
    key = ('ceildiv_left', other)
    math_cache = self.get_same_base()._make_extra().cache_dim_math
    hit = math_cache.get(key, None)
    if hit:
        # refresh the dyn size of the cached dim before handing it out
        hit.complete_dyn_size()
        return hit
    linear_term = _OpLinearTerm.from_dim(self)
    linear_term.extend_mul_div_(other, kind='ceildiv', right=False)
    result = linear_term.as_dim()
    math_cache[key] = result
    return result
def ceildiv_right(self: Dim, other):
    """
    :param Dim|int other:
    :rtype: Dim
    """
    key = ('ceildiv', other)
    math_cache = self.get_same_base()._make_extra().cache_dim_math
    hit = math_cache.get(key, None)
    if hit:
        # refresh the dyn size of the cached dim before handing it out
        hit.complete_dyn_size()
        return hit
    linear_term = _OpLinearTerm.from_dim(self)
    linear_term.extend_mul_div_(other, kind='ceildiv', right=True)
    result = linear_term.as_dim()
    math_cache[key] = result
    return result
def __neg__(self):
    """
    Negation, implemented as ``(-1) * self`` (goes through :func:`__rmul__`).

    :rtype: Dim
    """
    return ((- 1) * self)
def is_constant_static_dim(self) -> bool:
    """
    :return: whether this dim was derived via a ``constant`` op, i.e. wraps a static int value
    """
    # bool(...) because derived_from_op can be None (falsy), and the annotation promises a bool;
    # previously this returned None in that case.
    return bool(self.derived_from_op and self.derived_from_op.kind == 'constant')
def _cache_dim_math_get(
    self: Dim, op_kind: str, operand: Union[Dim, int]
) -> Tuple[Dict[Tuple[str, Union[Dim, int]], Dim], Tuple[str, Union[Dim, int]], Optional[Dim]]:
    """
    Look up the dim-math cache of our base dim for the given op.

    :param op_kind: e.g. "add", "mul_left", "truediv", "ceildiv_left", ...
    :param operand: the other operand of the op
    :return: tuple (cache dict, cache key, cached result or None)
    """
    same_base = self.get_same_base()
    extra = same_base._make_extra()
    # normalize constant-static dims to their plain int value,
    # so that e.g. a constant Dim(2) and the int 2 share the same cache entry
    if (isinstance(operand, _d.Dim) and operand.is_constant_static_dim()):
        operand = operand.dimension
    cache = extra.cache_dim_math
    cache_key = (op_kind, operand)
    return (cache, cache_key, cache.get(cache_key, None))
|
def _make_constant_static_dim(value, kind=None):
    """
    :param int value:
    :param Entity|None kind:
    :rtype: Dim
    """
    kind_prefix = (kind.name + '_') if kind else ''
    return _d.Dim(
        dimension=value,
        kind=kind or DimTypes.Unspecified,
        description='unnamed_%sdim_%i' % (kind_prefix, value),
        derived_from_op=Op(kind='constant', inputs=[], attribs={'value': value}),
        auto_generated=True)
|
def _math_get_dim_via_bin_op(a: Dim, b: Dim, op_kind: str) -> Dim:
    """
    Get (or create and cache) the dim resulting from the binary op ``a <op> b``.

    :param a:
    :param b:
    :param op_kind: "add" or "mul" (the only kinds used here)
    :return: derived dim
    """
    assert (op_kind in {'add', 'mul'})
    op_kind_ = op_kind
    (a_, b_) = (a, b)
    # normalize: cache on the non-constant operand, so that the constant is the cache-key operand
    if (a.is_constant_static_dim() and (not b.is_constant_static_dim())):
        op_kind_ = (op_kind + '_left')
        (a_, b_) = (b, a)
    (cache, cache_key, res) = a_._cache_dim_math_get(op_kind_, b_)
    if res:
        return res
    # static result value only if both dims are static
    if ((a.dimension is not None) and (b.dimension is not None)):
        dim_value = getattr(operator, op_kind)(a.dimension, b.dimension)
    else:
        dim_value = None
    res = _d.Dim(kind=_get_merged_dim_kind((a, b)), description=((_get_description(a) + {'add': '+', 'mul': '*'}[op_kind]) + _get_description(b)), dimension=dim_value, derived_from_op=Op(kind=op_kind, inputs=[a, b]), derived_from_tag=_representative_tag((a, b)))
    cache[cache_key] = res
    return res
|
class Op:
    """
    Op on :class:`Dim` which results in a derived :class:`Dim`.
    """

    def __init__(self, kind, inputs, attribs=None):
        """
        :param str kind: "add", "sub", "mul", "ceildiv"
        :param list[Dim] inputs:
        :param dict[str]|None attribs:
        """
        self.kind = kind
        self.inputs = inputs
        self.output = None  # filled in externally where needed
        self.attribs = attribs

    def __repr__(self):
        extra = '' if not self.attribs else ' %r' % self.attribs
        return '<Dim.Op %r %s%s>' % (self.kind, self.inputs, extra)

    def _value(self):
        # canonical hashable representation for eq/hash
        attribs = frozenset(self.attribs.items()) if self.attribs else None
        return self.kind, tuple(self.inputs), attribs

    def __hash__(self):
        with util.guard_infinite_recursion(Op.__hash__, self):
            return hash(self._value())

    def __eq__(self, other):
        if not isinstance(other, Op):
            return False
        return self._value() == other._value()

    def __ne__(self, other):
        return not self.__eq__(other)
|
def _get_description(dim, brackets=True):
    """
    :param Dim dim:
    :param bool brackets: add brackets when necessary
    :rtype: str
    """
    if not dim.description:
        # no description at all: synthesize one from kind and (maybe unknown) dimension
        return 'unnamed_%s_dim%s' % (dim.kind, dim.dimension if dim.dimension is not None else '?')
    if dim.description.startswith('unnamed_') and dim.dimension is not None:
        # auto-generated static dims render as their plain value
        return str(dim.dimension)
    if brackets:
        import re
        # bracket composite descriptions so they stay unambiguous in larger expressions
        if re.search('[+\\-/ ]', dim.description):
            return '(%s)' % dim.description
    return dim.description
|
class _OpMultTerm():
    """
    Represents a product of dims, i.e. sth like ``a * b * c``.
    Helper for the dim math (see also :class:`_OpLinearTerm`).
    """

    @classmethod
    def from_dim(cls, dim: Dim) -> _OpMultTerm:
        """
        :param dim:
        :return: op mult term
        """
        dim = dim.get_same_base()
        # constant 1 is the neutral element of the product
        if ((dim.dimension == 1) and dim.is_constant_static_dim()):
            return cls.one()
        # a dim derived via a "mul" op decomposes directly into its factors
        if (dim.derived_from_op and (dim.derived_from_op.kind == 'mul')):
            return cls(list(dim.derived_from_op.inputs))
        return cls([dim])

    @classmethod
    def from_dim_factors(cls, dims: List[Dim]) -> _OpMultTerm:
        """from dim factors"""
        res = cls.one()
        for d in dims:
            res.extend_mul_div_(d, kind='mul', right=True)
        return res

    @classmethod
    def one(cls) -> _OpMultTerm:
        """1, i.e. the empty product"""
        return cls([])

    def __init__(self, terms: List[Dim]):
        # factors of the product; empty list means 1
        self.terms = terms

    def __hash__(self):
        return hash(tuple(self.terms))

    def __eq__(self, other):
        """
        :param _OpMultTerm other:
        """
        if isinstance(other, _OpMultTerm):
            return (self.terms == other.terms)
        return False

    def __ne__(self, other):
        return (not self.__eq__(other))

    def __repr__(self):
        return ('Dim._OpMultTerm(%r)' % (self.terms,))

    @property
    def dimension(self) -> Optional[int]:
        """static dim (product of all static factors), or None if any factor is dynamic"""
        dim = 1
        for part in self.terms:
            if (part.dimension is None):
                return None
            dim *= part.dimension
        return dim

    def base_term(self) -> Dim:
        """base term (Dim), i.e. the right-most factor"""
        assert self.terms
        return self.terms[(- 1)]

    def is_one(self) -> bool:
        """is 1 (empty product)"""
        return (not self.terms)

    def is_constant_static_dim(self) -> bool:
        """is constant static dim (all factors constant static)"""
        if (not self.terms):
            return True
        return all((term.is_constant_static_dim() for term in self.terms))

    def copy(self) -> _OpMultTerm:
        """copy (new list, same factor dims)"""
        return _OpMultTerm(list(self.terms))

    def negative(self) -> _OpMultTerm:
        """negative, i.e. ``(-1) * self``"""
        # if the leading factor already is the constant -1, negation just drops it
        if (self.terms and self.terms[0].is_constant_static_dim() and (self.terms[0].dimension == (- 1))):
            return _OpMultTerm(self.terms[1:])
        res = self.copy()
        res.extend_mul_div_(_make_constant_static_dim((- 1)), kind='mul', right=False)
        return res

    def divisible(self, other, right):
        """
        :param Dim other:
        :param bool right:
        :return: whether we can divide other, without remainder
        :rtype: bool
        """
        if (not self.terms):
            return False
        # a "mul"-derived divisor must be divisible factor by factor
        if (other.derived_from_op and (other.derived_from_op.kind == 'mul')):
            tmp = self.copy()
            for term in (other.derived_from_op.inputs if right else reversed(other.derived_from_op.inputs)):
                if (not tmp.divisible(term, right=right)):
                    return False
                tmp.extend_mul_div_(term, kind='truediv', right=right)
            return True
        # only the outer-most factor (right-most for right division) can cancel
        most_recent_term = self.terms[((- 1) if right else 0)]
        if (other == most_recent_term):
            return True
        if ((most_recent_term.dimension is not None) and (other.dimension is not None)):
            if ((most_recent_term.dimension % other.dimension) == 0):
                return True
        return False

    def can_simplify(self, other, kind, right):
        """
        :param Dim other:
        :param str kind:
        :param bool right:
        :return: whether we can simplify when applying this operation
        :rtype: bool
        """
        # a "mul"-derived operand must be simplifiable factor by factor
        if (other.derived_from_op and (other.derived_from_op.kind == 'mul')):
            tmp = self.copy()
            for term in (other.derived_from_op.inputs if right else reversed(other.derived_from_op.inputs)):
                if (not tmp.can_simplify(term, kind=kind, right=right)):
                    return False
                tmp.extend_mul_div_(term, kind=kind, right=right)
            return True
        idx = self._simplify_term_idx(other, kind=kind, right=right)
        return (idx is not None)

    def _simplify_term_idx(self, other, kind, right):
        """
        :param Dim other:
        :param str kind:
        :param bool right:
        :return: index of term to simplify, i.e. the factor which cancels/merges with ``other``
        :rtype: int|None
        """
        if (not self.terms):
            return None
        if (kind == 'mul'):
            # for mul: scan all factors (from the op side) for a cancelling truediv or constant merge
            for (i, term) in (reversed(list(enumerate(self.terms))) if right else enumerate(self.terms)):
                assert isinstance(term, _d.Dim)
                if term.derived_from_op:
                    # (x / other) * other -> x
                    if (term.derived_from_op.kind == ('truediv_' + ('right' if right else 'left'))):
                        if (term.derived_from_op.inputs[(- 1)] == other):
                            return i
                if other.derived_from_op:
                    # term * (x / term) -> x (mirrored side)
                    if (other.derived_from_op.kind == ('truediv_' + ('right' if (not right) else 'left'))):
                        if (other.derived_from_op.inputs[(- 1)] == term):
                            return i
                # two static constants can always be folded
                if (term.is_constant_static_dim() and other.is_constant_static_dim()):
                    return i
        # for div (and mul fallback): only the outer-most factor is a candidate
        i = ((len(self.terms) - 1) if right else 0)
        term = self.terms[i]
        if (kind.endswith('div') and (other == term)):
            return i
        op_kind = ((kind + '_') + ('right' if right else 'left'))
        if (term.derived_from_op and (term.derived_from_op.kind == op_kind)):
            return i
        return None

    def extend_mul_div_(self, other, kind, right):
        """
        Extend the product inplace by ``other``, via mul or div, simplifying where possible.

        :param Dim other:
        :param str kind: "mul"|"floordiv"|"truediv"|"ceildiv"
        :param bool right: or left
        """
        assert (kind in {'mul', 'floordiv', 'truediv', 'ceildiv'})
        # multiplying/dividing by 1 is a no-op
        if (other.is_constant_static_dim() and (other.dimension == 1)):
            return
        if (not self.terms):
            # we are 1: mul just takes the factor; div creates a derived div dim of 1/other
            if (kind == 'mul'):
                self.terms.append(other)
            elif kind.endswith('div'):
                self.terms = [_OpMultTerm.new_div_dim(self.as_dim(), other, kind=kind, right=right)]
            return
        # a "mul"-derived operand is applied factor by factor
        if (other.derived_from_op and (other.derived_from_op.kind == 'mul')):
            for term in (other.derived_from_op.inputs if right else reversed(other.derived_from_op.inputs)):
                self.extend_mul_div_(term, kind=kind, right=right)
            return
        idx = self._simplify_term_idx(other, kind=kind, right=right)
        if (idx is not None):
            term = self.terms[idx]
            assert isinstance(term, _d.Dim)
            # exact cancellation: x * term / term -> x
            if (kind.endswith('div') and (other == term)):
                self.terms.pop(idx)
                return
            if ((kind == 'mul') and term.derived_from_op):
                # (x / other) * other -> x
                if (term.derived_from_op.kind == ('truediv_' + ('right' if right else 'left'))):
                    if (term.derived_from_op.inputs[(- 1)] == other):
                        self.terms[idx] = term.derived_from_op.inputs[0]
                        return
            if ((kind == 'mul') and other.derived_from_op):
                # term * (x / term) -> x
                if (other.derived_from_op.kind == ('truediv_' + ('right' if (not right) else 'left'))):
                    if (other.derived_from_op.inputs[(- 1)] == term):
                        self.terms[idx] = other.derived_from_op.inputs[0]
                        return
            if (term.is_constant_static_dim() and other.is_constant_static_dim()):
                # fold two static constants into one
                if (kind == 'mul'):
                    if ((term.dimension * other.dimension) == 1):
                        self.terms.pop(idx)
                        return
                    self.terms[idx] = _make_constant_static_dim((term.dimension * other.dimension), kind=term.kind)
                    return
                if (kind.endswith('div') and ((term.dimension % other.dimension) == 0)):
                    self.terms[idx] = _make_constant_static_dim((term.dimension // other.dimension), kind=term.kind)
                    return
            op_kind = ((kind + '_') + ('right' if right else 'left'))
            if (kind.endswith('div') and term.derived_from_op and (term.derived_from_op.kind == op_kind)):
                # (a / b) / other -> a / (b * other)
                numerator = term.derived_from_op.inputs[0]
                denominator = term.derived_from_op.inputs[1]
                self.terms[idx] = _OpMultTerm.new_div_dim(numerator, (denominator * other), kind=kind, right=right)
                return
        if kind.endswith('div'):
            # no simplification possible: collapse the whole product into a single div dim
            self.terms = [_OpMultTerm.new_div_dim(self.as_dim(), other, kind=kind, right=right)]
            return
        if (kind == 'mul'):
            if right:
                self.terms.append(other)
            else:
                self.terms.insert(0, other)
            return
        assert False

    @classmethod
    def new_div_dim(cls, numerator, denominator, kind, right):
        """
        :param Dim numerator:
        :param Dim denominator:
        :param str kind: "floordiv" or "ceildiv" or "truediv"
        :param bool right:
        :rtype: Dim
        """
        dim_value = None
        a = numerator.dimension
        b = denominator.dimension
        if ((a is not None) and (b is not None)):
            if (kind == 'floordiv'):
                dim_value = (a // b)
            elif (kind == 'ceildiv'):
                dim_value = (- ((- a) // b))
                # exact division: ceildiv degenerates to floordiv
                if (((a % b) == 0) and right):
                    kind = 'floordiv'
            elif (kind == 'truediv'):
                if ((a % b) != 0):
                    raise ValueError(('%s truediv %s only allowed if the result is an integer' % (numerator, denominator)))
                dim_value = (a // b)
                if right:
                    kind = 'floordiv'
            else:
                raise ValueError(('invalid kind %r' % (kind,)))
        (cache, cache_key, res) = numerator._cache_dim_math_get((kind + ('' if right else '_left')), denominator)
        if res:
            return res
        if ((kind == 'floordiv') and right):
            description = ('%s//%s' % (_get_description(numerator), _get_description(denominator)))
        elif ((kind == 'ceildiv') and right):
            description = ('⌈%s/%s⌉' % (_get_description(numerator), _get_description(denominator)))
        else:
            description = ('%s_%s(%s, %s)' % (kind, ('right' if right else 'left'), _get_description(numerator, brackets=False), _get_description(denominator, brackets=False)))
        op_kind = kind
        # mark exact static divisions as truediv, so they can cancel later via mul
        if ((a is not None) and (b is not None) and ((a % b) == 0)):
            op_kind = 'truediv'
        op_kind += ('_' + ('right' if right else 'left'))
        res = _d.Dim(description=description, kind=numerator.kind, dimension=dim_value, derived_from_op=Op(kind=op_kind, inputs=[numerator, denominator]), derived_from_tag=numerator)
        cache[cache_key] = res
        return res

    def as_dim(self):
        """
        :rtype: Dim
        """
        if self.is_one():
            return _make_constant_static_dim(1)
        if (len(self.terms) == 1):
            return self.terms[0]
        res = self.terms[0]
        for operand in self.terms[1:]:
            res = _math_get_dim_via_bin_op(res, operand, 'mul')
        return res
|
class _OpLinearTerm():
    """
    Linear combination of :class:`_OpMultTerm`.
    Represents sth like a * b + c of :class:`Dim`.
    """

    @classmethod
    def from_dim(cls, dim: Dim) -> _OpLinearTerm:
        """from dim"""
        res = cls.zero()
        res.extend_add_sub_(dim, kind='add', right=True)
        return res

    @classmethod
    def zero(cls) -> _OpLinearTerm:
        """0, i.e. the empty sum"""
        return _OpLinearTerm([])

    def __init__(self, terms: List[_OpMultTerm]):
        # summands of the linear combination; empty list means 0
        self.terms = terms

    def __hash__(self):
        return hash(tuple(self.terms))

    def __eq__(self, other):
        if isinstance(other, _OpLinearTerm):
            return (self.terms == other.terms)
        return False

    def __ne__(self, other):
        return (not self.__eq__(other))

    def as_dim(self) -> Dim:
        """as dim"""
        if self.is_zero():
            return _make_constant_static_dim(0)
        if (len(self.terms) == 1):
            return self.terms[0].as_dim()
        res = self.terms[0].as_dim()
        for operand in self.terms[1:]:
            res = _math_get_dim_via_bin_op(res, operand.as_dim(), 'add')
        return res

    def __repr__(self):
        return ('Dim._OpLinearTerm(%r)' % (self.terms,))

    def is_zero(self):
        """
        :rtype: bool
        """
        return (not self.terms)

    def extend_add_sub_(self, other, kind, right):
        """
        Extend the sum inplace by adding/subtracting ``other``, simplifying where possible.

        :param Dim|int other:
        :param str kind: "add" or "sub"
        :param bool right: or left. right means self + other, left means other + self
        """
        assert (kind in {'add', 'sub'})
        other = self._make_dim(other, kind=kind)
        # adding/subtracting 0 is a no-op
        if (other.is_constant_static_dim() and (other.dimension == 0)):
            return
        # an "add"-derived operand is applied summand by summand
        if (other.derived_from_op and (other.derived_from_op.kind == 'add')):
            for other_ in (other.derived_from_op.inputs if right else reversed(other.derived_from_op.inputs)):
                self.extend_add_sub_(other_, kind=kind, right=right)
            return
        term = _OpMultTerm.from_dim(other)
        neg_term = term.negative()
        if (kind == 'sub'):
            (term, neg_term) = (neg_term, term)
        # only try to simplify against the outer-most summand
        most_recent_term = (self.terms[((- 1) if right else 0)] if self.terms else None)
        if most_recent_term:
            # x + a - a -> x
            if (most_recent_term == neg_term):
                self.terms.pop(((- 1) if right else 0))
                return
            # fold two static constants into one
            if (most_recent_term.is_constant_static_dim() and term.is_constant_static_dim()):
                self.terms[((- 1) if right else 0)] = _OpMultTerm.from_dim(_make_constant_static_dim((most_recent_term.dimension + term.dimension), kind=other.kind))
                return
            # common right-most factor: a*c + b*c -> (a+b)*c
            if (most_recent_term.terms and term.terms and (most_recent_term.terms[(- 1)] == term.terms[(- 1)])):
                a = _OpMultTerm.from_dim_factors(most_recent_term.terms[:(- 1)]).as_dim()
                b = _OpMultTerm.from_dim_factors(term.terms[:(- 1)]).as_dim()
                # prefer int + Dim over Dim + Dim where one side is a constant
                if (a.is_constant_static_dim() and (not b.is_constant_static_dim())):
                    a = a.dimension
                elif (b.is_constant_static_dim() and (not a.is_constant_static_dim())):
                    b = b.dimension
                res = _OpMultTerm.from_dim(((a + b) if right else (b + a)))
                res.extend_mul_div_(term.terms[(- 1)], kind='mul', right=True)
                self.terms[((- 1) if right else 0)] = res
                return
        if right:
            self.terms.append(term)
        else:
            self.terms.insert(0, term)

    def extend_mul_div_(self, other, kind, right):
        """
        Extend inplace by multiplying/dividing every summand by ``other`` (distributive law).

        :param Dim|int other:
        :param str kind: "mul" or "ceildiv"
        :param bool right: or left. right means self * other, left means other * self
        """
        assert (kind in {'mul', 'floordiv', 'truediv', 'ceildiv'})
        other = self._make_dim(other, kind=kind)
        if ((kind == 'mul') and right):
            # if right-mul does not simplify on all summands, swap the operands (mul is commutative)
            if (not all((term.can_simplify(other, kind=kind, right=right) for term in self.terms))):
                (self.terms, other) = (_OpLinearTerm.from_dim(other).terms, self.as_dim())
                right = False
        # multiplying/dividing by 1 is a no-op
        if (other.is_constant_static_dim() and (other.dimension == 1)):
            return
        if (kind.endswith('div') and (len(self.terms) >= 2)):
            # division only distributes if every summand divides evenly; otherwise collapse first
            if any(((not term.divisible(other, right=right)) for term in self.terms)):
                self.terms = [_OpMultTerm.from_dim(_OpMultTerm.new_div_dim(self.as_dim(), other, kind=kind, right=right))]
                return
        for term in self.terms:
            term.extend_mul_div_(other, kind=kind, right=right)

    def _make_dim(self, other, kind):
        """
        :param Dim|int other: ints are wrapped as constant static dims
        :param str kind: only for the error message
        :rtype: Dim
        """
        if isinstance(other, int):
            base_tag = self.representative_tag()
            return _make_constant_static_dim(other, kind=(base_tag.kind if base_tag else None))
        elif isinstance(other, _d.Dim):
            return other.get_same_base()
        else:
            raise TypeError(('%s %s %s invalid for type %s' % (self, kind, other, type(other))))

    def representative_tag(self):
        """
        :rtype: Dim|None
        """
        terms = [_representative_tag(term.terms) for term in self.terms]
        return _representative_tag([term for term in terms if term])
|
def _get_merged_dim_kind(dim_tags):
    """
    :param list[Dim]|tuple[Dim] dim_tags:
    :return: dim kind
    :rtype: Entity
    """
    # precedence: batch > feature > spatial
    if any(tag.is_batch_dim() for tag in dim_tags):
        return DimTypes.Batch
    if any(tag.is_feature_dim() for tag in dim_tags):
        return DimTypes.Feature
    return DimTypes.Spatial
|
def _representative_tag(terms: Sequence[Dim]) -> Optional[Dim]:
    """
    Pick a representative dim out of ``terms``:
    prefer a dynamic seq-length dim, then any dim with a specified kind,
    then simply the first entry; None if ``terms`` is empty.
    """
    for tag in terms:
        if tag.is_dynamic_seq_length():
            return tag
    for tag in terms:
        if tag.kind != DimTypes.Unspecified:
            return tag
    return next(iter(terms), None)
|
def dim_cmp_value(obj):
    """
    :param Dim|_MarkedDim obj:
    :return: anything which can be compared
    """
    if isinstance(obj, _d.Dim):
        base = obj.get_same_base()
        dyn_dims = base.dyn_size_ext.dims if base.dyn_size_ext is not None else None
        return '', base.description, base.kind, base.dimension, dyn_dims
    if isinstance(obj, _m.MarkedDim):
        return obj.__class__.__name__, obj.tag
    return obj
|
def _behavior_version_reset_callback():
    # restore the default (pre-behavior-version-16) Dim equality semantics
    _DimMixin._SimpleEquality = False
    _DimMixin.__eq__ = _DimMixin.is_equal
    _DimMixin.__ne__ = _DimMixin._ne_generic
|
def _behavior_version_handle_new_min_version_callback():
    # with behavior version >= 16, Dim equality switches to the simple variant
    if (util.BehaviorVersion.get() >= 16):
        _DimMixin._SimpleEquality = True
        _DimMixin.__eq__ = _DimMixin._eq_simple
        _DimMixin.__ne__ = _DimMixin._ne_simple
|
def _setup():
    # register the behavior-version callbacks, then apply the current min version once
    util.BehaviorVersion.reset_callbacks.append(_behavior_version_reset_callback)
    util.BehaviorVersion.handle_new_min_version_callbacks.append(_behavior_version_handle_new_min_version_callback)
    _behavior_version_handle_new_min_version_callback()
|
class _TensorExtra():
    """
    Holds rarely-used extra attributes of a :class:`Tensor`, kept out of the common case
    (lazily created via ``Tensor._make_extra``).
    """

    def __init__(self, *, tensor: Tensor, time_dim_axis=NotSpecified, available_for_inference=True, batch=None, beam=None, control_flow_ctx=None):
        """
        :param tensor:
        :param int|None|NotSpecified time_dim_axis: where we have the time dim axis, after we added the batch-dim.
            this is often 1. however, can be None if there is no time-dim.
        :param bool available_for_inference: e.g. the extern data "classes" is usually not available for inference
        :param BatchInfo|None batch:
        :param SearchBeam|None beam: the batch-dim could be extended by a beam-size,
            such that it represents the merged dims [batch, beam_size].
        :param ControlFlowContext|None control_flow_ctx:
        """
        self.tensor = tensor
        if beam and batch:
            assert batch.beam == beam
        self.batch = batch
        del batch
        self.beam = beam
        del beam
        self.control_flow_ctx = control_flow_ctx
        self.available_for_inference = available_for_inference
        if time_dim_axis is NotSpecified:
            # version >= 2 has no time-dim-axis concept anymore
            if self.tensor.version >= 2:
                time_dim_axis = None
        elif time_dim_axis is None:
            pass
        elif isinstance(time_dim_axis, int):
            assert self.tensor.version == 1
            assert 0 <= time_dim_axis < self.tensor.batch_ndim
        else:
            raise TypeError(f'unexpected time_dim_axis type {type(time_dim_axis)}')
        self.time_dim_axis = time_dim_axis

    def __getstate__(self):
        # Bugfix: ``vars(self)`` returns the live ``__dict__``; the previous code assigned into it
        # directly, which clobbered ``self.batch`` on the live object as a side effect of pickling.
        # Copy the dict before modifying it.
        d = dict(vars(self))
        d['batch'] = None  # batch info is dropped on serialization
        return d
|
class _TensorMixin(_TensorMixinBase):
@staticmethod
def from_tensor(x) -> Tensor:
    """
    Create a :class:`Tensor` from a fully-defined tf.Tensor (no batch dim is assumed).

    :param tf.Tensor x:
    """
    # shape must be static, since it is used as the template shape
    assert x.get_shape().is_fully_defined()
    x_shape = x.get_shape().as_list()
    return _t.Tensor(name=str(x.op.name), shape=x_shape, batch_dim_axis=None, dtype=x.dtype.name, placeholder=x)
@staticmethod
def template_from_constant(x, name, dtype=None, shape=None, with_batch_dim=False, sparse_dim=None, feature_dim=None) -> Tensor:
    """
    Create a :class:`Tensor` template describing the constant value ``x``
    (the value itself is not stored, only shape and dtype).

    :param int|float|bool|numpy.ndarray x: not actually assigned to the returned Data, just for the shape and dtype
    :param str name:
    :param str|None dtype:
    :param list[Dim|int]|tuple[Dim|int]|None shape: for verification, and defining dim tags.
        might also already include the batch-dim. (Then with_batch_dim is ignored.)
    :param bool with_batch_dim:
    :param Dim|None sparse_dim:
    :param Dim|None feature_dim:
    :return: data template
    """
    import numpy
    if (dtype is None):
        # infer dtype from the Python/numpy type; bool must be checked before int (bool is an int subclass)
        if isinstance(x, bool):
            dtype = 'bool'
        elif isinstance(x, int):
            dtype = 'int32'
        elif isinstance(x, float):
            dtype = 'float32'
        elif isinstance(x, numpy.ndarray):
            dtype = str(x.dtype)
        else:
            raise TypeError(('%r: cannot handle value %r of type %r' % (name, x, type(x))))
    # the actual shape of the value (scalar -> ())
    shape_ = (x.shape if isinstance(x, numpy.ndarray) else ())
    if (shape is not None):
        if (len(shape) > len(shape_) == 0):
            # scalar value but non-empty shape given (e.g. incl. batch dim): accept the given shape as-is
            pass
        else:
            assert (len(shape) == len(shape_)), ('%r: shape does not match in ndim, %r vs %r' % (name, shape, shape_))
    else:
        shape = shape_
    dim_tags = []
    for (i, d) in enumerate(shape):
        d_ = (shape_[i] if (len(shape_) > 0) else None)
        if isinstance(d, Dim):
            if (len(shape_) > 0):
                assert (d.dimension == d_)
        elif isinstance(d, int):
            if (len(shape_) > 0):
                assert (d == d_)
            # wrap plain ints as dim tags; the last axis becomes the feature dim, others spatial
            d = Dim(kind=(Dim.Types.Spatial if (i < (len(shape) - 1)) else Dim.Types.Feature), description=('%s:static:%i' % (name, i)), auto_generated=True, dimension=d)
        else:
            raise TypeError(('%r shape[%i] invalid type %r in shape %r' % (name, i, type(d), shape)))
        dim_tags.append(d)
    if (with_batch_dim and (batch_dim not in dim_tags)):
        dim_tags.insert(0, batch_dim)
    return _t.Tensor(name=name, dim_tags=dim_tags, dtype=dtype, sparse_dim=sparse_dim, feature_dim=feature_dim)
def _handle_extra_kwargs(self, *, shape=None, sparse=None, dim=NotSpecified, batch_dim_axis=NotSpecified, dim_tags=None, placeholder=None, size_placeholder=None, auto_create_placeholders=False, vocab=None, same_dim_tags_as=None, **kwargs):
    """
    Handle deprecated/extra ``Tensor.__init__`` kwargs.

    :param shape:
    :param sparse:
    :param dim:
    :param batch_dim_axis:
    :param Sequence[Dim]|None dim_tags:
        If tuple/list, this specifies the whole (batch) shape.
    :param tf.Tensor|None placeholder: with added batch-dim
    :param dict[int,tf.Tensor]|None size_placeholder: for every dynamic dim, this will describe the size.
        The keys are the axes but counted without batch-dim, and the value is the size.
        The size is always a tensor of shape (batch,), i.e. the size can be different for each sequence in a batch.
        This is the old deprecated way. Now this is all part of :class:`Dim`.
    :param bool auto_create_placeholders: This will create a tf.placeholder.
        This is deprecated. Rather, the placeholder should be created outside and passed in.
    :param str|dict[str]|returnn.datasets.util.vocabulary.Vocabulary|None vocab: vocab of the feature dim
        or sparse dim.
        This is deprecated. Rather, the vocab is part of the :class:`Dim`.
    :param dict[int|str,Dim]|None same_dim_tags_as: will mark our dimension tags to be the same
    """
    assert isinstance(self, _t.Tensor)
    # no-op expression: these kwargs were already consumed by the caller;
    # referenced here only to mark them as handled — TODO confirm this is just to silence linters
    (shape, sparse, dim, batch_dim_axis, dim_tags)
    if (vocab is not None):
        from returnn.datasets.util.vocabulary import Vocabulary
        if isinstance(vocab, str):
            vocab = Vocabulary(vocab)
        elif isinstance(vocab, dict):
            vocab = Vocabulary.create_vocab(**vocab)
        assert isinstance(vocab, Vocabulary)
        # vocab only makes sense for sparse data whose dim matches the vocab size
        assert self.sparse, ('%s should represent indices of %s' % (self, vocab))
        assert (self.dim == vocab.num_labels), ('%s dims do not match with vocab %s' % (self, vocab))
        self.sparse_dim.vocab = vocab
    if kwargs:
        # remaining kwargs (batch, beam, time_dim_axis, ...) go into the lazily-created extra storage
        self._extra = _TensorExtra(tensor=self, **kwargs)
    if size_placeholder:
        self.size_placeholder = size_placeholder
    if same_dim_tags_as:
        # sorted() for deterministic iteration order
        for (_axis, _dim_tag) in sorted(same_dim_tags_as.items()):
            _axis = self.get_axis_from_description(_axis)
            assert isinstance(_dim_tag, Dim)
            base_tag = self._dims[_axis]
            if (base_tag != _dim_tag):
                base_tag.declare_same_as(_dim_tag)
            self._dims = ((self._dims[:_axis] + (_dim_tag,)) + self._dims[(_axis + 1):])
    if (placeholder is not None):
        self.raw_tensor = placeholder
    elif auto_create_placeholders:
        # deprecated path: create the TF placeholder (and size placeholders) here
        from returnn.tf.frontend_low_level._backend import TFBackend
        self.raw_tensor = TFBackend.create_placeholder_raw(self)
        _auto_create_size_placeholders_on_dim_tags(name=self.name, dim_tags=self._dims)
    self._adapt_batch_consistent_dim_tags()
    self.sanity_check(assume_complete=False)
@property
def _raw_backend(self) -> Optional[Type[Backend]]:
    """
    :return: the backend for the raw tensor, or None if no raw tensor is set
    """
    # local import to avoid an import cycle at module load time — TODO confirm
    import returnn.frontend._backend as _backend_api
    if (self._raw_tensor is None):
        return None
    return _backend_api.get_backend_by_raw_tensor_type(type(self._raw_tensor))
@property
def control_flow_ctx(self) -> Optional[ControlFlowContext]:
    """
    :return: control flow ctx (graph-based)
    """
    # stored in the (lazily-created) extra storage; default is None
    if (not self._extra):
        return None
    return self._extra.control_flow_ctx

@control_flow_ctx.setter
def control_flow_ctx(self, value: Optional[ControlFlowContext]):
    # avoid creating the extra storage when the value matches the current (default) value
    if (value == self.control_flow_ctx):
        return
    self._make_extra().control_flow_ctx = value
@property
def available_for_inference(self) -> bool:
    """
    :return: available for inference (default True; stored in the extra storage)
    """
    if (not self._extra):
        return True
    return self._extra.available_for_inference

@available_for_inference.setter
def available_for_inference(self, value: bool):
    # avoid creating the extra storage when the value matches the current (default) value
    if (value == self.available_for_inference):
        return
    self._make_extra().available_for_inference = value
def _make_extra(self: Tensor) -> _TensorExtra:
    # lazily create the extra storage for rarely-used attributes
    if (not self._extra):
        self._extra = _TensorExtra(tensor=self)
    return self._extra
def sanity_check(self, ignore_placeholder=False, assume_complete=True):
    """
    Performs some sanity checks on self, and raises exceptions if something is not sane.

    :param bool ignore_placeholder:
    :param bool assume_complete:
    """
    special_axes_dict = {'time_dim_axis': self.time_dim_axis, 'feature_dim_axis': self.feature_dim_axis}
    batch_dim_axis = self.batch_dim_axis
    batch_ndim = self.batch_ndim
    # special axes must be valid axis indices (or None)
    for (axis_name, axis) in special_axes_dict.items():
        assert ((axis is None) or (0 <= axis < batch_ndim)), ('%s: axis %s (%i) invalid' % (self, axis_name, axis))
    if (batch_dim_axis is not None):
        for (axis_name, axis) in special_axes_dict.items():
            assert (axis != batch_dim_axis), ('%s: axis %s (%i) must be different from batch_dim_axis (%i)' % (self, axis_name, axis, batch_dim_axis))
    if (self.sparse_dim is not None):
        assert (special_axes_dict['feature_dim_axis'] is None), ('%s: If sparse, there cannot be a feature dim axis.' % self)
    # per-dim-tag checks: batch dim position, batch consistency, dyn size dtype/batch
    for (axis, tag) in enumerate(self._dims):
        if tag.is_batch_dim():
            assert (axis == batch_dim_axis), ('%s: invalid %s' % (self, tag))
            continue
        if (tag.batch and self.batch):
            assert ((tag.batch == self.batch) or self.batch.is_broadcast())
        if tag.dyn_size_ext:
            assert (tag.dyn_size_ext.dtype in {'int32', 'int64'})
            if tag.dyn_size_ext.have_batch_axis():
                assert (tag.batch == tag.dyn_size_ext.batch)
    if ((not ignore_placeholder) and (self._raw_tensor is not None)):
        # check the raw tensor shape/dtype against our dim tags
        backend = self._raw_backend
        raw_shape = backend.get_known_shape_raw(self._raw_tensor)
        assert (len(raw_shape) == batch_ndim), f'Mismatching shape ndim: Raw tensor {raw_shape} vs Tensor {self}'
        for i in range(batch_ndim):
            if (self._dims[i].dimension is None):
                # dynamic dim; cannot check statically
                continue
            if (raw_shape[i] != self._dims[i].dimension):
                raise Exception((f'''Mismatching shape: Raw tensor {raw_shape} vs Tensor {self};
''' + backend.format_graph_output(self._raw_tensor, max_depth=3)))
        backend.set_known_shape_raw(self._raw_tensor, self.batch_shape)
        assert (backend.get_dtype_name_raw(self._raw_tensor) == self.dtype), f'{self} dtype {self.dtype} does not match raw tensor dtype {backend.get_dtype_name_raw(self._raw_tensor)}'
    if assume_complete:
        # all dynamic dims must have a defined (and, with a placeholder, known) dyn size
        for tag in self._dims:
            if tag.is_batch_dim():
                continue
            if tag.is_dynamic():
                assert tag.dyn_size_ext, ('%s sanity_check: dynamic dim %s undefined' % (self, tag))
                if (not ignore_placeholder):
                    if (tag.dyn_size_ext.placeholder is None):
                        tag.complete_dyn_size()
                    if (self.placeholder is not None):
                        assert (tag.dyn_size_ext.placeholder is not None), ('%s sanity_check: dynamic dim %s value unknown' % (self, tag))
            assert tag.is_dim_known()
def get_runtime_sanity_check_op(self: Tensor):
    """
    :return: op which does a couple of runtime sanity checks on the placeholder
    :rtype: tensorflow.Operation|Any
    """
    # requires an assigned raw tensor; the checks are backend-specific
    assert (self._raw_tensor is not None)
    return self._raw_backend.runtime_sanity_checks(self)
def verify_out_shape(self, out_shape, allow_missing_implicit_dims=False):
    """
    Verifies that ``out_shape`` matches our shape, i.e. specifically the dim tags.
    https://github.com/rwth-i6/returnn/issues/706
    Throws an exception if this is not the case.

    :param set[Dim|_MarkedDim]|tuple|list out_shape:
        It must be a set, with the only exception when it is empty (then it doesn't matter).
        See :func:`dim_tags_set`.
    :param bool allow_missing_implicit_dims:
    """
    # precomputed strings for the error messages
    actual_dims_str = ('{%s}' % ', '.join([str(d) for d in (list(self.dim_tags) + sorted(self.dim_tags_set_implicit_only_wrapped))]))
    expected_dims_str = ('{%s}' % ', '.join([str(d) for d in sorted(out_shape)]))
    self_dim_tags = self.dim_tags_set_implicit
    self_dim_tags_implicit_only = self.dim_tags_set_implicit_only_wrapped
    if (not out_shape):
        # empty out_shape only matches an empty (incl. implicit) dim set
        if self_dim_tags:
            raise VerifyOutShapeException((('%s verify_out_shape:\n' % self) + ('Actual dims: %s\nExpected empty out_shape: %s' % (actual_dims_str, expected_dims_str))))
        return
    if (not isinstance(out_shape, set)):
        raise TypeError(('%s verify_out_shape: expects a set but got %s' % (self, type(out_shape))))
    # match each given dim against our (incl. implicit) dims, tracking what is left
    remaining = set(self_dim_tags)
    for dim in out_shape:
        if isinstance(dim, Dim):
            dim_tag = dim
        elif isinstance(dim, _m.ImplicitDim):
            dim_tag = dim.tag
            if (dim not in self_dim_tags_implicit_only):
                raise VerifyOutShapeException(('%s verify_out_shape:\nActual dims: %s\nExpected out_shape: %s\n%s is not an implicit dim in self' % (self, actual_dims_str, expected_dims_str, dim)))
        elif isinstance(dim, _m.OptionalDim):
            dim_tag = dim.tag
            # optional dims are allowed to be absent
            if (dim_tag not in remaining):
                continue
        else:
            raise TypeError(('%s verify_out_shape with out_shape %s: expect dim tags but got %s' % (self, out_shape, type(dim))))
        if (dim_tag not in remaining):
            if (dim_tag in self_dim_tags):
                # in self, but already consumed: listed multiple times in out_shape
                raise VerifyOutShapeException(((('%s verify_out_shape does not match:\n' % self) + ('Actual dims: %s\nExpected out_shape: %s\n' % (actual_dims_str, expected_dims_str))) + ('Dim %s multiple times in out_shape' % dim)))
            raise VerifyOutShapeException(((('%s verify_out_shape:\n' % self) + ('Actual dims: %s\nExpected out_shape: %s\n' % (actual_dims_str, expected_dims_str))) + ('Dim %s not in self' % dim)))
        remaining.discard(dim_tag)
    if remaining:
        # dims of self which out_shape did not mention
        if (allow_missing_implicit_dims and remaining.issubset(self.dim_tags_set_implicit_only)):
            pass
        else:
            raise VerifyOutShapeException(((('%s verify_out_shape missing dims:\n' % self) + ('Actual dims: %s\nExpected out_shape: %s\n' % (actual_dims_str, expected_dims_str))) + ('Missing dims: %s' % ', '.join(map(str, sorted(remaining))))))
def get_placeholder_kwargs(self, with_batch=True):
    """
    :param bool with_batch: use the batch shape (with batch dim) instead of the plain shape
    :return: kwargs for tf.compat.v1.placeholder
    :rtype: dict[str]
    """
    shape = self.batch_shape if with_batch else self.shape
    return {"name": self.name, "dtype": self.dtype, "shape": shape}
def get_axes_with_size(self):
    """
    :return: list of axes which can vary in size for each entry of the batch-dim, e.g. the time-dim-axis.
      The axis index is counted without the batch-dim.
    :rtype: list[int]
    """
    dynamic_axes = []
    for axis, static_dim in enumerate(self.shape):
        if static_dim is None:  # None marks a dynamic (varying) size
            dynamic_axes.append(axis)
    return dynamic_axes
def get_kwargs(self, *, include_special_axes=True):
    """
    :param bool include_special_axes: whether to include time and feature special axis marker
    :return: relevant attrib items for copying
    :rtype: dict[str]
    """
    keys = ['name', 'dims', 'dtype']
    if include_special_axes:
        # Only include axis markers which were set explicitly; unspecified ones are derived.
        if ((self.version <= 1) and (self.time_dim_axis_or_unspecified is not NotSpecified)):
            keys += ['time_dim_axis']
        if (self.feature_dim_axis_or_unspecified is not NotSpecified):
            keys += ['feature_dim_axis']
    if self.sparse_dim:
        keys += ['sparse_dim']
    if (self.version == 1):
        # Only an explicitly-set old version needs to be forwarded.
        keys += ['version']
    if self._extra:
        # Optional extra state; only copy what is actually set / non-default.
        if (self.batch is not None):
            keys += ['batch']
        if (self.beam is not None):
            keys += ['beam']
        if self.control_flow_ctx:
            keys += ['control_flow_ctx']
        if (not self.available_for_inference):
            keys += ['available_for_inference']
    return {key: getattr(self, key) for key in keys}
def get_description(self, with_name=True, with_placeholder=False, catch_exceptions=False):
    """
    :param bool with_name:
    :param bool with_placeholder:
    :param bool catch_exceptions: if True, render exceptions from attrib access into the string instead of raising
    :return: description of self. also used for __repr__
    :rtype: str
    """
    # Collect the attrib names which are worth showing (non-default state).
    keys = []
    if self.sparse:
        keys.append('dtype')
        keys.append('sparse_dim')
    elif (self.dtype != 'float32'):
        keys.append('dtype')
    if with_placeholder:
        keys.append('placeholder')
    if (not self.available_for_inference):
        keys.append('available_for_inference')
    if (self.beam is not None):
        # Only show the beam if it is not already implied by the batch info.
        if ((not self.batch) or (self.batch.beam != self.beam)):
            keys.append('beam')
    args = []
    if with_name:
        name = getattr(self, 'name', None)
        args += [(repr(name) if name else '<undefined>')]
    try:
        batch_shape_meta = ('[%s]' % ','.join(self.get_batch_axes_short_description()))
    except Exception as exc:
        if catch_exceptions:
            batch_shape_meta = ('<!%s: %s>' % (type(exc).__name__, exc))
        else:
            raise
    args += [batch_shape_meta]
    for key in keys:
        try:
            value_repr = repr(getattr(self, key))
        except Exception as exc:
            if catch_exceptions:
                value_repr = ('<!%s: %s>' % (type(exc).__name__, exc))
            else:
                raise
        args += [('%s=%s' % (key, value_repr))]
    if self.control_flow_ctx:
        try:
            value_repr = self.control_flow_ctx.repr_inner()
        except Exception as exc:
            if catch_exceptions:
                value_repr = ('<!%s: %s>' % (type(exc).__name__, exc))
            else:
                raise
        args += [('ctx=' + value_repr)]
    return ('Tensor{%s}' % ', '.join(args))
def get_batch_axes_short_description(self, special_axes=True):
    """
    :param bool special_axes: special markers for old-style time_dim_axis and feature_dim_axis
    :rtype: list[str]
    """
    res = []
    for (axis, dim_tag) in enumerate(self.dim_tags):
        descriptions = []
        if (axis == self.batch_dim_axis):
            if self.batch:
                descriptions.append(self.batch.short_repr())
            else:
                descriptions.append('B?')  # batch axis without batch info
        if special_axes:
            if (axis == self.time_dim_axis):
                descriptions.append('T')
            if (axis == self.feature_dim_axis):
                descriptions.append('F')
        if (self.batch_shape[axis] is None):
            # Dynamic size: the batch axis is already described by the batch info above.
            if (axis == self.batch_dim_axis):
                pass
            else:
                descriptions.append(dim_tag.short_repr())
        elif ((axis != self.batch_dim_axis) or (not self.batch)):
            descriptions.append(dim_tag.short_repr())
        res.append(('|'.join(descriptions) or '?'))
    return res
def get_compare_key(self):
    """
    :return: some key which can be used for compare functions, i.e. such that
      cmp(get_compare_key(self), get_compare_key(other)) == cmp(self, other),
      i.e. we define some order by that.
      Note that this order is not totally fixed, and might change.
    :rtype: object
    """
    attribs = ('dtype', 'shape', 'batch_dim_axis', 'feature_dim_axis', 'time_dim_axis', 'dim_tags', 'batch', 'beam')
    return tuple(getattr(self, attrib) for attrib in attribs)
def __repr__(self):
    # catch_exceptions=True so that repr never raises, even on partially-constructed tensors.
    return self.get_description(catch_exceptions=True)
def __hash__(self):
    # Identity-based hash: the tensor is mutable, so hashing by content would be unsafe.
    return id(self)
def _sis_hash(self):
    # Sisyphus hashing hook: delegate to the raw tensor's own hash if it defines one,
    # otherwise hash our copy-kwargs (name, dims, dtype, ...).
    if ((self.raw_tensor is not None) and hasattr(self.raw_tensor, '_sis_hash')):
        return self.raw_tensor._sis_hash()
    from sisyphus.hash import sis_hash_helper
    return sis_hash_helper(self.get_kwargs())
def __getstate__(self):
    """Pickle support: serialize all slot attribs, but never the raw tensor (usually not picklable)."""
    state = {key: getattr(self, key) for key in self.__slots__}
    state['_raw_tensor'] = None
    return state
def __setstate__(self, state):
    """Pickle support: restore all attribs from the state dict."""
    for key, value in state.items():
        setattr(self, key, value)
def reset(self: "Tensor"):
    """
    Reset raw_tensor and batch info.
    """
    self._raw_tensor = None
    self.batch = None
def _adapt_batch_consistent_dim_tags(self):
    """Replace our dim tags by variants which are consistent with our batch info and control-flow ctx."""
    if (not self._extra):
        # No extra state -> no batch info set, nothing to adapt.
        return
    if (not self.batch):
        return
    dims = tuple((tag.get_for_batch_ctx(batch=self.batch, ctx=self.control_flow_ctx) for tag in self._dims))
    assert all(dims)
    dims: Tuple[(Dim, ...)]
    self._dims = dims
def copy(self, name: Optional[str] = None) -> "_t.Tensor":
    """
    :param name: if given, will overwrite this name
    :return: copy of myself, using self.get_kwargs(), and with placeholder and size_placeholder
    """
    copied = _t.Tensor(**self.get_kwargs())
    copied._raw_tensor = self._raw_tensor
    if name:
        copied.name = name
    return copied
def copy_as_batch_major(self) -> "_t.Tensor":
    """
    :return: copy of myself with batch_dim_axis == 0
    """
    return self.copy_with_batch_dim_axis(0)
def copy_as_time_major(self) -> "_t.Tensor":
    """
    :return: copy of myself with time_dim_axis == 0
    """
    assert self.time_dim_axis is not None
    return self.copy_with_time_dim_axis(0)
def copy_with_batch_dim_axis(self, batch_dim_axis) -> "_t.Tensor":
    """
    :param int batch_dim_axis:
    :return: copy of myself with specific batch_dim_axis
    """
    current_axis = self.batch_dim_axis
    assert current_axis is not None
    return self.copy_move_axis(current_axis, batch_dim_axis)
def copy_with_time_dim_axis(self, time_dim_axis) -> "_t.Tensor":
    """
    :param int time_dim_axis:
    :return: copy of myself with specific time_dim_axis
    """
    current_axis = self.time_dim_axis
    assert current_axis is not None
    return self.copy_move_axis(current_axis, time_dim_axis)
def copy_transpose(self: Tensor, perm: Sequence[Union[(int, Dim)]], *, allow_int: bool=True) -> _t.Tensor:
    """
    :param perm: permutation of the axes. Maps the new axes to the old axes
    :param allow_int: allow int as axis, otherwise only :class:`Dim`
    :return: copy of myself with permuted axes
    """
    assert (len(perm) == len(self._dims)), f'{self}: invalid perm {perm!r} length'
    if (not perm):
        # Zero-dim tensor: nothing to permute.
        return self.copy()
    if (allow_int and isinstance(perm[0], int)):
        # Int-based permutation: must be exactly a permutation of range(ndim).
        assert all((isinstance(a, int) for a in perm)), f'{self}: invalid perm {perm!r} types'
        assert (set(perm) == set(range(len(perm)))), f'{self}: invalid perm {perm!r}'
        return self._copy_compatible_to_dims_with_perm([self._dims[i] for i in perm], perm)
    else:
        # Dim-based permutation, resolved via Dim matching.
        assert all((isinstance(a, Dim) for a in perm)), f'{self}: invalid perm {perm!r} types'
        return self.copy_compatible_to_dims(perm)
def copy_move_axis(self, old_axis, new_axis) -> "_t.Tensor":
    """
    :param int old_axis: counted with batch-dim; can be negative
    :param int new_axis: counted with batch-dim; can be negative
    :return: copy of myself with moved axis (see :func:`move_axis`)
    """
    ndim = self.batch_ndim
    if old_axis < 0:
        old_axis += ndim
        assert old_axis >= 0
    assert 0 <= old_axis < ndim
    if new_axis < 0:
        new_axis += ndim
        assert new_axis >= 0
    assert 0 <= new_axis < ndim
    if old_axis == new_axis:
        return self.copy()
    # Remove the moved axis from the identity order and reinsert at the target position.
    order = [axis for axis in range(ndim) if axis != old_axis]
    order.insert(new_axis, old_axis)
    return self.copy_transpose(order)
def copy_swap_axes(self, axis1, axis2) -> "_t.Tensor":
    """
    Like :func:`Tensor.copy_move_axis`, but keeps all other axes unchanged.

    :param int axis1: counted with batch-dim; can be negative
    :param int axis2: counted with batch-dim; can be negative
    :return: copy of myself with moved axis (see :func:`swapaxes`)
    """
    ndim = self.batch_ndim
    if axis1 < 0:
        axis1 += ndim
    assert 0 <= axis1 < ndim
    if axis2 < 0:
        axis2 += ndim
    assert 0 <= axis2 < ndim
    if axis1 == axis2:
        return self.copy()
    order = list(range(ndim))
    order[axis1], order[axis2] = axis2, axis1
    return self.copy_transpose(order)
def copy_as_bt_or_tb_major(self) -> "_t.Tensor":
    """
    :return: copy of myself in batch-time-major or time-batch-major
    """
    assert self.have_batch_axis() and self.have_time_axis()
    batch_axis, time_axis = self.batch_dim_axis, self.time_dim_axis
    if batch_axis == 0:
        return self.copy_with_time_dim_axis(1)
    if time_axis == 0:
        return self.copy_with_batch_dim_axis(1)
    # Neither batch nor time is the leading axis: move one to the front, then recurse.
    intermediate = self.copy_as_time_major() if batch_axis > time_axis else self.copy_as_batch_major()
    return intermediate.copy_as_bt_or_tb_major()
def copy_with_feature_dim_axis(self, feature_dim_axis) -> "_t.Tensor":
    """
    :param int feature_dim_axis: can also be negative
    :return: copy of myself with specific feature dim axis
    """
    current_axis = self.feature_dim_axis
    assert current_axis is not None
    return self.copy_move_axis(current_axis, feature_dim_axis)
def copy_as_batch_feature_major(self) -> "_t.Tensor":
    """
    :return: copy of self with batch_dim_axis == 0 and feature_dim_axis == 1
    """
    assert self.batch_dim_axis is not None
    assert self.feature_dim_axis is not None
    return self.copy_as_batch_major().copy_with_feature_dim_axis(1)
def copy_as_time_batch_major(self) -> "_t.Tensor":
    """
    :return: copy of self with batch_dim_axis == 1 and time_dim_axis == 0
    """
    assert self.have_batch_axis() and self.have_time_axis()
    result = self.copy_as_bt_or_tb_major()
    if result.time_dim_axis == 1:
        # Got batch-time-major; swap the first two axes to get time-batch-major.
        result = result.copy_move_axis(0, 1)
    return result
def copy_as_batch_spatial_major(self) -> _t.Tensor:
    """
    :return: copy with batch_dim_axis == 0, then all dynamic axes, then any other spatial axes, last feature axis
    """
    data = self.copy_as_batch_major()
    if (data.feature_dim_axis is not None):
        data = data.copy_with_feature_last()
    if data.size_placeholder:
        # Move all dynamic axes, sorted by their spatial index, directly behind the batch axis.
        for (i, (j, size)) in enumerate(sorted(data.size_placeholder.items())):
            data = data.copy_move_axis(data.get_batch_axis(j), (i + 1))
    if (data.feature_dim_axis is not None):
        assert (data.feature_dim_axis == (data.batch_ndim - 1))
        if (data.feature_dim_axis_or_unspecified is not NotSpecified):
            # Drop an explicit feature-axis marker if it matches the derived default anyway.
            if (data._default_feature_dim_axis() == data.feature_dim_axis):
                data.feature_dim_axis = NotSpecified
    return data
def copy_with_feature_last(self) -> "_t.Tensor":
    """
    :return: copy of self with feature_dim_axis being the very last axis
    """
    assert self.feature_dim_axis is not None
    return self.copy_with_feature_dim_axis(-1)
def copy_add_batch_dim(self, batch_dim_axis, batch=None, dim_tag=None) -> _t.Tensor:
    """
    :param int batch_dim_axis: can be negative (counted with the new ndim)
    :param BatchInfo|None batch:
    :param Dim|None dim_tag:
    :return: copy of myself with added batch-dim
    """
    if self.have_batch_axis():
        raise Exception(f'{self} copy_add_batch_dim: already has batch-dim at axis {self.batch_dim_axis}, cannot add tag {dim_tag!r}')
    assert (self.batch_dim_axis is None)
    if (batch_dim_axis < 0):
        assert (((batch_dim_axis + self.batch_ndim) + 1) >= 0)
        batch_dim_axis += (self.batch_ndim + 1)
    assert (0 <= batch_dim_axis <= self.batch_ndim)
    data_opts = self.get_kwargs(include_special_axes=False)
    placeholder = self.placeholder
    if (placeholder is not None):
        backend = self._raw_backend
        placeholder = backend.expand_dims_raw(placeholder, batch_dim_axis)
        # Determine the batch size to expand to: from the batch info, or from the dim tag.
        if batch:
            batch_dim_ = batch.dim
        elif dim_tag:
            if dim_tag.dyn_size_ext:
                assert (dim_tag.dyn_size_ext.dims == ())  # scalar batch size
                assert (dim_tag.dyn_size_ext.raw_tensor is not None)
                batch_dim_ = dim_tag.dyn_size_ext.raw_tensor
            elif dim_tag.dimension:
                batch_dim_ = dim_tag.dimension
            else:
                raise Exception(f'{self} copy_add_batch_dim: unknown batch dim for {dim_tag!r}')
        else:
            raise Exception(f'{self} copy_add_batch_dim: unknown batch dim ')
        if ((not isinstance(batch_dim_, int)) or (batch_dim_ != 1)):
            # Tile the new axis up to the batch size (skip for a static broadcast dim of 1).
            placeholder = backend.expand_raw(placeholder, batch_dim_axis, batch_dim_)
    dim_tags = list(self.dim_tags)
    if dim_tag:
        assert dim_tag.is_batch_dim()
        assert (dim_tag.batch == batch)
        if batch:
            assert ((dim_tag.dimension == batch.static_dim) or (dim_tag.dimension is None))
    elif batch:
        dim_tag = batch.batch_dim_tag
    else:
        dim_tag = Dim(kind=Dim.Types.Batch, description='batch', dimension=(batch.static_dim if batch else None), batch=batch)
    dim_tags.insert(batch_dim_axis, dim_tag)
    data_opts['dims'] = dim_tags
    if batch:
        data_opts['batch'] = batch
        data_opts['beam'] = batch.beam
    # Shift any special axis markers which lie at/behind the inserted batch axis.
    other_special_axes = self.get_special_axes_dict(counted_with_batch_dim=True, only_available=True)
    for (k, a) in other_special_axes.items():
        data_opts[k] = (a if (a < batch_dim_axis) else (a + 1))
    return _t.Tensor(placeholder=placeholder, **data_opts)
def copy_add_spatial_dim(self, spatial_dim_axis=None, dim=1, auto_time_dim_axis=True) -> _t.Tensor:
    """
    :param int|None spatial_dim_axis: counted with batch-dim. if there is no time-dim, this will be it.
    :param int|None dim: static dim of the new axis; None means dynamic (template only)
    :param bool auto_time_dim_axis:
    :return: copy of myself with added spatial-dim
    """
    if (dim is None):
        # Dynamic (unknown) size: only allowed as a template, without raw data attached.
        assert (not self.placeholder)
    dim_tag = Dim(description='added_spatial', dimension=dim, kind=Dim.Types.Spatial)
    if (spatial_dim_axis is None):
        spatial_dim_axis = self.get_default_new_axis_for_dim_tag(dim_tag)
    v = self.copy_add_dim_by_tag(dim_tag, unbroadcast=True, axis=spatial_dim_axis)
    if (auto_time_dim_axis and (self.time_dim_axis is None)):
        # If we had no time-dim before, the new spatial dim becomes it.
        v.time_dim_axis = spatial_dim_axis
    return v
def copy_add_feature_dim(self, axis=None) -> _t.Tensor:
    """
    :param int|None axis:
    :return: self with a new feature dim axis with dim 1.
      If there is an existing feature dim, the new feature dim will be added right after.
      If we are sparse, we don't add a feature dim, but it becomes a spatial dim instead.
    """
    if self.sparse:
        # Sparse tensors have no real feature dim; add a spatial dim instead.
        return self.copy_add_spatial_dim(spatial_dim_axis=axis)
    dim_tag = Dim(description='feature1', dimension=1, kind=Dim.Types.Feature)
    if (axis is None):
        axis = self.get_default_new_axis_for_dim_tag(dim_tag)
    v = self.copy_add_dim_by_tag(dim_tag, axis=axis)
    if (v.feature_dim_axis_or_unspecified is not NotSpecified):
        v.feature_dim_axis = NotSpecified
    if (axis < 0):
        axis += v.batch_ndim
        assert (axis >= 0)
    assert (0 <= axis < v.batch_ndim)
    if (v.feature_dim_axis != axis):
        # The derived default does not point at the new axis; mark it explicitly.
        v.feature_dim_axis = axis
    return v
def get_default_new_axis_for_dim_tag(self, dim_tag: "Dim") -> int:
    """
    Default insertion position for a new axis described by the given dim tag.

    :param dim_tag:
    """
    if dim_tag.is_batch_dim():
        return 0  # batch always comes first
    if dim_tag.is_feature_dim() and not self.sparse:
        existing_feature = self.feature_dim_axis
        if existing_feature is not None:
            return existing_feature + 1  # right after the existing feature dim
        return self.batch_ndim  # at the end
    if dim_tag.is_dynamic():
        dynamic_axes = self.get_dynamic_axes()
        if dynamic_axes:
            return dynamic_axes[-1] + 1  # after the last dynamic axis
    if dim_tag.is_spatial_dim():
        spatial_axes = self.get_spatial_batch_axes()
        if spatial_axes:
            return spatial_axes[-1] + 1  # after the last spatial axis
        if self.feature_dim_axis is not None:
            return self.feature_dim_axis  # right before the feature dim
    return self.batch_ndim
def copy_add_dim_by_tag(self, dim_tag, unbroadcast=False, axis=None) -> _t.Tensor:
    """
    :param Dim dim_tag:
    :param bool unbroadcast: If True unbroadcast the newly added axis.
      Will infer the unbroadcast shape via :func:`Dim.get_dim_value`
    :param int|None axis:
    """
    assert dim_tag.can_be_used_as_dim()
    if (axis is None):
        axis = self.get_default_new_axis_for_dim_tag(dim_tag=dim_tag)
    if (axis < 0):
        axis += (self.batch_ndim + 1)
    assert (0 <= axis <= self.batch_ndim)
    if dim_tag.is_batch_dim():
        # Batch dims are handled specially (batch info, beam, etc.).
        if unbroadcast:
            return self.copy_add_batch_dim(batch_dim_axis=axis, batch=dim_tag.batch, dim_tag=dim_tag)
        else:
            # Broadcast batch dim (size 1) with a global-broadcast batch info if any batch is involved.
            if (dim_tag.batch or self.batch):
                from returnn.tf.util.data import BatchInfo
                batch_info = BatchInfo.make_global_broadcast_batch_info()
            else:
                batch_info = None
            if (dim_tag and (dim_tag.dimension == 1) and (dim_tag.batch == batch_info)):
                pass  # the given tag already describes the broadcast batch dim
            else:
                dim_tag = Dim(kind=Dim.Types.Batch, description='batch-broadcast', dimension=1, batch=batch_info, auto_generated=True)
            return self.copy_add_batch_dim(batch_dim_axis=axis, batch=batch_info, dim_tag=dim_tag)
    data_opts = self.get_kwargs()
    if (self.sparse and dim_tag.is_feature_dim()):
        # A sparse tensor cannot have a (dense) feature dim; reinterpret as spatial.
        dim_tag = dim_tag.copy(same_as_self=True, kind=Dim.Types.Spatial)
    if ((not unbroadcast) and (dim_tag.dimension != 1)):
        # Broadcasting requested: replace the tag by a dummy dim-1 variant.
        dim_tag = Dim(kind=dim_tag.kind, description=('%s_dummy_dim1' % (dim_tag.description or 'unnamed')), dimension=1, auto_generated=True)
    data_opts['dims'] = ((self._dims[:axis] + (dim_tag,)) + self._dims[axis:])
    # Shift any special axis markers which lie at/behind the inserted axis.
    other_special_axes = self.get_special_axes_dict(counted_with_batch_dim=True, only_available=True)
    for (k, a) in other_special_axes.items():
        data_opts[k] = (a if (a < axis) else (a + 1))
    if (dim_tag.is_feature_dim() and (self.feature_dim_axis is None)):
        data_opts.pop('feature_dim_axis', None)  # let it be derived from the new dims
    if (dim_tag.is_spatial_dim() and (self.time_dim_axis is None)):
        data_opts.pop('time_dim_axis', None)  # let it be derived from the new dims
    if (self.placeholder is not None):
        backend = self._raw_backend
        placeholder = backend.expand_dims_raw(self.placeholder, axis)
        if ((dim_tag.dimension is None) or (dim_tag.dimension > 1)):
            # Unbroadcast: tile the new axis to its actual dim value.
            placeholder = backend.expand_raw(placeholder, axis, dim_tag.get_dim_value())
        data_opts['placeholder'] = placeholder
    return _t.Tensor(**data_opts)
def copy_split_feature_dim(self, new_feature_dim) -> _t.Tensor:
    """
    Split it into (new_spatial_dim, new_feat_dim), in that order.
    This will increase the feature_dim_axis by one.

    :param int new_feature_dim: will be the new dim
    """
    assert (not self.sparse)
    assert (self.feature_dim_axis is not None)
    assert (self.dim is not None)
    assert ((self.dim % new_feature_dim) == 0), 'must be a multiple of the input feature dim'
    feature_dim_rem = (self.dim // new_feature_dim)
    new_feature_dim_axis = (self.feature_dim_axis + 1)
    data_opts = self.get_kwargs(include_special_axes=False)
    dim_tag_split_rem = Dim(kind=Dim.Types.Spatial, description=('feature_split_rem_%i' % feature_dim_rem), auto_generated=True, dimension=feature_dim_rem)
    dim_tag_new = Dim(kind=self.dim_tags[self.feature_dim_axis].kind, description=('feature_split_new_%i' % new_feature_dim), auto_generated=True, dimension=new_feature_dim)
    # Replace the old feature dim by the pair (remainder spatial dim, new feature dim).
    dim_tags = ((self.dim_tags[:self.feature_dim_axis] + (dim_tag_split_rem, dim_tag_new)) + self.dim_tags[(self.feature_dim_axis + 1):])
    data_opts['dims'] = dim_tags
    # Shift special axes which lie behind the newly inserted axis; the feature axis is re-derived.
    other_special_axes = self.get_special_axes_dict(counted_with_batch_dim=True, only_available=True)
    other_special_axes.pop('feature_dim_axis', None)
    for (k, a) in other_special_axes.items():
        data_opts[k] = (a if (a < new_feature_dim_axis) else (a + 1))
    if (self.placeholder is not None):
        backend = self._raw_backend
        backend.set_known_shape_raw(self.placeholder, self.batch_shape)
        old_shape = backend.get_shape_tuple_raw(self.placeholder)
        new_shape = ((old_shape[:self.feature_dim_axis] + (feature_dim_rem, new_feature_dim)) + old_shape[(self.feature_dim_axis + 1):])
        data_opts['placeholder'] = backend.reshape_raw(self.placeholder, new_shape)
    return _t.Tensor(**data_opts)
def copy_extend_batch(self, batch) -> _t.Tensor:
    """
    Similar as copy_compatible_to with unbroadcast=True,
    we would possibly extend/expand our batch dim.
    See :class:`BatchInfo`.
    This assumes that we already have a batch dim
    (otherwise see :func:`copy_add_batch_dim`).

    This excludes any beam expansion, which is handled explicitly elsewhere
    (e.g. see :func:`copy_extend_with_beam`).

    :param BatchInfo batch:
    """
    assert self.have_batch_axis()
    assert self.batch, ('%s: batch unset' % self)
    data = self.copy()
    batch = batch.copy_set_beam(data.beam)
    if (data.batch.beam != data.beam):
        data.batch = data.batch.copy_set_beam(data.beam)
    if (data.batch == batch):
        return data
    data.batch = batch
    # NOTE(review): this adapts self's dim tags, not data's. Presumably the
    # `data.batch = batch` assignment above already adapts data via the property setter -- confirm.
    self._adapt_batch_consistent_dim_tags()
    if (self.placeholder is not None):
        # Raw batch expansion is only implemented for the TF backend here.
        assert self._raw_backend.is_tensorflow
        assert set(self.batch.virtual_dims).issubset(batch.virtual_dims)
        import tensorflow as tf
        from returnn.tf.util.basic import get_shape
        from returnn.util.basic import ensure_list_of_type
        from returnn.tf.util.data import BatchInfo
        with tf.name_scope('copy_extend_batch'):
            axis = self.batch_dim_axis
            x = self.placeholder
            shape = get_shape(x)
            old_dims = ensure_list_of_type(self.batch.virtual_dims, BatchInfo.FixedDim)
            new_dims = ensure_list_of_type(batch.virtual_dims, BatchInfo.FixedDim)
            batch_broadcast_shape = []  # the batch axis split into virtual dims, 1 for new dims
            ndim_batch_split = ((self.batch_ndim - 1) + len(new_dims))
            tiles = ([1] * ndim_batch_split)
            old_idx = 0
            for (new_idx, new_dim) in enumerate(new_dims):
                old_dim = (old_dims[old_idx] if (old_idx < len(old_dims)) else None)
                if (old_dim == new_dim):
                    # Virtual dim already present: keep its size.
                    batch_broadcast_shape.append(old_dim.size)
                    old_idx += 1
                else:
                    # New virtual dim: insert as broadcast (1) and tile to the new size.
                    batch_broadcast_shape.append(1)
                    tiles[(axis + new_idx)] = new_dim.size
            assert (old_idx == len(old_dims))
            # Split the batch dim into virtual dims, tile the new ones, then merge back.
            shape_batch_split = ((shape[:axis] + batch_broadcast_shape) + shape[(axis + 1):])
            x = tf.reshape(x, shape_batch_split)
            x = tf.tile(x, tiles)
            shape = ((shape[:axis] + [batch.dim]) + shape[(axis + 1):])
            x = tf.reshape(x, shape)
            data.placeholder = x
    return data
def copy_compatible_to(self: Tensor, data: Tensor, add_dims=True, unbroadcast=False, except_feature=False, except_axis=None, check_sparse=True, check_dtype=True) -> Tensor:
    """
    :param data: other data which the returned tensor should be compatible to
      It would add any missing axes with a dim 1 axis for automatic broadcasting (with add_dims=True).
      It currently does not check whether existing dims match.
    :param bool add_dims: whether to add (broadcast, or unbroadcasted) dims. throws error if missing dim
    :param bool unbroadcast: if True, all added broadcast axes (axes with dim 1) will be tiled such that they match
    :param bool except_feature: if unbroadcast, do not unbroadcast the feature dim
    :param Dim|int|None except_axis: if unbroadcast, do not unbroadcast this axis
    :param bool check_sparse:
    :param bool check_dtype:
    :returns: Tensor, might add broadcast dimensions
    """
    assert ((not check_sparse) or (self.sparse == data.sparse))
    assert ((not check_dtype) or (self.dtype == data.dtype))
    v = self.copy()
    if (v.have_batch_axis() and data.have_batch_axis() and v.batch and data.batch and (v.batch != data.batch)):
        # Unify the batch info first (e.g. extend by packed virtual dims).
        v = v.copy_extend_batch(data.batch)
    v.sparse_dim = data.sparse_dim  # temporarily adopt the target's sparse dim for matching; restored below if sparse
    if ((v.batch_dim_axis is not None) and (data.batch_dim_axis is None)):
        raise ValueError(('copy_compatible_to: self %r has batch-dim, but target data %r has not' % (self, data)))
    if (data.batch_ndim < v.batch_ndim):
        raise ValueError(('copy_compatible_to: self %r already has more dims than target data %r' % (self, data)))
    is_equal_opts = dict(allow_same_feature_dim=True, allow_same_spatial_dim=True, treat_feature_as_spatial=True, ignore_feature_dim=True)
    mapped_axes = data.find_matching_dim_map(v, list(range(v.batch_ndim)), is_equal_opts)
    assert (len(mapped_axes) == v.batch_ndim)
    except_axis_int = (data.get_axis_from_description(except_axis, allow_int=True) if (except_axis is not None) else None)
    for target_axis in range(data.batch_ndim):
        new_v_axis = min(target_axis, v.batch_ndim)
        if (target_axis not in mapped_axes.values()):
            # Axis missing in self: insert a (possibly unbroadcast) axis at the target position.
            if (not add_dims):
                raise ValueError(('%s.copy_compatible_to(%s) not allowed, axis %i (%s) not in source' % (self, data, target_axis, data.dim_tags[target_axis])))
            unbroadcast_axis = (unbroadcast and (not (except_feature and (data.feature_dim_axis == target_axis))) and (not ((except_axis_int is not None) and (except_axis_int == target_axis))))
            v = v.copy_add_dim_by_tag(data.get_dim_tag(target_axis), axis=new_v_axis, unbroadcast=unbroadcast_axis)
            # Keep the axis map consistent with the inserted axis.
            mapped_axes = {(v_ax + (1 if (v_ax >= new_v_axis) else 0)): trg_ax for (v_ax, trg_ax) in mapped_axes.items()}
            mapped_axes[new_v_axis] = target_axis
        else:
            # Axis exists in self: move it into place if needed.
            matching_v_axes = [v_ax for (v_ax, trg_ax) in mapped_axes.items() if (trg_ax == target_axis)]
            assert (len(matching_v_axes) == 1)
            matching_v_axis = matching_v_axes[0]
            if (target_axis != matching_v_axis):
                v = v.copy_swap_axes(matching_v_axis, new_v_axis)
                (mapped_axes[matching_v_axis], mapped_axes[new_v_axis]) = (mapped_axes[new_v_axis], mapped_axes[matching_v_axis])
    assert (v.batch_ndim == data.batch_ndim)
    assert all(((mapped_axes[ax] == ax) for ax in range(v.batch_ndim)))
    if (self.version == 1):
        # Old (version 1) behavior: also align the special axis markers with the target.
        assert (v.batch_dim_axis == data.batch_dim_axis)
        if (v.time_dim_axis != data.time_dim_axis):
            v.time_dim_axis = NotSpecified  # first try the derived default
            if (v.time_dim_axis != data.time_dim_axis):
                v.time_dim_axis = data.time_dim_axis  # otherwise force it
        if (v.feature_dim_axis != data.feature_dim_axis):
            v.feature_dim_axis = NotSpecified
            if (v.feature_dim_axis != data.feature_dim_axis):
                v.feature_dim_axis = data.feature_dim_axis
    elif (v.feature_dim_axis != data.feature_dim_axis):
        v.feature_dim_axis = data.feature_dim_axis
    if self.sparse:
        v.feature_dim_axis = NotSpecified
        v.sparse_dim = self.sparse_dim  # restore our own sparse dim
    v.sanity_check()
    return v
def get_out_permutation_to_dims(self, dims: Sequence[Dim]) -> List[int]:
    """
    :param dims: superset of our dims
    :return: out_permutation list of len dims, where -1 means no match, and otherwise the axis in self.
      Thus, sorted(p for p in out_permutation if p >= 0) == range(len(self._dims)).
    """
    out_permutation: List[int] = []
    count = 0  # number of self dims matched so far
    taken = ([False] * len(self._dims))  # self dims already assigned to some target dim
    for dim in dims:
        candidates: List[int] = []
        for j in range(len(self._dims)):
            if taken[j]:
                continue
            if (dim is self._dims[j]):
                # Identity match wins unconditionally.
                candidates = [j]
                break
            if (dim == self._dims[j]):
                candidates.append(j)
        if (not candidates):
            out_permutation.append((- 1))  # no match: becomes a new broadcast axis
        elif (len(candidates) == 1):
            out_permutation.append(candidates[0])
            taken[candidates[0]] = True
            count += 1
        else:
            # Multiple equal candidates: disambiguate via Dim.match_priority; must be unique.
            max_match_priority_idx = None
            max_match_priority = None
            count_same_match_priority = 0
            for j in range(len(candidates)):
                # NOTE(review): `dim` here shadows the outer loop var, so the error message
                # below shows the candidate self-dim rather than the requested dim -- confirm intended.
                dim = self._dims[candidates[j]]
                match_priority = dim.match_priority
                if ((j > 0) and (match_priority == max_match_priority)):
                    count_same_match_priority += 1
                if ((j == 0) or (match_priority > max_match_priority)):
                    max_match_priority = match_priority
                    count_same_match_priority = 1
                    max_match_priority_idx = j
            assert (count_same_match_priority >= 1)
            if (count_same_match_priority > 1):
                raise ValueError(f'{self}: dim {dim} is ambiguous, from tensor dims {self._dims} and raw_tensor shape {dims}')
            out_permutation.append(candidates[max_match_priority_idx])
            taken[candidates[max_match_priority_idx]] = True
            count += 1
    assert (count == len(self._dims))  # all self dims must be covered
    assert (len(out_permutation) == len(dims))
    return out_permutation
def copy_compatible_to_dims(self: "_t.Tensor", dims: Sequence["Dim"]) -> "_t.Tensor":
    """
    Simpler variant of :func:`copy_compatible_to` which just takes a list of dims,
    and uses simple Dim equality.

    :param dims:
    :return: self with dims permuted and broadcast dims added
    """
    permutation = self.get_out_permutation_to_dims(dims)
    identity = list(range(len(self._dims)))
    if permutation == identity:
        return self.copy()  # already in the requested order
    return self._copy_compatible_to_dims_with_perm(dims, permutation)
def _copy_compatible_to_dims_with_perm(self, dims: Sequence[Dim], out_permutation: Sequence[int]):
    """Apply an out-permutation (as from :func:`get_out_permutation_to_dims`; -1 = new broadcast axis)."""
    raw_tensor = self._raw_tensor
    if (raw_tensor is not None):
        backend = self._raw_backend
        raw_shape = backend.get_shape_tuple_raw(raw_tensor)
        # First transpose the existing axes into target order, then insert size-1 axes for -1 entries.
        raw_tensor = backend.transpose_raw(raw_tensor, [p for p in out_permutation if (p >= 0)])
        raw_tensor = backend.reshape_raw(raw_tensor, [(raw_shape[p] if (p >= 0) else 1) for p in out_permutation])
    # Unmatched target dims become auto-generated broadcast (dim 1) variants.
    out_dims = [(dims[i] if (p >= 0) else Dim(kind=dims[i].kind, description=('%s_bc_dim1' % (dims[i].description or 'unnamed')), dimension=1, auto_generated=True)) for (i, p) in enumerate(out_permutation)]
    kwargs = self.get_kwargs(include_special_axes=False)
    kwargs['dims'] = out_dims
    kwargs['raw_tensor'] = raw_tensor
    res = _t.Tensor(**kwargs)
    if (self.version <= 1):
        # Old behavior: carry over the explicit time/feature axis markers through the permutation.
        if (self.time_dim_axis is None):
            if (res.time_dim_axis is not None):
                res.time_dim_axis = None
        else:
            axis = out_permutation.index(self.time_dim_axis)
            assert (axis >= 0)
            if (res.time_dim_axis != axis):
                res.time_dim_axis = axis
        if (self.feature_dim_axis is None):
            if (res.feature_dim_axis is not None):
                res.feature_dim_axis = None
        else:
            axis = out_permutation.index(self.feature_dim_axis)
            assert (axis >= 0)
            if (res.feature_dim_axis != axis):
                res.feature_dim_axis = axis
    return res
def copy_compatible_to_dims_raw(self: _t.Tensor, dims: Sequence[Dim]) -> _t.RawTensorType:
    """
    Simpler variant of :func:`copy_compatible_to` which just takes a list of dims,
    and uses simple Dim equality.

    :param dims:
    :return: raw tensor from self with dims permuted and broadcast dims added
    """
    raw_tensor = self._raw_tensor
    assert (raw_tensor is not None), f'{self} copy_compatible_to_dims_raw: no raw tensor'
    out_permutation = self.get_out_permutation_to_dims(dims)
    if (out_permutation == list(range(len(self._dims)))):
        # Already in the requested order, nothing to do.
        return raw_tensor
    backend = self._raw_backend
    raw_shape = backend.get_shape_tuple_raw(raw_tensor)
    # First transpose the existing axes into target order, then insert size-1 broadcast axes.
    raw_tensor = backend.transpose_raw(raw_tensor, [p for p in out_permutation if (p >= 0)])
    raw_tensor = backend.reshape_raw(raw_tensor, [(raw_shape[p] if (p >= 0) else 1) for p in out_permutation])
    return raw_tensor
def copy_time_flattened(self) -> _t.Tensor:
    """
    :return: copy of myself where the time-axis is flattened away into the batch-dim-axis.
      See :func:`get_placeholder_time_flattened` and :func:`flatten_with_seq_len_mask for more details.
    """
    assert (self.batch_dim_axis is not None)
    assert (self.time_dim_axis is not None)
    data_opts = self.get_kwargs(include_special_axes=False)
    if (self.placeholder is not None):
        data_opts['placeholder'] = self.get_placeholder_time_flattened()
    # The merged (batch*time) axis gets a new dynamic spatial dim tag.
    dim_tag = self.dim_tags[self.time_dim_axis]
    dim_tag = Dim(kind=Dim.Types.Spatial, description=('%s_flattened' % (dim_tag.description or 'unnamed')), auto_generated=True, dimension=None)
    data_opts['dims'] = ((dim_tag,) + tuple((tag for (i, tag) in enumerate(self.dim_tags) if (i not in (self.batch_dim_axis, self.time_dim_axis)))))
    data_opts['time_dim_axis'] = None
    data_opts.pop('feature_dim_axis', None)
    return _t.Tensor(**data_opts)
def copy_extend_with_beam(self, beam) -> Tensor:
    """
    :param SearchBeam|None beam:
    :return: copy of myself where the batch-dim is extended/multiplied by beam_size, using tile_transposed
    """
    data = self.copy()
    if (data.beam and (data.beam == beam)):
        return data  # already expanded with this beam
    assert (data.beam is None), ('incompatible beam (%r vs %r)' % (data.beam, beam))
    if (beam is None):
        return data
    data.beam = beam
    assert data.batch
    data.batch = data.batch.copy_set_beam(beam)
    if (data.placeholder is not None):
        # Raw beam tiling is only implemented for the TF backend here.
        assert data._raw_backend.is_tensorflow
        import tensorflow as tf
        from returnn.tf.util.basic import get_valid_scope_name_from_str, same_control_flow_ctx, tile_transposed
        with tf.name_scope(('%s_data_extend_with_beam' % get_valid_scope_name_from_str(self.name))):
            with same_control_flow_ctx(data.placeholder):
                data.placeholder = tile_transposed(data.placeholder, axis=data.batch_dim_axis, multiples=beam.beam_size)
            # Remember the pre-expansion tensor on the raw placeholder, for later lookup.
            setattr(data.placeholder, '_RETURNN_beam_expanded_base_data', self)
    data._adapt_batch_consistent_dim_tags()
    return data
def copy_merge_into_batch(self, axes) -> Tensor:
    """
    :param list[int] axes: All axes to be merged into the batch axis.
      Must include the batch_dim_axis. The order is kept.
    :return: copy of myself where the the given axes are merged into the batch dim
    """
    assert self.batch
    assert (self.batch_dim_axis in axes)
    assert (sorted(set(axes)) == sorted(axes))  # no duplicates
    min_axis = min(axes)
    axes = list(axes)
    data = self.copy()
    if (axes != list(range(min_axis, (min_axis + len(axes))))):
        # The axes to merge are not consecutive: transpose them together first (keeping their order).
        rem_axes_start = list(range(min_axis))
        rem_axes_end = [a for a in range(min_axis, self.batch_ndim) if (a not in axes)]
        data = data.copy_transpose(((rem_axes_start + axes) + rem_axes_end))
        axes = list(range(min_axis, (min_axis + len(axes))))
        assert (data.batch_dim_axis in axes)
    tensor = data.placeholder
    batch = data.batch
    data = data.copy_template()
    # Extend the batch info by each merged dim as a virtual (padded or fixed) dim.
    batch_idx = 0
    for axis in axes:
        if (axis == data.batch_dim_axis):
            # Dims merged after the batch axis go behind the existing virtual dims.
            batch_idx = len(batch.virtual_dims)
            continue
        batch = batch.copy_extend_with_padded_or_fixed_dim_tag(dim_tag=data.dim_tags[axis], new_dim_idx=batch_idx)
        batch_idx += 1
    # Remove the merged axes from the template (in reverse, so indices stay valid).
    for axis in reversed(sorted(axes)):
        if (axis != data.batch_dim_axis):
            data = data.copy_template_excluding_axis(axis)
    data.batch = batch
    if (tensor is not None):
        # Raw merge is only implemented for the TF backend here.
        assert self._raw_backend.is_tensorflow
        import tensorflow as tf
        from returnn.tf.util.basic import get_shape
        shape = get_shape(tensor)
        tensor = tf.reshape(tensor, ((shape[:min_axis] + [(- 1)]) + shape[(min_axis + len(axes)):]))
        data.placeholder = tensor
    return data
def copy_squeeze_axes(self, axes) -> Tensor:
    """
    :param list[int] axes: axes to remove, counted with batch dim; each must have static dim 1
    :return: copy of myself, with the given axes squeezed (removed)
    """
    assert isinstance(axes, (list, tuple))
    # Validate the axis indices *before* using them to index batch_shape.
    # (Previously the dim==1 check ran first, so a negative axis would silently
    # index batch_shape from the end before the bounds assert fired.)
    assert all(0 <= axis < self.batch_ndim for axis in axes)
    assert all(self.batch_shape[axis] == 1 for axis in axes)
    if not axes:
        return self.copy()
    data_opts = self.get_kwargs(include_special_axes=False)
    if self._raw_tensor is not None:
        backend = self._raw_backend
        data_opts["raw_tensor"] = backend.squeeze_raw(self._raw_tensor, axes)
    data_opts["dims"] = [tag for (i, tag) in enumerate(self._dims) if i not in axes]
    # If time/feature axis was explicitly set, shift it by the number of removed axes before it.
    if self.time_dim_axis is not None and self.time_dim_axis_or_unspecified is not NotSpecified:
        if self.time_dim_axis not in axes:
            data_opts["time_dim_axis"] = self.time_dim_axis - len(
                [axis for axis in axes if axis < self.time_dim_axis]
            )
    if self.feature_dim_axis is not None and self.feature_dim_axis_or_unspecified is not NotSpecified:
        if self.feature_dim_axis not in axes:
            data_opts["feature_dim_axis"] = self.feature_dim_axis - len(
                [axis for axis in axes if axis < self.feature_dim_axis]
            )
    return _t.Tensor(**data_opts)
def copy_template(self, name=None, *, dtype=None) -> _t.Tensor:
    """
    :param str|None name: if set, overrides the name in the copy
    :param str|None dtype: if set, overrides the dtype in the copy
    :return: copy of myself, using self.get_kwargs(), without placeholder
    """
    opts = self.get_kwargs()
    for key, value in (("name", name), ("dtype", dtype)):
        if value:
            opts[key] = value
    return _t.Tensor(**opts)
def copy_template_dense(self, name=None, dtype=None) -> Tensor:
    """
    :param str|None name: if set, overrides the name in the copy
    :param str|None dtype: if set, overrides the dtype in the copy
    :return: dense copy of myself, using self.get_kwargs(), without placeholder.
        If sparse, the sparse dim becomes a new trailing feature dim.
    """
    res = self.copy_template(name=name)
    if res.sparse:
        sparse_tag = res.sparse_dim
        res.sparse = False
        res.dtype = "float32"
        res = res.copy_add_dim_by_tag(dim_tag=sparse_tag, unbroadcast=True, axis=-1)
        res.feature_dim_axis = NotSpecified
        assert res.feature_dim_axis == res.batch_ndim - 1
    if dtype:
        res.dtype = dtype
    return res
def copy_template_excluding_axis(self, exclude_axis, name=None) -> _t.Tensor:
    """
    :param int exclude_axis: axis to be removed (can be negative, counted from the end)
    :param str|None name: if set, this will be the new name
    :return: copy of myself excluding exclude_axis, without placeholder
    """
    if exclude_axis < 0:
        exclude_axis += self.batch_ndim
        assert exclude_axis >= 0
    assert 0 <= exclude_axis < self.batch_ndim
    opts = self.get_kwargs(include_special_axes=False)
    if exclude_axis == self.feature_dim_axis:
        # The feature dim goes away, so the explicit dim option would be wrong.
        opts.pop("dim", None)
    # Keep the remaining special axes, shifted where they come after the removed axis.
    for axis_name, axis in self.get_special_axes_dict(counted_with_batch_dim=True, only_available=True).items():
        if axis == exclude_axis:
            continue
        opts[axis_name] = axis - 1 if axis > exclude_axis else axis
    opts["dims"] = [tag for i, tag in enumerate(self.dim_tags) if i != exclude_axis]
    if name:
        opts["name"] = name
    return _t.Tensor(**opts)
def copy_template_excluding_spatial_dim(self, spatial_axis_num, name=None) -> Tensor:
    """
    :param int spatial_axis_num: index in self.get_spatial_batch_axes() (can be negative)
    :param str|None name: if set, this will be the new name
    :return: copy of myself excluding that spatial axis, without placeholder
    """
    spatial_axes = self.get_spatial_batch_axes()
    if spatial_axis_num < 0:
        spatial_axis_num += len(spatial_axes)
        assert spatial_axis_num >= 0
    assert 0 <= spatial_axis_num < len(spatial_axes)
    return self.copy_template_excluding_axis(exclude_axis=spatial_axes[spatial_axis_num], name=name)
def copy_template_excluding_time_dim(self, name=None) -> _t.Tensor:
    """
    :param str|None name: if set, this will be the new name
    :return: copy of myself excluding the time-dimension, without placeholder
    """
    time_axis = self.time_dim_axis
    assert time_axis is not None
    return self.copy_template_excluding_axis(exclude_axis=time_axis, name=name)
def copy_template_adding_time_dim(self, name=None, time_dim_axis=0) -> _t.Tensor:
    """
    Adds a time-dim-axis.
    If a time-dim-axis already exists, it will anyway create this new one.

    :param str|None name: if set, this will be the new name
    :param int time_dim_axis: the new time-dim-axis index (can be negative)
    :return: copy of myself adding the time-dimension, without placeholder
    """
    if time_dim_axis < 0:
        time_dim_axis += self.batch_ndim + 1
        assert time_dim_axis >= 0
    assert 0 <= time_dim_axis <= self.batch_ndim
    opts = self.get_kwargs(include_special_axes=False)
    new_tag = Dim(kind=Dim.Types.Time, description="unknown_time", dimension=None, auto_generated=True)
    opts["dims"] = self.dim_tags[:time_dim_axis] + (new_tag,) + self.dim_tags[time_dim_axis:]
    # Keep the other special axes, shifted where they come at or after the insertion point.
    special_axes = self.get_special_axes_dict(counted_with_batch_dim=True, only_available=True)
    special_axes.pop("time_dim_axis", None)
    for axis_name, axis in special_axes.items():
        opts[axis_name] = axis + 1 if axis >= time_dim_axis else axis
    opts["time_dim_axis"] = time_dim_axis
    if name:
        opts["name"] = name
    return _t.Tensor(**opts)
def copy_template_replace_dim_tag(self, axis, new_dim_tag, name=None) -> _t.Tensor:
    """
    :param int axis: counted with batch-dim; can be negative
    :param Dim new_dim_tag: replaces the dim tag at the given axis
    :param str|None name: new name
    :return: copy of myself with the dim tag replaced, without placeholder
    """
    assert new_dim_tag.can_be_used_as_dim()
    if (axis < 0):
        assert ((axis + self.batch_ndim) >= 0)
        axis += self.batch_ndim
    assert (0 <= axis < self.batch_ndim)
    opts = self.get_kwargs()
    if self.dim_tags[axis].is_batch_dim():
        # The old batch dim tag goes away, so the stored batch info would be stale.
        opts.pop('batch', None)
    if new_dim_tag.is_batch_dim():
        # An explicit time/feature axis would be invalid on a batch dim.
        if (self.time_dim_axis == axis):
            opts.pop('time_dim_axis', None)
        if (self.feature_dim_axis == axis):
            opts.pop('feature_dim_axis', None)
    dim_tags = ((self.dim_tags[:axis] + (new_dim_tag,)) + self.dim_tags[(axis + 1):])
    opts['dims'] = dim_tags
    if (self.feature_dim_axis_or_unspecified is not NotSpecified):
        # feature_dim_axis was explicitly set; drop it if the new tag is no longer a feature dim.
        if ((self.feature_dim_axis == axis) and self.dim_tags[axis].is_feature_dim() and (not new_dim_tag.is_feature_dim())):
            opts['feature_dim_axis'] = None
    if name:
        opts['name'] = name
    return _t.Tensor(**opts)
def copy_template_replace_dim(self, axis, new_dim, new_size=None) -> _t.Tensor:
    """
    :param int axis: counted with batch-dim
    :param int|None new_dim: new static dimension (None for dynamic)
    :param tf.Tensor|None new_size: dynamic sizes for the new dim
    :return: copy of myself with a freshly created dim tag at that axis, without placeholder
    """
    old_tag = self.dim_tags[axis]
    if old_tag.is_batch_dim():
        # Replacing the batch dim is not possible; only allow a no-op template copy.
        assert new_dim is None
        return self.copy_template()
    replacement = Dim(
        kind=old_tag.kind,
        description="%s_replaced" % (old_tag.description or "unnamed"),
        auto_generated=True,
        dimension=new_dim,
        dyn_size=new_size,
    )
    return self.copy_template_replace_dim_tag(axis=axis, new_dim_tag=replacement)
def copy_template_new_dim_tags(self, new_dim_tags, name=None, keep_special_axes=False) -> _t.Tensor:
    """
    :param list[Dim]|tuple[Dim] new_dim_tags: the new dims
    :param str|None name: if set, this will be the new name
    :param bool keep_special_axes: keep the explicit time/feature axes
        (then the number of dims must not change)
    :return: copy of myself with the new dim tags, without placeholder
    """
    if keep_special_axes:
        assert len(new_dim_tags) == self.batch_ndim
    kwargs = self.get_kwargs(include_special_axes=keep_special_axes)
    kwargs["dims"] = new_dim_tags
    if name:
        kwargs["name"] = name
    return _t.Tensor(**kwargs)
def copy_template_set_ctx(self, ctx) -> Tensor:
    """
    :param ControlFlowContext ctx: new control flow context
    :return: new Tensor instance with the given control flow context
    """
    opts = self.get_kwargs()
    opts["control_flow_ctx"] = ctx
    return _t.Tensor(**opts)
def copy_template_unpack_batch(self) -> Tensor:
    """
    If the batch dim contains a :class:`BatchInfo.PackedDim`,
    unpack it and restore the data from before the packing.

    :return: template copy (without placeholder) where packed dims are explicit axes again
    """
    assert self.have_batch_axis()
    assert self.batch, ('%s: batch unset' % self)
    data = self.copy()
    kwargs = self.get_kwargs(include_special_axes=False)
    from returnn.tf.util.data import BatchInfo
    dim_tags = []
    for dim_tag in data.dim_tags:
        if (dim_tag.is_batch_dim() and dim_tag.batch and (len(dim_tag.batch.virtual_dims) > 0)):
            batch = dim_tag.batch
            new_batch_dim_tag = None
            for virtual_dim in batch.virtual_dims:
                if isinstance(virtual_dim, BatchInfo.PackedDim):
                    # Restore the packed dim as an explicit axis, and remove it from the batch info.
                    dim_tags.append(virtual_dim.dim_tag)
                    batch = batch.copy_remove_dim(virtual_dim)
                elif isinstance(virtual_dim, BatchInfo.GlobalBatchDim):
                    # The remaining (global) batch dim becomes the new batch axis.
                    assert (not new_batch_dim_tag)
                    if ((batch is None) or batch.is_global_batch()):
                        new_batch_dim_tag = batch_dim
                    else:
                        new_batch_dim_tag = Dim(kind=Dim.Types.Batch, description=dim_tag.description, dimension=None)
                    dim_tags.append(new_batch_dim_tag)
            assert new_batch_dim_tag, ('%s: batch info %r invalid' % (self, batch))
            new_batch_dim_tag.batch = batch
            kwargs['batch'] = batch
        else:
            dim_tags.append(dim_tag)
    kwargs['dims'] = dim_tags
    return _t.Tensor(**kwargs)
def _get_variable_dim_pattern(self):
    """
    :return: tuple with bools specifying which dims of the shape (excluding batch-dim)
        are of variable length. e.g. (time,feature), shape=(None,128) returns (True, False)
    :rtype: tuple[bool]
    """
    return tuple(dim is None for dim in self.shape)
def _get_var_len_axes(self):
    # Axis indices (excluding batch-dim) which have variable length.
    return [axis for axis, is_var in enumerate(self._get_variable_dim_pattern()) if is_var]
def matches_var_dim_pattern(self, other: _t.Tensor) -> bool:
    """
    :param other:
    :return: whether the variable-dims pattern matches,
        i.e. same variable dims (get_variable_dim_pattern), same time dim, excluding batch-dim,
        i.e. the size_placeholder should be compatible.
        (deprecated)
    """
    return (
        self.time_dim_axis_excluding_batch == other.time_dim_axis_excluding_batch
        and self._get_var_len_axes() == other._get_var_len_axes()
    )
@property
def dim_tags(self) -> Tuple[Dim, ...]:
    """
    :return: dim tags, i.e. the shape. (alias for :func:`dims`)
    """
    return self._dims
@property
def shape(self) -> Tuple[Optional[int], ...]:
    """
    :return: static shape *without* batch-dim, e.g. (time,feat) = (None,128);
        None stands for a dynamic dim. See also :func:`batch_shape` or `dims`.
    """
    return tuple((tag.dimension for tag in self._dims if (not tag.is_batch_dim())))
@shape.setter
def shape(self, shape):
    """
    :param tuple[int|None] shape: only accepted if it equals the current shape (no-op)
    """
    if tuple(shape) != self.shape:
        raise Exception('%s: setting the shape is not allowed (new shape %s)' % (self, shape))
@property
def batch_shape(self) -> Tuple[Optional[int], ...]:
    """
    :return: static shape *with* batch-dim, e.g. (batch,time,feat) = (None,None,128);
        None stands for a dynamic dim
    """
    return tuple((tag.dimension for tag in self.dim_tags))
def get_batch_shape(self, batch_dim):
    """
    :param int|tf.Tensor|None batch_dim: value to insert at the batch axis
    :return: shape with added batch-dim, e.g. (batch,time,feat) = (None,None,128)
    :rtype: tuple[int|None]
    """
    axis = self.batch_dim_axis
    if axis is None:
        return self.shape
    return self.shape[:axis] + (batch_dim,) + self.shape[axis:]
def get_dynamic_batch_shape(self):
    """
    :return: shape with batch-dim, where each dynamic dim is the raw dim value
    :rtype: list[int|tf.Tensor]
    """
    return list(map(self.get_dim, range(self.batch_ndim)))
def have_varying_shape_in_ctx(self):
    """
    :return: whether the (dynamic) shape can change in this control flow context.
        E.g. when self.control_flow_context is a loop, and we have one dynamic dim
        where dyn_size_ext has the same control_flow_context
        (such that dyn_size_ext has e.g. shape [B,T] outside the loop).
        This can be relevant for accumulating values of self.placeholder
        e.g. via tf.TensorArray.
    :rtype: bool
    """
    for tag in self.dim_tags:
        if tag.control_flow_ctx:
            return True
    return False
@property
def size_placeholder(self: Tensor):
    """
    For compatibility, return a proxy object which behaves like the original size_placeholder dict.

    :rtype: _SizePlaceholderProxy
    """
    return _SizePlaceholderProxy(self)
@size_placeholder.setter
def size_placeholder(self, sizes):
    """
    :param dict[int,tf.Tensor]|None sizes: axis (counted without batch-dim) -> dynamic sizes
    """
    if sizes is None:
        return
    for axis_wb, seq_lens in sizes.items():
        self.set_dynamic_size(axis=self.get_batch_axis(axis_wb), sizes=seq_lens)
@property
def shape_dense(self):
    """
    :return: shape (without batch-dim) with feature dim axis appended if sparse
    :rtype: tuple[int|None]
    """
    if not self.sparse:
        return self.shape
    return self.shape + (self.dim,)
@property
def batch_shape_dense(self):
    """
    :return: batch shape with feature dim axis appended if sparse
    :rtype: tuple[int|None]
    """
    if not self.sparse:
        return self.batch_shape
    return self.batch_shape + (self.dim,)
@property
def dim_tags_sparse(self):
    """
    :return: dim tags without feature dim axis
    :rtype: tuple[Dim]
    """
    if self.sparse or not self.have_feature_axis():
        return self.dim_tags
    axis = self.feature_dim_axis
    return self.dim_tags[:axis] + self.dim_tags[axis + 1:]
@property
def dim_tags_set_implicit_only_wrapped(self):
    """
    :return: dim tags implicit by sparse dim, or dynamic sizes, and not present as explicit dims,
        wrapped as :class:`_ImplicitDim`. Also see :func:`dim_tags_set`.
    :rtype: set[_ImplicitDim]
    """
    self_dim_tags = set(self.dim_tags)
    dims = set()
    if (self.sparse_dim and (self.sparse_dim not in self_dim_tags)):
        # Sparse dim is implicit unless it also appears as an explicit axis.
        dims.add(_m.ImplicitSparseDim(self.sparse_dim))
    for dim in self.dim_tags:
        if dim.dyn_size_ext:
            # Dims of the dynamic sizes are implicit unless present as explicit axes.
            for dim_ in dim.dyn_size_ext.dim_tags:
                if (dim_ not in self_dim_tags):
                    dims.add(_m.ImplicitDynSizeDim(dim_))
    return dims
@property
def dim_tags_set_implicit_only(self):
    """
    :return: dim tags implicit by sparse dim, or dynamic sizes, and not present as explicit dims
        (unwrapped). Also see :func:`dim_tags_set`.
    :rtype: set[Dim]
    """
    return {wrapped.tag for wrapped in self.dim_tags_set_implicit_only_wrapped}
@property
def dim_tags_set_implicit(self):
    """
    This is mostly intended to be used for verification, such as ``out_shape`` in a layer.
    https://github.com/rwth-i6/returnn/issues/706

    We return a set because when dim tags (dimensions, and the shape) are checked,
    we never want that the order plays any role.
    https://github.com/rwth-i6/returnn/wiki/RETURNN-principles
    Further, dimension tags should ideally be unique.
    https://github.com/rwth-i6/returnn/issues/632
    (This is not enforced currently, but we should not treat this specially now.)

    :return: set of explicit and implicit dim tags
    :rtype: set[Dim]
    """
    return set(self.dim_tags) | self.dim_tags_set_implicit_only
def remaining_dims(self: _t.Tensor, remove: Optional[Union[Dim, Sequence[Dim]]] = None) -> List[Dim]:
    """
    :param remove: dims to remove from self.dims (each must be present)
    :return: ordered batch dims without the removed ones
    """
    dims = list(self._dims)
    if remove:
        for dim in ([remove] if isinstance(remove, Dim) else remove):
            dims.remove(dim)
    return dims
@property
def ndim(self):
    """
    :rtype: int
    :return: ndim counted without batch-dim
    """
    return len(self.shape)
@property
def ndim_dense(self):
    """
    :rtype: int
    :return: ndim counted without batch-dim, added by 1 if we are sparse
    """
    return self.ndim + 1 if self.sparse else self.ndim
@property
def batch_ndim(self):
    """
    :rtype: int
    :return: ndim counted with batch-dim
    """
    return len(self._dims)
@property
def batch_ndim_dense(self):
    """
    :rtype: int
    :return: ndim counted with batch-dim, added by 1 if we are sparse
    """
    return self.batch_ndim + 1 if self.sparse else self.batch_ndim
@property
def is_time_major(self):
    """
    :return: whether this is in time-major format, i.e. (time,batch,...)
    :rtype: bool
    """
    return (self.time_dim_axis == 0)
@property
def is_batch_major(self):
    """
    :return: whether this is in batch-major format, i.e. (batch,...)
    :rtype: bool
    """
    return (self.batch_dim_axis == 0)
@property
def is_batch_feature_major(self):
    """
    :return: whether this is in batch-feature-major format, i.e. (batch,feature,...) (NC...)
    :rtype: bool
    """
    return ((self.batch_dim_axis == 0) and (self.feature_dim_axis == 1))
@property
def batch_dim_axis(self):
    """
    :return: batch dim axis, counted with batch-dim, derived from the dim tags
    :rtype: int|None
    """
    return _batch_dim_axis_from_dim_tags_tuple(self._dims)
@batch_dim_axis.setter
def batch_dim_axis(self, axis):
    """
    :param int|None axis: only accepted if it equals the current batch_dim_axis (no-op)
    """
    if axis != self.batch_dim_axis:
        raise Exception('%s: cannot set batch_dim_axis = %s' % (self, axis))
def _default_feature_dim_axis(self):
    """
    :return: feature dim axis, counted with batch-dim, inferred heuristically.
        With behavior version >= 2, there is no default feature dim axis.
    :rtype: int|None
    """
    if (self.version >= 2):
        return None
    return _default_feature_dim_axis(batch_dim_axis=self.batch_dim_axis, time_dim_axis=self.time_dim_axis, batch_shape=self.batch_shape, sparse=self.sparse)
@property
def feature_dim_axis(self):
    """
    :return: feature dim axis, counted with batch-dim;
        either the explicitly set one, or the default
    :rtype: int|None
    """
    explicit = self._feature_dim_axis
    if explicit is not NotSpecified:
        return explicit
    return self._default_feature_dim_axis()
@feature_dim_axis.setter
def feature_dim_axis(self: _t.Tensor, value):
    """
    :param int|None|NotSpecified value: new explicit feature dim axis
    """
    assert ((value is NotSpecified) or (value is None) or isinstance(value, int))
    if (self.feature_dim_axis_or_unspecified == value):
        return
    if ((self.version >= 2) and (value is NotSpecified)):
        # With version >= 2, there is no automatic default, so NotSpecified means None.
        value = None
    if isinstance(value, int):
        assert (0 <= value < self.batch_ndim)
    self._feature_dim_axis = value
@property
def feature_dim_axis_or_unspecified(self):
    """
    :return: feature dim axis, counted with batch-dim. could also be unspecified
    :rtype: int|None|NotSpecified
    """
    return self._feature_dim_axis
@property
def time_dim_axis(self) -> Optional[int]:
    """
    :return: time dim axis (deprecated); always None with behavior version >= 2
    """
    if self.version >= 2:
        return None
    explicit = self.time_dim_axis_or_unspecified
    if explicit is not NotSpecified:
        return explicit
    return _default_time_dim_axis_dim_tags(self.dim_tags)
@time_dim_axis.setter
def time_dim_axis(self: _t.Tensor, value):
    """
    :param int|None|NotSpecified value: new explicit time dim axis (deprecated)
    """
    assert ((value is NotSpecified) or (value is None) or isinstance(value, int))
    if (self.time_dim_axis_or_unspecified == value):
        return
    if ((self.version >= 2) and (value in (None, NotSpecified))):
        # With version >= 2, there is no time dim axis anyway; unsetting is a no-op.
        return
    assert (self.version == 1), 'time_dim_axis is deprecated'
    if isinstance(value, int):
        assert (0 <= value < self.batch_ndim)
    self._make_extra().time_dim_axis = value
@property
def time_dim_axis_or_unspecified(self):
    """
    :return: time dim axis, counted with batch-dim. could also be unspecified
    :rtype: int|None|NotSpecified
    """
    if self.version >= 2 or not self._extra:
        return NotSpecified
    return self._extra.time_dim_axis
@property
def time_dim_axis_excluding_batch(self):
    """
    :return: time dim axis counted without the batch-dim, or None if there is no time dim
    :rtype: int|None
    """
    axis = self.time_dim_axis
    if axis is None:
        return None
    return self.get_batch_axis_excluding_batch(axis)
@property
def placeholder(self):
    """
    (Old alias for raw_tensor.)

    :rtype: T
    """
    return self._raw_tensor
@placeholder.setter
def placeholder(self: _t.Tensor, value: Optional[_t.RawTensorType]):
    """
    (Old alias for raw_tensor.)

    :param value: new raw tensor
    """
    self.raw_tensor = value
@property
def batch(self):
    """
    :rtype: BatchInfo|None
    """
    extra = self._extra
    return extra.batch if extra else None
@batch.setter
def batch(self, batch):
    """
    :param BatchInfo|None batch: new batch info; its beam must match our beam
    """
    if batch:
        assert (batch.beam == self.beam)
    if (self.batch == batch):
        return
    self._make_extra().batch = batch
    # Keep the batch info of the dim tags consistent with the new batch.
    self._adapt_batch_consistent_dim_tags()
@property
def beam(self):
    """
    :return: explicit beam if set, otherwise the beam of the batch info
    :rtype: SearchBeam|None
    """
    extra = self._extra
    if not extra:
        return None
    if extra.beam:
        return extra.beam
    return extra.batch.beam if extra.batch else None
@beam.setter
def beam(self, beam):
    """
    :param SearchBeam|None beam: new search beam
    """
    if (self.beam == beam):
        return
    self._make_extra().beam = beam
    if self._extra.batch:
        # Keep the batch info's beam consistent with the new beam.
        self._extra.batch = self.batch.copy_set_beam(beam=beam)
        self._adapt_batch_consistent_dim_tags()
@property
def dim(self):
    """
    :return: dimension of the feature dim (or sparse dim), or None if there is neither
    :rtype: int|None
    """
    tag = self.feature_dim_or_sparse_dim
    return tag.dimension if tag else None
@dim.setter
def dim(self, dim):
    """
    It is deprecated to explicitly set this.
    We just have this here to support some legacy code.
    It does nothing but checks the validity.

    :param int|None dim: must equal the current dim
    """
    assert (dim == self.dim)
@property
def feature_dim_or_sparse_dim(self: Tensor):
    """
    :return: if we are sparse, return the sparse_dim; if we have a feature dim,
        return its dim tag; otherwise None
    :rtype: Dim|None
    """
    if self.sparse_dim:
        return self.sparse_dim
    axis = self.feature_dim_axis
    return None if axis is None else self._dims[axis]
@property
def sparse(self):
    """
    :rtype: bool
    :return: whether the values represent class indices. see ``sparse_dim``
    """
    return (self.sparse_dim is not None)
@sparse.setter
def sparse(self, sparse):
    """
    It is deprecated to explicitly set this.
    We just have this here to support some legacy code.
    Only resetting sparse=False is supported (clears sparse_dim).

    :param bool sparse:
    """
    if (self.sparse == sparse):
        return
    if (not sparse):
        self.sparse_dim = None
        return
    raise Exception(('%s: setting sparse=True not supported anymore. set sparse_dim instead' % self))
@property
def vocab(self):
    """
    :return: vocabulary of the sparse dim, or of the feature dim tag, if any
    :rtype: returnn.datasets.util.vocabulary.Vocabulary|None
    """
    sparse_tag = self.sparse_dim
    if sparse_tag:
        return sparse_tag.vocab
    if not self.have_feature_axis():
        return None
    return self.dim_tags[self.feature_dim_axis].vocab
@vocab.setter
def vocab(self, vocab):
    """
    :param returnn.datasets.util.vocabulary.Vocabulary|None vocab: not supported; always raises
    """
    raise Exception(('%s: setting vocab not supported anymore. set sparse_dim instead' % self))
def time_dimension(self) -> Union[int, _t.RawTensorType]:
    """
    :return: shape(placeholder)[time_dim_axis], int scalar
    """
    assert (self.time_dim_axis is not None)
    return self.get_dim(self.time_dim_axis)
def get_dim(self, axis: int) -> Union[int, _t.RawTensorType]:
    """
    :param axis: counted with batch-dim
    :return: shape[axis]; static dim if known, otherwise the raw dynamic dim
    """
    static_dim = self.batch_shape[axis]
    if static_dim is not None:
        return static_dim
    assert self._raw_tensor is not None
    return self._raw_backend.get_shape_tuple_raw(self._raw_tensor)[axis]
def get_placeholder_as_time_major(self):
    """
    :return: placeholder of a time-major copy of myself
    :rtype: tf.Tensor
    """
    assert (self.placeholder is not None)
    return self.copy_as_time_major().placeholder
def get_placeholder_as_batch_major(self):
    """
    :return: placeholder of a batch-major copy of myself
    :rtype: tf.Tensor
    """
    assert (self.placeholder is not None)
    return self.copy_as_batch_major().placeholder
def get_placeholder_with_runtime_sanity_checks(self):
    """
    :return: identity(self.placeholder) with added runtime checks as control dependencies
    :rtype: tf.Tensor
    """
    assert (self._raw_tensor is not None)
    backend = self._raw_backend
    return backend.identity_with_control_dependencies_raw(self._raw_tensor, [self.get_runtime_sanity_check_op()])
def get_placeholder_time_flattened(self):
    """
    :return: placeholder with batch and time axes flattened into one axis,
        via :func:`flatten_with_seq_len_mask` if masking is needed
    :rtype: tensorflow.Tensor
    """
    assert self._raw_backend.is_tensorflow, 'get_placeholder_time_flattened only implemented for TF yet'
    from returnn.tf.util.basic import flatten_with_seq_len_mask, get_shape
    import tensorflow as tf
    assert (self.placeholder is not None)
    assert self.have_time_axis()
    # Batch or time axis must be the first axis.
    assert (0 in [self.time_dim_axis, self.batch_dim_axis])
    time_dim = self.get_time_dim_tag()
    if time_dim.need_masking():
        # Dynamic seq lens: drop the padded frames while flattening.
        assert (time_dim.dyn_size_ext.dims == (self.get_batch_dim_tag(),))
        return flatten_with_seq_len_mask(self.placeholder, time_dim.dyn_size, batch_dim_axis=self.batch_dim_axis, time_dim_axis=self.time_dim_axis)
    else:
        # Static time dim: a plain transpose + reshape merges batch and time.
        x = tf.transpose(self.placeholder, ([self.batch_dim_axis, self.time_dim_axis] + [i for i in range(self.batch_ndim) if (i not in [self.batch_dim_axis, self.time_dim_axis])]))
        shape = get_shape(x)
        return tf.reshape(x, ([(shape[0] * shape[1])] + shape[2:]))
def get_placeholder_flattened(self, keepdims=False):
    """
    :param bool keepdims: if set, it will add broadcast dimensions after the flattening behind the first axis
    :rtype: tf.Tensor
    :return: placeholder where all dynamic axes are flattened into a single axis.
        e.g. for the usual case (batch, time, dim), it becomes (batch'|time', dim),
        or (batch, time, height, dim) will also become (batch'|time', dim).
        with keep_dims, (batch, time, height, dim) will become (batch'|time', 1, 1, dim).
    """
    assert (self.placeholder is not None)
    assert self._raw_backend.is_tensorflow, 'get_placeholder_flattened only implemented for TF yet'
    import tensorflow as tf
    x = self.placeholder
    orig_dyn_axes = (self.get_spatial_batch_axes() + [self.batch_dim_axis])
    dyn_axes = list(orig_dyn_axes)
    if (dyn_axes == [self.batch_dim_axis]):
        # Nothing to flatten.
        return x
    assert (0 in dyn_axes), 'would need some transpose, not supported at the moment'
    assert (len(dyn_axes) > 1)
    orig_num_dyn_axes = len(dyn_axes)
    ndim = len(self.batch_shape)
    if self.have_time_axis():
        # First merge batch and time (with masking if needed); this removes one axis.
        x = self.get_placeholder_time_flattened()
        removed_axis = max(self.time_dim_axis, self.batch_dim_axis)
        dyn_axes.remove(removed_axis)
        dyn_axes = [(i if (i < removed_axis) else (i - 1)) for i in dyn_axes]
        ndim -= 1
    if (len(dyn_axes) > 1):
        # Merge all remaining dynamic axes via reshape.
        shape = tf.shape(x)
        x = tf.reshape(x, ([tf.reduce_prod([shape[i] for i in dyn_axes])] + [shape[i] for i in range(ndim) if (i not in dyn_axes)]))
        dyn_axes = [0]
    assert (dyn_axes == [0])
    if (keepdims and (orig_num_dyn_axes >= 2)):
        # Re-insert broadcast (size-1) dims at the positions of the flattened axes.
        for i in orig_dyn_axes:
            if (i not in dyn_axes):
                x = tf.expand_dims(x, axis=i)
        x.set_shape(([None] * self.batch_ndim))
    return x
def get_axes(self, exclude_time=False, exclude_batch=False, exclude_feature=False):
    """
    :param bool exclude_time: will filter out the time-axis
    :param bool exclude_batch: will filter out the batch-axis
    :param bool exclude_feature: will filter out the feature-axis
    :return: list of axes, like `range(len(self.shape))`, calculated with batch dim.
    :rtype: list[int]
    """
    axes = list(range(len(self.batch_shape)))
    for exclude, special_axis in (
        (exclude_time, self.time_dim_axis),
        (exclude_batch, self.batch_dim_axis),
        (exclude_feature, self.feature_dim_axis),
    ):
        if exclude and special_axis is not None:
            axes.remove(special_axis)
    return axes
@classmethod
def _verify_axis_int_from_description(cls, allow_int=NotSpecified):
    """
    Call this when you have the case that ``axis`` or ``axes``
    in :func:`get_axes_from_description` or :func:`get_axis_from_description`
    was specified as int.

    :param bool|NotSpecified allow_int: if NotSpecified, this is disallowed
        from behavior version 5 on; if False, always an error
    """
    msg = 'Do not specify axis as int but as str or Dim instead.'
    if (allow_int is NotSpecified):
        from returnn.util import BehaviorVersion
        BehaviorVersion.require(condition=False, message=msg, version=5)
    if allow_int:
        return
    raise Exception(msg)
@classmethod
def _verify_axis_order_dependent(cls):
    """
    Call this when you have the case that ``axis`` or ``axes``
    in :func:`get_axes_from_description` or :func:`get_axis_from_description`
    depends on the order of the axes. Disallowed from behavior version 7 on.
    """
    from returnn.util import BehaviorVersion
    BehaviorVersion.require(condition=False, message='Do not specify axis or axes in a way that depends on the order of the axes.', version=7)
def _make_valid_int_axis(self, axis):
    """
    :param int axis: counted with batch. anything in [-ndim,ndim-1]
    :return: axis in [0,ndim-1]
    :rtype: int
    """
    ndim = self.batch_ndim
    if axis < 0:
        assert axis + ndim >= 0
        axis += ndim
    assert axis < ndim
    return axis
def get_axes_from_description(self, axes, allow_int=NotSpecified):
    """
    :param int|list[int]|str|typing.Sequence[str|Dim]|Dim|None axes: one axis or multiple axis, or none.
        This is counted with batch-dim, which by default is axis 0 (see enforce_batch_dim_axis).
        It also accepts the special tokens "B"|"batch", "spatial", "spatial_except_time", or "F"|"feature",
        and more (see the code).
    :param bool|NotSpecified allow_int: whether to allow an int directly.
        in almost all cases, it is better to use a symbolic name
        to specify an axis, as different layers could reorder them,
        and maybe also change their behavior in the future.
    :return: list of axes, counted with batch-dim
    :rtype: list[int]
    """
    if ((axes is None) or (isinstance(axes, str) and (axes == ''))):
        return []
    if isinstance(axes, Dim):
        # Match by dim tag. Multiple axes could match when dim tags are not unique.
        dims = [i for (i, tag) in enumerate(self.dim_tags) if (tag == axes)]
        if (len(dims) > 1):
            # Resolve ambiguity via match_priority.
            max_match_priority = max((self.dim_tags[i].match_priority for i in dims))
            dims = [i for i in dims if (self.dim_tags[i].match_priority == max_match_priority)]
        assert (len(dims) <= 1), ('%s: matching dim %s must be unique, use `match_priority` to resolve the matching order of ambiguous dimensions' % (self, axes))
        return dims
    if isinstance(axes, int):
        self._verify_axis_int_from_description(allow_int=allow_int)
        return [self._make_valid_int_axis(axes)]
    assert isinstance(axes, (str, int, list, tuple, Sequence))
    if isinstance(axes, str):
        # Dispatch on the string description.
        import re
        axes = axes.lower()
        if (axes in ['b', 'batch']):
            assert (self.batch_dim_axis is not None)
            return [self.batch_dim_axis]
        elif (axes == 'spatial'):
            return self.get_spatial_batch_axes()
        elif re.match('(s|spatial):-?\\d+$', axes):
            # e.g. "s:0" -> n-th spatial axis (order-dependent, deprecated).
            self._verify_axis_order_dependent()
            s = int(axes.split(':')[1])
            spatial_axes = self.get_spatial_batch_axes()
            if (s < 0):
                s += len(spatial_axes)
            assert (s < len(spatial_axes)), ('%s get_axes_from_description: %r invalid' % (self, axes))
            return [spatial_axes[s]]
        elif (axes in ['dyn', 'dynamic']):
            return self.get_dynamic_axes()
        elif re.match('(d|dyn|dynamic):-?\\d+$', axes):
            # e.g. "dyn:0" -> n-th dynamic axis (order-dependent, deprecated).
            self._verify_axis_order_dependent()
            s = int(axes.split(':')[1])
            dyn_axes = self.get_dynamic_axes()
            if (s < 0):
                s += len(dyn_axes)
            assert (0 <= s < len(dyn_axes)), ('%s get_axes_from_description: %r invalid' % (self, axes))
            return [dyn_axes[s]]
        elif (axes == 'spatial_except_time'):
            axes = self.get_spatial_batch_axes()
            assert (self.time_dim_axis is not None)
            axes.remove(self.time_dim_axis)
            return axes
        elif (axes in ['t', 'time']):
            assert (self.time_dim_axis is not None)
            return [self.time_dim_axis]
        elif (axes == 't?'):
            # Optional time axis: empty list if there is none.
            return ([self.time_dim_axis] if (self.time_dim_axis is not None) else [])
        elif (axes == 'except_time'):
            axes = list(range(self.batch_ndim))
            axes.remove(self.batch_dim_axis)
            if (self.time_dim_axis is not None):
                axes.remove(self.time_dim_axis)
            return axes
        elif (axes == 'except_batch'):
            axes = list(range(self.batch_ndim))
            axes.remove(self.batch_dim_axis)
            return axes
        elif re.match('(except_batch):-?\\d+$', axes):
            # e.g. "except_batch:0" -> n-th non-batch axis (order-dependent, deprecated).
            self._verify_axis_order_dependent()
            s = int(axes.split(':')[1])
            non_batch_axes = list(range(self.batch_ndim))
            if (self.batch_dim_axis is not None):
                non_batch_axes.remove(self.batch_dim_axis)
            if (s < 0):
                s += len(non_batch_axes)
            assert (0 <= s < len(non_batch_axes)), ('%s get_axes_from_description: %r invalid' % (self, axes))
            return [non_batch_axes[s]]
        elif (axes == '*'):
            return list(range(self.batch_ndim))
        elif (axes == 'static'):
            return self.get_static_axes()
        elif re.match('(static):-?\\d+$', axes):
            # e.g. "static:0" -> n-th static axis (order-dependent, deprecated).
            self._verify_axis_order_dependent()
            s = int(axes.split(':')[1])
            static_axes = self.get_static_axes()
            if (s < 0):
                s += len(static_axes)
            assert (0 <= s < len(static_axes)), ('%s get_axes_from_description: %r invalid' % (self, axes))
            return [static_axes[s]]
        elif re.match('(dim):\\d+$', axes):
            # e.g. "dim:128" -> the unique axis with that static dimension.
            s = int(axes.split(':')[1])
            dims = [a for a in range(self.batch_ndim) if (self.batch_shape[a] == s)]
            assert dims, ("%s get_axes_from_description: 'dim:%i' not found" % (self, s))
            assert (len(dims) == 1), ("%s get_axes_from_description: 'dim:%i' only allowed when unique" % (self, s))
            return dims
        elif (axes in ['f', 'feature', 'non_spatial']):
            return self.get_feature_batch_axes()
        elif all([(a in 'btf') for a in axes]):
            # e.g. "btf" -> handle each character separately.
            return self.get_axes_from_description(list(axes))
        elif axes.startswith('stag:'):
            # Match by (spatial) dim tag description.
            return [self.get_axis_by_tag_name(axes[len('stag:'):], spatial_only=True)]
        elif axes.startswith('stag-single:'):
            # "stag-single:<idx>:<name>" -> idx-th of the axes matching the tag name.
            (_, idx_s, name) = axes.split(':', 2)
            idx = int(idx_s)
            return [self.get_axes_by_tag_name(name, spatial_only=True)[idx]]
        elif axes.startswith('tag:'):
            # Match by dim tag description (any kind).
            return [self.get_axis_by_tag_name(axes[len('tag:'):])]
        raise Exception(('invalid axis mode %r' % axes))
    assert isinstance(axes, (tuple, list, Sequence)), ('invalid axes %r' % axes)
    # Resolve each element recursively, then deduplicate while keeping order.
    flat_axes = []
    for i in axes:
        if isinstance(i, int):
            self._verify_axis_int_from_description(allow_int=allow_int)
            flat_axes.append(self._make_valid_int_axis(i))
        else:
            assert isinstance(i, (str, tuple, list, Dim))
            flat_axes += self.get_axes_from_description(i, allow_int=allow_int)
    res = []
    for i in flat_axes:
        if (i not in res):
            res.append(i)
    return res
def get_dim_tag_from_description(self, axis):
    """
    :param str|Dim axis:
    :return: our matching dim tag. this assumes it exists.
    :rtype: Dim
    """
    return self.dim_tags[self.get_axis_from_description(axis, allow_int=False)]
def get_axis_from_description(self, axis, allow_int=NotSpecified):
    """
    :param int|str|Dim axis:
    :param bool|NotSpecified allow_int:
    :return: axis, counted with batch-dim
    :rtype: int
    """
    if isinstance(axis, Dim):
        # Find the matching dim tag; on multiple matches, the highest match_priority wins.
        res_idx: Optional[int] = None
        res_tag: Optional[Dim] = None
        for (i, tag) in enumerate(self._dims):
            tag: Dim
            if ((tag is axis) or (tag == axis)):
                if ((res_tag is None) or (res_tag.match_priority < tag.match_priority)):
                    # First match, or better match: take it.
                    res_idx = i
                    res_tag = tag
                    continue
                if (res_tag.match_priority > tag.match_priority):
                    # Existing match is strictly better: keep it.
                    continue
                # Same priority: ambiguous.
                raise Exception(f'{self}: get_axis_from_description({axis}) not unique. use match_priority to resolve ambiguity')
        if (res_idx is None):
            raise Exception(f'{self}: get_axis_from_description({axis}) not found')
        return res_idx
    axes = self.get_axes_from_description(axis, allow_int=allow_int)
    assert axes, ('%s: %r axis not found' % (self, axis))
    assert (len(axes) == 1), ('%r: %r is not a unique axis but %r' % (self, axis, axes))
    return axes[0]
def get_description_from_axis(self, axis):
    """
    :param int axis:
    :return: some canonical description, such that ``self.get_axis_from_description(res) == axis``.
        This is quite heuristically for now. We use both strings as also Dim when appropriate.
        The behavior could potentially change in the future, also the condition will always hold.
    :rtype: str|Dim
    """
    assert (0 <= axis < self.batch_ndim)
    if (axis == self.batch_dim_axis):
        return 'B'
    dim_tag = self.dim_tags[axis]
    # All axes whose dim tag compares equal to ours (including ourselves).
    matching_tags = [i for (i, tag) in enumerate(self.dim_tags) if (tag == dim_tag)]
    if (dim_tag.dyn_size_ext and (len(matching_tags) == 1)):
        # Unique dynamic dim tag: the tag itself is the canonical description.
        return dim_tag
    if (axis == self.time_dim_axis):
        return 'T'
    if (axis == self.feature_dim_axis):
        return 'F'
    if (len(matching_tags) == 1):
        return dim_tag
    # Ambiguous tag: fall back to "stag-single:<idx>:<name>", using a negative index
    # into the list of spatial axes matching the tag name (robust against prepended axes).
    name = dim_tag.description
    matching_axes = self.get_axes_by_tag_name(name, spatial_only=True)
    assert (axis in matching_axes)
    return ('stag-single:%i:%s' % ((matching_axes.index(axis) - len(matching_axes)), name))
def has_axis(self, axis):
    """
    :param str|Dim axis: axis description
    :return: whether such an axis exists
    :rtype: bool
    """
    matching = self.get_axes_from_description(axis, allow_int=False)
    return bool(matching)
def get_axes_by_tag_name(self, name, spatial_only=False):
    """
    :param str name: the tag name, or part of it (must be unique, and must exist)
    :param bool spatial_only: restrict to spatial axes (via :func:`get_spatial_batch_axes` or marked spatial)
    :rtype: list[int]
    """
    dim_tags = self.get_batch_shape_dim_tags()
    # Case-insensitive substring match on the tag description, or on its base tag description.
    matching_dim_tags = [(axis, tag) for (axis, tag) in enumerate(dim_tags) if ((name.lower() in tag.description.lower()) or (name.lower() in tag.get_same_base().description.lower()))]
    if spatial_only:
        spatial_axes = self.get_spatial_batch_axes()
        matching_dim_tags = [(axis, tag) for (axis, tag) in matching_dim_tags if ((axis in spatial_axes) or tag.is_spatial_dim())]
    return [ax for (ax, _) in matching_dim_tags]
def get_axis_by_tag_name(self, name, spatial_only=False):
    """
    :param str name: the tag name, or part of it (must be unique, and must exist)
    :param bool spatial_only:
    :rtype: int
    """
    axes = self.get_axes_by_tag_name(name, spatial_only)
    assert len(axes) > 0, ('%r: no %stag found with name %r' % (self, ('spatial ' if spatial_only else ''), name))
    assert len(axes) == 1, ('%r: tag name %r is not unique in dim tags %r' % (self, name, self.get_batch_shape_dim_tags()))
    return axes[0]
def get_batch_axis_excluding_batch(self, axis):
    """
    :param int axis: counted with batch-dim
    :return: axis counted without batch-dim
    :rtype: int|None
    """
    # Delegates to the module-level helper (defined elsewhere in this file).
    return _get_axis_wo_b(axis, batch_dim_axis=self.batch_dim_axis, batch_ndim=self.batch_ndim)
def have_dim_tag(self, tag, include_implicit=True, unique=False):
    """
    :param Dim tag:
    :param bool include_implicit: also consider implicit-only dim tags
    :param bool unique: require that the tag occurs exactly once
    :rtype: bool
    """
    candidates = list(self.dim_tags)
    if include_implicit:
        candidates.extend(self.dim_tags_set_implicit_only)
    count = sum(1 for dim in candidates if dim == tag)
    return count == 1 if unique else count >= 1
def get_batch_axis(self, axis):
    """
    :param int axis: counted without batch-dim
    :return: axis counted with batch-dim
    :rtype: int
    """
    # Delegates to the module-level helper (defined elsewhere in this file).
    return _get_axis_wb(axis, batch_dim_axis=self.batch_dim_axis)
def have_batch_axis(self):
    """
    :return: whether the batch dim axis exists
    :rtype: bool
    """
    if self.batch_dim_axis is None:
        return False
    return True
def have_time_axis(self):
    """
    :return: whether the time dim axis exists
    :rtype: bool
    """
    if self.time_dim_axis is None:
        return False
    return True
def have_feature_axis(self):
    """
    :return: whether the feature dim axis exists
    :rtype: bool
    """
    if self.feature_dim_axis is None:
        return False
    return True
def is_time_axis_dynamic(self):
    """
    :return: whether there are different seq-lens for the time, or all the same (static)
    :rtype: bool
    """
    assert (self.time_dim_axis is not None)
    if (self.placeholder is None):
        # No placeholder: decide purely by the static shape (None means dynamic).
        return (self.batch_shape[self.time_dim_axis_excluding_batch] is None)
    if (self.time_dim_axis_excluding_batch in self.size_placeholder):
        # We have dynamic sizes for the time axis.
        return True
    assert isinstance(self.shape[self.time_dim_axis_excluding_batch], int), ('%s: dynamic time axis dim (None) (axis %i) but size_placeholder %r misses information' % (self, self.time_dim_axis, self.size_placeholder))
    return False
def is_axis_dynamic(self, axis):
    """
    :param int axis: counted with batch-dim axis
    :return: dynamic, i.e. the static dim is None (the batch axis itself counts as not dynamic here).
        Note that this does not perfectly match with :func:`get_dynamic_axes`,
        but more with :func:`is_time_axis_dynamic`,
        although probably in most (all?) cases it should match.
        If True, you can get the size via :func:`get_dynamic_size`.
    :rtype: bool
    """
    if axis != self.batch_dim_axis:
        return self.batch_shape[axis] is None
    return False
def has_dynamic_size(self, axis):
    """
    :param int axis: counted with batch-dim axis. implies that you can call :func:`get_dynamic_size`.
    :rtype: bool
    """
    tag = self.dim_tags[axis]
    return tag.dyn_size is not None
def get_dynamic_size(self, axis):
    """
    :param int axis: counted with batch-dim axis. :func:`has_dynamic_size` should be True.
    :return: shape (B,)
    :rtype: tf.Tensor
    """
    dim_tag = self.dim_tags[axis]
    assert dim_tag.dyn_size is not None, ('%s: axis %i has no dyn size' % (self, axis))
    return dim_tag.dyn_size
def set_dynamic_size(self, axis, sizes):
    """
    Set the dynamic sizes for the given axis, updating (or replacing) the dim tag accordingly.

    :param int axis: counted with batch-dim
    :param tf.Tensor sizes: shape [B]
    """
    # Remember the beam on the sizes tensor (only if not set before).
    if (getattr(sizes, '_RETURNN_dyn_size_beam', NotSpecified) is NotSpecified):
        sizes._RETURNN_dyn_size_beam = self.beam
    if (self.beam and (getattr(sizes, '_RETURNN_dyn_size_beam', None) != self.beam)):
        # Beam mismatch: resolve the tag for our batch/ctx and use its dyn_size instead.
        tag = Dim.get_tag_from_size_tensor(sizes)
        assert (tag and self.batch)
        tag = tag.get_for_batch_ctx(batch=self.batch, ctx=self.control_flow_ctx)
        assert (tag.dyn_size is not None)
        sizes = tag.dyn_size
    sizes_tag = Dim.get_tag_from_size_tensor(sizes)
    if sizes_tag:
        assert sizes_tag.is_same_size_tensor(sizes)
    tag = self._dims[axis]
    assert tag.is_dynamic()
    if tag.is_same_size_tensor(sizes):
        pass  # nothing to do
    elif (tag.dyn_size is None):
        if sizes_tag:
            # Reuse the existing tag which is already attached to the sizes tensor.
            assert sizes_tag.is_same_size_tensor(sizes)
            tag = sizes_tag
        else:
            # Register the sizes tensor on our tag.
            tag = tag.set_tag_on_size_tensor(sizes, batch=self.batch)
    else:
        # Our tag already has a different dyn_size; then we require a defined tag on the sizes.
        assert sizes_tag, ('%s: assign dyn sizes %s without defined dim tag' % (self, sizes))
        tag = sizes_tag
    if self.batch:
        tag = tag.get_for_batch_ctx(batch=self.batch, ctx=self.control_flow_ctx)
    if (tag is not self._dims[axis]):
        # Replace the dim tag at this axis.
        self._dims = ((self._dims[:axis] + (tag,)) + self._dims[(axis + 1):])
    if (tag.dyn_size is None):
        tag.dyn_size = sizes
def get_dynamic_axes(self):
    """
    :return: list of axes, counted with batch-dim axis (but we exclude the batch dim axis itself)
    :rtype: list[int]
    """
    result = []
    for axis, dim in enumerate(self.batch_shape):
        if axis == self.batch_dim_axis:
            continue
        if dim is None:
            result.append(axis)
    return result
def get_static_axes(self):
    """
    :return: list of axes, counted with batch-dim axis (but we exclude the batch dim axis itself)
    :rtype: list[int]
    """
    result = []
    for axis, dim in enumerate(self.batch_shape):
        if axis == self.batch_dim_axis:
            continue
        if dim is not None:
            result.append(axis)
    return result
def mark_same_time(self, tags, must_match=False):
    """
    If the given dimension tag matches any of our axes, we set our time axis to the selected one.

    :param set[Dim]|Dim tags:
    :param bool must_match: if True, throw an exception if not found
    :return: whether we have found the same
    :rtype: bool
    """
    if isinstance(tags, Dim):
        tags = {tags}
    assert all(isinstance(tag, Dim) for tag in tags)
    for axis, dim_tag in enumerate(self.dim_tags):
        if dim_tag not in tags:
            continue
        self.time_dim_axis = axis
        return True
    if must_match:
        raise Exception(('%s mark_same_time: %s not found' % (self, tags)))
    return False
def is_same_time_dim(self, other: Tensor) -> bool:
    """
    Checks whether we have a matching/compatible time dim.

    :param other:
    """
    assert self.have_time_axis()
    if not other.have_time_axis():
        return False
    own_tag = self.get_dim_tag(self.time_dim_axis)
    other_tag = other.get_dim_tag(other.time_dim_axis)
    return own_tag == other_tag
def get_sequence_lengths(self) -> _t.RawTensorType:
    """
    Deprecated. Access the information directly from dim tags, whatever you need.

    Warning: This assumes TensorFlow in the fallback case.

    :return: seq lens tensor of shape [B] of dtype int32. also see :func:`get_dynamic_size`
    :rtype: tf.Tensor
    """
    assert (self.time_dim_axis is not None)
    dim = self._dims[self.time_dim_axis]
    assert isinstance(dim, Dim)
    if dim.dyn_size_ext:
        # Dynamic time dim: make sure the raw sizes tensor exists, and return it.
        if (dim.dyn_size_ext.raw_tensor is None):
            dim.complete_dyn_size()
        assert (dim.dyn_size_ext.raw_tensor is not None)
        return dim.dyn_size_ext.raw_tensor
    # Static time dim: broadcast the static size to shape [B].
    assert (self.batch_shape[self.time_dim_axis] is not None)
    assert (self.batch_dim_axis is not None)
    batch_dim_ = self._dims[self.batch_dim_axis]
    assert isinstance(batch_dim_, Dim)
    if (batch_dim_.dyn_size_ext and (batch_dim_.dyn_size_ext.raw_tensor is not None)):
        # Use whatever backend the batch dim sizes live on.
        backend = batch_dim_.dyn_size_ext._raw_backend
        return backend.fill_raw([batch_dim_.dyn_size_ext.raw_tensor], dim.size)
    # Fallback: TensorFlow.
    import tensorflow as tf
    return tf.fill([self.get_batch_dim()], dim.size)
def get_sequence_mask(self):
    """
    :return: seq mask of shape (batch,time) if we are batch-major, else (time,batch) if we are time-major
    :rtype: tf.Tensor
    """
    from returnn.frontend._backend import get_backend_by_raw_tensor_type
    assert (self.time_dim_axis is not None)
    assert (self.batch_dim_axis is not None)
    dyn_seq_len = self.get_sequence_lengths()
    backend = get_backend_by_raw_tensor_type(type(dyn_seq_len))
    if self.is_time_major:
        # (time,batch,...): batch must directly follow time.
        assert (self.batch_dim_axis == 1)
        return backend.sequence_mask_raw(dyn_seq_len, batch_major=False)
    else:
        # (batch,time,...): time must directly follow batch.
        assert (self.batch_dim_axis == 0)
        assert (self.time_dim_axis == 1)
        return backend.sequence_mask_raw(dyn_seq_len, batch_major=True)
def get_sequence_mask_broadcast(self: Tensor, axis=None) -> _t.RawTensorType:
    """
    :param Dim|int|None axis:
    :return: seq mask of shape ((batch,time) or (time,batch)) + (1,)s for remaining dims
        if BT or TB major, and axis is T or None.
        In general compatible to placeholder, i.e. same ndim, with broadcast dims.
        We assert here that the axis is dynamic (:func:`is_axis_dynamic`), i.e. we have the size.
    """
    if isinstance(axis, Dim):
        axis = self.get_axis_from_description(axis)
    if (axis is None):
        # Default to the time axis.
        assert (self.time_dim_axis is not None)
        axis = self.time_dim_axis
    if (axis < 0):
        # Negative axis: resolve from the end.
        assert ((axis + self.batch_ndim) > 0)
        axis += self.batch_ndim
    assert (0 <= axis < self.batch_ndim)
    assert (axis != self.batch_dim_axis)
    tag: Dim = self.dim_tags[axis]
    assert (tag.dyn_size_ext and (tag.dyn_size_ext.raw_tensor is not None))
    backend = tag.dyn_size_ext._raw_backend
    assert set(tag.dyn_size_ext.dim_tags).issubset(self.dim_tags)
    with backend.name_scope_raw('get_sequence_mask_broadcast'):
        if (backend.have_sequence_mask_raw() and tag.dyn_size_ext.have_batch_axis() and (tag.dyn_size_ext.batch_ndim == 1)):
            # Fast path: sizes of shape [B]; build the raw mask, then reshape with broadcast (1) dims.
            size = tag.dyn_size
            seq_mask = backend.sequence_mask_raw(size, batch_major=(axis >= self.batch_dim_axis))
            shape = ([1] * self.batch_ndim)
            shape[self.batch_dim_axis] = self.get_batch_dim()
            shape[axis] = tag.get_dim_value()
            seq_mask = backend.reshape_raw(seq_mask, shape)
            assert (seq_mask.get_shape().ndims == self.batch_ndim)
        else:
            # Generic path via a Tensor mask, made compatible to our dims.
            seq_mask = self.get_sequence_mask_tensor(axis).copy_compatible_to_dims_raw(self.dims)
    return seq_mask
def get_sequence_mask_tensor(self: Tensor, axis: int) -> Tensor:
    """
    :param axis: counted with batch-dim; negative values are resolved from the end
    :return: mask
    """
    if axis < 0:
        assert axis + self.batch_ndim > 0
        axis = axis + self.batch_ndim
    assert 0 <= axis < self.batch_ndim
    assert axis != self.batch_dim_axis
    dim_tag: Dim = self.dims[axis]
    return dim_tag.get_mask(dim_order=self.dims, device=self.device)
def get_sequence_lengths_broadcast(self, axis=None):
    """
    :param int|None axis: defaults to the time dim axis
    :return: seq len of some shape which is broadcastable to self.placeholder.
        Note that this is not always possible, e.g. when the seq len has shape [B]
        but the tensor has just shape [T]. We currently throw an error then.
    :rtype: tf.Tensor
    """
    if (axis is None):
        assert (self.time_dim_axis is not None)
        axis = self.time_dim_axis
    if (axis < 0):
        # Negative axis: resolve from the end.
        assert ((axis + self.batch_ndim) > 0)
        axis += self.batch_ndim
    assert (0 <= axis < self.batch_ndim)
    assert (axis != self.batch_dim_axis)
    tag = self.dim_tags[axis]
    assert tag.dyn_size_ext
    return tag.dyn_size_ext.copy_compatible_to(self, check_dtype=False, check_sparse=False).placeholder
def num_elements(self: Tensor) -> Union[(int, Tensor)]:
    """
    :return: number of elements in this tensor, i.e. prod(self.shape).
        Computed via :func:`rf.num_elements_of_shape` on our dims.
    """
    import returnn.frontend as rf
    return rf.num_elements_of_shape(self.dims)
def copy_masked(self: Tensor, mask_value: Union[(Tensor, float, int, _t.RawTensorType)], *, dims: Optional[Sequence[Union[(Dim, int)]]]=None, allow_int: bool=NotSpecified) -> Tensor:
    """
    Return a copy where masked (padded) frames are set to ``mask_value``.

    :param mask_value: value to write into the padded positions
    :param dims: which dims to mask; defaults to all dims
    :param allow_int: in dims
    """
    assert (self.raw_tensor is not None)
    if (dims is None):
        axes = range(self.batch_ndim)
    else:
        axes = [self.get_axis_from_description(dim, allow_int=allow_int) for dim in dims]
        assert (len(set(axes)) == len(dims)), f'{self} copy_masked, dims {dims} not unique, axes {axes}'
    # Keep only axes which actually need masking and whose size dims are all within our dims.
    axes_ = []
    for axis in axes:
        tag: Dim = self.dims[axis]
        if (not tag.need_masking()):
            continue
        if set(tag.dyn_size_ext.dim_tags).issubset(self.dim_tags):
            axes_.append(axis)
    axes = axes_
    if (not axes):
        return self.copy()
    use_padding_info = False
    tf_util = None
    if self._raw_backend.is_tensorflow:
        # TF-only optimization: skip the masking if the padding already has mask_value everywhere.
        import returnn.tf.util.basic as tf_util
        use_padding_info = isinstance(mask_value, (int, float))
        if use_padding_info:
            d = tf_util.get_padding_info_dict_ref(self.raw_tensor)
            existing_pad_values = [d.get(self.dim_tags[axis]) for axis in axes]
            if (set(existing_pad_values) == {mask_value}):
                return self.copy()  # nothing to mask
    import returnn.frontend as rf
    # Combine the masks of all relevant axes.
    mask = None
    for axis in axes:
        mask_ = self._dims[axis].get_mask(dim_order=self.dims, device=self.device)
        mask = (rf.logical_and(mask, mask_) if (mask is not None) else mask_)
    assert isinstance(mask, _t.Tensor)
    res = rf.where(mask, self, mask_value)
    if use_padding_info:
        # Remember the padding value for subsequent copy_masked calls.
        d = tf_util.get_padding_info_dict_ref(res.raw_tensor)
        d.clear()
        d.update({self.dim_tags[axis]: mask_value for axis in axes})
    return res
def get_batch_dim(self) -> Union[(_t.RawTensorType, int)]:
    """
    Warning: This assumes TensorFlow and is also mostly TF specific.

    :return: batch dim (int if static, otherwise a scalar raw tensor)
    """
    assert (self.batch_dim_axis is not None)
    if self.batch:
        if self.beam:
            assert (self.batch.beam == self.beam)
        dim = self.batch.dim
        if (not isinstance(dim, int)):
            # Dynamic batch dim tensor: register it on our batch dim tag.
            batch_dim_ = self.dim_tags[self.batch_dim_axis]
            batch_dim_.set_tag_on_size_tensor(dim, batch=self.batch)
        return dim
    # Fallback: infer batch info from the most recent TF layer.
    from returnn.tf.layers.base import LayerBase
    batch = LayerBase.get_recent_layer().get_batch_info()
    batch = batch.copy_set_beam(self.beam)
    return batch.dim
def get_batch_dim_tag(self):
    """
    :return: the dim tag of the batch axis (must exist)
    :rtype: Dim
    """
    assert self.have_batch_axis()
    axis = self.batch_dim_axis
    return self.dim_tags[axis]
def get_static_batch_dim(self):
    """
    :return: static batch dim if known, otherwise None
    :rtype: int|None
    """
    batch_info = self.batch
    if batch_info:
        return batch_info.static_dim
    if not self.have_batch_axis():
        return None
    return self.get_batch_dim_tag().dimension
def get_spatial_batch_axes(self):
    """
    :rtype: list[int]
    :return: list of axes which are not batch axes and not feature or which are time axis or dynamic.
        counted with batch-dim.
    """
    result = []
    for axis in range(self.batch_ndim):
        if axis == self.batch_dim_axis:
            continue
        # Skip a static feature axis (unless it is also the time axis).
        if axis == self.feature_dim_axis and axis != self.time_dim_axis and self.batch_shape[axis] is not None:
            continue
        result.append(axis)
    return result
def get_spatial_axes(self):
    """
    :rtype: list[int]
    :return: list of axes which are not feature and batch axes, counted without batch-dim.
    """
    axes = []
    for axis in self.get_spatial_batch_axes():
        axes.append(self.get_batch_axis_excluding_batch(axis))
    return axes
def get_feature_batch_axes(self):
    """
    :rtype: list[int]
    :return: list of axes which are feature axes, counted with batch-dim.
        currently there is only one or zero such axis.
    """
    if self.feature_dim_axis is None:
        return []
    return [self.feature_dim_axis]
def get_feature_axes(self):
    """
    :rtype: list[int]
    :return: list of axes which are feature axes, counted without batch-dim.
    """
    axes = []
    for axis in self.get_feature_batch_axes():
        axes.append(self.get_batch_axis_excluding_batch(axis))
    return axes
# Axis attribute names which are handled generically, e.g. in get_special_axes_dict
# (the batch dim axis is handled separately).
SpecialAxesNames = ('time_dim_axis', 'feature_dim_axis')
def get_special_axes_dict(self, counted_with_batch_dim=True, only_available=False):
    """
    :param bool counted_with_batch_dim:
    :param bool only_available: skip axes which are None,
        and skip feature_dim_axis if it was not explicitly specified
    :return: dict axis-name -> axis
    :rtype: dict[str,int]
    """
    axes = list(self.SpecialAxesNames)
    d = {k: getattr(self, k) for k in axes}
    if (not counted_with_batch_dim):
        d = {k: (self.get_batch_axis_excluding_batch(v) if (v is not None) else None) for (k, v) in d.items()}
    if only_available:
        d = {k: v for (k, v) in d.items() if (v is not None)}
        if (self.feature_dim_axis_or_unspecified is NotSpecified):
            # Do not report an automatically inferred feature_dim_axis.
            d.pop('feature_dim_axis', None)
    return d
def get_bc_spatial_batch_shape(self):
    """
    :return: shape which will broadcast along all spatial dimensions and time/batch dim
    :rtype: tuple[int|None]
    """
    broadcast_axes = set(self.get_spatial_batch_axes())
    if self.batch_dim_axis is not None:
        broadcast_axes.add(self.batch_dim_axis)
    return tuple(1 if axis in broadcast_axes else dim for axis, dim in enumerate(self.batch_shape))
def get_bc_shape(self, opts=None):
    """
    :param dict[Dim|str|list[Dim|str]|tuple[Dim|str],int|str|None]|None opts:
        ``key`` specifies the axes.
        ``value`` 1 ('x') is broadcasting, -1 (None) is not broadcasting
        Axes should not be defined multiple times.
        The default behavior if an axis is not specified is like :func:`get_bc_spatial_batch_shape`,
        i.e. it will broadcast in batch and spatial dims only.
        Or if "*" is in the dict, this overwrites the default behavior for all axes.
    :return: shape where 1 means broadcasting, None or >1 means not broadcasting.
        can be used for :func:`TFUtil.dropout`
    :rtype: tuple[int|None]
    """
    if (opts is None):
        opts = {}
    default_axes_map = dict(enumerate(self.get_bc_spatial_batch_shape()))
    axes_map = {}
    for (key, value) in opts.items():
        assert (value in ((- 1), 1, 'x', None)), ('%r get_bc_shape: invalid value in opts %r' % (self, opts))
        # Normalize the value: 'x' -> 1 (broadcast), -1 -> None (keep dim).
        if (value == 'x'):
            value = 1
        if (value == (- 1)):
            value = None
        key_axes = self.get_axes_from_description(key)
        for key_axis in key_axes:
            assert (key_axis not in axes_map), ('%r get_bc_shape: axis %i is defined multiple times in opts %r' % (self, key_axis, opts))
            assert (0 <= key_axis < self.batch_ndim), ('%r get_bc_shape: invalid axis %i in opts %r' % (self, key_axis, opts))
            # A "*" key updates the defaults instead of pinning the axis explicitly.
            (axes_map if (key != '*') else default_axes_map)[key_axis] = (self.batch_shape[key_axis] if (value is None) else value)
    # Fill all remaining axes from the (possibly overwritten) defaults.
    remaining_axes = sorted(set(range(self.batch_ndim)).difference(axes_map.keys()))
    for axis in remaining_axes:
        axes_map[axis] = default_axes_map[axis]
    assert (sorted(axes_map.keys()) == list(range(self.batch_ndim)))
    return tuple([axes_map[i] for i in range(self.batch_ndim)])
def get_scope_name(self):
    """
    :return: via self.placeholder or any self.size_placeholder, or None
    :rtype: str|None
    """
    if (self.placeholder is not None):
        # TF tensor names look like "scope/name:output"; dirname yields the scope part.
        return os.path.dirname(self.placeholder.name)
    if self.size_placeholder:
        # Fall back to the first available size tensor (sorted by axis for determinism).
        for (i, v) in sorted(self.size_placeholder.items()):
            if (v is not None):
                return os.path.dirname(v.name)
    return None
def get_full_name(self):
    """
    :return: if we have a defined scope (via :func:`self.get_scope_name`), then scope_name + "/" + self.name,
        otherwise just self.name
    :rtype: str
    """
    scope = self.get_scope_name()
    if not scope:
        return self.name
    return ('%s/%s' % (scope, self.name))
def get_dim_tag(self, axis):
    """
    :param int axis: counted with batch-dim
    :rtype: Dim
    """
    dims = self._dims
    return dims[axis]
def get_time_dim_tag(self):
    """
    :return: the dim tag of the time axis (must exist)
    :rtype: Dim
    """
    axis = self.time_dim_axis
    assert axis is not None
    return self.get_dim_tag(axis)
def get_dyn_size_tags(self):
    """
    :return: all dim tags with dynamic size
    :rtype: list[Dim]
    """
    tags = []
    for tag in self._dims:
        if tag.is_dynamic_seq_length():
            tags.append(tag)
    return tags
def get_size_dim_tag(self, number):
    """
    :param int number: index in sorted(size_placeholder.keys())
    :rtype: Dim
    """
    return self.get_dyn_size_tags()[number]
def get_batch_shape_dim_tags(self):
    """
    :return: dimension tags for each axis (counted with batch dim, i.e. len is batch_ndim)
    :rtype: tuple[Dim]
    """
    tags = self.dim_tags
    return tags
@classmethod
def get_common_data(cls, sources: List[Tensor], ignore_feature_dim=False, allow_broadcast_all_sources=NotSpecified, name=None) -> Optional[Tensor]:
    """
    :param sources:
    :param bool ignore_feature_dim: when set, the feature dim does not have to match in the sources
    :param bool|NotSpecified allow_broadcast_all_sources:
    :param str|None name:
    :return: some generic data where the sources should be compatible to (with copy_compatible_to),
        i.e. it contains the union of all axes from all sources (least common multiple).
        This is always a template, and a new copy.
    """
    from returnn.util import BehaviorVersion
    if (not sources):
        return None
    assert sources
    if (len(sources) == 1):
        return sources[0].copy_template()
    max_ndim = max([s.batch_ndim for s in sources])
    # Combine the batch info of all sources, if any has one.
    if any((src.batch for src in sources)):
        from returnn.tf.util.data import BatchInfo
        common_batch = BatchInfo.get_common_batch_info([src.batch for src in sources if src.batch])
    else:
        common_batch = None
    # Start from (the first of) the sources with the largest ndim.
    common = [s for s in sources if (s.batch_ndim == max_ndim)][0]
    common = common.copy_template(name=name)
    common.beam = None
    if common_batch:
        common.batch = common_batch.copy_set_beam(None)
    if any([s.beam for s in sources]):
        from returnn.tf.util.data import SearchBeam
        common.beam = SearchBeam.get_combined_beam(*[s.beam for s in sources])
    is_equal_opts = dict(ignore_feature_dim=ignore_feature_dim, treat_feature_as_spatial=True, allow_same_spatial_dim=True, undefined_matches=True, derived_matches=True)
    if (BehaviorVersion.get() < 11):
        is_equal_opts['broadcast_matches'] = True
    (all_dim_tags, tags_dict) = Dim.get_all_dimension_tags(sources, is_equal_opts=is_equal_opts)
    # Make sure every dim tag from all sources exists in the common template.
    for dim_tag in all_dim_tags:
        common_tag = Dim.get_existing_tag_from_collection(dim_tag, common.dim_tags, is_equal_opts=is_equal_opts)
        if common_tag:
            if (dim_tag != common_tag):
                # Same dim (per is_equal_opts) but a different tag: prefer the collected one.
                axis = common.dim_tags.index(common_tag)
                common = common.copy_template_replace_dim_tag(axis=axis, new_dim_tag=dim_tag)
        else:
            axis = common.get_default_new_axis_for_dim_tag(dim_tag)
            common = common.copy_add_dim_by_tag(dim_tag, unbroadcast=True, axis=axis)
    if all(((s.batch_ndim < common.batch_ndim) for s in sources)):
        # Every source would need broadcasting; this must be explicitly allowed.
        from returnn.util.basic import validate_broadcast_all_sources
        validate_broadcast_all_sources(allow_broadcast_all_sources=allow_broadcast_all_sources, inputs=sources, common=common)
    return common
def find_matching_dims(self: Tensor, dim_tag: Dim, is_equal_opts) -> List[int]:
    """
    Finds the dimensions of this Tensor that match another Dim.

    :param dim_tag:
    :param dict[str,bool]|None is_equal_opts: passed to Dim.is_equal
    :return: a list of matching axes, counted with batch dim. Sorted in ascending order
    """
    matches = []
    for axis in range(self.batch_ndim):
        if self.get_dim_tag(axis).is_equal(dim_tag, **is_equal_opts):
            matches.append(axis)
    return matches
def find_matching_dim_map(self: Tensor, other: Tensor, other_axes, is_equal_opts=None) -> Dict[(int, int)]:
    """
    Looks up all other_axes of another Tensor in this Tensor. Does not allow duplicates.

    :param other:
    :param list[int] other_axes: list of axes of ``other``, counted with batch dim, to be mapped
    :param dict[str,bool]|None is_equal_opts: passed to Dim.is_equal
    :return: dict mapping other axes (from ``other_axes``) to own axes, all counted with batch dim
    """
    if (is_equal_opts is None):
        is_equal_opts = dict(allow_same_feature_dim=True, allow_same_spatial_dim=True, treat_feature_as_spatial=True)

    def map_other_axis_to_self(other_axis: int, taken_self_axes: Set[int]) -> int:
        """
        :param other_axis: counted with batch dim
        :param taken_self_axes: axes that should not be used again
        :return: the axis of ``self`` that matches ``other_axis``, counted with batch dim
        """
        other_axis_dim_tag = other.dims[other_axis]
        is_equal_opts_ = None
        matching = None
        # Try increasingly relaxed matching options until something matches.
        for opt in [{}, is_equal_opts, 'broadcast_matches', 'unknown_spatial_matches']:
            if isinstance(opt, dict):
                is_equal_opts_ = opt.copy()
            elif isinstance(opt, str):
                # Add this single relaxation on top of the previous options.
                if (opt in is_equal_opts_):
                    continue
                is_equal_opts_[opt] = True
            matching = [self_axis for self_axis in self.find_matching_dims(other_axis_dim_tag, is_equal_opts_) if (self_axis not in taken_self_axes)]
            if (opt == 'unknown_spatial_matches'):
                # Most relaxed option: ambiguity here is an error.
                assert (len(matching) <= 1), ('cannot match axes %s from %s to %s, failed at other %s, not unique after %s' % (other_axes, other, self, other_axis, opt))
            if matching:
                break
        assert matching, ('cannot match the axes %s from %s to %s. Failing at axis %s, tag %s' % (other_axes, other, self, other_axis, other.dim_tags[other_axis]))
        if (len(matching) == 1):
            return matching[0]
        # Multiple candidates: prefer the identical tag object, otherwise the highest match_priority.
        max_match_priority = max((dim.match_priority for dim in self.dims))
        return max(matching, key=(lambda ax: ((max_match_priority + 1) if (self.dims[ax] is other_axis_dim_tag) else self.dims[ax].match_priority)))

    other_to_self_mapping = {}
    for axis in other_axes:
        # Each own axis can be used only once (taken_self_axes).
        other_to_self_mapping[axis] = map_other_axis_to_self(axis, set(other_to_self_mapping.values()))
    assert (len(other_to_self_mapping) == len(other_axes)), 'other_axes may not contain duplicates'
    return other_to_self_mapping
def is_valid_in_current_graph(self: _t.Tensor) -> bool:
    """
    :return: whether the raw tensor is valid in the current graph.
        In eager mode, this is always True.
    """
    raw = self._raw_tensor
    if raw is None:
        return True
    return self._raw_backend.is_valid_in_current_graph(self)
def mark_as_loss(self: Tensor, name: str, *, scale: Optional[float]=1.0, as_error: bool=False, use_normalized_loss: bool=False, use_flatten_frames: bool=True, custom_inv_norm_factor: Optional[Tensor]=None) -> None:
    """
    Mark this as a loss.
    Please refer to :func:`RunCtx.mark_as_loss` for more details.

    :param name:
    :param scale:
    :param as_error:
    :param use_normalized_loss:
    :param use_flatten_frames:
    :param custom_inv_norm_factor:
    """
    import returnn.frontend as rf
    # Pure delegation to the current run context.
    rf.get_run_ctx().mark_as_loss(loss=self, name=name, scale=scale, as_error=as_error, use_normalized_loss=use_normalized_loss, use_flatten_frames=use_flatten_frames, custom_inv_norm_factor=custom_inv_norm_factor)
def mark_as_output(self: Tensor, name: str, *, shape: Optional[Sequence[Dim]]=None) -> None:
    """
    Mark this as an output.
    See :func:`RunCtx.mark_as_output` for more details.

    :param name:
    :param shape: passed through as ``dims``
    """
    import returnn.frontend as rf
    rf.get_run_ctx().mark_as_output(self, name=name, dims=shape)
def mark_as_default_output(self: Tensor, *, shape: Optional[Sequence[Dim]]=None) -> None:
    """
    Mark this as the default output.
    See :func:`RunCtx.mark_as_default_output` for more details.

    :param shape:
    """
    import returnn.frontend as rf
    rf.get_run_ctx().mark_as_default_output(self, shape=shape)
|
def infer_sparse_dim(*, name: str, sparse: Optional[bool]=None, sparse_dim, dim=NotSpecified, **_other_kwargs) -> Optional[Dim]:
    """
    Derive the sparse dim from the given (partially legacy) arguments.

    :param name:
    :param sparse:
    :param sparse_dim:
    :param dim:
    :return: sparse dim
    """
    unset = (None, NotSpecified)
    if sparse is None:
        sparse = sparse_dim not in unset
    if sparse_dim in unset:
        if not sparse:
            sparse_dim = None
        else:
            # Auto-create the sparse dim from the legacy dim argument.
            assert dim is not NotSpecified, 'need dim (num classes) if sparse'
            assert dim is None or isinstance(dim, int)
            sparse_dim = Dim(kind=Dim.Types.Feature, dimension=dim, description=('%s:sparse-dim' % name), auto_generated=True)
    if sparse_dim is None:
        assert not sparse
    else:
        # Consistency checks of the explicitly given sparse dim.
        assert isinstance(sparse_dim, Dim)
        assert sparse_dim.can_be_used_as_dim()
        assert sparse
        if dim is not NotSpecified:
            assert sparse_dim.dimension == dim
    return sparse_dim
|
def infer_dim_tags(*, name, batch_dim_axis=NotSpecified, time_dim_axis=NotSpecified, feature_dim_axis=NotSpecified, dim_tags: Optional[Sequence[Dim]]=None, shape: Optional[Sequence[Optional[int]]]=None, sparse_dim: Optional[Dim]=None, dim=NotSpecified, size_placeholder=None, auto_create_placeholders=False, batch=None, **_other_kwargs) -> Tuple[(Dim, ...)]:
    """
    Derive the dim tags tuple from the given (partially legacy) arguments.

    :param name:
    :param int|None|NotSpecified batch_dim_axis: where we add the batch-dim.
        e.g. shape=(time,...), 0 -> (batch,time,...), 1 -> (time,batch,...).
        Default is 0.
        This is normally always set, and a lot of code expects this. However, you can set it to None
        if this Tensor does not have a batch-dim.
    :param int|None|NotSpecified time_dim_axis: where we have the time dim axis, after we added the batch-dim.
        this is often 1. however, can be None if there is no time-dim.
    :param int|None|NotSpecified feature_dim_axis: feature dim axis. by default it's the last one
    :param dim_tags: if given, used directly as-is
    :param shape: including time-dim (can be None). excluding batch-dim.
        e.g. (time,feat)=(None,128)
    :param sparse_dim:
    :param int|None|NotSpecified dim: feature dimension, shape[-1] if not sparse, otherwise like num_classes
    :param size_placeholder:
    :param auto_create_placeholders:
    :param batch:
    :return: dims
    """
    if (dim_tags is not None):
        return tuple(dim_tags)
    if (batch_dim_axis is NotSpecified):
        batch_dim_axis = 0
    if (shape is None):
        # No shape given: infer a default shape (and time axis) from the remaining args.
        if (time_dim_axis is NotSpecified):
            time_dim_axis = _default_time_dim_axis_no_shape(batch_dim_axis=batch_dim_axis, feature_dim_axis=feature_dim_axis)
        (shape, time_dim_axis) = _infer_default_shape_and_time(batch_dim_axis=batch_dim_axis, feature_dim_axis=feature_dim_axis, time_dim_axis=time_dim_axis, sparse=bool(sparse_dim), dim=dim)
    elif (time_dim_axis is NotSpecified):
        time_dim_axis = _default_time_dim_axis(batch_dim_axis=batch_dim_axis, shape=shape)
    dims = _infer_dim_tags_tuple_from_shape(shape, batch_dim_axis=batch_dim_axis, time_dim_axis=time_dim_axis, feature_dim_axis=feature_dim_axis, size_placeholder=size_placeholder, name=name, extern_data=auto_create_placeholders, sparse=bool(sparse_dim), batch=batch)
    # Consistency checks of dim against the inferred dims.
    if (dim is not NotSpecified):
        if sparse_dim:
            assert (sparse_dim.dimension == dim)
        elif (feature_dim_axis is None):
            assert (dim is None)
        elif (feature_dim_axis is NotSpecified):
            pass  # cannot check here; the feature axis is inferred elsewhere
        else:
            assert (dims[feature_dim_axis].dimension == dim)
    return dims
|
class _SizePlaceholderProxy():
    """
    This is a proxy object to emulate the original Tensor.size_placeholder behavior,
    which was a dict[int,tf.Tensor], axis_wo_batch -> sizes.
    """

    def __init__(self, data: Tensor):
        """
        :param data:
        """
        self.data = data

    def _assert_sane_axis_wo_batch(self, idx):
        # idx is an axis counted without batch-dim.
        assert (isinstance(idx, int) and (0 <= idx < self.data.ndim))

    def __contains__(self, item):
        # An axis is "contained" iff it is valid and has a dynamic size.
        if (not isinstance(item, int)):
            return False
        if (not (0 <= item < self.data.ndim)):
            return False
        return self.data.has_dynamic_size(axis=self.data.get_batch_axis(item))

    def __getitem__(self, item):
        self._assert_sane_axis_wo_batch(item)
        return self.data.get_dynamic_size(axis=self.data.get_batch_axis(item))

    def __setitem__(self, key, value):
        self._assert_sane_axis_wo_batch(key)
        self.data.set_dynamic_size(axis=self.data.get_batch_axis(key), sizes=value)

    def __delitem__(self, key):
        # Deletion is not supported; always raises (after sanity-checking the key).
        self._assert_sane_axis_wo_batch(key)
        raise Exception(('%s: cannot delete items from size_placeholder' % self.data))

    def __iter__(self):
        return iter(self.keys())

    def __len__(self):
        return len(self.keys())

    def __bool__(self):
        return bool(self.keys())

    __nonzero__ = __bool__  # Python 2 compat

    def __repr__(self):
        return repr(self.as_dict())

    def get(self, axis_wo_b, default=None):
        """
        :param int axis_wo_b:
        :param tf.Tensor|None default:
        :rtype: tf.Tensor|None
        """
        if (axis_wo_b in self):
            return self[axis_wo_b]
        return default

    def pop(self, axis_wo_b, *default):
        """
        :param int axis_wo_b:
        """
        if (default and (axis_wo_b not in self)):
            (default,) = default
            return default
        res = self[axis_wo_b]
        del self[axis_wo_b]  # note: __delitem__ always raises
        return res

    def clear(self):
        """
        Remove all. Not supported; always raises.
        """
        raise Exception(('%s: cannot clear size_placeholder' % self.data))

    def keys(self):
        """
        :rtype: list[int]
        """
        return [i for i in range(self.data.ndim) if (i in self)]

    def values(self):
        """
        :rtype: list[tf.Tensor]
        """
        return [self[i] for i in self.keys()]

    def items(self):
        """
        :rtype: list[(int,tf.Tensor)]
        """
        return [(i, self[i]) for i in self.keys()]

    def copy(self):
        """
        :return: a copy-like object
        :rtype: dict[int,tf.Tensor]
        """
        return self.as_dict()

    def as_dict(self):
        """
        :rtype: dict[int,tf.Tensor]
        """
        return dict(self.items())
|
def _batch_dim_axis_from_dim_tags_tuple(dim_tags):
'\n :param Sequence[Dim] dim_tags:\n :return: batch_dim_axis. int or None if not existing\n :rtype: int|None\n '
for (axis, dim_tag) in enumerate(dim_tags):
if dim_tag.is_batch_dim():
return axis
return None
|
def _batch_shape_from_shape(shape, batch_dim_axis):
'\n :param Sequence[int|None] shape: without batch-dim\n :param int|None batch_dim_axis:\n :return: shape with batch dim if existing\n :rtype: tuple[int|None]\n '
shape = tuple(shape)
if (batch_dim_axis is not None):
assert (0 <= batch_dim_axis <= len(shape))
return ((shape[:batch_dim_axis] + (None,)) + shape[batch_dim_axis:])
else:
return shape
|
def _create_size_placeholder(name, axis_wo_b, tag, batch_dim):
    """
    Create a TF placeholder for the dynamic sizes of one axis and attach it to the dim tag.

    :param str name: name of the data/tensor; used in the placeholder name scope
    :param int axis_wo_b: axis counted without batch-dim; only used for the placeholder name
    :param Dim tag: dim tag of this axis; its dyn_size_ext / size tensor gets set here
    :param Dim|None batch_dim:
    """
    from returnn.tf import compat as tf_compat
    from returnn.tf.util.basic import reuse_name_scope
    with reuse_name_scope(('extern_data/placeholders/%s' % name), absolute=True):
        dyn_size_name = ('%s_dim%i_size' % (name, axis_wo_b))
        if (not tag.dyn_size_ext):
            # Default sizes template: shape [B], or scalar if there is no batch dim.
            dyn_size_ext = _t.Tensor(name=dyn_size_name, dtype=_t.Tensor.size_dtype, dim_tags=([batch_dim] if batch_dim else []), batch=None)
        else:
            dyn_size_ext = tag.dyn_size_ext.copy_template()
            dyn_size_ext.batch = None
        dyn_size = tf_compat.v1.placeholder(name=dyn_size_name, dtype=dyn_size_ext.dtype, shape=dyn_size_ext.batch_shape)
        dyn_size_ext.placeholder = dyn_size
        if dyn_size_ext.batch:
            tag.set_dyn_size_ext_for_batch_ctx(batch=dyn_size_ext.batch, ctx=dyn_size_ext.control_flow_ctx, dyn_size_ext=dyn_size_ext)
        else:
            tag.reset_batch_ctx()
            tag.dyn_size_ext = dyn_size_ext
        tag.set_tag_on_size_tensor(dyn_size)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.