code stringlengths 17 6.64M |
|---|
class MultiProcDataset(CachedDataset2):
    """
    Dataset which uses multi-processing to load the data from another dataset.

    To get deterministic behavior, it will use round-robin scheduling.

    There is one process just for generating the sequence order, i.e. list of sequences.
    Then there are ``num_workers`` processes which will load the data for the shard of the sequences.
    This means, one epoch (or subepoch) is exactly as in the original dataset.
    """

    def __init__(self, dataset: Dict[(str, Any)], num_workers: int, buffer_size: int, _meta_info_cache: Optional[Dict[(str, Any)]]=None, **kwargs):
        """
        :param dataset: the dataset dict (opts) for the wrapped dataset to use
        :param num_workers: number of worker procs to use
        :param buffer_size: buffer size for each worker, amount of seqs to prefetch
        :param _meta_info_cache: for internal use, see :func:`_meta_info_cache`
        """
        super().__init__(**kwargs)
        assert ((num_workers > 0) and (buffer_size > 0))
        dataset = dataset.copy()
        # Forward our own kwargs (e.g. seq ordering opts) to the wrapped dataset
        # unless they are explicitly set there already.
        for (k, v) in kwargs.items():
            if (k not in dataset):
                dataset[k] = v
        if ('random_seed_offset' not in dataset):
            dataset['random_seed_offset'] = self.random_seed_offset
        self.dataset = dataset
        self.num_workers = num_workers
        self.buffer_size = buffer_size
        self._data_keys = None
        self._num_seqs = None
        self._total_num_seqs = None
        self._worker_parent_conns = None  # one pipe end per worker proc
        self._seq_order_proc_parent_conn = None  # pipe end to the seq order proc
        self._seq_order_proc = None
        self._worker_procs = None
        if _meta_info_cache:
            # This allows to skip the lazy init (spawning the procs) when only
            # the meta info (num_inputs/num_outputs/labels) is needed.
            self.num_inputs = _meta_info_cache['num_inputs']
            self.num_outputs = _meta_info_cache['num_outputs']
            self._total_num_seqs = _meta_info_cache['total_num_seqs']
            self.labels = _meta_info_cache['labels']

    def initialize(self):
        """init"""
        if (not self.num_outputs):
            self._lazy_init()
        super().initialize()

    @property
    def _meta_info_cache(self):
        # Meta info which can be passed to a new instance (via __init__)
        # to avoid the lazy init (proc spawning) there. None if not known yet.
        if (not self.num_outputs):
            return None
        return {'num_inputs': self.num_inputs, 'num_outputs': self.num_outputs, 'total_num_seqs': self._total_num_seqs, 'labels': self.labels}

    def _lazy_init(self):
        # Spawn the seq-order proc and the worker procs, and fetch the dataset
        # meta info from the seq-order proc. Only done once.
        if (not self._worker_procs):
            global_config = get_global_config(raise_exception=False)
            # Pipes from the seq-order proc to each worker, used to distribute
            # the per-worker seq order shards.
            seq_order_to_worker = []
            worker_from_seq_order = []
            for i in range(self.num_workers):
                (reader, writer) = _mp.Pipe(duplex=False)
                seq_order_to_worker.append(writer)
                worker_from_seq_order.append(reader)
            worker_parent_conns = []
            worker_child_conns = []
            for i in range(self.num_workers):
                (parent_conn, child_conn) = _mp.Pipe()
                worker_parent_conns.append(parent_conn)
                worker_child_conns.append(child_conn)
            (seq_order_proc_parent_conn, seq_order_proc_child_conn) = _mp.Pipe()
            seq_order_proc = _mp.Process(name=f'{self.name} seq order proc', target=self._seq_order_proc_loop, args=(global_config, self.dataset, seq_order_proc_child_conn, seq_order_to_worker), daemon=True)
            seq_order_proc.start()
            # The child end is owned by the subprocess now; close our handle.
            seq_order_proc_child_conn.close()
            worker_procs = []
            for i in range(self.num_workers):
                worker_proc = _mp.Process(name=f'{self.name} worker proc {(i + 1)}/{self.num_workers}', target=self._worker_proc_loop, args=(global_config, self.dataset, self.buffer_size, worker_child_conns[i], worker_from_seq_order[i]), daemon=True)
                worker_proc.start()
                worker_procs.append(worker_proc)
                worker_child_conns[i].close()
            self._seq_order_proc_parent_conn = seq_order_proc_parent_conn
            self._seq_order_proc = seq_order_proc
            self._worker_parent_conns = worker_parent_conns
            self._worker_procs = worker_procs
            # Query the dataset meta info from the seq-order proc.
            # Message order here must match _seq_order_proc_loop's 'init' branch.
            self._seq_order_proc_parent_conn.send(('init', {}))
            (msg, self.num_inputs) = self._seq_order_proc_parent_conn.recv()
            assert (msg == 'num_inputs')
            (msg, self.num_outputs) = self._seq_order_proc_parent_conn.recv()
            assert (msg == 'num_outputs')
            (msg, self._total_num_seqs) = self._seq_order_proc_parent_conn.recv()
            assert (msg == 'total_num_seqs')
            (msg, self.labels) = self._seq_order_proc_parent_conn.recv()
            assert (msg == 'labels')

    def __del__(self):
        # Best-effort shutdown of the subprocesses.
        # Exceptions are swallowed because this can run at interpreter shutdown
        # when pipes/procs may already be gone.
        if self._seq_order_proc:
            try:
                self._seq_order_proc_parent_conn.send(('exit', {}))
                self._seq_order_proc.join()
            except Exception:
                pass
        if self._worker_procs:
            got_exception = False
            for worker_parent_conn in self._worker_parent_conns:
                try:
                    worker_parent_conn.send(('exit', {}))
                except Exception:
                    got_exception = True
            if (not got_exception):
                for worker_proc in self._worker_procs:
                    try_run(worker_proc.join)

    @staticmethod
    def _seq_order_proc_loop(global_config: Optional[Config], dataset_dict: Dict[(str, Any)], parent: mpConnection, workers: List[mpConnection]):
        # Main loop of the seq-order subprocess:
        # owns its own instance of the wrapped dataset, computes the seq order
        # per epoch, and distributes round-robin shards of it to the workers.
        if (sys.platform == 'linux'):
            # Set the kernel-visible process name (shown by e.g. htop/ps).
            with open('/proc/self/comm', 'w') as f:
                f.write(f'MPD seq order')
        if global_config:
            set_global_config(global_config)
        num_workers = len(workers)
        dataset = init_dataset(dataset_dict)
        try:
            while True:
                (msg, kwargs) = parent.recv()
                if (msg == 'exit'):
                    break
                elif (msg == 'init'):
                    # Reply order must match _lazy_init.
                    parent.send(('num_inputs', dataset.num_inputs))
                    parent.send(('num_outputs', dataset.num_outputs))
                    try:
                        total_num_seqs = dataset.get_total_num_seqs()
                    except NotImplementedError:
                        total_num_seqs = None
                    parent.send(('total_num_seqs', total_num_seqs))
                    parent.send(('labels', dataset.labels))
                elif (msg == 'init_seq_order'):
                    dataset.init_seq_order(**kwargs)
                    seq_order = dataset.get_current_seq_order()
                    for (i, worker) in enumerate(workers):
                        # Round-robin sharding: worker i handles global seqs
                        # i, i+num_workers, i+2*num_workers, ...
                        worker.send(('seq_order_shard', seq_order[i::num_workers]))
                    parent.send(('num_seqs', len(seq_order)))
                else:
                    raise Exception(f'unknown msg {msg!r}')
        except KeyboardInterrupt:
            pass

    @staticmethod
    def _worker_proc_loop(global_config: Optional[Config], dataset_dict: Dict[(str, Any)], buffer_size: int, parent: mpConnection, seq_order: mpConnection):
        # Main loop of a worker subprocess:
        # owns its own instance of the wrapped dataset, receives its seq order
        # shard from the seq-order proc, prefetches up to buffer_size seqs,
        # and serves 'get_data_seq' requests from the parent.
        if (sys.platform == 'linux'):
            # Set the kernel-visible process name (shown by e.g. htop/ps).
            with open('/proc/self/comm', 'w') as f:
                f.write(f'MPD worker')
        if global_config:
            set_global_config(global_config)
        dataset = init_dataset(dataset_dict)
        got_init_seq_order = False
        cache = []  # prefetched DatasetSeq objects, consecutive worker-local seq indices
        next_seq_idx = 0  # next worker-local seq idx to load into the cache

        def _add_to_cache():
            # Load the next seq into the cache if the buffer is not full yet.
            # Returns whether a seq was added.
            nonlocal next_seq_idx
            if (len(cache) >= buffer_size):
                return False
            if (not dataset.is_less_than_num_seqs(next_seq_idx)):
                return False
            dataset.load_seqs(next_seq_idx, (next_seq_idx + 1))
            seq_tag = dataset.get_tag(next_seq_idx)
            features = {data_key: dataset.get_data(next_seq_idx, data_key) for data_key in dataset.get_data_keys()}
            res = DatasetSeq(seq_idx=next_seq_idx, seq_tag=seq_tag, features=features)
            cache.append(res)
            next_seq_idx += 1
            return True

        def _get_from_cache(seq_idx: int) -> Optional[DatasetSeq]:
            # Return the seq if it is in the cache, else None.
            if (not cache):
                return None
            if (seq_idx > cache[(- 1)].seq_idx):
                return None
            for seq in cache:
                # Older seqs are popped before lookup, so all cached seqs
                # have seq_idx <= the requested one here.
                assert (seq.seq_idx <= seq_idx)
                if (seq.seq_idx == seq_idx):
                    return seq
            assert False

        def _get(seq_idx: int) -> Optional[DatasetSeq]:
            # Return the seq, loading more into the cache if needed,
            # or None if seq_idx is beyond the end of the shard.
            if (cache and (seq_idx < cache[0].seq_idx)):
                raise Exception(f'requested seq idx {seq_idx} is smaller than cache start {cache[0].seq_idx}, cannot go backwards')
            res = _get_from_cache(seq_idx)
            if res:
                return res
            if (not dataset.is_less_than_num_seqs(seq_idx)):
                return None
            assert (next_seq_idx <= seq_idx)
            while True:
                if (not _add_to_cache()):
                    raise Exception(f'buffer too small, requested seq idx {seq_idx}, cache starts at {(cache[0].seq_idx if cache else None)}')
                assert (cache[(- 1)].seq_idx <= seq_idx)
                if (cache[(- 1)].seq_idx == seq_idx):
                    return cache[(- 1)]

        try:
            while True:
                if got_init_seq_order:
                    # While no request is pending, use the idle time to prefetch.
                    while (not parent.poll()):
                        if (not _add_to_cache()):
                            break
                (msg, kwargs) = parent.recv()
                if (msg == 'exit'):
                    break
                elif (msg == 'get_data_seq'):
                    seq_idx = kwargs['seq_idx']
                    # Seqs are requested monotonically increasing,
                    # so anything older can be dropped.
                    while (cache and (cache[0].seq_idx < seq_idx)):
                        cache.pop(0)
                    res = _get(seq_idx)
                    parent.send(('data_seq', res))
                elif (msg == 'init_seq_order'):
                    # The actual shard comes via the extra pipe from the seq-order proc.
                    (msg_, seq_order_) = seq_order.recv()
                    assert (msg_ == 'seq_order_shard')
                    dataset.init_seq_order(seq_order=seq_order_, **kwargs)
                    got_init_seq_order = True
                    next_seq_idx = 0
                    cache[:] = []
                else:
                    raise Exception(f'unknown msg {msg!r}')
        except KeyboardInterrupt:
            pass

    def init_seq_order(self, epoch=None, seq_list=None, seq_order=None):
        """
        :type epoch: int|None
        :param list[str]|None seq_list: List of sequence tags, to set a predefined order.
        :param list[int]|None seq_order: List of corpus sequence indices, to set a predefined order. Only possible
            if the dataset has such indices (see self.have_corpus_seq_idx()).
        :rtype: bool
        :returns whether the order changed (True is always safe to return)
        """
        super().init_seq_order(epoch=epoch, seq_list=seq_list, seq_order=seq_order)
        if ((epoch is not None) or (seq_list is not None) or (seq_order is not None)):
            self._lazy_init()
            self._seq_order_proc_parent_conn.send(('init_seq_order', {'epoch': epoch, 'seq_list': seq_list, 'seq_order': seq_order}))
            # Notify all workers; each will then fetch its shard from the seq-order proc.
            for i in range(self.num_workers):
                self._worker_parent_conns[i].send(('init_seq_order', {'epoch': epoch}))
            (msg, num_seqs) = self._seq_order_proc_parent_conn.recv()
            assert (msg == 'num_seqs')
            self._num_seqs = num_seqs
        else:
            self._num_seqs = 0
        return True

    def _collect_single_seq(self, seq_idx: int) -> Optional[DatasetSeq]:
        """
        :param seq_idx: sorted seq idx
        :return: the loaded seq, or None if seq_idx is out of range
        """
        if (seq_idx >= self._num_seqs):
            return None
        # Round-robin: global seq idx i lives in worker i % num_workers,
        # under worker-local index i // num_workers.
        worker_idx = (seq_idx % self.num_workers)
        worker = self._worker_parent_conns[worker_idx]
        worker.send(('get_data_seq', {'seq_idx': (seq_idx // self.num_workers)}))
        (msg, data) = worker.recv()
        assert (msg == 'data_seq')
        if (data is None):
            return None
        assert isinstance(data, DatasetSeq)
        # Translate the worker-local seq idx back to the global one.
        data.seq_idx = seq_idx
        return data

    @property
    def num_seqs(self) -> int:
        """num seqs of the current epoch (as reported by the seq-order proc)"""
        return self._num_seqs

    def get_total_num_seqs(self) -> int:
        """total num seqs of the wrapped dataset, if it knows it"""
        if (self._total_num_seqs is not None):
            return self._total_num_seqs
        raise NotImplementedError
|
class NormalizationData(object):
    """This class holds normalization data for inputs and outputs.
    It also contains methods to create the normalization HDF file.
    """

    GROUP_INPUTS = 'inputs'
    GROUP_OUTPUTS = 'outputs'
    DATASET_MEAN = 'mean'
    DATASET_MEAN_OF_SQUARES = 'meanOfSquares'
    DATASET_VARIANCE = 'variance'
    DATASET_TOTAL_FRAMES = 'totalNumberOfFrames'
    DATASET_TIME_DIMENSION_INDEX = 0
    DATASET_FEATURE_DIMENSION_INDEX = 1
    # Max tolerated absolute summation error; see _updateTotalSum.
    SUMMATION_PRECISION = 1e-05

    @staticmethod
    def createNormalizationFile(bundleFilePath, outputFilePath, dtype=np.float64, flag_includeOutputs=True):
        """Calculates means over inputs and outputs of datasets in the HDF files
        described by the given bundle file.

        :see: BundleFile.BundleFile

        Each HDF dataset file is expected to have the following groups:

          * NormalizationData.GROUP_INPUTS (the group for the input data)
          * NormalizationData.GROUP_OUTPUTS (the group for the output data)

        Each group may have datasets. Each dataset is expected to have
        shape (time frames, features), e.g. (267, 513) -- 267 time frames
        each containing a feature vector of dimensionality 513.

        The method writes results into the given output file.
        Availability of means and variances depends on whether the corresponding
        groups are available in the input dataset HDF files.

        !!! IMPORTANT !!!
        General rule of thumb: if one dataset file has both input and output
        groups then you should make sure that all the dataset files have them.
        Otherwise means and variance will not be correct.
        It is OK if *all* the datasets have only the input group.
        In this case means and variance only for inputs will be calculated.

        :type bundleFilePath: str
        :param bundleFilePath: path to the bundle file. :see: BundleFile.BundleFile
        :type outputFilePath: str
        :param outputFilePath: path to the output HDF normalization file.
        :type dtype: numpy.dtype
        :param dtype: type of data to use during calculations.
        :type flag_includeOutputs: bool
        :param flag_includeOutputs: if True then normalization data will be
                                    calculated for outputs (targets) as well.
        """
        NormalizationData._calculateNormalizationData(bundleFilePath, outputFilePath, NormalizationData.GROUP_INPUTS, dtype=dtype)
        if flag_includeOutputs:
            NormalizationData._calculateNormalizationData(bundleFilePath, outputFilePath, NormalizationData.GROUP_OUTPUTS, dtype=dtype)

    @staticmethod
    def _calculateNormalizationData(bundleFilePath, outputFilePath, groupName, dtype=np.float64):
        """Helper method.
        Calculates and writes into the output HDF file mean, mean of squares,
        variance and total number of frames for the datasets in the given HDF
        group.

        :type bundleFilePath: str
        :param bundleFilePath: path to the bundle file. :see: BundleFile.BundleFile
        :type outputFilePath: str
        :param outputFilePath: path to the output HDF normalization file. If file
                               already exists it will not be truncated.
        :type groupName: str
        :param groupName: name of the HDF group for which normalization data
                          should be calculated. Also, a group with this name will
                          be created in the output HDF file to store the calculated
                          normalization data.
        :type dtype: numpy.dtype
        :param dtype: type of data to use during calculations.
        """
        accumulatedSum = None
        accumulatedSumOfSqr = None
        # Bugfix: was `long()`, which does not exist in Python 3.
        totalFrames = np.int64(0)
        bundle = BundleFile(bundleFilePath)
        for filePath in bundle.datasetFilePaths:
            with h5py.File(filePath, mode='r') as datasetFile:
                (intermSum, intermSumOfSqr, intermTotalFrames) = NormalizationData._accumulateSums(datasetFile, groupName, dtype=dtype)
            accumulatedSum = NormalizationData._updateTotalSum(accumulatedSum, intermSum)
            accumulatedSumOfSqr = NormalizationData._updateTotalSum(accumulatedSumOfSqr, intermSumOfSqr)
            totalFrames += intermTotalFrames
        (mean, meanOfSquares, variance) = NormalizationData._calculateMeans(accumulatedSum, accumulatedSumOfSqr, totalFrames)
        # mode='a': append, so an existing file is not truncated.
        with h5py.File(outputFilePath, mode='a') as out:
            NormalizationData._writeData(out, groupName, mean, meanOfSquares, variance, totalFrames, dtype=dtype)

    @staticmethod
    def _accumulateSums(f, groupName, dtype=np.float64):
        """Helper method.
        Accumulate sums and sums of squares over feature vectors for a given group.

        :type f: h5py.File
        :param f: handle to an opened HDF file with datasets
        :type groupName: str
        :param groupName: HDF group containing datasets
        :type dtype: numpy.dtype
        :param dtype: type of data to use during calculations.
        :rtype: tuple (numpy.ndarray | None, numpy.ndarray | None, numpy.int64)
        :return: tuple (sum, sum of squares, total number of time frames)
                 if they are available
        """
        featSum = None
        sumOfSqr = None
        totalFrames = np.int64(0)
        if (groupName not in f):
            return (featSum, sumOfSqr, totalFrames)
        group = f[groupName]
        # Bugfix: in Python 3 h5py, keys() returns a view which does not
        # support indexing; materialize it as a list.
        datasetNames = list(group.keys())
        if (len(datasetNames) == 0):
            return (featSum, sumOfSqr, totalFrames)
        featDims = group[datasetNames[0]].shape[NormalizationData.DATASET_FEATURE_DIMENSION_INDEX]
        featSum = np.zeros(featDims, dtype=dtype)
        sumOfSqr = np.zeros(featDims, dtype=dtype)
        for dsName in datasetNames:
            dataset = group[dsName][...]
            featSum += np.sum(dataset, axis=NormalizationData.DATASET_TIME_DIMENSION_INDEX)
            sumOfSqr += np.sum(np.square(dataset), axis=NormalizationData.DATASET_TIME_DIMENSION_INDEX)
            totalFrames += dataset.shape[NormalizationData.DATASET_TIME_DIMENSION_INDEX]
        return (featSum, sumOfSqr, totalFrames)

    @staticmethod
    def _updateTotalSum(totalSum, intermediateSum):
        """Helper method.
        Updates total sum with intermediate sum if the latter is available.

        :type totalSum: numpy.ndarray | None
        :param totalSum: total sum
        :type intermediateSum: numpy.ndarray | None
        :param intermediateSum: intermediate sum
        :rtype: numpy.ndarray | None
        :return: updated total sum if available
        :raises FloatingPointError: if the summation error exceeds
                                    SUMMATION_PRECISION (magnitudes too different)
        """
        if ((totalSum is None) and (intermediateSum is None)):
            return None
        if (totalSum is None):
            return intermediateSum
        if (intermediateSum is None):
            return totalSum
        oldSum = totalSum
        newSum = np.add(totalSum, intermediateSum)
        # Sanity check against catastrophic loss of precision.
        sumErr = np.sum(np.abs(((newSum - oldSum) - intermediateSum)))
        if (sumErr > NormalizationData.SUMMATION_PRECISION):
            raise FloatingPointError('sums have very different orders of magnitude. summation error = {}'.format(sumErr))
        return newSum

    @staticmethod
    def _calculateMeans(totalSum, totalSumOfSqr, totalFrames):
        """Helper method.
        Calculate mean, mean of squares and variance if they are available.

        :type totalSum: numpy.ndarray | None
        :param totalSum: total sum of features
        :type totalSumOfSqr: numpy.ndarray | None
        :param totalSumOfSqr: total sum of squares of features
        :type totalFrames: int
        :param totalFrames: total number of timeframes
        :rtype: tuple (numpy.ndarray | None, numpy.ndarray | None, numpy.ndarray | None)
        :return: tuple (mean, mean of squares, variance) if they are available
        """
        mean = None
        meanOfSquares = None
        variance = None
        if (totalSum is not None):
            assert (totalFrames > 0)
            mean = (totalSum / totalFrames)
        if ((mean is not None) and (totalSumOfSqr is not None)):
            assert (totalFrames > 0)
            meanOfSquares = (totalSumOfSqr / totalFrames)
            # Var[X] = E[X^2] - (E[X])^2
            variance = (meanOfSquares - np.square(mean))
        return (mean, meanOfSquares, variance)

    @staticmethod
    def _writeData(f, groupName, mean, meanOfSqr, variance, totalFrames, dtype=np.float64):
        """Helper method.
        Writes means and variance for a given group.

        :type f: h5py.File
        :param f: handle to an opened HDF file to which data should be written.
        :type groupName: str
        :param groupName: HDF group name
        :type mean: numpy.ndarray | None
        :param mean: mean
        :type meanOfSqr: numpy.ndarray | None
        :param meanOfSqr: mean of squares
        :type variance: numpy.ndarray | None
        :param variance: variance
        :type totalFrames: int
        :param totalFrames: total number of time frames
        :type dtype: numpy.dtype
        :param dtype: type of data to use for writing the data
        """
        if (groupName in f):
            # Replace any previously written normalization data for this group.
            del f[groupName]
        group = f.create_group(groupName)
        dsNames = [NormalizationData.DATASET_MEAN, NormalizationData.DATASET_MEAN_OF_SQUARES, NormalizationData.DATASET_VARIANCE]
        datasets = [mean, meanOfSqr, variance]
        for (name, ds) in zip(dsNames, datasets):
            NormalizationData._writeDataset(group, name, ds, dtype)
        if (totalFrames > 0):
            group.create_dataset(NormalizationData.DATASET_TOTAL_FRAMES, data=totalFrames)

    @staticmethod
    def _writeDataset(group, datasetName, dataset, dtype=np.float64):
        """Helper Method.
        Writes dataset into an HDF group if the dataset is available.

        :type group: h5py.Group
        :param group: HDF group handle
        :type datasetName: str
        :param datasetName: name of the dataset
        :type dataset: numpy.ndarray | None
        :param dataset: actual data of the dataset
        :type dtype: numpy.dtype
        :param dtype: type of data to use for writing the data.
        """
        if (dataset is None):
            return
        group.create_dataset(datasetName, data=dataset, dtype=dtype)

    def __init__(self, normalizationFilePath):
        """Reads normalization data from the given HDF file and saves it
        into the member variables.

        :type normalizationFilePath: str
        :param normalizationFilePath: path to the HDF file with normalization data.
        """
        self._normalizationFilePath = normalizationFilePath
        self._inputMean = None
        self._inputVariance = None
        self._outputMean = None
        self._outputVariance = None
        self._readNormalizationData()

    def _readNormalizationData(self):
        """Reads normalization data from the given HDF file.

        The file may have two groups (GROUP_INPUTS, GROUP_OUTPUTS), each of
        which may have DATASET_MEAN and DATASET_VARIANCE datasets. Everything
        is optional: whatever is present is read, nothing raises if a group or
        dataset is absent. Additional info (e.g. total number of time frames,
        mean of squares) is not read here.

        :raises IOError: if the normalization file does not exist
        """
        if (not os.path.isfile(self._normalizationFilePath)):
            raise IOError((self._normalizationFilePath + ' does not exist'))
        with h5py.File(self._normalizationFilePath, mode='r') as f:
            (self._inputMean, self._inputVariance) = self._getMeanAndVarianceFromGroup(f, self.GROUP_INPUTS)
            (self._outputMean, self._outputVariance) = self._getMeanAndVarianceFromGroup(f, self.GROUP_OUTPUTS)

    @staticmethod
    def _getMeanAndVarianceFromGroup(f, groupName):
        """Reads mean and variance from the given group if they are available.
        Both mean and variance are optional i.e. they may be absent in the
        given HDF group.

        :type f: h5py.File
        :param f: handle to an opened HDF file with normalization data.
        :type groupName: str
        :param groupName: name of the HDF group from which mean and variance
                          should be read.
        :rtype: tuple (numpy.ndarray | None, numpy.ndarray | None)
        :return: a tuple (mean, variance) each of which may be None
                 if the data is not available.
        """
        mean = None
        variance = None
        if (groupName not in f):
            return (mean, variance)
        group = f[groupName]
        if (NormalizationData.DATASET_MEAN in group):
            mean = group[NormalizationData.DATASET_MEAN][...]
        if (NormalizationData.DATASET_VARIANCE in group):
            variance = group[NormalizationData.DATASET_VARIANCE][...]
        return (mean, variance)

    @property
    def inputMean(self):
        """Mean of the input data.

        :rtype: numpy.ndarray | None
        :return: Mean of the input data if it is available or None otherwise.
        """
        return self._inputMean

    @property
    def inputVariance(self):
        """Variance of the input data.

        :rtype: numpy.ndarray | None
        :return: Variance of the input data if it is available or None otherwise.
        """
        return self._inputVariance

    @property
    def outputMean(self):
        """Mean of the output data.

        :rtype: numpy.ndarray | None
        :return: Mean of the output data if it is available or None otherwise.
        """
        return self._outputMean

    @property
    def outputVariance(self):
        """Variance of the output data.

        :rtype: numpy.ndarray | None
        :return: Variance of the output data if it is available or None otherwise.
        """
        return self._outputVariance
|
class NumpyDumpDataset(Dataset):
    """
    For ``tools/dump-dataset.py --type=numpy``.

    Reads seqs from numpy text files ``<prefix><i>.data<postfix>`` (features,
    shape (time, num_inputs)) and ``<prefix><i>.targets<postfix>`` (targets,
    shape (time,)), for consecutive file indices i.
    """

    file_format_data = '%i.data'
    file_format_targets = '%i.targets'

    def __init__(self, prefix, postfix='.txt.gz', start_seq=0, end_seq=None, num_inputs=None, num_outputs=None, **kwargs):
        """
        :param str prefix: filename prefix, prepended to the file format templates
        :param str postfix: filename postfix, e.g. ".txt.gz"
        :param int start_seq: first file index to use
        :param int|None end_seq: end file index (exclusive); auto-detected from existing files if None
        :param int num_inputs: input feature dimension
        :param int num_outputs: number of target classes
        """
        super(NumpyDumpDataset, self).__init__(**kwargs)
        self.file_format_data = ((prefix + self.file_format_data) + postfix)
        self.file_format_targets = ((prefix + self.file_format_targets) + postfix)
        self.start_seq = start_seq
        self._init_num_seqs(end_seq)
        self._seq_index = None  # maps sorted seq_idx -> real file index; set in init_seq_order
        self.cached_seqs = []  # consecutively loaded DatasetSeq objects
        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        assert (num_inputs and num_outputs)

    def _init_num_seqs(self, end_seq=None):
        """
        Scans for existing data/target file pairs and sets self._num_seqs.

        :param int|None end_seq: end file index (exclusive), or None to auto-detect
        """
        last_seq = None
        i = self.start_seq
        while True:
            if ((end_seq is not None) and (i >= end_seq)):
                break
            if (not os.path.exists((self.file_format_data % i))):
                break
            if (not os.path.exists((self.file_format_targets % i))):
                break
            last_seq = i
            i += 1
        if (end_seq is None):
            assert (last_seq is not None), ('None found. Check %s.' % (self.file_format_data % self.start_seq))
            # end_seq is exclusive; last_seq is the last existing file index.
            # Bugfix: was `end_seq = last_seq`, which dropped the final seq
            # (the explicit-end_seq branch below asserts last_seq == end_seq - 1).
            end_seq = last_seq + 1
        else:
            assert (last_seq == (end_seq - 1)), ('Check %s.' % (self.file_format_data % end_seq))
        assert (end_seq > self.start_seq)
        self._num_seqs = (end_seq - self.start_seq)

    def _load_numpy_seq(self, seq_idx):
        """
        Loads one seq from disk and appends it to the cache.

        :param int seq_idx: sorted seq idx
        """
        real_idx = self._seq_index[seq_idx]
        features = numpy.loadtxt((self.file_format_data % real_idx))
        targets = numpy.loadtxt((self.file_format_targets % real_idx))
        assert (features.ndim == 2)
        assert (features.shape[1] == self.num_inputs)
        assert (targets.ndim == 1)
        self._add_cache_seq(seq_idx, features, targets)

    def init_seq_order(self, epoch=None, seq_list=None, seq_order=None):
        """
        :param int|None epoch:
        :param list[str]|None seq_list: not supported here
        :param list[int]|None seq_order: not supported here
        :rtype: bool
        """
        super(NumpyDumpDataset, self).init_seq_order(epoch=epoch, seq_list=seq_list, seq_order=seq_order)
        if ((seq_list is not None) or (seq_order is not None)):
            raise NotImplementedError
        if (self.seq_ordering == 'sorted'):
            # Seq lengths are only known after loading, so sorting is not possible.
            self.seq_ordering = 'default'
        self._seq_index = [(i + self.start_seq) for i in self.get_seq_order_for_epoch(epoch, self.num_seqs)]
        self.cached_seqs[:] = []
        return True

    def _load_seqs(self, start, end):
        """
        :param int start:
        :param int end:
        """
        self._cleanup_old_seq_cache(start)
        for i in range(start, end):
            if (not self._have_cache_seq(i)):
                self._load_numpy_seq(i)

    def get_input_data(self, seq_idx):
        """
        :param int seq_idx:
        :rtype: numpy.ndarray
        """
        return self._get_cache_seq(seq_idx).get_data('data')

    def get_targets(self, target, seq_idx):
        """
        :param str target:
        :param int seq_idx:
        :rtype: numpy.ndarray
        """
        return self._get_cache_seq(seq_idx).get_data(target)

    def get_seq_length(self, seq_idx):
        """
        :param int seq_idx:
        :rtype: Util.NumbersDict
        """
        # Length is only known after loading the seq.
        if (not self._have_cache_seq(seq_idx)):
            self._load_numpy_seq(seq_idx)
        return self._get_cache_seq(seq_idx).num_frames

    @property
    def num_seqs(self):
        """
        :rtype: int
        """
        return self._num_seqs

    def len_info(self):
        """
        :rtype: str
        """
        return ('%s, %i seqs' % (self.__class__.__name__, self.num_seqs))

    def _cleanup_old_seq_cache(self, seq_end):
        # Drop all cached seqs with seq_idx < seq_end from the front of the cache.
        i = 0
        while (i < len(self.cached_seqs)):
            if (self.cached_seqs[i].seq_idx >= seq_end):
                break
            i += 1
        del self.cached_seqs[:i]

    def _get_cache_seq(self, seq_idx, error_not_found=True):
        """
        :param int seq_idx:
        :param bool error_not_found: raise if the seq is not in the cache
        :rtype: DatasetSeq|None
        """
        for data in self.cached_seqs:
            if (data.seq_idx == seq_idx):
                return data
        if error_not_found:
            raise Exception(('seq %i not loaded' % seq_idx))
        else:
            return None

    def _have_cache_seq(self, seq_idx):
        """
        :param int seq_idx:
        :rtype: bool
        """
        return (self._get_cache_seq(seq_idx, error_not_found=False) is not None)

    def _get_cache_last_seq_idx(self):
        """
        :return: seq idx of the last cached seq, or -1 if the cache is empty
        :rtype: int
        """
        if self.cached_seqs:
            return self.cached_seqs[(- 1)].seq_idx
        else:
            return (- 1)

    def _add_cache_seq(self, seq_idx, features, targets):
        # Seqs must be added strictly in order.
        last_seq_idx = self._get_cache_last_seq_idx()
        assert (seq_idx == (last_seq_idx + 1))
        self.cached_seqs += [DatasetSeq(seq_idx, features, targets)]
|
class SprintDatasetBase(Dataset):
"\n In Sprint, we use this object for multiple purposes:\n - Multiple epoch handling via SprintInterface.getSegmentList().\n For this, we get the segment list from Sprint and use the Dataset\n shuffling method.\n - Fill in data which we get via SprintInterface.feedInput*().\n Note that each such input doesn't necessarily correspond to a single\n segment. This depends which type of FeatureExtractor is used in Sprint.\n If we use the BufferedFeatureExtractor in utterance mode, we will get\n one call for every segment and we get also segmentName as parameter.\n Otherwise, we will get batches of fixed size - in that case,\n it doesn't correspond to the segments.\n In any case, we use this data as-is as a new seq.\n Because of that, we cannot really know the number of seqs in advance,\n nor the total number of time frames, etc.\n\n If you want to use this directly in RETURNN, see ExternSprintDataset.\n "
SprintCachedSeqsMax = 200
SprintCachedSeqsMin = 100
def __init__(self, target_maps=None, str_add_final_zero=False, input_stddev=1.0, orth_post_process=None, bpe=None, orth_vocab=None, suppress_load_seqs_print=False, reduce_target_factor=1, **kwargs):
    """
    :param dict[str,str|dict] target_maps: e.g. {"speaker_name": "speaker_map.txt"},
        with "speaker_map.txt" containing a line for each expected speaker.
        The indices will be given by the line index.
        Note that scalar content (e.g. single index) will automatically get a time axis added
        with the length of the audio frames.
    :param bool str_add_final_zero: adds e.g. "orth0" with "\\0"-ending
    :param float input_stddev: if != 1, will divide the input "data" by that
    :param str|list[str]|((str)->str)|None orth_post_process: :func:`get_post_processor_function`, applied on orth
    :param None|dict[str] bpe: if given, will be opts for :class:`BytePairEncoding`
    :param None|dict[str] orth_vocab: if given, orth_vocab is applied to orth
        and orth_classes is an available target
    :param bool suppress_load_seqs_print: less verbose
    :param int reduce_target_factor: downsample factor to allow less targets than features
    """
    super(SprintDatasetBase, self).__init__(**kwargs)
    self.suppress_load_seqs_print = suppress_load_seqs_print
    self.reduce_target_factor = reduce_target_factor
    assert (isinstance(self.reduce_target_factor, int) and (self.reduce_target_factor >= 1))
    if target_maps:
        assert isinstance(target_maps, dict)
        target_maps = target_maps.copy()
        for (key, tmap) in list(target_maps.items()):
            # A str value is interpreted as a filename; load label->index from its lines.
            # NOTE(review): `unicode` is presumably a Py2-compat alias (= str) defined
            # at module level -- confirm, otherwise this raises NameError on Py3.
            if isinstance(tmap, (str, unicode)):
                tmap = {l: i for (i, l) in enumerate(open(tmap).read().splitlines())}
            assert isinstance(tmap, dict)
            target_maps[key] = tmap
    self.target_maps = target_maps
    self.str_add_final_zero = str_add_final_zero
    self.input_stddev = input_stddev
    # "orth" labels are raw byte values.
    # NOTE(review): range(255) yields 255 labels (0..254) while set_dimensions()
    # declares dim 256 for "orth" -- looks historic, confirm intent.
    self.labels['orth'] = [chr(i) for i in range(255)]
    self.orth_post_process = None
    if orth_post_process:
        if callable(orth_post_process):
            self.orth_post_process = orth_post_process
        else:
            from .lm import get_post_processor_function
            self.orth_post_process = get_post_processor_function(orth_post_process)
    self.bpe = None
    if bpe:
        from returnn.datasets.util.vocabulary import Vocabulary, BytePairEncoding
        if isinstance(bpe, dict):
            self.bpe = BytePairEncoding(**bpe)
        else:
            assert isinstance(bpe, Vocabulary)
            self.bpe = bpe
        self.labels['bpe'] = self.bpe.labels
    self.orth_vocab = None
    if orth_vocab:
        from returnn.datasets.util.vocabulary import Vocabulary
        if isinstance(orth_vocab, dict):
            self.orth_vocab = Vocabulary.create_vocab(**orth_vocab)
        else:
            assert isinstance(orth_vocab, Vocabulary)
            self.orth_vocab = orth_vocab
        self.labels['orth_classes'] = self.orth_vocab.labels
    # Lock + condition var protecting the seq cache shared between the Sprint
    # feeding thread and the RETURNN train thread.
    self.lock = RLock()
    self.cond = Condition(lock=self.lock)
    # Thread id recorded at construction; _wait_for_seq asserts the waiting
    # thread is not this (data-adding) thread.
    self.add_data_thread_id = thread.get_ident()
    self.ready_for_data = False
    self.reached_final_seq = False
    self.reached_final_seq_seen_all = False
    self.multiple_epochs = False
    self._complete_frac = None
    self.sprint_epoch = None
    self.returnn_epoch = None
    self.predefined_seq_list_order = None
    self.sprint_finalized = False
    self._target_black_list = []
    self._reset_cache()
    assert (self.shuffle_frames_of_nseqs == 0)
def use_multiple_epochs(self):
    """
    Called via SprintInterface.getSegmentList().
    Marks that this instance is reused over multiple epochs
    (see set_dimensions, which otherwise inits a single Sprint epoch).
    """
    self.multiple_epochs = True
def set_dimensions(self, input_dim, output_dim):
    """
    :type input_dim: int
    :type output_dim: int

    Called via python_train.
    Declares num_inputs / num_outputs (dim, ndim) entries for all provided targets.
    """
    assert (input_dim > 0)
    self.num_inputs = input_dim
    # "data" is dense (ndim 2), scaled by the window size.
    self.num_outputs = {'data': ((input_dim * self.window), 2)}
    if (output_dim > 0):
        self.num_outputs['classes'] = (output_dim, 1)
    if self.bpe:
        self.num_outputs['bpe'] = (self.bpe.num_labels, 1)
    if self.orth_vocab:
        self.num_outputs['orth_classes'] = (self.orth_vocab.num_labels, 1)
    # Raw orthography as byte values.
    self.num_outputs['orth'] = (256, 1)
    if (self.target_maps and ('speaker_name' in self.target_maps)):
        self.num_outputs['speaker_name'] = (len(self.target_maps['speaker_name']), 1)
    else:
        self.num_outputs['speaker_name'] = (256, 1)
    # _base_init is defined in the base Dataset class; it can only run
    # once the dims are known.
    self._base_init()
    if (not self.multiple_epochs):
        # Without multiple-epoch handling, Sprint will not call
        # init_sprint_epoch itself, so do it here.
        self.init_sprint_epoch(None)
def _reset_cache(self):
    """Drop all cached seqs, reset all bookkeeping counters/flags,
    and mark the dataset as ready to receive data again."""
    self.added_data = []
    self._num_timesteps = 0
    self.expected_load_seq_start = 0
    self.requested_load_seq_end = 0
    self.next_seq_to_be_added = 0
    self.reached_final_seq = False
    self.reached_final_seq_seen_all = False
    self.ready_for_data = True
def init_sprint_epoch(self, epoch):
    """
    :type epoch: int | None

    Called by SprintInterface.getSegmentList() when we start a new epoch.
    We must not call this via self.init_seq_order() because we will already have filled the cache by Sprint
    before the RETURNN train thread starts the epoch.
    """
    with self.lock:
        self.sprint_epoch = epoch
        self.sprint_finalized = False
        self._reset_cache()
        # Wake up any thread blocked on the condition (e.g. in _wait_for_seq).
        self.cond.notify_all()
def finalize_sprint(self):
    """
    Called when SprintInterface.getSegmentList() ends.
    Marks that Sprint will not provide any further data.
    """
    with self.lock:
        self.sprint_finalized = True
        self.cond.notify_all()
def init_seq_order(self, epoch=None, seq_list=None, seq_order=None):
    """
    Called by RETURNN train thread when we enter a new epoch.

    :param int|None epoch:
    :param list[str]|None seq_list: predefined order via seq tags
    :param list[int]|None seq_order: predefined order via indices; not supported here
    :rtype: bool
    """
    if (seq_order is not None):
        raise NotImplementedError('Predefined sequence order via indices in SprintDataset.')
    super(SprintDatasetBase, self).init_seq_order(epoch=epoch, seq_list=seq_list, seq_order=seq_order)
    if self.orth_vocab:
        self.orth_vocab.set_random_seed(self._get_random_seed_for_epoch(epoch=epoch))
    with self.lock:
        self.returnn_epoch = epoch
        self.predefined_seq_list_order = seq_list
        # Wake up e.g. wait_for_returnn_epoch() on the Sprint side.
        self.cond.notify_all()
    return True
def _cleanup_old_seq_cache(self, seq_end):
    """Drop all cached seqs with seq_idx below *seq_end* from the front of added_data."""
    cutoff = len(self.added_data)
    for pos, seq in enumerate(self.added_data):
        if seq.seq_idx >= seq_end:
            cutoff = pos
            break
    del self.added_data[:cutoff]
def wait_for_returnn_epoch(self, epoch):
    """
    Called by SprintInterface.
    Blocks until the RETURNN train thread has entered the given epoch
    (set by init_seq_order, which notifies the condition).

    :param int epoch:
    """
    with self.lock:
        while (epoch != self.returnn_epoch):
            # Sprint must never be ahead of RETURNN here.
            assert (epoch > self.returnn_epoch)
            self.cond.wait()
def _wait_for_seq_can_pass_check(self, seq_start, seq_end):
'\n :param int seq_start:\n :param int seq_end:\n :return: True if _waitForSeq can pass/return. False means that we need to wait more (until next signal)\n :rtype: bool\n '
if self.reached_final_seq:
return True
if self._have_seqs_added(seq_start, seq_end):
return True
return False
    def _wait_for_seq(self, seq_start, seq_end=None):
        """
        Called by the RETURNN train thread (with self.lock held; all call sites in this class do).
        Wait until we have seqs [seq_start,..,seq_end-1] loaded,
        or we know that they will not be loaded anymore because we reached the end.

        :param int seq_start:
        :param int|None seq_end: defaults to seq_start + 1
        """
        if (seq_end is None):
            seq_end = (seq_start + 1)
        if (seq_end > self.requested_load_seq_end):
            self.requested_load_seq_end = seq_end
            # Signal the Sprint thread (add_new_data) that more seqs are requested.
            self.cond.notify_all()
        if self._wait_for_seq_can_pass_check(seq_start=seq_start, seq_end=seq_end):
            return
        # The data-producing thread itself must never wait here, or we would deadlock.
        assert (thread.get_ident() != self.add_data_thread_id)
        if (not self.suppress_load_seqs_print):
            print(('%s %s: wait for seqs (%i,%i) (last added: %s) (current time: %s)' % (self, current_thread().name, seq_start, seq_end, self._latest_added_seq(), time.strftime('%H:%M:%S'))), file=log.v5)
        while (not self._wait_for_seq_can_pass_check(seq_start=seq_start, seq_end=seq_end)):
            self.cond.wait()
def _latest_added_seq(self):
if (not self.added_data):
return None
return self.added_data[(- 1)].seq_idx
def _have_seqs_added(self, start, end=None):
if (end is None):
end = (start + 1)
if (start >= end):
return True
for data in self.added_data:
assert (start >= data.seq_idx), ('%s: We expect that we only ask about the cache of the upcoming seqs.' % self)
if (data.seq_idx == start):
start += 1
if (start >= end):
return True
return False
def _get_seq(self, seq_idx):
'\n :param int seq_idx:\n :rtype: DatasetSeq\n '
for data in self.added_data:
if (data.seq_idx == seq_idx):
return data
return None
def is_cached(self, start, end):
'\n :param int start:\n :param int end:\n :rtype: bool\n '
return False
    def load_seqs(self, start, end):
        """
        Called by the RETURNN train thread.

        :param int start:
        :param int end:
        """
        if (start == end):
            return
        if (not self.suppress_load_seqs_print):
            print(('%s load_seqs in %s:' % (self, current_thread().name)), start, end, end=' ', file=log.v5)
        with self.lock:
            # The base class will call our _load_seqs(), which waits for the seqs to arrive.
            super(SprintDatasetBase, self).load_seqs(start, end)
            if (not self.suppress_load_seqs_print):
                print('first features shape:', self._get_seq(start).features['data'].shape, file=log.v5)
    def _load_seqs(self, start, end):
        """
        Called by the RETURNN train thread (via load_seqs, with self.lock held).
        We expect that start increase monotonic on each call
        for not-yet-loaded data.
        This will already be called with _load_seqs_superset indices.

        :param int start:
        :param int end:
        """
        assert (start >= self.expected_load_seq_start)
        if (start > self.expected_load_seq_start):
            # Cleanup seqs from the cache which are not needed anymore.
            self._cleanup_old_seq_cache(start)
            self.expected_load_seq_start = start
            # Wake up add_new_data(), which may be blocked on a full cache.
            self.cond.notify_all()
        self._wait_for_seq(start, end)
def add_new_data(self, features, targets=None, segment_name=None):
'\n Adds a new seq.\n This is called via the Sprint main thread.\n\n :param numpy.ndarray features: format (input-feature,time) (via Sprint)\n :param dict[str,numpy.ndarray|str] targets: format (time) (idx of output-feature)\n :param str|None segment_name:\n :returns the sorted seq index\n :rtype: int\n '
assert (self.num_inputs == features.shape[0])
num_frames = features.shape[1]
reduce_num_frames = int((((num_frames + self.reduce_target_factor) - 1) / self.reduce_target_factor))
features = features.transpose()
assert (features.shape == (num_frames, self.num_inputs))
if (self.input_stddev != 1):
features /= self.input_stddev
if (self.window > 1):
features = self._sliding_window(features)
assert (features.shape == (num_frames, (self.num_inputs * self.window)))
if (targets is None):
targets = {}
if (not isinstance(targets, dict)):
targets = {'classes': targets}
if ('classes' in targets):
assert (targets['classes'].shape == (reduce_num_frames,)), ('Number of targets %s does not match number of features %s (reduce factor %d)' % (targets['classes'].shape, (num_frames,), self.reduce_target_factor))
if ('speaker_name' in targets):
targets['speaker_name'] = targets['speaker_name'].decode('utf8').strip()
if ('orth' in targets):
targets['orth'] = targets['orth'].decode('utf8').strip()
if (('orth' in targets) and self.orth_post_process):
targets['orth'] = self.orth_post_process(targets['orth'])
if self.bpe:
assert ('orth' in targets)
orth = targets['orth']
assert isinstance(orth, (str, unicode))
assert ('bpe' not in targets)
targets['bpe'] = numpy.array(self.bpe.get_seq(orth), dtype='int32')
if self.orth_vocab:
assert ('orth' in targets)
orth = targets['orth']
assert isinstance(orth, (str, unicode))
assert ('orth_classes' not in targets)
targets['orth_classes'] = numpy.array(self.orth_vocab.get_seq(orth), dtype='int32')
if self.target_maps:
for (key, target_map) in self.target_maps.items():
assert (key in targets)
v = target_map[targets[key]]
v = numpy.asarray(v)
if (v.ndim == 0):
v = (numpy.zeros((num_frames,), dtype=v.dtype) + v)
targets[key] = v
for key in self._target_black_list:
if (key in targets):
del targets[key]
for (key, v) in sorted(list(targets.items())):
if isinstance(v, numpy.ndarray):
continue
if isinstance(v, unicode):
v = v.encode('utf8')
if isinstance(v, (str, bytes)):
if PY3:
assert isinstance(v, bytes)
v = list(v)
else:
v = list(map(ord, v))
v = numpy.array(v, dtype='uint8')
targets[key] = v
if self.str_add_final_zero:
v = numpy.append(v, numpy.array([0], dtype=v.dtype))
assert ((key + '0') not in targets)
targets[(key + '0')] = v
continue
print(('%s, we will ignore the target %r because it is not a numpy array: %r' % (self, key, v)), file=log.v3)
self._target_black_list += [key]
del targets[key]
with self.lock:
assert self.ready_for_data
assert (not self.reached_final_seq)
assert (not self.sprint_finalized)
seq_idx = self.next_seq_to_be_added
if self.predefined_seq_list_order:
assert (seq_idx < len(self.predefined_seq_list_order)), ('seq_idx %i, expected predef num seqs %i' % (seq_idx, len(self.predefined_seq_list_order)))
expected_seq_name = self.predefined_seq_list_order[seq_idx]
if (expected_seq_name != segment_name):
if (segment_name in self.predefined_seq_list_order):
raise Exception(('seq_idx %i expected to be tag %r but got tag %r; tag %r is at idx %i' % (seq_idx, expected_seq_name, segment_name, segment_name, self.predefined_seq_list_order.index(segment_name))))
raise Exception(('seq_idx %i expected to be tag %r but got tag %r; tag %r not found' % (seq_idx, expected_seq_name, segment_name, segment_name)))
self.next_seq_to_be_added += 1
self._num_timesteps += num_frames
self.cond.notify_all()
if (seq_idx > ((self.requested_load_seq_end - 1) + self.SprintCachedSeqsMax)):
print(('%s add_new_data: seq=%i, len=%i. Cache filled, waiting to get loaded...' % (self, seq_idx, num_frames)), file=log.v5)
while (seq_idx > ((self.requested_load_seq_end - 1) + self.SprintCachedSeqsMin)):
assert (not self.reached_final_seq)
assert ((seq_idx + 1) == self.next_seq_to_be_added)
self.cond.wait()
self.added_data += [DatasetSeq(seq_idx, features, targets, seq_tag=segment_name)]
self.cond.notify_all()
return seq_idx
    def finish_sprint_epoch(self, seen_all=True):
        """
        Called by SprintInterface.getSegmentList().
        This is in a state where Sprint asks for the next segment after we just finished an epoch.
        Thus, any upcoming self.add_new_data() call will contain data from a segment in the new epoch.
        Thus, we finish the current epoch in Sprint.

        :param bool seen_all: whether the whole corpus was seen (False e.g. on abort/error)
        """
        with self.lock:
            self.reached_final_seq = True
            self.reached_final_seq_seen_all = seen_all
            self.ready_for_data = False
            self.cond.notify_all()
def _shuffle_frames_in_seqs(self, start, end):
assert False, 'Shuffling in SprintDataset only via Sprint at the moment.'
    def get_num_timesteps(self):
        """
        :return: total number of frames in the (finished) epoch
        :rtype: int
        """
        with self.lock:
            # Only valid once the epoch is complete; accumulated in add_new_data().
            assert self.reached_final_seq
            return self._num_timesteps
    @property
    def num_seqs(self):
        """
        :rtype: int
        """
        with self.lock:
            if self.predefined_seq_list_order:
                return len(self.predefined_seq_list_order)
            if (not self.reached_final_seq):
                # We cannot know yet how many seqs Sprint will deliver.
                raise NotImplementedError
            return self.next_seq_to_be_added
    def have_seqs(self):
        """
        :return: whether this dataset will deliver at least one seq
        :rtype: bool
        """
        with self.lock:
            if (self.next_seq_to_be_added > 0):
                return True
            # Wait until the first seq arrived, or we know there is none.
            self._wait_for_seq(0)
            return (self.next_seq_to_be_added > 0)
    def is_less_than_num_seqs(self, n):
        """
        :param int n:
        :rtype: bool
        """
        with self.lock:
            # Wait until seq n either arrived or we know the epoch ended before it.
            self._wait_for_seq(n)
            return (n < self.next_seq_to_be_added)
    def get_data_keys(self):
        """
        :rtype: list[str]
        """
        with self.lock:
            if (not self.added_data):
                # We need at least one seq in the cache to introspect the available keys.
                self._wait_for_seq(0)
            assert self.added_data
            return sorted(self.added_data[0].features.keys())
def get_target_list(self):
'\n :rtype: list[str]\n '
keys = list(self.get_data_keys())
if ('data' in keys):
keys.remove('data')
return keys
def set_complete_frac(self, frac):
'\n :param float frac:\n '
self._complete_frac = frac
    def get_complete_frac(self, seq_idx):
        """
        :param int seq_idx:
        :return: fraction of the epoch done after seq_idx, in [0, 1]
        :rtype: float
        """
        with self.lock:
            if self.predefined_seq_list_order:
                return (float((seq_idx + 1)) / len(self.predefined_seq_list_order))
            elif (self._complete_frac is not None):
                if (not self.next_seq_to_be_added):
                    return self._complete_frac
                else:
                    # Interpolate based on the fraction reported so far (via set_complete_frac).
                    return ((self._complete_frac * float((seq_idx + 1))) / self.next_seq_to_be_added)
            else:
                return super(SprintDatasetBase, self).get_complete_frac(seq_idx)
    def get_seq_length(self, sorted_seq_idx):
        """
        :param int sorted_seq_idx:
        :rtype: Util.NumbersDict
        """
        with self.lock:
            # Blocks until the seq is in the cache (or the epoch ended).
            self._wait_for_seq(sorted_seq_idx)
            return self._get_seq(sorted_seq_idx).num_frames
    def get_data(self, seq_idx, key):
        """
        :param int seq_idx:
        :param str key:
        :rtype: numpy.ndarray
        """
        with self.lock:
            # Blocks until the seq is in the cache (or the epoch ended).
            self._wait_for_seq(seq_idx)
            return self._get_seq(seq_idx).features[key]
    def get_input_data(self, sorted_seq_idx):
        """
        :param int sorted_seq_idx:
        :rtype: numpy.ndarray
        """
        with self.lock:
            self._wait_for_seq(sorted_seq_idx)
            return self._get_seq(sorted_seq_idx).features['data']
    def get_targets(self, target, sorted_seq_idx):
        """
        :param str target:
        :param int sorted_seq_idx:
        :rtype: numpy.ndarray|None
        """
        with self.lock:
            self._wait_for_seq(sorted_seq_idx)
            # None if this target does not exist for the seq.
            return self._get_seq(sorted_seq_idx).features.get(target, None)
    def get_tag(self, sorted_seq_idx):
        """
        :param int sorted_seq_idx:
        :rtype: str
        """
        with self.lock:
            self._wait_for_seq(sorted_seq_idx)
            return self._get_seq(sorted_seq_idx).seq_tag
|
class ExternSprintDataset(SprintDatasetBase):
    """
    This is a Dataset which you can use directly in RETURNN.
    You can use it to get any type of data from Sprint (RWTH ASR toolkit),
    e.g. you can use Sprint to do feature extraction and preprocessing.

    This class is like SprintDatasetBase, except that we will start an external Sprint instance ourselves
    which will forward the data to us over a pipe.
    The Sprint subprocess will use SprintExternInterface to communicate with us.
    """

    # "partitionEpoch" is only a deprecated constructor alias (see __init__);
    # exclude it from the pickling/recreation args.
    _getnewargs_exclude_attrs = CachedDataset2._getnewargs_exclude_attrs.union(('partitionEpoch',))

    def __init__(self, sprintTrainerExecPath, sprintConfigStr, partitionEpoch=None, **kwargs):
        """
        :param str|list[str] sprintTrainerExecPath:
        :param str | list[str] | ()->str | list[()->str] | ()->list[str] | ()->list[()->str] sprintConfigStr:
            via eval_shell_str
        :param int|None partitionEpoch: deprecated. use partition_epoch instead
        """
        super(ExternSprintDataset, self).__init__(**kwargs)
        self.add_data_thread_id = None
        self.sprint_trainer_exec_path = sprintTrainerExecPath
        self.sprint_config = sprintConfigStr
        if partitionEpoch:
            assert (self.partition_epoch == 1), "don't provide partitionEpoch and partition_epoch"
            self.partition_epoch = partitionEpoch
        self._num_seqs = None
        self.child_pid = None  # PID of the forked Sprint subprocess, or None
        self.parent_pid = os.getpid()
        self.reader_thread = None  # thread which reads data from the child pipe
        self.seq_list_file = None  # temp file with a predefined seq order/filter for Sprint
        self.use_multiple_epochs()
        self.python_exit = False
        atexit.register(self._exit_handler)
        # Start Sprint once only to get the input/output dims; the child is stopped again directly.
        self._start_child(epoch=None, get_dim_only=True)

    @property
    def _sprintTrainerExecPath(self):
        # Old camelCase attribute name; presumably kept for backward compatibility.
        return self.sprint_trainer_exec_path

    @property
    def _sprintConfigStr(self):
        # Old camelCase attribute name; presumably kept for backward compatibility.
        return self.sprint_config

    @property
    def _partitionEpoch(self):
        # Old camelCase attribute name; presumably kept for backward compatibility.
        return self.partition_epoch

    def finish_epoch(self):
        """
        Called at the end of the epoch.
        """
        with self.lock:
            # Reset the epoch state; this also lets the reader thread stop at the next occasion.
            super(ExternSprintDataset, self).init_seq_order(epoch=None, seq_list=None)
        self._exit_child(wait_thread=True)
        super(ExternSprintDataset, self).finish_epoch()

    def _exit_handler(self):
        """
        Called at exit (atexit). Must only run in the parent process.
        """
        assert (os.getpid() == self.parent_pid)
        self.python_exit = True
        self._exit_child(wait_thread=False)

    def _exit_child(self, wait_thread=True):
        """
        Stop the Sprint child process (and optionally the reader thread) and close the pipes.

        :param bool wait_thread: join the reader thread
        """
        if self.child_pid:
            # We expect a clean exit (status 0) only if we let the child finish normally.
            expected_exit_status = (0 if (wait_thread and (not self.python_exit)) else None)
            if (self._join_child(wait=False, expected_exit_status=expected_exit_status) is False):
                # Child has not terminated yet.
                interrupt = ((not self.reached_final_seq_seen_all) or (not wait_thread))
                if interrupt:
                    print(('%s: interrupt child proc %s' % (self, self.child_pid)), file=log.v5)
                    os.kill(self.child_pid, signal.SIGKILL)
                    self._join_child(wait=True, expected_exit_status=None)
                    self.child_pid = None
            else:
                # Child already terminated.
                self.child_pid = None
        if (wait_thread and self.reader_thread):
            # Drain remaining seqs so the reader thread can reach its end and be joined.
            while self.is_less_than_num_seqs((self.expected_load_seq_start + 1)):
                if self.reached_final_seq:
                    break
                self.load_seqs((self.expected_load_seq_start + 1), (self.expected_load_seq_start + 2))
            self.reader_thread.join()
            self.reader_thread = None
        try:
            self.pipe_p2c[1].close()
        except IOError:
            pass
        try:
            self.pipe_c2p[0].close()
        except IOError:
            pass
        if self.child_pid:
            self._join_child(wait=True, expected_exit_status=expected_exit_status)
            self.child_pid = None

    def _start_child(self, epoch, get_dim_only=False):
        """
        Fork and exec the Sprint subprocess, read its init message,
        and (unless get_dim_only) start the reader thread.

        :param int|None epoch:
        :param bool get_dim_only: stop the child again after we got the input/output dims
        """
        assert (self.child_pid is None)
        assert (self.reader_thread is None)
        self.pipe_c2p = self._pipe_open()  # child-to-parent
        self.pipe_p2c = self._pipe_open()  # parent-to-child
        args = self._build_sprint_args()
        print(('%s: epoch' % self), epoch, 'exec', args, file=log.v5)
        pid = os.fork()
        if (pid == 0):
            # We are the child process: restore the original std streams and exec Sprint.
            sys.stdin = sys.__stdin__
            sys.stdout = sys.__stdout__
            sys.stderr = sys.__stderr__
            from returnn.util import better_exchook
            better_exchook.install()
            try:
                sys.stdin.close()
                # Keep only our ends of the pipes open.
                self.pipe_c2p[0].close()
                self.pipe_p2c[1].close()
                close_all_fds_except([0, 1, 2, self.pipe_c2p[1].fileno(), self.pipe_p2c[0].fileno()])
                os.execv(args[0], args)
                print(('%s child exec failed.' % self))
            except BaseException:
                print(('%s child: Error when starting Sprint %r.' % (self, args)))
                sys.excepthook(*sys.exc_info())
            finally:
                print(('%s child: exit' % self))
                os._exit(1)
                return  # Not reached.
        # We are the parent process: close the child's pipe ends.
        self.pipe_c2p[1].close()
        self.pipe_p2c[0].close()
        self.child_pid = pid
        try:
            # The child sends its dataset dimensions as the very first message.
            (init_signal, (input_dim, output_dim, num_segments)) = self._read_next_raw()
            assert (init_signal == b'init')
            assert (isinstance(input_dim, int) and isinstance(output_dim, int))
            self.set_dimensions(input_dim, output_dim)
        except Exception:
            print(('%s: Sprint child process (%r) caused an exception.' % (self, args)), file=log.v1)
            sys.excepthook(*sys.exc_info())
            self._exit_child(wait_thread=False)
            raise Exception(('%s Sprint init failed' % self))
        if get_dim_only:
            self._exit_child(wait_thread=False)
        else:
            self.reader_thread = Thread(target=self._reader_thread_proc, args=(pid, epoch), name=('%s reader thread' % self))
            self.reader_thread.daemon = True
            self.reader_thread.start()

    def _pipe_open(self):
        """
        :return: (read end, write end) of a new pipe, as unbuffered binary file objects
        """
        (readend, writeend) = os.pipe()
        if hasattr(os, 'set_inheritable'):
            # Py >=3.4: FDs are non-inheritable by default, but the child must inherit its pipe ends.
            os.set_inheritable(readend, True)
            os.set_inheritable(writeend, True)
        readend = os.fdopen(readend, 'rb', 0)
        writeend = os.fdopen(writeend, 'wb', 0)
        return (readend, writeend)

    @property
    def _my_python_mod_path(self):
        """
        :return: RETURNN root dir, used for the Sprint pymod-path options
        :rtype: str
        """
        from returnn import __root_dir__
        return __root_dir__

    def _build_sprint_args(self):
        """
        :return: the full command line (argv) for the Sprint subprocess
        :rtype: list[str]
        """
        config_str = ('action:ExternSprintDataset,c2p_fd:%i,p2c_fd:%i' % (self.pipe_c2p[1].fileno(), self.pipe_p2c[0].fileno()))
        if task_system.SharedMemNumpyConfig['enabled']:
            config_str += ',EnableAutoNumpySharedMemPickling:True'
        epoch = (self.returnn_epoch or 1)
        assert (epoch >= 1)
        if isinstance(self.sprint_trainer_exec_path, (list, tuple)):
            args = list(self.sprint_trainer_exec_path)
        else:
            args = [self.sprint_trainer_exec_path]
        args += eval_shell_str(self.sprint_config)
        args += [('--*.seed=%i' % (self._get_random_seed_for_epoch(epoch=epoch) - 1))]
        if (self.partition_epoch > 1):
            # Let Sprint itself do the corpus partitioning per (sub)epoch.
            args += [('--*.corpus.partition=%i' % self.partition_epoch), ('--*.corpus.select-partition=%i' % ((epoch - 1) % self.partition_epoch))]
        args += ['--*.python-segment-order=true', ('--*.python-segment-order-pymod-path=%s' % self._my_python_mod_path), '--*.python-segment-order-pymod-name=returnn.sprint.extern_interface', '--*.use-data-source=false', '--*.trainer=python-trainer', ('--*.pymod-path=%s' % self._my_python_mod_path), '--*.pymod-name=returnn.sprint.extern_interface', ('--*.pymod-config=%s' % config_str)]
        if self.predefined_seq_list_order:
            import tempfile
            # NOTE(review): tempfile.mktemp is deprecated/race-prone; consider NamedTemporaryFile(delete=False).
            self.seq_list_file = tempfile.mktemp(prefix='returnn-sprint-predefined-seq-list')
            with open(self.seq_list_file, 'w') as f:
                for tag in self.predefined_seq_list_order:
                    f.write(tag)
                    f.write('\n')
                f.close()
            args += ['--*.corpus.segment-order-shuffle=false', ('--*.corpus.segments.file=%s' % self.seq_list_file), ('--*.corpus.segment-order=%s' % self.seq_list_file)]
        if (self.seq_tags_filter is not None):
            assert (not self.predefined_seq_list_order)
            import tempfile
            # NOTE(review): same mktemp caveat as above.
            self.seq_list_file = tempfile.mktemp(prefix='returnn-sprint-predefined-seq-filter')
            with open(self.seq_list_file, 'w') as f:
                for tag in self.seq_tags_filter:
                    f.write(tag)
                    f.write('\n')
                f.close()
            args += [('--*.corpus.segments.file=%s' % self.seq_list_file)]
        return args

    def _read_next_raw(self):
        """
        Read and unpickle the next message from the child pipe.

        :return: (data_type, args)
        :rtype: (str, object)
        """
        (data_type, args) = util.read_pickled_object(self.pipe_c2p[0], encoding='bytes')
        return (data_type, args)

    def _join_child(self, wait=True, expected_exit_status=None):
        """
        :param bool wait:
        :param int|None expected_exit_status: assert this exit status if given
        :return: whether the child has exited now
        :rtype: bool
        """
        assert self.child_pid
        options = (0 if wait else os.WNOHANG)
        (pid, exit_status) = os.waitpid(self.child_pid, options)
        if ((not wait) and (pid == 0)):
            # Child is still running.
            return False
        assert (pid == self.child_pid)
        if (expected_exit_status is not None):
            assert (exit_status == expected_exit_status), ('%s: Sprint exit code is %i' % (self, exit_status))
        return True

    def _reader_thread_proc(self, child_pid, epoch):
        """
        Runs in self.reader_thread: reads messages from the Sprint child pipe
        and feeds the seqs into this dataset via add_new_data().

        :param int child_pid:
        :param int epoch:
        """
        try:
            self.add_data_thread_id = thread.get_ident()
            self.init_sprint_epoch(epoch)
            have_seen_the_whole = False
            seq_count = 0
            while ((not self.python_exit) and self.child_pid):
                try:
                    (data_type, args) = self._read_next_raw()
                except (IOError, EOFError):
                    # Pipe broke. Only an error if we did not expect the child to go away.
                    with self.lock:
                        if (epoch != self.returnn_epoch):
                            break
                        if (self.python_exit or (not self.child_pid)):
                            break
                    raise
                with self.lock:
                    if (epoch != self.returnn_epoch):
                        break
                    if (self.python_exit or (not self.child_pid)):
                        break
                if (data_type == b'data'):
                    seq_count += 1
                    (segment_name, features, targets) = args
                    if (segment_name is not None):
                        segment_name = segment_name.decode('utf8')
                    assert isinstance(features, numpy.ndarray)
                    if isinstance(targets, dict):
                        targets = {key.decode('utf8'): value for (key, value) in targets.items()}
                    # add_new_data() blocks when the cache is full (back-pressure on the pipe).
                    self.add_new_data(numpy_copy_and_set_unused(features), numpy_copy_and_set_unused(targets), segment_name=segment_name)
                elif (data_type == b'exit'):
                    have_seen_the_whole = True
                    break
                else:
                    assert False, ('not handled: (%r, %r)' % (data_type, args))
            if self.seq_list_file:
                # Cleanup the temp seq-list file created in _build_sprint_args().
                try:
                    os.remove(self.seq_list_file)
                except Exception as e:
                    print(('%s: error when removing %r: %r' % (self, self.seq_list_file, e)), file=log.v5)
                finally:
                    self.seq_list_file = None
            if (not self.python_exit):
                with self.lock:
                    self.finish_sprint_epoch(seen_all=have_seen_the_whole)
                    if have_seen_the_whole:
                        self._num_seqs = self.next_seq_to_be_added
            print(('%s (proc %i) finished reading epoch %i, seen all %r (finished), num seqs %i' % (self, child_pid, epoch, have_seen_the_whole, seq_count)), file=log.v5)
        except Exception as exc:
            if (not self.python_exit):
                if (epoch == self.returnn_epoch):
                    with self.lock:
                        self.finish_sprint_epoch(seen_all=False)
                try:
                    print(('%s reader failed (%s %s)' % (self, type(exc), exc)), file=log.v1)
                    sys.excepthook(*sys.exc_info())
                    print('')
                finally:
                    # A reader failure is fatal; notify the main thread via KeyboardInterrupt.
                    interrupt_main()
                raise

    def init_seq_order(self, epoch=None, seq_list=None, seq_order=None):
        """
        :param int epoch:
        :param list[str]|None seq_list:
        :param list[int]|None seq_order: not supported here
        :rtype: bool
        """
        if (seq_order is not None):
            raise NotImplementedError('Predefined sequence order via indices in ExternSprintDataset.')
        if (seq_list is not None):
            assert (self.partition_epoch == 1), 'specifying partition_epoch and using seq_list not supported'
        if (epoch is None):
            epoch = 1
        with self.lock:
            if ((epoch == self.returnn_epoch) and (self.expected_load_seq_start == 0) and (seq_list == self.predefined_seq_list_order)):
                # Nothing to do, we are already in that state.
                return
        # First reset the epoch state so the reader thread can end, then stop the child.
        super(ExternSprintDataset, self).init_seq_order(epoch=None, seq_list=None, seq_order=None)
        self._exit_child(wait_thread=True)
        with self.lock:
            if self._num_seqs:
                # Remember the last known num seqs as an estimate for the new epoch.
                self._estimated_num_seqs = self._num_seqs
                self._num_seqs = None
            super(ExternSprintDataset, self).init_seq_order(epoch=epoch, seq_list=seq_list, seq_order=seq_order)
            self._start_child(epoch)
        return True
|
class SprintCacheDataset(CachedDataset2):
    """
    Can directly read Sprint cache files (and bundle files).
    Supports both cached features and cached alignments.
    For alignments, you need to provide all options for the AllophoneLabeling class, such as allophone file, etc.
    """

    class SprintCacheReader(object):
        """
        Helper class to read a Sprint cache directly.
        """

        def __init__(self, data_key, filename, data_type=None, allophone_labeling=None):
            """
            :param str data_key: e.g. "data" or "classes"
            :param str filename: to Sprint cache archive
            :param str|None data_type: "feat" or "align"
            :param dict[str] allophone_labeling: kwargs for :class:`AllophoneLabeling`
            """
            self.data_key = data_key
            from returnn.sprint.cache import open_file_archive
            self.sprint_cache = open_file_archive(filename)
            if (not data_type):
                # Guess the data type from the data key / given options.
                if (data_key == 'data'):
                    data_type = 'feat'
                elif (data_key == 'classes'):
                    data_type = 'align'
                elif allophone_labeling:
                    data_type = 'align'
                else:
                    data_type = 'feat'
            assert (data_type in ['feat', 'align'])
            self.type = data_type
            self.allophone_labeling = None
            if allophone_labeling:
                from returnn.sprint.cache import AllophoneLabeling
                self.allophone_labeling = AllophoneLabeling(**allophone_labeling)
                self.sprint_cache.set_allophones(self.allophone_labeling.allophone_file)
            else:
                assert (data_type != 'align'), "need allophone_labeling for 'align' type"
            # All archive entries except the .attribs meta files; these are the seq tags.
            self.content_keys = [fn for fn in self.sprint_cache.file_list() if (not fn.endswith('.attribs'))]
            if (data_type == 'align'):
                self.num_labels = self.allophone_labeling.num_labels
                # Use the smallest int dtype which can hold all label indices.
                if (self.num_labels < (2 ** 7)):
                    self.dtype = 'int8'
                elif (self.num_labels < (2 ** 15)):
                    self.dtype = 'int16'
                else:
                    assert (self.num_labels < (2 ** 31))
                    self.dtype = 'int32'
                self.num_dims = 1
                if self.allophone_labeling.state_tying_by_allo_state_idx:
                    # Raw allophone-state indices get mapped via the state tying in read().
                    self.type = 'align_raw'
            elif (data_type == 'feat'):
                self.num_labels = self._get_feature_dim()
                self.num_dims = 2
                self.dtype = 'float32'
            else:
                assert False

        def _get_feature_dim(self):
            """
            :return: feature dim, inferred from the first entry in the archive
            :rtype: int
            """
            assert (self.type == 'feat')
            assert self.content_keys
            (times, feats) = self.sprint_cache.read(self.content_keys[0], 'feat')
            assert (len(times) == len(feats) > 0)
            feat = feats[0]
            assert isinstance(feat, numpy.ndarray)
            assert (feat.ndim == 1)
            return feat.shape[0]

        def read(self, name):
            """
            :param str name: content-filename for sprint cache
            :return: numpy array of shape (time, [num_labels])
            :rtype: numpy.ndarray
            """
            res = self.sprint_cache.read(name, typ=self.type)
            if (self.type == 'align'):
                for (t, a, s, w) in res:
                    assert (w == 1), 'soft alignment not supported'
                label_seq = numpy.array([self.allophone_labeling.get_label_idx(a, s) for (t, a, s, w) in res], dtype=self.dtype)
                assert (label_seq.shape == (len(res),))
                return label_seq
            elif (self.type == 'align_raw'):
                for (t, a, s, w) in res:
                    assert (w == 1), 'soft alignment not supported'
                label_seq = numpy.array([self.allophone_labeling.state_tying_by_allo_state_idx[a] for (t, a, s, w) in res], dtype=self.dtype)
                assert (label_seq.shape == (len(res),))
                return label_seq
            elif (self.type == 'feat'):
                (times, feats) = res
                assert (len(times) == len(feats) > 0)
                feat_mat = numpy.array(feats, dtype=self.dtype)
                assert (feat_mat.shape == (len(times), self.num_labels))
                return feat_mat
            else:
                assert False

    def __init__(self, data, **kwargs):
        """
        :param dict[str,dict[str]] data: data-key -> dict which keys such as filename, see SprintCacheReader constructor
        """
        super(SprintCacheDataset, self).__init__(**kwargs)
        self.data = {key: self.SprintCacheReader(data_key=key, **opts) for (key, opts) in data.items()}
        # The "data" cache defines the canonical seq list.
        self.seq_list_original = self.data['data'].content_keys
        self.seq_list_ordered = self.seq_list_original
        self._num_seqs = len(self.seq_list_original)
        self._check_matching_content_list()
        self.num_outputs = {key: (d.num_labels, d.num_dims) for (key, d) in self.data.items()}
        self.num_inputs = self.num_outputs['data'][0]
        self._seq_lens = None

    def _check_matching_content_list(self):
        # All caches must cover exactly the same segments as the "data" cache.
        data0 = self.data['data']
        assert isinstance(data0, self.SprintCacheReader)
        sorted_list0 = sorted(data0.content_keys)
        for (key, data) in self.data.items():
            if (key == 'data'):
                continue
            assert isinstance(data, self.SprintCacheReader)
            assert (len(data.content_keys) == len(data0.content_keys))
            sorted_list1 = sorted(data.content_keys)
            for i in range(len(data.content_keys)):
                k0 = sorted_list0[i]
                k1 = sorted_list1[i]
                assert (k0 == k1)

    def init_seq_order(self, epoch=None, seq_list=None, seq_order=None):
        """
        :param int epoch:
        :param list[str]|None seq_list: not supported here
        :param list[int]|None seq_order: not supported here
        :rtype: bool
        """
        assert ((seq_list is None) and (seq_order is None))
        need_reinit = ((self.epoch is None) or (self.epoch != epoch))
        super(SprintCacheDataset, self).init_seq_order(epoch=epoch, seq_list=seq_list, seq_order=seq_order)
        self._num_seqs = len(self.seq_list_ordered)
        if (not need_reinit):
            return False
        data0 = self.data['data']
        assert isinstance(data0, self.SprintCacheReader)

        def get_seq_size(s):
            """
            :param int s:
            :rtype: int
            """
            # Archive entry size is used as a proxy for the seq length (for sorting).
            return data0.sprint_cache.ft[self.seq_list_original[s]].size
        seq_index = self.get_seq_order_for_epoch(epoch, num_seqs=len(self.seq_list_original), get_seq_len=get_seq_size)
        self.seq_list_ordered = [self.seq_list_original[s] for s in seq_index]
        self._num_seqs = len(self.seq_list_ordered)
        return True

    def get_total_num_seqs(self) -> int:
        """total num seqs"""
        return len(self.seq_list_original)

    def get_all_tags(self) -> List[str]:
        """all seq names"""
        return self.seq_list_original

    def supports_seq_order_sorting(self) -> bool:
        """supports sorting"""
        return True

    def get_dataset_seq_for_name(self, name, seq_idx=(- 1)):
        """
        :param str name:
        :param int seq_idx:
        :rtype: DatasetSeq
        """
        data = {key: d.read(name) for (key, d) in self.data.items()}
        return DatasetSeq(seq_idx=seq_idx, seq_tag=name, features=data['data'], targets=data)

    def _collect_single_seq(self, seq_idx):
        """
        :type seq_idx: int
        :rtype: DatasetSeq | None
        :returns DatasetSeq or None if seq_idx >= num_seqs.
        """
        if (seq_idx >= self.num_seqs):
            return None
        seq_tag = self.get_tag(seq_idx)
        return self.get_dataset_seq_for_name(seq_idx=seq_idx, name=seq_tag)

    def get_data_keys(self):
        """
        :rtype: list[str]
        """
        # NOTE(review): this actually returns a dict keys view, not a list as documented.
        return self.data.keys()

    def get_target_list(self):
        """
        :rtype: list[str]
        """
        return [key for (key, d) in self.data.items() if (d.type == 'align')]

    def get_tag(self, sorted_seq_idx):
        """
        :param int sorted_seq_idx:
        :rtype: str
        """
        return self.seq_list_ordered[sorted_seq_idx]
|
def demo():
    """
    Demo: load an (Extern)SprintDataset from a RETURNN config and a SprintCacheDataset
    from given kwargs, then either compare them seq-by-seq ("compare")
    or time iterating through both ("benchmark").
    """
    print('SprintDataset demo.')
    from argparse import ArgumentParser
    from returnn.util.basic import progress_bar_with_time
    from returnn.log import log
    from returnn.config import Config
    from returnn.datasets.basic import init_dataset
    arg_parser = ArgumentParser()
    arg_parser.add_argument('--config', help='config with ExternSprintDataset', required=True)
    arg_parser.add_argument('--sprint_cache_dataset', help='kwargs dict for SprintCacheDataset', required=True)
    arg_parser.add_argument('--max_num_seqs', default=sys.maxsize, type=int)
    arg_parser.add_argument('--action', default='compare', help='compare or benchmark')
    args = arg_parser.parse_args()
    log.initialize(verbosity=[4])
    # NOTE: eval on a command-line arg; acceptable for a local demo tool, never for untrusted input.
    sprint_cache_dataset_kwargs = eval(args.sprint_cache_dataset)
    assert isinstance(sprint_cache_dataset_kwargs, dict)
    sprint_cache_dataset = SprintCacheDataset(**sprint_cache_dataset_kwargs)
    print(('SprintCacheDataset: %r' % sprint_cache_dataset))
    config = Config()
    config.load_file(args.config)
    dataset = init_dataset(config.typed_value('train'))
    print(('Dataset via config: %r' % dataset))
    # Both datasets must describe the same data.
    assert (sprint_cache_dataset.num_inputs == dataset.num_inputs)
    assert (tuple(sprint_cache_dataset.num_outputs['classes']) == tuple(dataset.num_outputs['classes']))
    sprint_cache_dataset.init_seq_order(epoch=1)
    if (args.action == 'compare'):
        # Check that both datasets deliver identical features/targets per seq tag.
        print('Iterating through dataset...')
        seq_idx = 0
        dataset.init_seq_order(epoch=1)
        while (seq_idx < args.max_num_seqs):
            if (not dataset.is_less_than_num_seqs(seq_idx)):
                break
            dataset.load_seqs(seq_idx, (seq_idx + 1))
            tag = dataset.get_tag(seq_idx)
            assert (not tag.startswith('seq-')), 'dataset does not provide tag-names for seqs'
            dataset_seq = sprint_cache_dataset.get_dataset_seq_for_name(tag)
            data = dataset.get_data(seq_idx, 'data')
            targets = dataset.get_data(seq_idx, 'classes')
            assert (data.shape == dataset_seq.features['data'].shape)
            assert (targets.shape == dataset_seq.features['classes'].shape)
            assert numpy.allclose(data, dataset_seq.features['data'])
            assert numpy.allclose(targets, dataset_seq.features['classes'])
            seq_idx += 1
            progress_bar_with_time(dataset.get_complete_frac(seq_idx))
        print(('Finished through dataset. Num seqs: %i' % seq_idx))
        print(('SprintCacheDataset has num seqs: %i.' % sprint_cache_dataset.num_seqs))
    elif (args.action == 'benchmark'):
        # First pass: time the config dataset; remember the seq tags for the second pass.
        print('Iterating through dataset...')
        start_time = time.time()
        seq_tags = []
        seq_idx = 0
        dataset.init_seq_order(epoch=1)
        while (seq_idx < args.max_num_seqs):
            if (not dataset.is_less_than_num_seqs(seq_idx)):
                break
            dataset.load_seqs(seq_idx, (seq_idx + 1))
            tag = dataset.get_tag(seq_idx)
            assert (not tag.startswith('seq-')), 'dataset does not provide tag-names for seqs'
            seq_tags.append(tag)
            dataset.get_data(seq_idx, 'data')
            dataset.get_data(seq_idx, 'classes')
            seq_idx += 1
            progress_bar_with_time(dataset.get_complete_frac(seq_idx))
        print(('Finished through dataset. Num seqs: %i, time: %f' % (seq_idx, (time.time() - start_time))))
        print(('SprintCacheDataset has num seqs: %i.' % sprint_cache_dataset.num_seqs))
        if hasattr(dataset, 'exit_handler'):
            dataset.exit_handler()
        else:
            print('No way to stop any background tasks.')
        del dataset
        # Second pass: time the SprintCacheDataset on the same seqs.
        start_time = time.time()
        print('Iterating through SprintCacheDataset...')
        for (i, tag) in enumerate(seq_tags):
            sprint_cache_dataset.get_dataset_seq_for_name(tag)
            progress_bar_with_time((float(i) / len(seq_tags)))
        print(('Finished through SprintCacheDataset. time: %f' % ((time.time() - start_time),)))
    else:
        raise Exception(('invalid action: %r' % args.action))
|
class StereoDataset(CachedDataset2):
    """The purpose of this dataset is to be a base dataset for datasets which
    have an easy to use interface for using RETURNN as a regression tool
    """

    def __init__(self, partition_epoch=1, **kwargs):
        """
        :param int partition_epoch: in how many parts one pass over the corpus is split
        """
        super(StereoDataset, self).__init__(**kwargs)
        self._partition_epoch = partition_epoch
        self._current_partition = 0
        self._seq_index_list = None
        self._seqs_per_epoch = None

    def initialize(self):
        """Prepare the dataset; remember how many seqs do not divide evenly over the partitions."""
        self._seq_overhead = self._get_total_number_of_sequences() % self._partition_epoch
        super(StereoDataset, self).initialize()

    @property
    def num_seqs(self):
        """returns the number of sequences of the dataset

        :rtype: int
        """
        if self._num_seqs is None:
            raise NotImplementedError
        return self._num_seqs

    def _get_total_number_of_sequences(self):
        # To be provided by subclasses.
        raise NotImplementedError

    @property
    def seqs_per_epoch(self):
        """Number of seqs per partition (lazily computed; without the remainder)."""
        if self._seqs_per_epoch is None:
            self._seqs_per_epoch = self._get_total_number_of_sequences() // self._partition_epoch
        return self._seqs_per_epoch

    def _collect_single_seq(self, seq_idx):
        """returns the sequence specified by the index seq_idx

        :type seq_idx: int
        :rtype: DatasetSeq | None
        :returns DatasetSeq or None if seq_idx >= num_seqs.
        """
        raise NotImplementedError

    def _get_partition_size(self, partition):
        # The last partition additionally gets the remainder seqs.
        size = self.seqs_per_epoch
        if partition == self._partition_epoch - 1:
            size += self._seq_overhead
        return size

    def init_seq_order(self, epoch=None, seq_list=None, seq_order=None):
        """
        :type epoch: int|None
        :param epoch: epoch number
        :param list[str]|None seq_list:
        :param list[int]|None seq_order:
        :param seq_list: only None is currently supported
        Initialize lists:
          self.seq_index  # sorted seq idx
        """
        super(StereoDataset, self).init_seq_order(epoch=epoch, seq_list=seq_list, seq_order=seq_order)
        if epoch is None:
            self._seq_index_list = range(self.num_seqs)
            return True
        self._current_partition = (epoch - 1) % self._partition_epoch
        num_in_partition = self._get_partition_size(self._current_partition)
        if seq_list is not None or seq_order is not None:
            raise NotImplementedError('init_seq_order of StereoDataset does not support a predefined seq_list yet.')
        self._seq_index_list = self.get_seq_order_for_epoch(
            epoch, num_in_partition, (lambda s: self.get_seq_length(s).get('data', None)))
        # epoch is not None here, so the original's conditional print always fired.
        print('Reinitialize dataset seq order for epoch %i.' % epoch, file=log.v4)
        return True
|
class StereoHdfDataset(StereoDataset):
    """
    A stereo dataset which needs an hdf file as input. The hdf file
    is supposed to always have group 'inputs' and for the training data it
    also needs to contain the group 'outputs'. Each group is supposed to
    contain one dataset per sequence. The names of the datasets are supposed
    to be consecutive numbers starting at 0.

    The datasets are 2D numpy arrays, where dimension 0 is the time axis and
    dimension 1 is the feature axis. Therefore dimension 0 of the 'input'
    dataset and the respective 'output' dataset need to be the same.
    """

    def __init__(self, hdfFile, num_outputs=None, normalizationFile=None, flag_normalizeInputs=True, flag_normalizeTargets=True, **kwargs):
        """Constructor

        :type hdfFile: str
        :param hdfFile: path to the hdf file. if a bundle file is given (*.bundle)
            all hdf files listed in the bundle file will be used for the dataset.
        :see: BundleFile.BundleFile
        :type num_outputs: int
        :param num_outputs: this needs to be set if the stereo data hdf file
            only contains 'inputs' data (e.g. for the extraction process).
            Only if no 'outputs' data exists in the hdf file num_outputs is used.
        :type normalizationFile: str | None
        :param normalizationFile: path to a HDF file with normalization data.
            The file is optional: if it is not provided then no normalization
            is performed.
        :see: NormalizationData.NormalizationData
        :type flag_normalizeInputs: bool
        :param flag_normalizeInputs: if True then inputs will be normalized
            provided that the normalization HDF file has necessary datasets
            (i.e. mean and variance)
        :type flag_normalizeTargets: bool
        :param flag_normalizeTargets: if True then targets will be normalized
            provided that the normalization HDF file has necessary datasets
            (i.e. mean and variance)
        """
        super(StereoHdfDataset, self).__init__(**kwargs)
        self._flag_normalizeInputs = flag_normalizeInputs
        self._flag_normalizeTargets = flag_normalizeTargets
        # Both set by _setInputAndOutputDimensions below.
        self.num_inputs = None
        self.num_outputs = None
        self._filePaths = None
        self._fileHandlers = None
        self._seqMap = None  # seq idx -> (file handler idx, dataset name)
        self._normData = None
        if not os.path.isfile(hdfFile):
            raise IOError(hdfFile + ' does not exist')
        self._initHdfFileHandlers(hdfFile)
        # The total number of seqs over all HDF files in the bundle.
        self._num_seqs = self._calculateNumberOfSequences()
        if normalizationFile is not None:
            self._setNormalization(normalizationFile)
        self._setInputAndOutputDimensions(num_outputs)

    def _initHdfFileHandlers(self, hdfFile):
        """Initialize HDF file handlers

        :type hdfFile: str
        :param hdfFile: path to an HDF file with sequences or to a bundle file
            which should contain one path to an HDF file per line
        :see: BundleFile.BundleFile
        """
        self._filePaths = []
        self._fileHandlers = []
        if hdfFile.endswith('.bundle'):
            # A bundle file is a list of HDF files; open all of them.
            bundle = BundleFile(hdfFile)
            for hdfFilePath in bundle.datasetFilePaths:
                self._filePaths.append(hdfFilePath)
                self._fileHandlers.append(h5py.File(hdfFilePath, 'r'))
        else:
            self._filePaths.append(hdfFile)
            self._fileHandlers.append(h5py.File(hdfFile, 'r'))

    def _calculateNumberOfSequences(self):
        """
        :rtype: int
        :return: number of seqs for one (sub)epoch (one partition)
        """
        return self.seqs_per_epoch

    def _get_total_number_of_sequences(self):
        """Calculate and return the number of sequences in the dataset.
        This method also initializes a sequences map which maps sequence
        indices into HDF file handlers.

        :rtype: int
        :return: the number of sequences in the dataset
        """
        self._seqMap = {}
        seqCounter = 0
        for (fhIdx, fh) in enumerate(self._fileHandlers):
            for k in fh['inputs'].keys():
                self._seqMap[seqCounter] = (fhIdx, k)
                seqCounter += 1
        return seqCounter

    def _setNormalization(self, normalizationFile):
        """Set optional normalization (mean and variance).
        Mean and variance are set only if they are provided.

        :type normalizationFile: string
        :param normalizationFile: path to an HDF normalization file which contains
            optional datasets "mean" and "variance".
        :see: NormalizationData.NormalizationData
        """
        if not os.path.isfile(normalizationFile):
            raise IOError(normalizationFile + ' does not exist')
        self._normData = NormalizationData(normalizationFile)

    def _setInputAndOutputDimensions(self, num_outputs):
        """Set properties which correspond to input and output dimensions.

        :type num_outputs: int
        :param num_outputs: dimensionality of output features. used only if
            the dataset does not have output features, or if output features
            are sparse (1-dimensional)
        """
        # Peek at the first seq to derive the feature dimensions.
        someSequence = self._collect_single_seq(0)
        self.num_inputs = someSequence.get_data('data').shape[1]
        if 'outputs' in self._fileHandlers[0]:
            if len(someSequence.get_data('classes').shape) == 1:
                outputFeatDim = 1
            else:
                outputFeatDim = someSequence.get_data('classes').shape[1]
            if (outputFeatDim == 1) and (num_outputs is not None):
                # Sparse targets: num_outputs gives the number of classes.
                self.num_outputs = {'classes': (num_outputs, outputFeatDim)}
            else:
                self.num_outputs = {'classes': (outputFeatDim, 2)}
        else:
            if num_outputs is None:
                raise ValueError('if no output data is contained in StereoDataset the output dimension has to be specified by num_outputs')
            self.num_outputs = {'classes': (num_outputs, 2)}

    def get_data_dim(self, key):
        """This is copied from CachedDataset2 but the assertion is
        removed (see CachedDataset2.py)

        :type key: str
        :rtype: int
        :return: number of classes, no matter if sparse or not
        """
        if key == 'data':
            return self.num_inputs
        if key in self.num_outputs:
            d = self.num_outputs[key][0]
            return d
        # Unknown key: derive the dim from actually loaded data.
        self._load_something()
        if len(self.added_data[0].get_data(key).shape) == 1:
            return super(CachedDataset2, self).get_data_dim(key)
        assert len(self.added_data[0].get_data(key).shape) == 2
        return self.added_data[0].get_data(key).shape[1]

    def __del__(self):
        """Closes HDF file handlers."""
        # _fileHandlers can be missing or None if __init__ failed early.
        for fh in (getattr(self, '_fileHandlers', None) or []):
            try:
                fh.close()
            except Exception:
                pass

    @property
    def num_seqs(self):
        """Returns the number of sequences of the dataset

        :rtype: int
        :return: the number of sequences of the dataset.
        """
        if self._num_seqs is not None:
            return self._num_seqs
        self._num_seqs = self._calculateNumberOfSequences()
        return self._num_seqs

    def _collect_single_seq(self, seq_idx):
        """Returns the sequence specified by the index seq_idx.
        Normalization is applied to the input features if mean and variance
        have been specified during dataset creating (see the constructor).

        :type seq_idx: int
        :rtype: DatasetSeq | None
        :returns: None if seq_idx >= num_seqs or the corresponding sequence.
        """
        if self._seq_index_list is None:
            self.init_seq_order()
        if seq_idx >= len(self._seq_index_list):
            return None
        shuf_seq_idx = self._seq_index_list[seq_idx]
        # Map the partition-local index to the global seq index.
        partition_offset = int(np.sum([self._get_partition_size(i1) for i1 in range(self._current_partition)]))
        shuf_seq_idx += partition_offset
        seqMapping = self._seqMap[shuf_seq_idx]
        fileIdx = seqMapping[0]
        datasetName = seqMapping[1]
        fileHandler = self._fileHandlers[fileIdx]
        inputFeatures = fileHandler['inputs'][datasetName][...]
        targets = None
        if 'outputs' in fileHandler:
            targets = fileHandler['outputs'][datasetName][...]
        if self._normData is not None:
            assert isinstance(self._normData, NormalizationData)
            if self._flag_normalizeInputs:
                inputFeatures = StereoHdfDataset._normalizeVector(inputFeatures, self._normData.inputMean, self._normData.inputVariance)
            if self._flag_normalizeTargets:
                targets = StereoHdfDataset._normalizeVector(targets, self._normData.outputMean, self._normData.outputVariance)
        inputFeatures = inputFeatures.astype(np.float32)
        # Bugfix: guard the whole shape check; previously `elif targets.shape[1] == 1`
        # was evaluated even when targets is None (dataset without 'outputs' group),
        # raising AttributeError.
        if targets is not None:
            if targets.shape[1] > 1:
                targets = targets.astype(np.float32)
            elif targets.shape[1] == 1:
                # One output feature: treat as sparse class indices, flatten to 1D.
                targets = np.reshape(targets.astype(np.int32), (targets.shape[0],))
        return DatasetSeq(seq_idx, inputFeatures, targets)

    @staticmethod
    def _normalizeVector(v, mean, variance):
        """Helper method.
        Applies optional normalization to the given vector.

        :type v: numpy.ndarray | None
        :param v: vector if available or None otherwise
        :type mean: numpy.ndarray | None
        :param mean: mean
        :type variance: numpy.ndarray | None
        :param variance: variance
        :rtype: numpy.ndarray | None
        :return: normalized vector or None if it was None
        """
        if v is None:
            return v
        if mean is not None:
            v -= mean
        if variance is not None:
            v /= np.sqrt(variance)
        return v
|
class DatasetWithTimeContext(StereoHdfDataset):
    """This dataset composes a context feature by stacking together time frames."""

    def __init__(self, hdfFile, tau=1, **kwargs):
        """Constructor

        :type hdfFile: string
        :param hdfFile: see the StereoHdfDataset
        :type tau: int
        :param tau: how many time frames should be on the left and on the right.
            E.g. if tau = 2 then the context feature will be created
            by stacking two neighboring time frames from left and
            two neighboring time frames from right:
            newInputFeature = [ x_{t-2}, x_{t-1}, x_t, x_{t+1}, x_{t+2} ].
            In general new feature will have shape
            (2 * tau + 1) * originalFeatureDimensionality
            Output features are not changed.
        :type kwargs: dictionary
        :param kwargs: the rest of the arguments passed to the StereoHdfDataset
        """
        if (tau <= 0):
            raise ValueError('context parameter tau should be greater than zero')
        self._tau = tau
        super(DatasetWithTimeContext, self).__init__(hdfFile, **kwargs)

    def _collect_single_seq(self, seq_idx):
        """this method implements stacking the features

        :type seq_idx: int
        :param seq_idx: index of a sequence
        :rtype: DatasetSeq
        :return: DatasetSeq
        """
        if (seq_idx >= self.num_seqs):
            return None
        originalSeq = super(DatasetWithTimeContext, self)._collect_single_seq(seq_idx)
        inputFeatures = originalSeq.get_data('data')
        (frames, bins) = inputFeatures.shape
        # Sliding windows over the time axis:
        # leftContext holds the tau frames before t (zero-padded at the start),
        # rightContext holds the tau frames after t (zero-padded at the end).
        leftContext = deque()
        rightContext = deque()
        inFeatWithContext = []
        for i in range(self._tau):
            leftContext.append(np.zeros(bins))
            if ((i + 1) < frames):
                rightContext.append(inputFeatures[((i + 1), ...)])
            else:
                rightContext.append(np.zeros(bins))
        for t in range(frames):
            f = inputFeatures[(t, ...)]
            # Stacked feature: [x_{t-tau}, ..., x_t, ..., x_{t+tau}].
            newFeature = np.concatenate([np.concatenate(leftContext, axis=0), f, np.concatenate(rightContext, axis=0)], axis=0)
            inFeatWithContext.append(newFeature)
            # Advance both context windows by one frame.
            leftContext.popleft()
            leftContext.append(f)
            rightContext.popleft()
            if (((t + 1) + self._tau) < frames):
                rightContext.append(inputFeatures[(((t + 1) + self._tau), ...)])
            else:
                rightContext.append(np.zeros(bins))
        inputFeatures = np.array(inFeatWithContext)
        targets = None
        if ('classes' in originalSeq.get_data_keys()):
            targets = originalSeq.get_data('classes')
        return DatasetSeq(seq_idx, inputFeatures, targets)
|
def str_to_numpy_array(s: str) -> numpy.ndarray:
    """
    Canonical way to make a Numpy array for a Python string.

    For :class:`returnn.tensor.Tensor` instances on Numpy arrays,
    the dtype logic assumes this behavior.

    :param s: string
    :return: numpy array, `dtype.kind == "U"`
    """
    arr = numpy.array(s)
    return arr
|
class Vocabulary(object):
    """
    Represents a vocabulary (set of words, and their ids).
    Used by :class:`BytePairEncoding`.
    """

    # Class-level cache: vocab filename -> (vocab dict, labels list).
    # Shared across all Vocabulary instances (and subclasses) in the process.
    _cache = {}

    @classmethod
    def create_vocab(cls, **opts):
        """
        Factory: dispatches on the "class" option (or "bpe_file" as a shortcut)
        to the right Vocabulary subclass.

        :param opts: kwargs for class
        :rtype: Vocabulary|BytePairEncoding|CharacterTargets
        """
        opts = opts.copy()
        clz = cls
        if ('class' in opts):
            class_name = opts.pop('class')
            clz = globals()[class_name]
            assert issubclass(clz, Vocabulary), ('class %r %r is not a subclass of %r' % (class_name, clz, cls))
        elif ('bpe_file' in opts):
            clz = BytePairEncoding
        return clz(**opts)

    def __init__(self, vocab_file, seq_postfix=None, unknown_label='UNK', bos_label=None, eos_label=None, pad_label=None, control_symbols=None, user_defined_symbols=None, num_labels=None, labels=None):
        """
        :param str|None vocab_file:
        :param str|int|None unknown_label: e.g. "UNK" or "<unk>"
        :param str|int|None bos_label: e.g. "<s>"
        :param str|int|None eos_label: e.g. "</s>"
        :param str|int|None pad_label: e.g. "<pad>"
        :param dict[str,str|int]|None control_symbols:
            https://github.com/google/sentencepiece/blob/master/doc/special_symbols.md
        :param dict[str,str|int]|None user_defined_symbols:
            https://github.com/google/sentencepiece/blob/master/doc/special_symbols.md
        :param int num_labels: just for verification
        :param list[int]|None seq_postfix: labels will be added to the seq in self.get_seq
        :param list[str]|(()->list[str])|None labels: if given, vocab_file is not read
        """
        self.vocab_file = vocab_file
        self.unknown_label = unknown_label
        self.num_labels = None  # set by _parse_vocab
        self._vocab = None  # label (str) -> id (int); set by _parse_vocab
        if ((labels is not None) and callable(labels)):
            # Labels can be provided lazily via a callable.
            labels = labels()
        if (labels is not None):
            assert isinstance(labels, (list, tuple))
        self._labels = labels
        self._parse_vocab()
        if (num_labels is not None):
            assert (self.num_labels == num_labels)
        self.unknown_label_id = self.to_id(self.unknown_label, allow_none=True)
        if (self.unknown_label_id is not None):
            # Normalize unknown_label to its string form (it could have been given as an id).
            self.unknown_label = self.id_to_label(self.unknown_label_id)
        self.bos_label_id = self.to_id(bos_label, allow_none=True)
        self.eos_label_id = self.to_id(eos_label, allow_none=True)
        self.pad_label_id = self.to_id(pad_label, allow_none=True)
        self.control_symbol_ids = {name: self.to_id(label) for (name, label) in (control_symbols or {}).items()}
        self.user_defined_symbol_ids = {name: self.to_id(label) for (name, label) in (user_defined_symbols or {}).items()}
        self.seq_postfix = (seq_postfix or [])

    def __repr__(self):
        parts = [repr(self.vocab_file), ('num_labels=%s' % self.num_labels)]
        if (self.unknown_label_id is not None):
            parts.append(('unknown_label=%r' % self.unknown_label))
        if (self.bos_label_id is not None):
            parts.append(('bos_label=%r' % self.id_to_label(self.bos_label_id)))
        if (self.eos_label_id is not None):
            parts.append(('eos_label=%r' % self.id_to_label(self.eos_label_id)))
        if (self.pad_label_id is not None):
            parts.append(('pad_label=%r' % self.id_to_label(self.pad_label_id)))
        return ('%s(%s)' % (self.__class__.__name__, ', '.join(parts)))

    def set_random_seed(self, seed):
        """
        This can be called for a new epoch or so.
        Usually it has no effect, as there is no randomness.
        However, some vocab class could introduce some sampling process.

        :param int seed:
        """
        pass

    def _parse_vocab(self):
        """
        Sets self._vocab, self._labels, self.num_labels:
        either from the given labels, from the class-level cache,
        or by reading self.vocab_file (pickle or Python-literal dict).
        """
        filename = self.vocab_file
        import pickle
        if (self._labels is not None):
            self._vocab = {label: i for (i, label) in enumerate(self._labels)}
            self.num_labels = len(self._labels)
        elif (filename in self._cache):
            (self._vocab, self._labels) = self._cache[filename]
            self.num_labels = len(self._labels)
        else:
            if (filename[(- 4):] == '.pkl'):
                d = pickle.load(open(filename, 'rb'))
            else:
                # NOTE(review): eval (and pickle above) executes arbitrary code;
                # only load vocab files from trusted sources.
                d = eval(open(filename, 'r').read())
            if (not PY3):
                assert isinstance(d, dict)
                from returnn.util.basic import py2_utf8_str_to_unicode
                d = {py2_utf8_str_to_unicode(s): i for (s, i) in d.items()}
            assert isinstance(d, dict)
            # Invert the mapping (id -> label), then verify the ids are dense 0..N-1.
            labels = {idx: label for (label, idx) in sorted(d.items())}
            (min_label, max_label, num_labels) = (min(labels), max(labels), len(labels))
            assert (0 == min_label)
            if ((num_labels - 1) < max_label):
                print(('Vocab error: not all indices used? max label: %i' % max_label), file=log.v1)
                print(('unused labels: %r' % ([i for i in range((max_label + 1)) if (i not in labels)],)), file=log.v2)
            assert ((num_labels - 1) == max_label)
            self.num_labels = len(labels)
            self._vocab = d
            self._labels = [label for (idx, label) in sorted(labels.items())]
            self._cache[filename] = (self._vocab, self._labels)

    @classmethod
    def create_vocab_dict_from_labels(cls, labels):
        """
        This is exactly the format which we expect when we read it in self._parse_vocab.

        :param list[str] labels:
        :rtype: dict[str,int]
        """
        d = {label: idx for (idx, label) in enumerate(labels)}
        assert (len(d) == len(labels)), 'some labels are provided multiple times'
        return d

    @classmethod
    def create_vocab_from_labels(cls, labels, **kwargs):
        """
        Creates a `Vocabulary` from the given labels. Depending on whether the labels are identified as
        bytes, characters or words a `Utf8ByteTargets`, `CharacterTargets` or `Vocabulary` vocab is created.

        :param list[str] labels:
        :rtype: Vocabulary
        """
        kwargs = kwargs.copy()
        kwargs.setdefault('unknown_label', None)
        if ((len(labels) < 1000) and all([(len(label) == 1) for label in labels])):
            # All single characters: either a byte-level or a character-level vocab.
            if all([((ord(label) <= 255) and (ord(label) == idx)) for (idx, label) in enumerate(labels)]):
                # Labels are exactly the byte values 0..N: byte-level vocab.
                return Utf8ByteTargets()
            return CharacterTargets(vocab_file=None, labels=labels, **kwargs)
        return Vocabulary(vocab_file=None, labels=labels, **kwargs)

    def tf_get_init_variable_func(self, var):
        """
        :param tensorflow.Variable var:
        :rtype: (tensorflow.Session)->None
        """
        import tensorflow as tf
        from returnn.tf.util.basic import VariableAssigner
        assert isinstance(var, tf.Variable)
        assert (var.dtype.base_dtype == tf.string)
        assert (var.shape.as_list() == [self.num_labels])
        assert (len(self._labels) == self.num_labels)

        def init_vocab_var(session):
            """
            :param tensorflow.Session session:
            """
            VariableAssigner(var).assign(session=session, value=self._labels)

        return init_vocab_var

    def to_id(self, label, default=KeyError, allow_none=False):
        """
        Maps a label (str) or id (int) to a verified id.

        :param str|int|None label:
        :param str|type[KeyError]|None default:
        :param bool allow_none: whether label can be None. in this case, None is returned
        :rtype: int|None
        """
        if isinstance(label, str):
            return self.label_to_id(label, default=default)
        if isinstance(label, int):
            if self.is_id_valid(label):
                return label
            if (default is KeyError):
                raise KeyError(('invalid label id %i' % label))
            # NOTE(review): for an invalid int id, None is returned here even if a
            # non-None `default` was passed -- confirm whether that is intended.
            return None
        if ((label is None) and allow_none):
            return None
        raise TypeError(('invalid label type %r' % type(label)))

    def label_to_id(self, label, default=KeyError):
        """
        :param str label:
        :param int|type[KeyError]|None default:
        :rtype: int|None
        """
        if (default is KeyError):
            return self._vocab[label]
        return self._vocab.get(label, default)

    def id_to_label(self, idx, default=KeyError):
        """
        :param int idx:
        :param str|KeyError|None default:
        :rtype: str|None
        """
        if self.is_id_valid(idx):
            return self._labels[idx]
        if (default is KeyError):
            raise KeyError(('idx %i out of range' % idx))
        return default

    def is_id_valid(self, idx):
        """
        :param int idx:
        :rtype: bool
        """
        return (0 <= idx < len(self._labels))

    @property
    def labels(self):
        """
        :rtype: list[str]
        """
        return self._labels

    def get_seq(self, sentence):
        """
        :param str sentence: assumed to be seq of vocab entries separated by whitespace
        :rtype: list[int]
        """
        segments = sentence.split()
        return (self.get_seq_indices(segments) + self.seq_postfix)

    def get_seq_indices(self, seq):
        """
        :param list[str] seq:
        :rtype: list[int]
        """
        if (self.unknown_label is not None):
            # Unknown entries are mapped to the unknown label id.
            return [self._vocab.get(k, self.unknown_label_id) for k in seq]
        return [self._vocab[k] for k in seq]

    def get_seq_labels(self, seq):
        """
        :param list[int]|numpy.ndarray seq: 1D sequence
        :rtype: str
        """
        return ' '.join(map(self._labels.__getitem__, seq))
|
class BytePairEncoding(Vocabulary):
    """
    Vocab based on Byte-Pair-Encoding (BPE).
    This will encode the text on-the-fly with BPE.

    Reference:
    Rico Sennrich, Barry Haddow and Alexandra Birch (2016). Neural Machine Translation of Rare Words with Subword Units.
    Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (ACL 2016). Berlin, Germany.
    """

    def __init__(self, vocab_file, bpe_file, seq_postfix=None, **kwargs):
        """
        :param str vocab_file:
        :param str bpe_file: BPE merge codes file
        :param list[int]|None seq_postfix: labels will be added to the seq in self.get_seq
        """
        super(BytePairEncoding, self).__init__(vocab_file=vocab_file, seq_postfix=seq_postfix, **kwargs)
        from returnn.util.bpe import StandardBytePairEncoder

        self.bpe = StandardBytePairEncoder(bpe_codes_file=bpe_file, labels=self._labels)

    def get_seq(self, sentence):
        """
        :param str sentence:
        :rtype: list[int]
        """
        # Split into BPE subword units first, then map each unit to its id.
        pieces = self.bpe.segment_sentence(sentence)
        return self.get_seq_indices(pieces) + self.seq_postfix
|
class SamplingBytePairEncoding(Vocabulary):
    """
    Vocab based on Byte-Pair-Encoding (BPE).
    Like :class:`BytePairEncoding`, but here we randomly sample from different possible BPE splits.
    This will encode the text on-the-fly with BPE.
    """

    def __init__(self, vocab_file, breadth_prob, seq_postfix=None, **kwargs):
        """
        :param str vocab_file:
        :param float breadth_prob: sampling parameter, see :class:`SamplingBytePairEncoder`
        :param list[int]|None seq_postfix: labels will be added to the seq in self.get_seq
        """
        super(SamplingBytePairEncoding, self).__init__(vocab_file=vocab_file, seq_postfix=seq_postfix, **kwargs)
        from returnn.util.bpe import SamplingBytePairEncoder

        self.rnd = numpy.random.RandomState(0)
        if self.unknown_label_id is not None:
            unk = self.id_to_label(self.unknown_label_id)
        else:
            unk = None
        self.bpe = SamplingBytePairEncoder(labels=self._labels, breadth_prob=breadth_prob, rnd=self.rnd, unknown_label=unk)

    def set_random_seed(self, seed):
        """
        Reseed the internal RNG used for sampling the BPE splits.

        :param int seed:
        """
        self.rnd.seed(seed)

    def get_seq(self, sentence):
        """
        :param str sentence:
        :rtype: list[int]
        """
        # Sample one of the possible BPE segmentations, then map units to ids.
        pieces = self.bpe.segment_sentence(sentence)
        return self.get_seq_indices(pieces) + self.seq_postfix
|
class SentencePieces(Vocabulary):
    """
    Uses the SentencePiece software,
    which supports different kind of subword units (including BPE, unigram, ...).

    https://github.com/google/sentencepiece/
    https://github.com/google/sentencepiece/tree/master/python

    Dependency::

      pip3 install --user sentencepiece

    """

    def __init__(self, **opts):
        """
        :param str model_file: The sentencepiece model file path.
        :param str model_proto: The sentencepiece model serialized proto.
        :param type out_type: output type. int or str. (Default = int)
        :param bool add_bos: Add <s> to the result (Default = false)
        :param bool add_eos: Add </s> to the result (Default = false)
          <s>/</s> is added after reversing (if enabled).
        :param bool reverse: Reverses the tokenized sequence (Default = false)
        :param bool enable_sampling: (Default = false)
        :param int nbest_size: sampling parameters for unigram. Invalid for BPE-Dropout.
          nbest_size = {0,1}: No sampling is performed.
          nbest_size > 1: samples from the nbest_size results.
          nbest_size < 0: (Default). assuming that nbest_size is infinite and samples
            from the all hypothesis (lattice) using
            forward-filtering-and-backward-sampling algorithm.
        :param float alpha: Soothing parameter for unigram sampling, and dropout probability of
          merge operations for BPE-dropout. (Default = 0.1)
        :param dict[str,str|int]|None control_symbols:
          https://github.com/google/sentencepiece/blob/master/doc/special_symbols.md
        :param dict[str,str|int]|None user_defined_symbols:
          https://github.com/google/sentencepiece/blob/master/doc/special_symbols.md
        """
        import sentencepiece as spm
        self._opts = opts
        # The labels cache (see the labels property) is keyed by the model file, if any.
        self._cache_key = opts.get('model_file', None)
        # NOTE(review): these pops mutate the same dict stored as self._opts above,
        # so __repr__ will not show control_symbols/user_defined_symbols.
        control_symbols = opts.pop('control_symbols', None)
        user_defined_symbols = opts.pop('user_defined_symbols', None)
        self.sp = spm.SentencePieceProcessor(**opts)
        super(SentencePieces, self).__init__(vocab_file=None, seq_postfix=None, unknown_label=self.sp.unk_id(), eos_label=self.sp.eos_id(), bos_label=self.sp.bos_id(), pad_label=self.sp.pad_id(), control_symbols=control_symbols, user_defined_symbols=user_defined_symbols)

    def __repr__(self):
        return ('%s(%r)' % (self.__class__.__name__, self._opts))

    def _parse_vocab(self):
        # Labels are computed lazily (see the labels property);
        # only the vocab size is needed up-front.
        self.num_labels = self.sp.vocab_size()

    @property
    def labels(self):
        """
        :rtype: list[str]
        """
        if (self._cache_key and (self._cache_key in self._cache)):
            (self._vocab, self._labels) = self._cache[self._cache_key]
            assert (self.num_labels == len(self._vocab) == len(self._labels))
        else:
            # Materialize the full label list from the SentencePiece model.
            self._labels = [self.sp.id_to_piece(i) for i in range(self.num_labels)]
            self._vocab = {label: i for (i, label) in enumerate(self._labels)}
            if self._cache_key:
                self._cache[self._cache_key] = (self._vocab, self._labels)
        return self._labels

    def is_id_valid(self, idx):
        """
        :param int idx:
        :rtype: bool
        """
        return (not self.sp.IsUnused(idx))

    def id_to_label(self, idx, default=KeyError):
        """
        :param int idx:
        :param str|KeyError|None default:
        :rtype: str|None
        """
        if ((default is not KeyError) and (not self.is_id_valid(idx))):
            return default
        return self.sp.IdToPiece(idx)

    def label_to_id(self, label, default=KeyError):
        """
        :param str label:
        :param int|type[KeyError]|None default:
        :rtype: int|None
        """
        res = self.sp.PieceToId(label)
        if ((res == self.unknown_label_id) or (res < 0) or (res is None)):
            # PieceToId maps unknown pieces to the unk id, so disambiguate:
            # only the actual unknown label itself should map to unknown_label_id.
            if (label == self.id_to_label(self.unknown_label_id)):
                return self.unknown_label_id
            if (default is KeyError):
                raise KeyError(('label %r not found' % label))
            return default
        return res

    def set_random_seed(self, seed):
        """
        :param int seed:
        """
        # Note: this sets a process-global seed in the sentencepiece library,
        # not an instance-local one.
        import sentencepiece as spm
        spm.set_random_generator_seed(seed)

    def get_seq(self, sentence):
        """
        :param str sentence: assumed to be seq of vocab entries separated by whitespace
        :rtype: list[int]
        """
        return self.sp.encode(sentence, out_type=int)
|
class CharacterTargets(Vocabulary):
    """
    Uses characters as target labels.
    Also see :class:`Utf8ByteTargets`.
    """

    def __init__(self, vocab_file, seq_postfix=None, unknown_label='@', labels=None, **kwargs):
        """
        :param str|None vocab_file:
        :param list[int]|None seq_postfix: labels will be added to the seq in self.get_seq
        :param str|None unknown_label:
        :param list[str]|None labels:
        """
        super(CharacterTargets, self).__init__(vocab_file=vocab_file, seq_postfix=seq_postfix, unknown_label=unknown_label, labels=labels, **kwargs)

    def get_seq(self, sentence):
        """
        :param str sentence:
        :rtype: list[int]
        """
        # Each character is one label.
        if self.unknown_label is None:
            char_ids = [self._vocab[c] for c in sentence]
        else:
            unk = self.unknown_label_id
            char_ids = [self._vocab.get(c, unk) for c in sentence]
        return char_ids + self.seq_postfix

    def get_seq_labels(self, seq):
        """
        :param list[int]|numpy.ndarray seq: 1D sequence
        :rtype: str
        """
        # Characters are joined without separator (unlike word-level vocabs).
        return ''.join(self._labels[i] for i in seq)
|
class Utf8ByteTargets(Vocabulary):
    """
    Uses bytes as target labels from UTF8 encoded text. All bytes (0-255) are allowed.
    Also see :class:`CharacterTargets`.
    """

    def __init__(self, seq_postfix=None):
        """
        :param list[int]|None seq_postfix: labels will be added to the seq in self.get_seq
        """
        super(Utf8ByteTargets, self).__init__(vocab_file=None, seq_postfix=seq_postfix, unknown_label=None)

    def _parse_vocab(self):
        """
        Sets self._vocab, self._labels, self.num_labels.
        The vocab is fixed: each of the 256 byte values maps to itself.
        """
        byte_values = range(256)
        self._labels = [chr(b) for b in byte_values]
        self._vocab = {chr(b): b for b in byte_values}
        self.num_labels = 256

    def get_seq(self, sentence):
        """
        :param str sentence:
        :rtype: list[int]
        """
        if sys.version_info[0] >= 3:
            byte_ids = list(sentence.encode('utf8'))
        else:
            # Py2: encode() yields a str; go via bytearray to get ints.
            byte_ids = list(bytearray(sentence.encode('utf8')))
        return byte_ids + self.seq_postfix

    def get_seq_labels(self, seq):
        """
        :param list[int]|numpy.ndarray seq: 1D sequence
        :rtype: str
        """
        return bytearray(seq).decode(encoding='utf8')
|
class EngineBase():
'\n Base class for a backend engine, such as :class:`TFEngine.Engine`.\n '
    def __init__(self, config: Optional[Config]=None):
        """
        :param config: if not given, the global config is used (auto-created if needed)
        """
        if (config is None):
            config = get_global_config(auto_create=True)
        self.config = config
        self.epoch = 0  # current epoch; 0 means training has not started yet
        self.global_train_step = None  # set/maintained by the backend engine
        self.pretrain = None  # optional pretrain logic, see is_pretrain_epoch
        self.model_filename = None  # base model filename (without epoch postfix)
        self.learning_rate = 0.0  # current learning rate; set in init_train_from_config
        self.learning_rate_control = None  # set in init_train_from_config
    def init_network_from_config(self, config: Optional[Config]=None):
        """
        Initialize network/model.
        No-op in this base class; backend engines override it.

        :param config:
        """
    def init_train_from_config(self, config: Optional[Config]=None):
        """
        Initialize all engine parts needed for training

        :param config: falls back to self.config if not given
        """
        if (not config):
            config = self.config
        self.learning_rate_control = load_learning_rate_control_from_config(config)
        # Start from the default learning rate; per-epoch values come from the control.
        self.learning_rate = self.learning_rate_control.default_learning_rate
@classmethod
def config_get_final_epoch(cls, config):
'\n :param returnn.config.Config config:\n :rtype: int\n '
num_epochs = config.int('num_epochs', 5)
if config.has('load_epoch'):
num_epochs = max(num_epochs, config.int('load_epoch', 0))
return num_epochs
    @classmethod
    def get_existing_models(cls, config: Config, *, for_training: Optional[bool]=None):
        """
        :param config:
        :param for_training: if True, will only return models which are suitable for resuming training.
          E.g. in case of PyTorch, it means that the optimizer state should be present.
          By default, will be True if the task is "train".
        :return: dict epoch -> model filename
        :rtype: dict[int,str]
        """
        model_filename = config.value('model', '')
        if (not model_filename):
            return {}
        file_list = {}
        if (for_training is None):
            for_training = (config.value('task', 'train') == 'train')
        for epoch in range(1, (cls.config_get_final_epoch(config) + 1)):
            # Pretrain model files only exist for the TF backend.
            for is_pretrain in ([False, True] if util.BackendEngine.is_tensorflow_selected() else [False]):
                fn = cls.epoch_model_filename(model_filename, epoch, is_pretrain=is_pretrain)
                if os.path.exists(fn):
                    file_list[epoch] = fn
                    break  # found a model for this epoch
                if util.BackendEngine.is_tensorflow_selected():
                    # TF checkpoints consist of several files; check for the index file.
                    if os.path.exists((fn + '.index')):
                        file_list[epoch] = fn
                        break
                elif util.BackendEngine.is_torch_selected():
                    if os.path.exists((fn + '.pt')):
                        if for_training:
                            # For resuming training, the optimizer state must exist as well.
                            if (not os.path.exists((fn + '.opt.pt'))):
                                continue
                        file_list[epoch] = fn
                        break
        return file_list
    @classmethod
    def get_epoch_model(cls, config):
        """
        Determines which model checkpoint to load (if any), considering the
        "load", "load_epoch", "import_model_train_epoch1" and "start_epoch"
        config options as well as already existing model files.

        :type config: returnn.config.Config
        :return: (epoch, model_filename). epoch is the epoch of the model filename.
        :rtype: (int|None, str|None)
        """
        start_epoch_mode = config.value('start_epoch', 'auto')
        if (start_epoch_mode == 'auto'):
            start_epoch = None
        else:
            start_epoch = int(start_epoch_mode)
            assert (start_epoch >= 1)
        # "load" can point to a specific checkpoint file (pattern).
        load_model_epoch_filename = util.get_checkpoint_filepattern(config.value('load', ''))
        if load_model_epoch_filename:
            assert os.path.exists((load_model_epoch_filename + util.get_model_filename_postfix())), ('load option %r, file %r does not exist' % (config.value('load', ''), (load_model_epoch_filename + util.get_model_filename_postfix())))
        # "import_model_train_epoch1": initialize params from this model, then train from scratch.
        import_model_train_epoch1 = util.get_checkpoint_filepattern(config.value('import_model_train_epoch1', ''))
        if import_model_train_epoch1:
            assert os.path.exists((import_model_train_epoch1 + util.get_model_filename_postfix()))
        existing_models = cls.get_existing_models(config)
        load_epoch = config.int('load_epoch', (- 1))
        if load_model_epoch_filename:
            if (load_epoch <= 0):
                # Infer the epoch from the "load" checkpoint filename.
                load_epoch = util.model_epoch_from_filename(load_model_epoch_filename)
        elif (load_epoch > 0):
            # "load_epoch" without "load": pick that epoch from the existing models.
            assert (load_epoch in existing_models)
            load_model_epoch_filename = existing_models[load_epoch]
            assert (util.model_epoch_from_filename(load_model_epoch_filename) == load_epoch)
        # Priority: explicit "load" (non-train task or explicit start_epoch) >
        # existing models > import_model_train_epoch1 > "load" > nothing.
        if (load_model_epoch_filename and ((config.value('task', 'train') != 'train') or (start_epoch is not None))):
            if ((config.value('task', 'train') == 'train') and (start_epoch is not None)):
                # Ignore the epoch. To be consistent with the start_epoch handling below.
                epoch = None
            else:
                epoch = load_epoch
            epoch_model = (epoch, load_model_epoch_filename)
        elif existing_models:
            # Resume from the latest existing model.
            epoch_model = sorted(existing_models.items())[(- 1)]
            if load_model_epoch_filename:
                print("note: there is a 'load' which we ignore because of existing model", file=log.v4)
        elif ((config.value('task', 'train') == 'train') and import_model_train_epoch1 and (start_epoch in [None, 1])):
            # Epoch 0 signals: params imported, training starts at epoch 1.
            epoch_model = (0, import_model_train_epoch1)
        elif load_model_epoch_filename:
            epoch_model = (load_epoch, load_model_epoch_filename)
        else:
            epoch_model = (None, None)
        # Explicit start_epoch overrides whatever was selected above.
        if (start_epoch == 1):
            if epoch_model[0]:
                print(('warning: there is an existing model: %s' % (epoch_model,)), file=log.v4)
                epoch_model = (None, None)
        elif ((start_epoch or 0) > 1):
            if epoch_model[0]:
                if (epoch_model[0] != (start_epoch - 1)):
                    print(('warning: start_epoch %i but there is %s' % (start_epoch, epoch_model)), file=log.v4)
                epoch_model = ((start_epoch - 1), existing_models[(start_epoch - 1)])
        return epoch_model
@classmethod
def get_train_start_epoch(cls, config: Config) -> int:
    """
    Automatically determine the epoch to start training from, based on the
    existing model files on disk. This ensures the files are present and
    enforces that there are no old outdated files which should be ignored.
    Epochs are 1-based; training resumes one epoch after the last stored model.

    :param config: only ``start_batch`` == "auto" is supported
    :return: start epoch (>= 1)
    """
    start_batch_mode = config.value('start_batch', 'auto')
    if start_batch_mode != 'auto':
        raise Exception(f'custom start_batch {start_batch_mode!r} not supported')
    last_epoch, _ = cls.get_epoch_model(config)
    # No stored model yet -> start from the very first epoch.
    return 1 if last_epoch is None else last_epoch + 1
@classmethod
def epoch_model_filename(cls, model_filename: str, epoch: int, *, is_pretrain: bool = False) -> str:
    """
    Build the checkpoint filename for a given epoch.

    :param model_filename: base model path, without epoch suffix
    :param epoch: 1-based epoch number, encoded as zero-padded suffix
    :param is_pretrain: if True, insert a ".pretrain" marker before the epoch suffix
    :return: full filename (excluding any backend-specific postfix)
    """
    # There is no "/tmp" on Windows; redirect to the local temp directory.
    if sys.platform == 'win32' and model_filename.startswith('/tmp/'):
        import tempfile
        model_filename = tempfile.gettempdir() + model_filename[len('/tmp'):]
    marker = '.pretrain' if is_pretrain else ''
    return model_filename + marker + ('.%03d' % epoch)
def get_epoch_model_filename(self, epoch=None):
    """
    :param int|None epoch: defaults to the current epoch (``self.epoch``)
    :return: filename, excluding TF specific postfix
    :rtype: str
    """
    epoch = epoch or self.epoch
    pretrain = self.is_pretrain_epoch(epoch=epoch)
    return self.epoch_model_filename(self.model_filename, epoch, is_pretrain=pretrain)
def get_epoch_str(self):
    """
    :return: e.g. "epoch 3", or "pretrain epoch 5"
    :rtype: str
    """
    prefix = 'pretrain ' if self.is_pretrain_epoch() else ''
    return prefix + ('epoch %s' % self.epoch)
def is_pretrain_epoch(self, epoch=None):
    """
    :param int|None epoch: defaults to the current epoch (``self.epoch``)
    :return: whether this epoch is covered by the pretrain logic
    :rtype: bool
    """
    epoch = epoch or self.epoch
    if not self.pretrain:
        # Keep the falsy value itself (e.g. None), like the original `and` expression.
        return self.pretrain
    return epoch <= self.pretrain.get_train_num_epochs()
def is_first_epoch_after_pretrain(self):
    """
    :return: whether the current epoch is the first epoch right after pretraining
    :rtype: bool
    """
    if not self.pretrain:
        # Keep the falsy value itself (e.g. None), like the original `and` expression.
        return self.pretrain
    return self.epoch == self.pretrain.get_train_num_epochs() + 1
def forward_with_callback(self, *, dataset: Dataset, callback: ForwardCallbackIface):
    """
    Iterate through the dataset, calling `forward_step` from user config,
    collecting outputs in `rf.get_run_ctx()` via `mark_as_output` calls,
    and then calling `callback` for each entry.

    Abstract here; backend-specific engine subclasses are expected to implement it.

    :param dataset: data to iterate over
    :param callback: invoked with the collected outputs
    """
    raise NotImplementedError
def _do_save(self):
    """
    :return: whether to perform save on disk in this process. e.g. for Horovod rank != 0, do not save.
    :rtype: bool
    """
    from returnn.util.basic import should_write_to_disk
    return should_write_to_disk(config=self.config)
@staticmethod
def delete_model(filename):
    """
    Delete the model checkpoint file(s) belonging to the given filename.
    Abstract here; backend-specific engine subclasses implement the actual deletion.

    :param str filename:
    :return: accumulated file-size in bytes of deleted files
    :rtype: int
    """
    raise NotImplementedError
def cleanup_old_models(self, ask_for_confirmation=False):
    """
    Delete old model checkpoints from disk.

    Kept are: the last ``keep_last_n`` epochs, the ``keep_best_n`` epochs with the
    lowest value per score key (taken from the learning-rate control data), and a
    set of "milestone" epochs (``cleanup_old_models.keep`` in the config, with a
    heuristic default pattern). Everything else is deleted via :func:`delete_model`.

    :param bool ask_for_confirmation: if True, will ask the user interactively to confirm
    """
    if (not self._do_save()):
        # E.g. Horovod rank != 0: this process must not touch files on disk.
        return
    from returnn.util.basic import CollectionReadCheckCovered, human_bytes_size, confirm
    from returnn.util.math import next_power_of_two
    from itertools import count
    # CollectionReadCheckCovered allows us to assert below that every given option was used.
    opts = CollectionReadCheckCovered(self.config.get_of_type('cleanup_old_models', dict, {}))
    existing_models = self.get_existing_models(config=self.config, for_training=False)
    if (self.learning_rate_control is not None):
        lr_control = self.learning_rate_control
    else:
        lr_control = load_learning_rate_control_from_config(self.config)
    epochs = sorted(existing_models.keys())
    if (not epochs):
        print('Cannot cleanup models, no models found.', file=log.v2)
        return
    keep_last_n = opts.get('keep_last_n', 2)
    keep_best_n = opts.get('keep_best_n', 4)
    assert ((keep_last_n >= 1) and (keep_best_n >= 0))
    if (max(keep_last_n, keep_best_n) >= len(epochs)):
        print(('Only %i epochs stored so far and keeping last %i epochs and best %i epochs, thus not cleaning up any epochs yet.' % (len(epochs), keep_last_n, keep_best_n)), file=log.v2)
        return
    keep_epochs = set()  # epochs which will NOT be deleted
    default_keep_pattern = set()
    # Heuristic milestone pattern, scaled by the highest stored epoch:
    # keep every `keep_every`-th epoch, plus doublings of `keep_doubles_of`.
    if (epochs[(- 1)] <= 10):
        keep_every = 4
        keep_doubles_of = 5
    elif (epochs[(- 1)] <= 50):
        keep_every = 20
        keep_doubles_of = 5
    elif (epochs[(- 1)] <= 100):
        keep_every = 40
        keep_doubles_of = 10
    else:
        keep_every = (80 * next_power_of_two((1 + (epochs[(- 1)] // 240))))
        keep_doubles_of = 20
    for i in count(1):
        n = (keep_every * i)
        if (n > epochs[(- 1)]):
            break
        default_keep_pattern.add(n)
    for i in count():
        n = (keep_doubles_of * (2 ** i))
        if (n > epochs[(- 1)]):
            break
        default_keep_pattern.add(n)
    keep_epochs.update(opts.get('keep', default_keep_pattern))
    keep_epochs.update(epochs[(- keep_last_n):])
    # Collect every score/error key that was reported for any epoch.
    score_keys = set()
    for data in lr_control.epoch_data.values():
        score_keys.update(data.error.keys())
    assert score_keys
    score_keys = sorted(score_keys)
    score_values = {key: [] for key in score_keys}
    for epoch in epochs:
        epoch_scores = lr_control.epoch_data[epoch].error
        for key in epoch_scores.keys():
            score_values[key].append(epoch_scores[key])
    # A key whose value never changes carries no information for picking the best epochs.
    for key in list(score_keys):
        scores = score_values[key]
        if (min(scores) == max(scores)):
            print(('Ignoring score key %r because all epochs have the same value %r.' % (key, scores[0])), file=log.v3)
            score_keys.remove(key)
            score_values.pop(key)
    # Epochs missing a value for some key fall back to the worst (max) seen value.
    worst_score_values = {key: max(scores) for (key, scores) in score_values.items()}
    for key in score_keys:
        # Sorted ascending; the first keep_best_n entries (lowest values) are kept.
        scores = sorted([(lr_control.epoch_data[epoch].error.get(key, worst_score_values[key]), epoch) for epoch in epochs])
        scores = scores[:keep_best_n]
        keep_epochs.update([v[1] for v in scores])
    keep_epochs.intersection_update(epochs)
    if (len(keep_epochs) == len(epochs)):
        print(('%i epochs stored so far and keeping all.' % len(epochs)), file=log.v2)
        return
    remove_epochs = sorted(set(epochs).difference(keep_epochs))
    assert remove_epochs
    if (len(epochs) > 6):
        # Abbreviate long epoch lists in the log output.
        epoch_summary = ('[%s, ..., %s]' % (', '.join(map(str, epochs[:3])), ', '.join(map(str, epochs[(- 3):]))))
    else:
        epoch_summary = str(epochs)
    print(('We have stored models for epochs %s and keep epochs %s.' % (epoch_summary, sorted(keep_epochs))), file=log.v3)
    print(('We will delete the models of epochs %s.' % (remove_epochs,)), file=log.v3)
    # Raises if the config contained unknown cleanup_old_models sub-options.
    opts.assert_all_read()
    if self.config.bool('dry_run', False):
        print('Dry-run, will not delete models.', file=log.v2)
        return
    if ask_for_confirmation:
        confirm('Delete those models?', exit_on_false=True)
    count_bytes = 0
    for epoch in remove_epochs:
        count_bytes += self.delete_model(existing_models[epoch])
    print(('Deleted %s.' % human_bytes_size(count_bytes)), file=log.v2)
def _is_dataset_evaluated(self, name: str) -> bool:
    """
    Check via self.learning_rate_control.

    :param name: e.g. "dev"
    :return: whether there is an entry for the score in the learning rate file
    """
    assert self.learning_rate_control.filename
    error_dict = self.learning_rate_control.get_epoch_error_dict(self.epoch)
    if not error_dict:
        return False
    # An entry exists if any key starts with "<name>_score" or "<name>_loss".
    prefixes = (('%s_score' % name), ('%s_loss' % name))
    return any(k.startswith(prefixes) for k in error_dict.keys())
|
class BatchSeqCopyPart:
    """
    A training batch in RETURNN can be assembled from several parts of sequences,
    ordered in various ways (concatenated slices for the non-recurrent case,
    one or more slices per sequence for the recurrent/chunking case).
    One instance describes a single such part and its target position in the batch.
    """

    def __init__(self, seq_idx, seq_start_frame, seq_end_frame, batch_slice, batch_frame_offset):
        """
        :type seq_idx: int
        :type seq_start_frame: NumbersDict | int
        :type seq_end_frame: NumbersDict | int
          Frame idx are input seq, output seq.
        :type batch_slice: int
        :type batch_frame_offset: int | NumbersDict
        """
        self.seq_idx = seq_idx
        # Frame positions are normalized to NumbersDict (per data-key values).
        self.seq_start_frame = NumbersDict(seq_start_frame)
        self.seq_end_frame = NumbersDict(seq_end_frame)
        self.batch_slice = batch_slice
        self.batch_frame_offset = NumbersDict(batch_frame_offset)
        assert self.seq_start_frame.has_values()
        assert self.seq_end_frame.has_values()
        assert self.batch_frame_offset.has_values()

    @property
    def frame_length(self):
        """
        :rtype: NumbersDict
        """
        return self.seq_end_frame - self.seq_start_frame

    def __repr__(self):
        attrs = ('seq_idx', 'seq_start_frame', 'seq_end_frame', 'batch_slice', 'batch_frame_offset')
        pairs = ['%s=%r' % (attr, getattr(self, attr)) for attr in attrs]
        return '<BatchSeqCopyPart %s>' % ' '.join(pairs)
|
class Batch:
    """
    A batch can consists of several sequences (= segments).
    This is basically just a list of BatchSeqCopyPart.
    """

    def __init__(self):
        # Per data-key maximum number of frames over all slices.
        self.max_num_frames_per_slice = NumbersDict(0)
        self.num_slices = 0
        self.seqs = []  # type: list[BatchSeqCopyPart]

    def __repr__(self):
        return '<Batch start_seq:%r, len(seqs):%i>' % (self.start_seq, len(self.seqs))

    def try_sequence_as_slice(self, length):
        """
        :param NumbersDict length: number of (time) frames
        :return: new shape which covers the old shape and one more data-batch, format (time,batch)
        :rtype: (NumbersDict,int)
        """
        new_max = NumbersDict.max([self.max_num_frames_per_slice, length])
        return [new_max, self.num_slices + 1]

    def add_sequence_as_slice(self, seq_idx, seq_start_frame, length):
        """
        Adds one data-batch in an additional slice.

        :param int seq_idx:
        :param NumbersDict|int seq_start_frame:
        :param NumbersDict length: number of (time) frames
        """
        self.max_num_frames_per_slice, self.num_slices = self.try_sequence_as_slice(length)
        part = BatchSeqCopyPart(
            seq_idx=seq_idx,
            seq_start_frame=seq_start_frame,
            seq_end_frame=seq_start_frame + length,
            batch_slice=self.num_slices - 1,
            batch_frame_offset=0)
        self.seqs.append(part)

    def add_frames(self, seq_idx, seq_start_frame, length, frame_dim_corresponds=True):
        """
        Adds frames to all data-batches.
        Will add one data-batch if we don't have one yet.

        :param int seq_idx:
        :param NumbersDict|int seq_start_frame:
        :param NumbersDict length: number of (time) frames
        :param bool frame_dim_corresponds: if the batch frame offset should always be the same (max value) for all keys
        """
        offset = self.max_num_frames_per_slice
        if frame_dim_corresponds:
            # Use one common (max) frame offset across all data keys.
            offset = NumbersDict(offset.max_value())
            self.max_num_frames_per_slice = NumbersDict(self.max_num_frames_per_slice.max_value())
        self.max_num_frames_per_slice += length
        self.num_slices = max(self.num_slices, 1)
        self.seqs.append(BatchSeqCopyPart(
            seq_idx=seq_idx,
            seq_start_frame=seq_start_frame,
            seq_end_frame=seq_start_frame + length,
            batch_slice=0,
            batch_frame_offset=offset))

    def init_with_one_full_sequence(self, seq_idx, dataset):
        """
        :param int seq_idx:
        :param Dataset.Dataset dataset:
        """
        assert not self.seqs
        start, end = dataset.get_start_end_frames_full_seq(seq_idx)
        self.add_frames(seq_idx=seq_idx, seq_start_frame=start, length=end - start)

    def get_all_slices_num_frames(self):
        """
        Note that this is only an upper limit in case of data_shape[1] > 1
        because data_shape[0] is the max frame len of all seqs.

        :return: related to the data-key with max length
        :rtype: NumbersDict
        """
        return self.max_num_frames_per_slice * self.num_slices

    def get_total_num_frames(self):
        """
        :rtype: NumbersDict
        """
        return sum(part.frame_length for part in self.seqs)

    @property
    def start_seq(self):
        """
        :rtype: int|None
        """
        if not self.seqs:
            return None
        return min(part.seq_idx for part in self.seqs)

    @property
    def end_seq(self):
        """
        :rtype: int|None
        """
        if not self.seqs:
            return None
        return max(part.seq_idx for part in self.seqs) + 1

    def get_num_seqs(self):
        """
        :rtype: int
        """
        if not self.seqs:
            return 0
        return self.end_seq - self.start_seq
|
class BatchSetGenerator:
    """
    Lazily yields the next batches (list[Batch]) for assign_dev_data().
    The batches come from a generator on-the-fly — the whole point of this class
    is that the full batch list need not be known in advance. Since assign_dev_data()
    can fail for various reasons, fetched batches are buffered and only consumed
    when self.advance() is called explicitly.
    """

    def __init__(self, dataset, generator, shuffle_batches=False, cache_whole_epoch=True):
        """
        :type dataset: Dataset.Dataset
        :type generator: typing.Generator[Batch]|typing.Iterator[Batch]
        :param bool shuffle_batches:
        :param bool cache_whole_epoch:
        """
        self.dataset = dataset
        self.generator = generator
        self.shuffle_batches = shuffle_batches
        self.cache_whole_epoch = cache_whole_epoch
        self.cache = []  # all batches of a fully-seen epoch, for reuse via reset()
        self.buffer = []  # prefetched but not yet consumed batches
        self.last_batch = None
        self.reached_end = False
        # NOTE: seeds the global RNG, as the original implementation does.
        random.seed(1234)
        self._reset()

    def _reset(self):
        self.buffer = list(self.cache)
        if self.shuffle_batches:
            random.shuffle(self.buffer)
        # The cache becomes usable only once a full epoch has been seen.
        self.cache_active = self.reached_end
        self.reached_end = False
        self.last_batch = None
        self.current_batch_idx = 0

    def reset(self):
        """
        Call this after one epoch to reuse the previously cached batches.
        """
        assert self.cache_whole_epoch
        self._reset()

    def _read_next(self):
        """
        :return: whether one more batch could be fetched into the buffer
        :rtype: bool
        """
        if self.reached_end:
            return False
        try:
            batch = next(self.generator)
        except StopIteration:
            self.reached_end = True
            return False
        self.buffer.append(batch)
        if self.cache_whole_epoch and not self.cache_active:
            self.cache.append(batch)
        return True

    def _read_next_up_to_n(self, n):
        # Fill the buffer until it holds n batches or the generator is exhausted.
        while len(self.buffer) < n:
            if not self._read_next():
                break

    def peek_next_n(self, n):
        """
        :rtype: list[Batch]
        :returns it might return less. There is no way to know in advance.
          If self.has_more() is True, it will at least return one.
        """
        self._read_next_up_to_n(n)
        return self.buffer[:n]

    def advance(self, n):
        """
        :type n: int
        """
        assert n > 0
        self._read_next_up_to_n(n)
        assert len(self.buffer) >= n
        self.last_batch = self.buffer[n - 1]
        self.buffer = self.buffer[n:]
        self.current_batch_idx += n

    def completed_frac(self):
        """
        :rtype: float
        :returns 0-1, >0
        """
        if self.cache_active:
            # Total batch count is known, so report exact progress.
            return self.dataset.generic_complete_frac(self.current_batch_idx, len(self.cache))
        if not self.last_batch:
            return self.dataset.generic_complete_frac(0, None)
        # Otherwise estimate via the dataset position of the last consumed batch.
        return self.dataset.get_complete_frac(self.last_batch.start_seq)

    def has_more(self):
        """
        This would also try to advance further in the dataset, thus it might block.
        If it returns False, no more data is available in the dataset.

        :rtype: bool
        """
        return bool(self.buffer) or self._read_next()

    def get_current_batch_idx(self):
        """
        :rtype: int
        """
        return self.current_batch_idx
|
def is_checked_out():
    """
    Checks if the git submodule is checkout out.

    :rtype: bool
    """
    entry_point = '%s/src/rnnt_entrypoint.cpp' % submodule_dir
    return os.path.isfile(entry_point)
|
def init_warprnnt(verbose=False):
    """
    Initializes and compiles the library. Caches the TF module.

    :param bool verbose: passed through to the op compiler
    """
    global _tf_mod
    if _tf_mod:
        # Already compiled and loaded earlier.
        return
    assert is_checked_out(), 'submodule not checked out? Run `git submodule update --init --recursive`'
    src_files = [('%s/src/rnnt_entrypoint.cpp' % submodule_dir), ('%s/tensorflow_binding/src/warprnnt_op.cc' % submodule_dir)]
    assert all([os.path.isfile(f) for f in src_files]), ('submodule in %r not checked out?' % warprnnt_dir)
    # Concatenate all sources into one translation unit; the #line markers keep
    # compiler diagnostics pointing at the original file names.
    src_code = ''
    for fn in src_files:
        f_code = open(fn).read()
        src_code += ('\n// ------------ %s : BEGIN { ------------\n' % os.path.basename(fn))
        src_code += ('#line 1 "%s"\n' % os.path.basename(fn))
        src_code += f_code
        src_code += ('\n// ------------ %s : END } --------------\n\n' % os.path.basename(fn))
    with_cuda = (CudaEnv.get_instance().is_available() and is_gpu_available())
    # OpenMP is only enabled on Linux; otherwise it is explicitly disabled via RNNT_DISABLE_OMP.
    with_omp = sys.platform.startswith('linux')
    compiler = OpCodeCompiler(base_name='warprnnt_kernels', code_version=1, code=src_code, include_paths=((submodule_dir + '/include'),), c_macro_defines=dict_joined(({'WITH_OMP': 1} if with_omp else {'RNNT_DISABLE_OMP': 1}), ({'WARPRNNT_ENABLE_GPU': 1} if with_cuda else {})), ld_flags=(['-Xcompiler', '-fopenmp'] if with_omp else []), is_cpp=True, use_cuda_if_available=True, verbose=verbose)
    tf_mod = compiler.load_tf_module()
    # Sanity check that the compiled module actually exposes the op.
    assert hasattr(tf_mod, 'WarpRNNT'), ('content of mod: %r' % (dir(tf_mod),))
    _tf_mod = tf_mod
    return tf_mod
|
def rnnt_loss(acts, labels, input_lengths, label_lengths, blank_label=0):
    """Computes the RNNT loss between a sequence of activations and a
    ground truth labeling.

    Args:
        acts: A 4-D Tensor of floats. The dimensions
            should be (B, T, U, V), where B is the minibatch index,
            T is the time index, U is the prediction network sequence
            length, and V indexes over activations for each
            symbol in the alphabet.
        labels: A 2-D Tensor of ints, a padded label sequences to make sure
            labels for the minibatch are same length.
        input_lengths: A 1-D Tensor of ints, the number of time steps
            for each sequence in the minibatch.
        label_lengths: A 1-D Tensor of ints, the length of each label
            for each example in the minibatch.
        blank_label: int, the label value/index that the RNNT
            calculation should use as the blank label
    Returns:
        1-D float Tensor, the cost of each example in the minibatch
        (as negative log probabilities).

    * This class performs the softmax operation internally.
    * The label reserved for the blank symbol should be label 0.
    """
    # Lazily compiles and caches the native op on first use.
    init_warprnnt()
    # The op returns (costs, grads); only the per-sequence costs are exposed here.
    (loss, _) = _tf_mod.warp_rnnt(acts, labels, input_lengths, label_lengths, blank_label)
    # Pin the static shape to (B,), taken from the batch dim of acts.
    loss.set_shape(acts.shape.as_list()[:1])
    return loss
|
@ops.RegisterGradient('WarpRNNT')
def _warprnnt_loss_grad(op, grad_loss, _):
    """
    Gradient for the WarpRNNT op: the op's second output already holds the
    gradient w.r.t. the activations, so we just scale it by the incoming
    per-sequence loss gradient (reshaped to broadcast over T, U, V).
    """
    acts_grad = op.outputs[1]
    scale = tf.reshape(grad_loss, (-1, 1, 1, 1))
    # No gradients w.r.t. labels, input_lengths, label_lengths.
    return [scale * acts_grad, None, None, None]
|
def is_checked_out():
    """
    Checks if the git submodule is checkout out.

    :rtype: bool
    """
    core_file = '%s/core.cu' % submodule_dir
    return os.path.isfile(core_file)
|
def init_warprna(verbose=False):
    """
    Initializes and compiles the library. Caches the TF module.

    :param bool verbose: passed through to the op compiler
    """
    global _tf_mod
    if _tf_mod:
        # Already compiled and loaded earlier.
        return
    assert is_checked_out(), 'submodule not checked out? Run `git submodule update --init --recursive`'
    enable_gpu = OpCodeCompiler.cuda_available()
    # The CPU implementation is optional in the submodule; include it only if present.
    enable_cpu = os.path.exists(('%s/core_cpu.cpp' % submodule_dir))
    src_files = [('%s/tensorflow_binding/src/warp_rna_op.cc' % submodule_dir)]
    if enable_gpu:
        src_files.append(('%s/core.cu' % submodule_dir))
    if enable_cpu:
        src_files.append(('%s/core_cpu.cpp' % submodule_dir))
    # Concatenate all sources into one translation unit; the #line markers keep
    # compiler diagnostics pointing at the original file names.
    src_code = ''
    for fn in src_files:
        f_code = open(fn).read()
        src_code += ('\n// ------------ %s : BEGIN { ------------\n' % os.path.basename(fn))
        src_code += ('#line 1 "%s"\n' % os.path.basename(fn))
        src_code += f_code
        src_code += ('\n// ------------ %s : END } --------------\n\n' % os.path.basename(fn))
    compiler = OpCodeCompiler(base_name='warprna_kernels', code_version=1, code=src_code, include_paths=(submodule_dir, ('%s/tensorflow_binding/src' % submodule_dir)), c_macro_defines={'WARPRNA_ENABLE_CPU': (1 if enable_cpu else None), 'WARPRNA_ENABLE_GPU': (1 if enable_gpu else None)}, is_cpp=True, use_cuda_if_available=enable_gpu, verbose=verbose)
    tf_mod = compiler.load_tf_module()
    # Sanity check that the compiled module actually exposes the op.
    assert hasattr(tf_mod, 'WarpRNA'), ('content of mod: %r' % (dir(tf_mod),))
    _tf_mod = tf_mod
    return tf_mod
|
def rna_loss(log_probs, labels, input_lengths, label_lengths, blank_label=0):
    """Computes the RNA loss between a sequence of activations and a
    ground truth labeling.

    Args:
        log_probs: A 4-D Tensor of floats. The dimensions
            should be (B, T, U, V), where B is the minibatch index,
            T is the time index, U is the prediction network sequence
            length, and V indexes over activations for each
            symbol in the alphabet.
        labels: A 2-D Tensor of ints, shape (B,U-1) a padded label sequences to make sure
            labels for the minibatch are same length.
        input_lengths: A 1-D Tensor of ints, shape (B,), the number of time steps
            for each sequence in the minibatch.
        label_lengths: A 1-D Tensor of ints, shape (B,), the length of each label
            for each example in the minibatch.
        blank_label: int, scalar, the label value/index that the RNA
            calculation should use as the blank label
    Returns:
        1-D float Tensor, the cost of each example in the minibatch
        (as negative log probabilities).
    """
    # Lazily compiles and caches the native op on first use.
    init_warprna()
    # The op additionally takes the minimum label length as a scalar input
    # (presumably for kernel sizing — see the warp-rna op definition to confirm);
    # it returns (costs, grads), of which only the costs are exposed here.
    (loss, _) = _tf_mod.warp_rna(log_probs, labels, input_lengths, label_lengths, tf.reduce_min(label_lengths), blank_label)
    return loss
|
@ops.RegisterGradient('WarpRNA')
def _warprna_loss_grad(op, grad_loss, _):
    """
    Gradient for the WarpRNA op: the op's second output already holds the
    gradient w.r.t. the log-probs, so we just scale it by the incoming
    per-sequence loss gradient (reshaped to broadcast over T, U, V).
    """
    logprobs_grad = op.outputs[1]
    scale = tf.reshape(grad_loss, (-1, 1, 1, 1))
    # No gradients w.r.t. labels, input_lengths, label_lengths, min label length.
    return [scale * logprobs_grad, None, None, None, None]
|
def main():
    """main warp-rna demo: runs the forward test (which also compiles the op on first use)"""
    print('Hello, running WarpRna demo')
    test_warprna_forward()
|
def test_warprna_forward():
    """
    Test warp-rna forward pass: computes costs and gradients for a fixed
    (B=2, T=4, U=3, V=3) input and compares both against reference values.
    """
    assert is_checked_out()
    import numpy as np
    # Reference costs/gradients for the fixed inputs below.
    expected_costs = np.array([2.6347387, 2.4651031])
    expected_grads = np.array([[[[(- 0.34075904), (- 0.65924096), 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[(- 0.09434381), (- 0.24641524), 0.0], [(- 0.4480959), 0.0, (- 0.2111451)], [0.0, 0.0, 0.0]], [[0.0, (- 0.09434381), 0.0], [(- 0.25838017), 0.0, (- 0.43613094)], [(- 0.2111451), 0.0, 0.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, (- 0.35272402)], [(- 0.64727604), 0.0, 0.0]]], [[[(- 0.6283351), (- 0.37166485), 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[(- 0.26558593), (- 0.36274916), 0.0], [(- 0.23790276), (- 0.13376209), 0.0], [0.0, 0.0, 0.0]], [[0.0, (- 0.26558593), 0.0], [(- 0.26772842), (- 0.3329236), 0.0], [(- 0.13376209), 0.0, 0.0]], [[0.0, 0.0, 0.0], [0.0, (- 0.53331435), 0.0], [(- 0.46668565), 0.0, 0.0]]]])
    (n_batch, n_time, n_target, n_vocab) = (2, 4, 3, 3)
    # Fixed raw activations, reshaped to (B, T, U, V) and log-softmaxed below.
    acts = np.array([0.065357, 0.78753, 0.081592, 0.529716, 0.750675, 0.754135, 0.609764, 0.86814, 0.622532, 0.668522, 0.858039, 0.164539, 0.98978, 0.944298, 0.603168, 0.946783, 0.666203, 0.286882, 0.094184, 0.366674, 0.736168, 0.16668, 0.714154, 0.3994, 0.535982, 0.291821, 0.612642, 0.324241, 0.800764, 0.524106, 0.779195, 0.183314, 0.113745, 0.240222, 0.33947, 0.13416, 0.505562, 0.051597, 0.64029, 0.430733, 0.829473, 0.177467, 0.3207, 0.042883, 0.302803, 0.675178, 0.569537, 0.558474, 0.083132, 0.060165, 0.107958, 0.748615, 0.943918, 0.486356, 0.418199, 0.652408, 0.024243, 0.134582, 0.366342, 0.29583, 0.92367, 0.689929, 0.741898, 0.250005, 0.60343, 0.987289, 0.592606, 0.884672, 0.54345, 0.66077, 0.377128, 0.358021], dtype=np.float32)
    acts = np.reshape(acts, (n_batch, n_time, n_target, n_vocab))
    labels = np.array([[1, 2], [1, 1]], dtype=np.int32)
    input_lengths = np.array([4, 4], dtype=np.int32)
    label_lengths = np.array([2, 2], dtype=np.int32)
    acts_t = tf.convert_to_tensor(acts)
    labels_t = tf.convert_to_tensor(labels)
    input_lengths_t = tf.convert_to_tensor(input_lengths)
    label_lengths_t = tf.convert_to_tensor(label_lengths)
    log_probs = tf.nn.log_softmax(acts_t)
    # NOTE(review): variable names say "cuda", but rna_loss runs on whatever
    # backend init_warprna compiled (GPU or CPU).
    costs_cuda = rna_loss(log_probs, labels_t, input_lengths_t, label_lengths_t)
    grads_cuda = tf.gradients(costs_cuda, [log_probs])[0]
    with tf_v1.Session() as session:
        (out_costs_cuda, out_grads_cuda) = session.run([costs_cuda, grads_cuda])
    print('[CUDA] costs:', out_costs_cuda)
    np.testing.assert_allclose(out_grads_cuda, expected_grads, rtol=1e-06)
    np.testing.assert_allclose(out_costs_cuda, expected_costs, rtol=1e-06)
|
def detach_control_inputs(sgv):
    """Detach all the external control inputs of the subgraph sgv.

    Args:
      sgv: the subgraph view to be detached. This argument is converted to a
        subgraph using the same rules as the function subgraph.make_view.
    """
    sgv = subgraph.make_view(sgv)
    for op in sgv.ops:
        # Only control inputs coming from outside the subgraph are removed.
        external = [cop for cop in op.control_inputs if cop not in sgv.ops]
        reroute.remove_control_inputs(op, external)
|
def detach_control_outputs(sgv, control_outputs):
    """Detach all the external control outputs of the subgraph sgv.

    Args:
      sgv: the subgraph view to be detached. This argument is converted to a
        subgraph using the same rules as the function subgraph.make_view.
      control_outputs: a util.ControlOutputs instance.
    Raises:
      TypeError: if control_outputs is not a util.ControlOutputs instance.
    """
    if not isinstance(control_outputs, util.ControlOutputs):
        # Bug fix: the message and the type were previously passed as two separate
        # exception args, so the '{}' placeholder was never actually formatted.
        raise TypeError('Expected a util.ControlOutputs, got: {}'.format(type(control_outputs)))
    control_outputs.update()
    sgv = subgraph.make_view(sgv)
    for op in sgv.ops:
        for cop in control_outputs.get(op):
            # Only control edges leaving the subgraph are removed.
            if cop not in sgv.ops:
                reroute.remove_control_inputs(cop, op)
|
def detach_inputs(sgv, control_inputs=False):
    """Detach the inputs of a subgraph view.

    Args:
      sgv: the subgraph view to be detached. This argument is converted to a
        subgraph using the same rules as the function subgraph.make_view.
        Note that sgv is modified in place.
      control_inputs: if True control_inputs are also detached.
    Returns:
      A tuple `(sgv, input_placeholders)` where
        `sgv` is a new subgraph view of the detached subgraph;
        `input_placeholders` is a list of the created input placeholders.
    Raises:
      StandardError: if sgv cannot be converted to a SubGraphView using
        the same rules than the function subgraph.make_view.
    """
    sgv = subgraph.make_view(sgv)
    with sgv.graph.as_default():
        # One placeholder per external input, created inside the subgraph's graph.
        input_placeholders = []
        for input_t in sgv.inputs:
            ph = tf_array_ops.placeholder(dtype=input_t.dtype, name=util.placeholder_name(input_t))
            input_placeholders.append(ph)
    reroute.swap_inputs(sgv, input_placeholders)
    if control_inputs:
        detach_control_inputs(sgv)
    return (sgv, input_placeholders)
|
def detach_outputs(sgv, control_outputs=None):
    """Detach the output of a subgraph view.

    Args:
      sgv: the subgraph view to be detached. This argument is converted to a
        subgraph using the same rules as the function subgraph.make_view.
        Note that sgv is modified in place.
      control_outputs: a util.ControlOutputs instance or None. If not None the
        control outputs are also detached.
    Returns:
      A tuple `(sgv, output_placeholders)` where
        `sgv` is a new subgraph view of the detached subgraph;
        `output_placeholders` is a list of the created output placeholders.
    Raises:
      StandardError: if sgv cannot be converted to a SubGraphView using
        the same rules than the function subgraph.make_view.
    """
    sgv = subgraph.make_view(sgv)
    # Only outputs which actually have consumers need to be detached.
    sgv_ = sgv.remap_outputs([output_id for (output_id, output_t) in enumerate(sgv.outputs) if output_t.consumers()])
    # Build a view of the consuming ops, restricted to the inputs fed by sgv_.
    consumers_sgv = subgraph.SubGraphView(sgv_.consumers())
    consumers_sgv = consumers_sgv.remap_inputs([input_id for (input_id, input_t) in enumerate(consumers_sgv.inputs) if (input_t in sgv_.outputs)])
    with sgv_.graph.as_default():
        # One placeholder per detached output, created inside the subgraph's graph.
        output_placeholders = [util.make_placeholder_from_tensor(input_t) for input_t in consumers_sgv.inputs]
    # Rewire the consumers to read from the placeholders instead of sgv_'s outputs.
    reroute.swap_outputs(sgv_, output_placeholders)
    if (control_outputs is not None):
        detach_control_outputs(sgv_, control_outputs)
    return (sgv_, output_placeholders)
|
def detach(sgv, control_inputs=False, control_outputs=None, control_ios=None):
    """Detach both the inputs and the outputs of a subgraph view.

    Args:
      sgv: the subgraph view to be detached. This argument is converted to a
        subgraph using the same rules as the function subgraph.make_view.
        Note that sgv is modified in place.
      control_inputs: A boolean indicating whether control inputs are enabled.
      control_outputs: An instance of util.ControlOutputs or None. If not None,
        control outputs are enabled.
      control_ios: An instance of util.ControlOutputs or None. If not None, both
        control inputs and control outputs are enabled. This is equivalent to set
        control_inputs to True and control_outputs to the util.ControlOutputs
        instance.
    Returns:
      A tuple `(sgv, detached_inputs, detached_outputs)` where:
        `sgv` is a new subgraph view of the detached subgraph;
        `detach_inputs` is a list of the created input placeholders;
        `detach_outputs` is a list of the created output placeholders.
    Raises:
      StandardError: if sgv cannot be converted to a SubGraphView using
        the same rules than the function subgraph.make_view.
    """
    # check_cios resolves the control_ios shorthand into the two separate options.
    (control_inputs, control_outputs) = select.check_cios(control_inputs, control_outputs, control_ios)
    (_, detached_inputs) = detach_inputs(sgv, control_inputs)
    (_, detached_outputs) = detach_outputs(sgv, control_outputs)
    return (sgv, detached_inputs, detached_outputs)
|
def connect(sgv0, sgv1, disconnect_first=False):
    """Connect the outputs of sgv0 to the inputs of sgv1.

    Args:
      sgv0: the first subgraph to have its outputs swapped. This argument is
        converted to a subgraph using the same rules as the function
        subgraph.make_view.
        Note that sgv0 is modified in place.
      sgv1: the second subgraph to have its outputs swapped. This argument is
        converted to a subgraph using the same rules as the function
        subgraph.make_view.
        Note that sgv1 is modified in place.
      disconnect_first: if True the current outputs of sgv0 are disconnected.
    Returns:
      A tuple `(sgv0, sgv1)` of the now connected subgraphs.
    Raises:
      StandardError: if sgv0 or sgv1 cannot be converted to a SubGraphView using
        the same rules than the function subgraph.make_view.
    """
    sgv0 = subgraph.make_view(sgv0)
    sgv1 = subgraph.make_view(sgv1)
    # Both views must live in the same tf.Graph.
    util.check_graphs(sgv0, sgv1)
    if disconnect_first:
        # Detach sgv0's current consumers before rewiring.
        detach_outputs(sgv0)
    # Wrap sgv0's outputs as a passthrough view, then feed them into sgv1's inputs.
    sgv0_outputs = subgraph.SubGraphView(passthrough_ts=sgv0.outputs)
    reroute.reroute_inputs(sgv0_outputs, sgv1)
    return (sgv0, sgv1)
|
def bypass(sgv):
    """Bypass the given subgraph by connecting its inputs to its outputs.

    Args:
      sgv: the subgraph view to be bypassed. This argument is converted to a
        subgraph using the same rules than the function subgraph.make_view.
        Note that sgv is modified in place.
    Returns:
      A tuple `(sgv, detached_inputs)` where:
        `sgv` is a new subgraph view of the bypassed subgraph;
        `detached_inputs` is a list of the created input placeholders.
    Raises:
      StandardError: if sgv cannot be converted to a SubGraphView using
        the same rules than the function subgraph.make_view.
    """
    sgv = subgraph.make_view(sgv)
    # Capture the original input tensors before they are swapped for placeholders.
    sgv_inputs = list(sgv.inputs)
    (sgv, detached_inputs) = detach_inputs(sgv)
    # Route the former inputs directly to wherever sgv's outputs were consumed.
    reroute.reroute_ts(sgv_inputs, sgv.outputs)
    return (sgv, detached_inputs)
|
def _check_ts_compatibility(ts0, ts1):
    """Make sure the shape and dtype of the two tensor's lists are compatible.

    Args:
      ts0: an object convertible to a list of `tf.Tensor`.
      ts1: an object convertible to a list of `tf.Tensor`.
    Raises:
      ValueError: if any pair of tensors (same index in ts0 and ts1) have
        a dtype or a shape which is not compatible.
    """
    ts0 = _util.make_list_of_t(ts0)
    ts1 = _util.make_list_of_t(ts1)
    if len(ts0) != len(ts1):
        raise ValueError('ts0 and ts1 have different sizes: {} != {}'.format(len(ts0), len(ts1)))
    for t0, t1 in zip(ts0, ts1):
        dtype0, dtype1 = t0.dtype, t1.dtype
        if not dtype0.is_compatible_with(dtype1):
            raise ValueError('Dtypes {} and {} are not compatible.'.format(dtype0, dtype1))
        shape0, shape1 = t0.get_shape(), t1.get_shape()
        if not shape0.is_compatible_with(shape1):
            raise ValueError('Shapes {} and {} are not compatible.'.format(shape0, shape1))
|
class _RerouteMode(object):
"Enums for reroute's mode.\n\n swap: the end of tensors a and b are swapped.\n a2b: the end of the tensor a are also rerouted to the end of the tensor b\n (the end of b is left dangling).\n b2a: the end of the tensor b are also rerouted to the end of the tensor a\n (the end of a is left dangling).\n "
(swap, a2b, b2a) = range(3)
@classmethod
def check(cls, mode):
'Check swap mode.\n\n Args:\n mode: an integer representing one of the modes.\n Returns:\n A tuple `(a2b, b2a)` boolean indicating what rerouting needs doing.\n Raises:\n ValueError: if mode is outside the enum range.\n '
if (mode == cls.swap):
return (True, True)
elif (mode == cls.b2a):
return (False, True)
elif (mode == cls.a2b):
return (True, False)
else:
raise ValueError('Unknown _RerouteMode: {}'.format(mode))
|
def _reroute_t(t0, t1, consumers1, can_modify=None, cannot_modify=None):
    """Reroute the end of the tensors (t0,t1).

    Warning: this function is directly manipulating the internals of the
    `tf.Graph`.

    Args:
      t0: a tf.Tensor.
      t1: a tf.Tensor.
      consumers1: The consumers of t1 which needs to be rerouted.
      can_modify: iterable of operations which can be modified. Any operation
        outside within_ops will be left untouched by this function.
      cannot_modify: iterable of operations which cannot be modified.
        Any operation within cannot_modify will be left untouched by this
        function.
    Returns:
      The number of individual modifications made by the function.
    """
    nb_update_inputs = 0
    if can_modify is not None:
        consumers1 &= can_modify
    if cannot_modify is not None:
        consumers1 -= cannot_modify
    # First record, per consumer, which of its input slots currently hold t1 ...
    consumers1_indices = {
        consumer: [i for (i, t) in enumerate(consumer.inputs) if t is t1]
        for consumer in consumers1
    }
    # ... then rewire those slots to t0.
    for consumer, slot_indices in consumers1_indices.items():
        for i in slot_indices:
            consumer._update_input(i, t0)
            nb_update_inputs += 1
    return nb_update_inputs
|
def _reroute_ts(ts0, ts1, mode, can_modify=None, cannot_modify=None):
    """Reroute the end of the tensors in each pair (t0,t1) in ts0 x ts1.

    This function is the back-bone of the Graph-Editor. It is essentially a
    thin wrapper on top of the tf.Operation._update_input.

    Given a pair of tensor t0, t1 in ts0 x ts1, this function re-route the end
    of t0 and t1 in three possible ways:
    1) The reroute mode is "a<->b" or "b<->a": the tensors' end are swapped.
       After this operation, the previous consumers of t0 are now consumers
       of t1 and vice-versa.
    2) The reroute mode is "a->b": the tensors' end of t0 are re-routed to the
       tensors's end of t1 (which are left dangling). After this operation,
       the previous consumers of t0 are still consuming t0 but the previous
       consumers of t1 are now also consuming t0. The tensor t1 has no
       consumer.
    3) The reroute mode is "b->a": this mode is the symmetric of "a->b".

    Note that this function re-routes the end of two tensors, not the start:
    an op cannot be detached from its output tensors, only from its inputs.

    Warning: this function is directly manipulating the internals of the
    tf.Graph.

    Args:
      ts0: an object convertible to a list of `tf.Tensor`.
      ts1: an object convertible to a list of `tf.Tensor`.
      mode: reroute mode, see _RerouteMode.
      can_modify: iterable of operations which can be modified. Any operation
        outside within_ops will be left untouched by this function.
      cannot_modify: iterable of operations which cannot be modified.
        Any operation within cannot_modify will be left untouched by this
        function.
    Returns:
      The number of individual modifications made by the function.
    Raises:
      TypeError: if `ts0` or `ts1` cannot be converted to a list of
        `tf.Tensor`, or if `can_modify`/`cannot_modify` is not `None` and
        cannot be converted to a list of `tf.Operation`.
    """
    a2b, b2a = _RerouteMode.check(mode)
    ts0 = _util.make_list_of_t(ts0)
    ts1 = _util.make_list_of_t(ts1)
    _check_ts_compatibility(ts0, ts1)
    if cannot_modify is not None:
        cannot_modify = frozenset(_util.make_list_of_op(cannot_modify))
    if can_modify is not None:
        can_modify = frozenset(_util.make_list_of_op(can_modify))
    # Snapshot every consumer set up-front: rerouting mutates the consumer
    # lists, so they must not be (re)computed inside the modification loop.
    consumer_pairs = [
        (set(t0.consumers()), set(t1.consumers())) for t0, t1 in zip(ts0, ts1)
    ]
    num_modified = 0
    for (t0, t1), (consumers0, consumers1) in zip(zip(ts0, ts1), consumer_pairs):
        if t0 is t1:
            continue  # nothing to do for identical tensors
        if a2b:
            num_modified += _reroute_t(t0, t1, consumers1, can_modify, cannot_modify)
        if b2a:
            num_modified += _reroute_t(t1, t0, consumers0, can_modify, cannot_modify)
    return num_modified
|
def swap_ts(ts0, ts1, can_modify=None, cannot_modify=None):
    """For each tensor's pair, swap the end of (t0,t1).

        B0 B1     B0 B1
        |  |  =>   X
        A0 A1     A0 A1

    Args:
      ts0: an object convertible to a list of `tf.Tensor`.
      ts1: an object convertible to a list of `tf.Tensor`.
      can_modify: iterable of operations which can be modified. Any operation
        outside within_ops will be left untouched by this function.
      cannot_modify: iterable of operations which cannot be modified.
    Returns:
      The number of individual modifications made by the function.
    Raises:
      TypeError: if ts0 or ts1 cannot be converted to a list of tf.Tensor, or
        if can_modify/cannot_modify is not None and cannot be converted to a
        list of tf.Operation.
    """
    return _reroute_ts(ts0, ts1, _RerouteMode.swap,
                       can_modify=can_modify, cannot_modify=cannot_modify)
|
def reroute_ts(ts0, ts1, can_modify=None, cannot_modify=None):
    """For each tensor's pair, replace the end of t1 by the end of t0.

        B0 B1     B0 B1
        |  |  =>  |/
        A0 A1     A0 A1

    The end of the tensors in ts1 are left dangling.

    Args:
      ts0: an object convertible to a list of `tf.Tensor`.
      ts1: an object convertible to a list of `tf.Tensor`.
      can_modify: iterable of operations which can be modified. Any operation
        outside within_ops will be left untouched by this function.
      cannot_modify: iterable of operations which cannot be modified.
    Returns:
      The number of individual modifications made by the function.
    Raises:
      TypeError: if ts0 or ts1 cannot be converted to a list of tf.Tensor, or
        if can_modify/cannot_modify is not None and cannot be converted to a
        list of tf.Operation.
    """
    return _reroute_ts(ts0, ts1, _RerouteMode.a2b,
                       can_modify=can_modify, cannot_modify=cannot_modify)
|
def _reroute_sgv_remap(sgv0, sgv1, mode):
    """Remap in place the inputs of two subgraph views to mimic the reroute.

    This function is meant to be used by reroute_inputs only.

    Args:
      sgv0: the first subgraph to have its inputs remapped.
      sgv1: the second subgraph to have its inputs remapped.
      mode: reroute mode, see _reroute_ts(...).
    Raises:
      TypeError: if svg0 or svg1 are not SubGraphView.
      ValueError: if sgv0 and sgv1 do not belong to the same graph.
    """
    (a2b, b2a) = _RerouteMode.check(mode)
    if (not isinstance(sgv0, _subgraph.SubGraphView)):
        raise TypeError('Expected a SubGraphView, got {}'.format(type(sgv0)))
    if (not isinstance(sgv1, _subgraph.SubGraphView)):
        raise TypeError('Expected a SubGraphView, got {}'.format(type(sgv1)))
    _util.check_graphs(sgv0, sgv1)
    # Work on copies; the caller-visible views are only mutated at the very
    # end via _assign_from, once all remapping has succeeded.
    sgv0_ = sgv0.copy()
    sgv1_ = sgv1.copy()
    if (a2b and b2a):
        # Swap mode: exchange both the input and passthrough tensor lists.
        (sgv0_._input_ts, sgv1_._input_ts) = (sgv1_._input_ts, sgv0_._input_ts)
        (sgv0_._passthrough_ts, sgv1_._passthrough_ts) = (sgv1_._passthrough_ts, sgv0_._passthrough_ts)
    elif a2b:
        # a->b: b takes (copies of) a's input/passthrough lists; a untouched.
        sgv1_._input_ts = sgv0_._input_ts[:]
        sgv1_._passthrough_ts = sgv0_._passthrough_ts[:]
    elif b2a:
        # b->a: symmetric of the a->b case.
        sgv0_._input_ts = sgv1_._input_ts[:]
        sgv0_._passthrough_ts = sgv1_._passthrough_ts[:]

    def update_passthrough_outputs(a, b):
        # Any output of `b` that was a passthrough tensor of `a` is replaced
        # by b's own input at the corresponding index, so the remapped view
        # stays self-consistent.
        for (i, t) in enumerate(b._output_ts):
            if (t in a._passthrough_ts):
                ii = a._input_ts.index(t)
                b._output_ts[i] = b._input_ts[ii]
    if a2b:
        update_passthrough_outputs(sgv0_, sgv1_)
    if b2a:
        update_passthrough_outputs(sgv1_, sgv0_)
    # Commit the remapped state back into the views passed by the caller.
    sgv0._assign_from(sgv0_)
    sgv1._assign_from(sgv1_)
|
def _reroute_sgv_inputs(sgv0, sgv1, mode):
    """Re-route all the inputs of two subgraphs.

    Args:
      sgv0: the first subgraph to have its inputs swapped. This argument is
        converted to a subgraph using the same rules than the function
        subgraph.make_view.
      sgv1: the second subgraph to have its inputs swapped. Same conversion
        rules as sgv0.
      mode: reroute mode, see _reroute_ts(...).
    Returns:
      A tuple `(sgv0, sgv1)` of subgraph views with their inputs swapped.
        Note that the function argument sgv0 and sgv1 are also modified in
        place.
    Raises:
      StandardError: if sgv0 or sgv1 cannot be converted to a SubGraphView
        using the same rules than the function subgraph.make_view.
    """
    sgv0 = _subgraph.make_view(sgv0)
    sgv1 = _subgraph.make_view(sgv1)
    _util.check_graphs(sgv0, sgv1)
    # Ops inside either view may be modified, and so may the external ops
    # that consume the views' passthrough tensors.
    can_modify = (sgv0.ops + sgv1.ops
                  + _util.get_consuming_ops(sgv0.passthroughs)
                  + _util.get_consuming_ops(sgv1.passthroughs))
    _reroute_ts(sgv0.inputs, sgv1.inputs, mode, can_modify=can_modify)
    _reroute_sgv_remap(sgv0, sgv1, mode)
    return (sgv0, sgv1)
|
def _reroute_sgv_outputs(sgv0, sgv1, mode):
    """Re-route all the outputs of two operations.

    Args:
      sgv0: the first subgraph to have its outputs swapped. This argument is
        converted to a subgraph using the same rules than the function
        subgraph.make_view.
      sgv1: the second subgraph to have its outputs swapped. Same conversion
        rules as sgv0.
      mode: reroute mode, see _reroute_ts(...).
    Returns:
      A tuple `(sgv0, sgv1)` of subgraph views with their outputs swapped.
        Note that the function argument sgv0 and sgv1 are also modified in
        place.
    Raises:
      StandardError: if sgv0 or sgv1 cannot be converted to a SubGraphView
        using the same rules than the function subgraph.make_view.
    """
    sgv0 = _subgraph.make_view(sgv0)
    sgv1 = _subgraph.make_view(sgv1)
    _util.check_graphs(sgv0, sgv1)
    # The ops belonging to either view must keep their own inputs intact.
    protected_ops = sgv0.ops + sgv1.ops
    _reroute_ts(sgv0.outputs, sgv1.outputs, mode, cannot_modify=protected_ops)
    return (sgv0, sgv1)
|
def _reroute_sgv(sgv0, sgv1, mode):
    """Re-route both the inputs and the outputs of the two subgraph views.

    This involves swapping all the inputs/outputs of the two subgraph views.

    Args:
      sgv0: the first subgraph to be swapped. This argument is converted to a
        subgraph using the same rules than the function subgraph.make_view.
      sgv1: the second subgraph to be swapped. Same conversion rules as sgv0.
      mode: reroute mode, see _reroute_ts(...).
    Returns:
      A tuple `(sgv0, sgv1)` of subgraph views with their outputs and inputs
        swapped. Note that the function argument sgv0 and sgv1 are also
        modified in place.
    Raises:
      StandardError: if sgv0 or sgv1 cannot be converted to a SubGraphView
        using the same rules than the function subgraph.make_view.
    """
    # Outputs first, then inputs — both operate on the same pair of views.
    _reroute_sgv_outputs(sgv0, sgv1, mode)
    _reroute_sgv_inputs(sgv0, sgv1, mode)
    return (sgv0, sgv1)
|
def swap_inputs(sgv0, sgv1):
    """Swap all the inputs of sgv0 and sgv1 (see reroute_inputs)."""
    return _reroute_sgv_inputs(sgv0, sgv1, mode=_RerouteMode.swap)
|
def reroute_inputs(sgv0, sgv1):
    """Re-route all the inputs of two subgraphs.

    Args:
      sgv0: the first subgraph to have its inputs swapped. This argument is
        converted to a subgraph using the same rules than the function
        subgraph.make_view.
      sgv1: the second subgraph to have its inputs swapped. Same conversion
        rules as sgv0.
    Returns:
      A tuple `(sgv0, sgv1)` of subgraph views with their inputs swapped.
        Note that the function argument sgv0 and sgv1 are also modified in
        place.
    Raises:
      StandardError: if sgv0 or sgv1 cannot be converted to a SubGraphView
        using the same rules than the function subgraph.make_view.
    """
    return _reroute_sgv_inputs(sgv0, sgv1, mode=_RerouteMode.a2b)
|
def swap_outputs(sgv0, sgv1):
    """Swap all the outputs of sgv0 and sgv1 (see reroute_outputs)."""
    return _reroute_sgv_outputs(sgv0, sgv1, mode=_RerouteMode.swap)
|
def reroute_outputs(sgv0, sgv1):
    """Re-route all the outputs of two operations.

    Args:
      sgv0: the first subgraph to have its outputs swapped. This argument is
        converted to a subgraph using the same rules than the function
        subgraph.make_view.
      sgv1: the second subgraph to have its outputs swapped. Same conversion
        rules as sgv0.
    Returns:
      A tuple `(sgv0, sgv1)` of subgraph views with their outputs swapped.
        Note that the function argument sgv0 and sgv1 are also modified in
        place.
    Raises:
      StandardError: if sgv0 or sgv1 cannot be converted to a SubGraphView
        using the same rules than the function subgraph.make_view.
    """
    return _reroute_sgv_outputs(sgv0, sgv1, mode=_RerouteMode.a2b)
|
def swap_ios(sgv0, sgv1):
    """Swap the inputs and outputs of sgv1 to sgv0 (see _reroute_sgv)."""
    return _reroute_sgv(sgv0, sgv1, mode=_RerouteMode.swap)
|
def reroute_ios(sgv0, sgv1):
    """Re-route the inputs and outputs of sgv0 to sgv1 (see _reroute_sgv)."""
    return _reroute_sgv(sgv0, sgv1, mode=_RerouteMode.a2b)
|
def remove_control_inputs(op, cops):
    """Remove the control inputs cops from op.

    Warning: this function is directly manipulating the internals of the
    `tf.Graph`.

    Args:
      op: a `tf.Operation` from which to remove the control inputs.
      cops: an object convertible to a list of `tf.Operation`.
    Raises:
      TypeError: if op is not a `tf.Operation`.
      ValueError: if any cop in cops is not a control input of op.
    """
    if not isinstance(op, _tf_ops.Operation):
        # Bug fix: the message previously used ('...{}', type(op)) — a comma
        # instead of .format — so the placeholder was never substituted.
        raise TypeError('Expected a tf.Operation, got: {}'.format(type(op)))
    cops = _util.make_list_of_op(cops, allow_graph=False)
    for cop in cops:
        if cop not in op.control_inputs:
            # Bug fix: the names were swapped — it is `cop` that is not a
            # control input of `op` (matches add_control_inputs' ordering).
            raise ValueError('{} is not a control_input of {}'.format(cop.name, op.name))
    # Rebuild the control-input list without the removed ops, then reinstall
    # it via the private tf.Operation API (no public removal API exists).
    remaining = [c for c in op.control_inputs if c not in cops]
    op._remove_all_control_inputs()
    op._add_control_inputs(remaining)
|
def add_control_inputs(op, cops):
    """Add the control inputs cops to op.

    Warning: this function is directly manipulating the internals of the
    tf.Graph.

    Args:
      op: a tf.Operation to which the control inputs are added.
      cops: an object convertible to a list of `tf.Operation`.
    Raises:
      TypeError: if op is not a tf.Operation
      ValueError: if any cop in cops is already a control input of op.
    """
    if not isinstance(op, _tf_ops.Operation):
        # Bug fix: the message previously used ('...{}', type(op)) — a comma
        # instead of .format — so the placeholder was never substituted.
        raise TypeError('Expected a tf.Operation, got: {}'.format(type(op)))
    cops = _util.make_list_of_op(cops, allow_graph=False)
    for cop in cops:
        if cop in op.control_inputs:
            raise ValueError('{} is already a control_input of {}'.format(cop.name, op.name))
    # Private tf.Operation API: appends the new control dependencies in place.
    op._add_control_inputs(cops)
|
def can_be_regex(obj):
    """Return True if obj can be turned into a regular expression."""
    # Either a plain string or an already-compiled regex object qualifies.
    acceptable_types = string_types + (_RE_TYPE,)
    return isinstance(obj, acceptable_types)
|
def make_regex(obj):
    """Return a compiled regular expression.

    Args:
      obj: a string or a regular expression.
    Returns:
      A compiled regular expression.
    Raises:
      ValueError: if obj could not be converted to a regular expression.
    """
    if not can_be_regex(obj):
        raise ValueError('Expected a string or a regex, got: {}'.format(type(obj)))
    # Strings get compiled; already-compiled patterns pass through unchanged.
    return re.compile(obj) if isinstance(obj, string_types) else obj
|
def _get_input_ts(ops):
    """Compute the list of unique input tensors of all the op in ops.

    Args:
      ops: an object convertible to a list of `tf.Operation`.
    Returns:
      The list of unique input tensors of all the op in ops.
    Raises:
      TypeError: if ops cannot be converted to a list of `tf.Operation`.
    """
    ops = util.make_list_of_op(ops)
    # Deduplicate while preserving first-seen order.
    seen = set()
    unique_ts = []
    for op in ops:
        for tensor in op.inputs:
            if tensor in seen:
                continue
            seen.add(tensor)
            unique_ts.append(tensor)
    return unique_ts
|
def _get_output_ts(ops):
    """Compute the list of unique output tensors of all the op in ops.

    Args:
      ops: an object convertible to a list of tf.Operation.
    Returns:
      The list of unique output tensors of all the op in ops.
    Raises:
      TypeError: if ops cannot be converted to a list of tf.Operation.
    """
    ops = util.make_list_of_op(ops)
    ts = []
    ts_set = set()
    for op in ops:
        for t in op.outputs:
            # Fix: the docstring promises *unique* tensors, but the previous
            # implementation returned duplicates whenever `ops` contained the
            # same op twice. Dedupe while preserving order, mirroring
            # _get_input_ts.
            if t not in ts_set:
                ts.append(t)
                ts_set.add(t)
    return ts
|
def filter_ts(ops, positive_filter):
    """Get all the tensors which are input or output of an op in ops.

    Args:
      ops: an object convertible to a list of `tf.Operation`.
      positive_filter: a function deciding whether to keep a tensor or not.
        If `True`, all the tensors are returned.
    Returns:
      A list of `tf.Tensor`.
    Raises:
      TypeError: if ops cannot be converted to a list of `tf.Operation`.
    """
    ops = util.make_list_of_op(ops)
    # Collect inputs first, then merge in the outputs without duplicates.
    ts = _get_input_ts(ops)
    util.concatenate_unique(ts, _get_output_ts(ops))
    if positive_filter is True:
        return ts
    return [t for t in ts if positive_filter(t)]
|
def filter_ts_from_regex(ops, regex):
    """Get all the tensors linked to ops that match the given regex.

    Args:
      ops: an object convertible to a list of tf.Operation.
      regex: a regular expression matching the tensors' name.
        For example, "^foo(/.*)?:\\d+$" will match all the tensors in the
        "foo" scope.
    Returns:
      A list of tf.Tensor.
    Raises:
      TypeError: if ops cannot be converted to a list of tf.Operation.
    """
    ops = util.make_list_of_op(ops)
    compiled = make_regex(regex)

    def _name_matches(t):
        return compiled.search(t.name)

    return filter_ts(ops, positive_filter=_name_matches)
|
def filter_ops(ops, positive_filter):
    """Get the ops passing the given filter.

    Args:
      ops: an object convertible to a list of tf.Operation.
      positive_filter: a function deciding where to keep an operation or not.
        If True, all the operations are returned.
    Returns:
      A list of selected tf.Operation.
    Raises:
      TypeError: if ops cannot be converted to a list of tf.Operation.
    """
    ops = util.make_list_of_op(ops)
    if positive_filter is True:
        # `True` acts as a pass-through filter.
        return ops
    return [op for op in ops if positive_filter(op)]
|
def filter_ops_from_regex(ops, regex):
    """Get all the operations that match the given regex.

    Args:
      ops: an object convertible to a list of `tf.Operation`.
      regex: a regular expression matching the operation's name.
        For example, `"^foo(/.*)?$"` will match all the operations in the
        "foo" scope.
    Returns:
      A list of `tf.Operation`.
    Raises:
      TypeError: if ops cannot be converted to a list of `tf.Operation`.
    """
    ops = util.make_list_of_op(ops)
    compiled = make_regex(regex)

    def _name_matches(op):
        return compiled.search(op.name)

    return filter_ops(ops, _name_matches)
|
def get_name_scope_ops(ops, scope):
    """Get all the operations under the given scope path.

    Args:
      ops: an object convertible to a list of tf.Operation.
      scope: a scope path.
    Returns:
      A list of tf.Operation.
    Raises:
      TypeError: if ops cannot be converted to a list of tf.Operation.
    """
    # Drop a single trailing slash so the regex anchors on the scope name.
    if scope and scope.endswith('/'):
        scope = scope[:-1]
    return filter_ops_from_regex(ops, '^{}(/.*)?$'.format(scope))
|
def check_cios(control_inputs=False, control_outputs=None, control_ios=None):
    """Do various check on control_inputs and control_outputs.

    Args:
      control_inputs: A boolean indicating whether control inputs are enabled.
      control_outputs: An instance of util.ControlOutputs or None. If not
        None, control outputs are enabled.
      control_ios: An instance of util.ControlOutputs or None. If not None,
        both control inputs and control outputs are enabled. This is
        equivalent to set control_inputs to True and control_outputs to the
        util.ControlOutputs instance.
    Returns:
      A tuple `(control_inputs, control_outputs)` where:
        `control_inputs` is a boolean indicating whether to use control
        inputs. `control_outputs` is an instance of util.ControlOutputs or
        None.
    Raises:
      ValueError: if control_inputs is an instance of util.ControlOutputs but
        control_outputs is not None
      TypeError: if control_outputs is not None and is not a
        util.ControlOutputs.
    """
    if control_ios is not None:
        if not isinstance(control_ios, util.ControlOutputs):
            raise TypeError('Expected a util.ControlOutputs, got: {}'.format(type(control_ios)))
        if control_outputs is not None:
            raise ValueError('control_outputs should be None when using control_ios.')
        # control_ios is shorthand for enabling both directions at once.
        control_inputs, control_outputs = True, control_ios
    elif control_outputs is not None and not isinstance(control_outputs, util.ControlOutputs):
        raise TypeError('Expected a util.ControlOutputs, got: {}'.format(type(control_outputs)))
    if control_outputs is not None:
        # Refresh the cached control-output mapping before use.
        control_outputs.update()
    return (control_inputs, control_outputs)
|
def get_ops_ios(ops, control_inputs=False, control_outputs=None, control_ios=None):
    """Return all the `tf.Operation` which are connected to an op in ops.

    Args:
      ops: an object convertible to a list of `tf.Operation`.
      control_inputs: A boolean indicating whether control inputs are enabled.
      control_outputs: An instance of `util.ControlOutputs` or `None`. If not
        `None`, control outputs are enabled.
      control_ios: An instance of `util.ControlOutputs` or `None`. If not
        `None`, both control inputs and control outputs are enabled. This is
        equivalent to set `control_inputs` to `True` and `control_outputs` to
        the `util.ControlOutputs` instance.
    Returns:
      All the `tf.Operation` surrounding the given ops.
    Raises:
      TypeError: if `ops` cannot be converted to a list of `tf.Operation`.
    """
    control_inputs, control_outputs = check_cios(control_inputs, control_outputs, control_ios)
    neighbors = []
    for op in util.make_list_of_op(ops):
        # Producers of this op's inputs.
        util.concatenate_unique(neighbors, [t.op for t in op.inputs])
        # Consumers of this op's outputs.
        for t in op.outputs:
            util.concatenate_unique(neighbors, t.consumers())
        # Optionally, control-flow neighbors in both directions.
        if control_outputs is not None:
            util.concatenate_unique(neighbors, control_outputs.get(op))
        if control_inputs:
            util.concatenate_unique(neighbors, op.control_inputs)
    return neighbors
|
def compute_boundary_ts(ops):
    """Compute the tensors at the boundary of a set of ops.

    This function looks at all the tensors connected to the given ops (in/out)
    and classify them into three categories:
    1) input tensors: tensors whose generating operation is not in ops.
    2) output tensors: tensors whose consumer operations are not in ops
    3) inside tensors: tensors which are neither input nor output tensors.

    Note that a tensor can be both an inside tensor and an output tensor if it
    is consumed by operations both outside and inside of `ops`.

    Args:
      ops: an object convertible to a list of tf.Operation.
    Returns:
      A tuple `(outside_input_ts, outside_output_ts, inside_ts)` where:
        `outside_input_ts` is a Python list of input tensors;
        `outside_output_ts` is a python list of output tensors;
        `inside_ts` is a python list of inside tensors.
      Since a tensor can be both an inside tensor and an output tensor,
      `outside_output_ts` and `inside_ts` might intersect.
    Raises:
      TypeError: if ops cannot be converted to a list of tf.Operation.
    """
    ops = util.make_list_of_op(ops)
    input_ts = _get_input_ts(ops)
    output_ts = _get_output_ts(ops)
    output_ts_set = frozenset(output_ts)
    ops_set = frozenset(ops)
    # A tensor consumed by an op in `ops` is "inside" iff it is also produced
    # by an op in `ops` (i.e. it appears in both input_ts and output_ts).
    inside_ts = []
    only_inside_ts = []
    for t in input_ts:
        if (t not in output_ts_set):
            continue
        inside_ts.append(t)
        # "Only inside" additionally requires that none of the tensor's
        # consumers lies outside of `ops`.
        consumers = frozenset(t.consumers())
        if (consumers - ops_set):
            continue
        only_inside_ts.append(t)
    inside_ts_set = frozenset(inside_ts)
    only_inside_ts_set = frozenset(only_inside_ts)
    # An output tensor escapes the set unless ALL of its consumers are inside.
    outside_output_ts = [t for t in output_ts if (t not in only_inside_ts_set)]
    # An input tensor comes from outside unless it is produced within `ops`.
    outside_input_ts = [t for t in input_ts if (t not in inside_ts_set)]
    return (outside_input_ts, outside_output_ts, inside_ts)
|
def get_within_boundary_ops(ops, seed_ops, boundary_ops=(), inclusive=True, control_inputs=False, control_outputs=None, control_ios=None):
    """Return all the `tf.Operation` within the given boundary.

    Args:
      ops: an object convertible to a list of `tf.Operation`. those ops define
        the set in which to perform the operation (if a `tf.Graph` is given,
        it will be converted to the list of all its operations).
      seed_ops: the operations from which to start expanding.
      boundary_ops: the ops forming the boundary.
      inclusive: if `True`, the result will also include the boundary ops.
      control_inputs: A boolean indicating whether control inputs are enabled.
      control_outputs: An instance of `util.ControlOutputs` or `None`. If not
        `None`, control outputs are enabled.
      control_ios: An instance of `util.ControlOutputs` or `None`. If not
        `None`, both control inputs and control outputs are enabled. This is
        equivalent to set control_inputs to True and control_outputs to the
        `util.ControlOutputs` instance.
    Returns:
      All the `tf.Operation` surrounding the given ops.
    Raises:
      TypeError: if `ops` or `seed_ops` cannot be converted to a list of
        `tf.Operation`.
      ValueError: if the boundary is intersecting with the seeds.
    """
    (control_inputs, control_outputs) = check_cios(control_inputs, control_outputs, control_ios)
    ops = util.make_list_of_op(ops)
    seed_ops = util.make_list_of_op(seed_ops, allow_graph=False)
    boundary_ops = set(util.make_list_of_op(boundary_ops))
    res = set(seed_ops)
    if (boundary_ops & res):
        raise ValueError('Boundary is intersecting with the seeds.')
    # Breadth-first expansion: each wave is the set of newly reached ops;
    # the walk never expands THROUGH a boundary op (boundary ops are added to
    # `res` when inclusive, but never to the next wave).
    wave = set(seed_ops)
    while wave:
        new_wave = set()
        ops_io = get_ops_ios(wave, control_inputs, control_outputs)
        for op in ops_io:
            if (op in res):
                continue  # already visited
            if (op in boundary_ops):
                if inclusive:
                    res.add(op)
            else:
                new_wave.add(op)
        res.update(new_wave)
        wave = new_wave
    # Filter through `ops` to both restrict to the allowed set and preserve
    # the ordering of `ops` in the returned list.
    return [op for op in ops if (op in res)]
|
def get_forward_walk_ops(seed_ops, inclusive=True, within_ops=None, within_ops_fn=None, stop_at_ts=(), control_outputs=None):
    """Do a forward graph walk and return all the visited ops.

    Args:
      seed_ops: an iterable of operations from which the forward graph
        walk starts. If a list of tensors is given instead, the seed_ops are
        set to be the consumers of those tensors.
      inclusive: if True the given seed_ops are also part of the resulting
        set.
      within_ops: an iterable of `tf.Operation` within which the search is
        restricted. If `within_ops` is `None`, the search is performed within
        the whole graph.
      within_ops_fn: if provided, a function on ops that should return True
        iff the op is within the graph traversal. This can be used along
        within_ops, in which case an op is within if it is also in
        within_ops.
      stop_at_ts: an iterable of tensors at which the graph walk stops.
      control_outputs: a `util.ControlOutputs` instance or None.
        If not `None`, it will be used while walking the graph forward.
    Returns:
      A Python list of all the `tf.Operation` ahead of `seed_ops`.
    Raises:
      TypeError: if `seed_ops` or `within_ops` cannot be converted to a list
        of `tf.Operation`.
    """
    (_, control_outputs) = check_cios(False, control_outputs)
    if (not util.is_iterable(seed_ops)):
        seed_ops = [seed_ops]
    if (not seed_ops):
        return []
    # Seeds given as tensors are translated into their consuming ops.
    if isinstance(seed_ops[0], tf_ops.Tensor):
        ts = util.make_list_of_t(seed_ops, allow_graph=False)
        seed_ops = util.get_consuming_ops(ts)
    else:
        seed_ops = util.make_list_of_op(seed_ops, allow_graph=False)
    seed_ops = frozenset(seed_ops)
    stop_at_ts = frozenset(util.make_list_of_t(stop_at_ts))
    if within_ops:
        within_ops = util.make_list_of_op(within_ops, allow_graph=False)
        within_ops = frozenset(within_ops)
        # Seeds outside the restriction set are discarded up-front.
        seed_ops &= within_ops
    def is_within(op):
        """
        :param tf.Operation op:
        :rtype: bool
        """
        # An op is eligible if it passes BOTH the within_ops set (when given)
        # and the within_ops_fn predicate (when given).
        return (((within_ops is None) or (op in within_ops)) and ((within_ops_fn is None) or within_ops_fn(op)))
    # Breadth-first forward walk: follow output tensors to their consumers,
    # plus (optionally) control-output edges.
    result = list(seed_ops)
    wave = set(seed_ops)
    while wave:
        new_wave = set()
        for op in wave:
            for new_t in op.outputs:
                if (new_t in stop_at_ts):
                    continue  # walk is cut at this tensor
                for new_op in new_t.consumers():
                    if ((new_op not in result) and is_within(new_op)):
                        new_wave.add(new_op)
            if (control_outputs is not None):
                for new_op in control_outputs.get(op):
                    if ((new_op not in result) and is_within(new_op)):
                        new_wave.add(new_op)
        util.concatenate_unique(result, new_wave)
        wave = new_wave
    if (not inclusive):
        # Strip the seeds (they were placed at the head of `result`).
        result = [op for op in result if (op not in seed_ops)]
    return result
|
def get_backward_walk_ops(seed_ops, inclusive=True, within_ops=None, within_ops_fn=None, stop_at_ts=(), control_inputs=False):
    """Do a backward graph walk and return all the visited ops.

    Args:
      seed_ops: an iterable of operations from which the backward graph
        walk starts. If a list of tensors is given instead, the seed_ops are
        set to be the generators of those tensors.
      inclusive: if True the given seed_ops are also part of the resulting
        set.
      within_ops: an iterable of `tf.Operation` within which the search is
        restricted. If `within_ops` is `None`, the search is performed within
        the whole graph.
      within_ops_fn: if provided, a function on ops that should return True
        iff the op is within the graph traversal. This can be used along
        within_ops, in which case an op is within if it is also in
        within_ops.
      stop_at_ts: an iterable of tensors at which the graph walk stops.
      control_inputs: if True, control inputs will be used while moving
        backward.
    Returns:
      A Python list of all the `tf.Operation` behind `seed_ops`.
    Raises:
      TypeError: if `seed_ops` or `within_ops` cannot be converted to a list
        of `tf.Operation`.
    """
    # The implementation moved between TF releases: prefer the contrib
    # graph_editor location, fall back to the core op_selector module.
    try:
        from tensorflow.contrib.graph_editor import select as op_selector
    except ImportError:
        from tensorflow.python.ops import op_selector
    return op_selector.get_backward_walk_ops(
        seed_ops,
        inclusive=inclusive,
        within_ops=within_ops,
        within_ops_fn=within_ops_fn,
        stop_at_ts=stop_at_ts,
        control_inputs=control_inputs)
|
def get_walks_intersection_ops(forward_seed_ops, backward_seed_ops, forward_inclusive=True, backward_inclusive=True, within_ops=None, within_ops_fn=None, control_inputs=False, control_outputs=None, control_ios=None):
    """Return the intersection of a forward and a backward walk.

    Args:
      forward_seed_ops: an iterable of operations from which the forward
        graph walk starts. If a list of tensors is given instead, the
        seed_ops are set to be the consumers of those tensors.
      backward_seed_ops: an iterable of operations from which the backward
        graph walk starts. If a list of tensors is given instead, the
        seed_ops are set to be the generators of those tensors.
      forward_inclusive: if True the given forward_seed_ops are also part of
        the resulting set.
      backward_inclusive: if True the given backward_seed_ops are also part
        of the resulting set.
      within_ops: an iterable of tf.Operation within which the search is
        restricted. If within_ops is None, the search is performed within
        the whole graph.
      within_ops_fn: if provided, a function on ops that should return True
        iff the op is within the graph traversal. This can be used along
        within_ops, in which case an op is within if it is also in
        within_ops.
      control_inputs: A boolean indicating whether control inputs are
        enabled.
      control_outputs: An instance of util.ControlOutputs or None. If not
        None, control outputs are enabled.
      control_ios: An instance of util.ControlOutputs or None. If not None,
        both control inputs and control outputs are enabled. This is
        equivalent to set control_inputs to True and control_outputs to the
        util.ControlOutputs instance.
    Returns:
      A Python list of all the tf.Operation in the intersection of a forward
      and a backward walk.
    Raises:
      TypeError: if `forward_seed_ops` or `backward_seed_ops` or `within_ops`
        cannot be converted to a list of `tf.Operation`.
    """
    (control_inputs, control_outputs) = check_cios(control_inputs, control_outputs, control_ios)
    forward_ops = get_forward_walk_ops(forward_seed_ops, inclusive=forward_inclusive, within_ops=within_ops, within_ops_fn=within_ops_fn, control_outputs=control_outputs)
    backward_ops = get_backward_walk_ops(backward_seed_ops, inclusive=backward_inclusive, within_ops=within_ops, within_ops_fn=within_ops_fn, control_inputs=control_inputs)
    # Perf fix: membership tests against a list made this O(n*m); a set gives
    # O(1) lookups (ops are hashable — they are stored in frozensets
    # throughout this module). Order of the forward walk is preserved.
    backward_set = set(backward_ops)
    return [op for op in forward_ops if (op in backward_set)]
|
def get_walks_union_ops(forward_seed_ops, backward_seed_ops, forward_inclusive=True, backward_inclusive=True, within_ops=None, within_ops_fn=None, control_inputs=False, control_outputs=None, control_ios=None):
  """Return the union of a forward and a backward walk.

  Args:
    forward_seed_ops: iterable of operations from which the forward walk
      starts. If a list of tensors is given instead, the seed ops are the
      consumers of those tensors.
    backward_seed_ops: iterable of operations from which the backward walk
      starts. If a list of tensors is given instead, the seed ops are the
      generators of those tensors.
    forward_inclusive: if True, the forward seed ops are part of the result.
    backward_inclusive: if True, the backward seed ops are part of the result.
    within_ops: restrict the search to those operations (None = whole graph).
    within_ops_fn: optional predicate on ops; an op is within the traversal
      iff this returns True (combined with `within_ops` if both are given).
    control_inputs: boolean enabling control inputs.
    control_outputs: `util.ControlOutputs` instance or None; if not None,
      control outputs are enabled.
    control_ios: `util.ControlOutputs` instance or None; if not None, both
      control inputs and control outputs are enabled (shorthand for setting
      the two previous arguments).
  Returns:
    A list of all the `tf.Operation` in the union of a forward and a
    backward walk.
  Raises:
    TypeError: if `forward_seed_ops`, `backward_seed_ops` or `within_ops`
      cannot be converted to a list of `tf.Operation`.
  """
  # Normalize the three control-flow arguments into the two walk options.
  control_inputs, control_outputs = check_cios(control_inputs, control_outputs, control_ios)
  # The two walks are independent; compute them separately and merge.
  backward_ops = get_backward_walk_ops(backward_seed_ops, inclusive=backward_inclusive, within_ops=within_ops, within_ops_fn=within_ops_fn, control_inputs=control_inputs)
  forward_ops = get_forward_walk_ops(forward_seed_ops, inclusive=forward_inclusive, within_ops=within_ops, within_ops_fn=within_ops_fn, control_outputs=control_outputs)
  # Union with stable order: forward ops first, then new backward ops.
  return util.concatenate_unique(forward_ops, backward_ops)
|
def select_ops(*args, **kwargs):
  """Helper to select operations.

  Args:
    *args: list of 1) regular expressions (compiled or not) or 2) (array of)
      `tf.Operation`. `tf.Tensor` instances are silently ignored.
    **kwargs: 'graph': `tf.Graph` in which to perform the regex query. This
      is required when using regex.
      'positive_filter': an elem is selected only if `positive_filter(elem)`
      is `True`. This is optional.
      'restrict_ops_regex': a regular expression is ignored if it doesn't
      start with the substring "(?#ops)".
  Returns:
    A list of `tf.Operation`.
  Raises:
    TypeError: if the optional keyword argument graph is not a `tf.Graph`
      or if an argument in args is not an (array of) `tf.Operation`
      or an (array of) `tf.Tensor` (silently ignored) or a string
      or a regular expression.
    ValueError: if one of the keyword arguments is unexpected or if a
      regular expression is used without passing a graph as a keyword
      argument.
  """
  graph = None
  positive_filter = None
  restrict_ops_regex = False
  # Validate and unpack the keyword arguments.
  for key, value in iteritems(kwargs):
    if key == 'graph':
      graph = value
      if graph is not None and not isinstance(graph, tf_ops.Graph):
        raise TypeError('Expected a tf.Graph, got: {}'.format(type(graph)))
    elif key == 'positive_filter':
      positive_filter = value
    elif key == 'restrict_ops_regex':
      restrict_ops_regex = value
    elif key == 'restrict_ts_regex':
      # Accepted but irrelevant here (consumed by select_ts).
      pass
    else:
      raise ValueError('Wrong keywords argument: {}.'.format(key))
  selected = []
  for arg in args:
    if not can_be_regex(arg):
      # Non-regex argument: convert to ops, filter, then dedupe-extend.
      candidates = util.make_list_of_op(arg, ignore_ts=True)
      if positive_filter is not None:
        candidates = [op for op in candidates if positive_filter(op)]
      selected += [op for op in candidates if op not in selected]
      continue
    # Regex argument: a graph is mandatory for the query.
    if graph is None:
      raise ValueError("Use the keyword argument 'graph' to use regex.")
    regex = make_regex(arg)
    if regex.pattern.startswith('(?#ts)'):
      # Tensor-only regex: not for this helper.
      continue
    if restrict_ops_regex and not regex.pattern.startswith('(?#ops)'):
      continue
    for op in filter_ops_from_regex(graph, regex):
      if op not in selected and (positive_filter is None or positive_filter(op)):
        selected.append(op)
  return selected
|
def select_ts(*args, **kwargs):
  """Helper to select tensors.

  Args:
    *args: list of 1) regular expressions (compiled or not) or 2) (array of)
      `tf.Tensor`. `tf.Operation` instances are silently ignored.
    **kwargs: 'graph': `tf.Graph` in which to perform the regex query. This
      is required when using regex.
      'positive_filter': an elem is selected only if `positive_filter(elem)`
      is `True`. This is optional.
      'restrict_ts_regex': a regular expression is ignored if it doesn't
      start with the substring "(?#ts)".
  Returns:
    A list of `tf.Tensor`.
  Raises:
    TypeError: if the optional keyword argument graph is not a `tf.Graph`
      or if an argument in args is not an (array of) `tf.Tensor`
      or an (array of) `tf.Operation` (silently ignored) or a string
      or a regular expression.
    ValueError: if one of the keyword arguments is unexpected or if a
      regular expression is used without passing a graph as a keyword
      argument.
  """
  graph = None
  positive_filter = None
  restrict_ts_regex = False
  # Validate and unpack the keyword arguments.
  for key, value in iteritems(kwargs):
    if key == 'graph':
      graph = value
      if graph is not None and not isinstance(graph, tf_ops.Graph):
        raise TypeError('Expected a tf.Graph, got {}'.format(type(graph)))
    elif key == 'positive_filter':
      positive_filter = value
    elif key == 'restrict_ts_regex':
      restrict_ts_regex = value
    elif key == 'restrict_ops_regex':
      # Accepted but irrelevant here (consumed by select_ops).
      pass
    else:
      raise ValueError('Wrong keywords argument: {}.'.format(key))
  selected = []
  for arg in args:
    if not can_be_regex(arg):
      # Non-regex argument: convert to tensors, filter, then dedupe-extend.
      candidates = util.make_list_of_t(arg, ignore_ops=True)
      if positive_filter is not None:
        candidates = [t for t in candidates if positive_filter(t)]
      selected += [t for t in candidates if t not in selected]
      continue
    # Regex argument: a graph is mandatory for the query.
    if graph is None:
      raise ValueError("Use the keyword argument 'graph' to use regex.")
    regex = make_regex(arg)
    if regex.pattern.startswith('(?#ops)'):
      # Op-only regex: not for this helper.
      continue
    if restrict_ts_regex and not regex.pattern.startswith('(?#ts)'):
      continue
    for t in filter_ts_from_regex(graph, regex):
      if t not in selected and (positive_filter is None or positive_filter(t)):
        selected.append(t)
  return selected
|
def select_ops_and_ts(*args, **kwargs):
  """Helper to select operations and tensors.

  Args:
    *args: list of 1) regular expressions (compiled or not) or 2) (array of)
      `tf.Operation` 3) (array of) `tf.Tensor`. Regular expressions matching
      tensors must start with the comment `"(?#ts)"`, for instance:
      `"(?#ts)^foo/.*"`.
    **kwargs: 'graph': `tf.Graph` in which to perform the regex query. This
      is required when using regex.
      'positive_filter': an elem is selected only if `positive_filter(elem)`
      is `True`. This is optional.
  Returns:
    A tuple `(ops, ts)` where `ops` is a list of `tf.Operation` and `ts` is
    a list of `tf.Tensor`.
  Raises:
    TypeError: if the optional keyword argument graph is not a `tf.Graph`
      or if an argument in args is not an (array of) `tf.Tensor`
      or an (array of) `tf.Operation` or a string or a regular expression.
    ValueError: if one of the keyword arguments is unexpected or if a
      regular expression is used without passing a graph as a keyword
      argument.
  """
  # Ops match any regex; tensors only match regexes prefixed with "(?#ts)".
  selected_ops = select_ops(*args, restrict_ops_regex=False, **kwargs)
  selected_ts = select_ts(*args, restrict_ts_regex=True, **kwargs)
  return selected_ops, selected_ts
|
def _finalize_index(index_or_t, ts):
  """Return `index_or_t` unchanged if it is an integer, else its index in `ts`."""
  if not isinstance(index_or_t, six.integer_types):
    # A tensor was given: translate it into its position in `ts`.
    return ts.index(index_or_t)
  return index_or_t
|
def _finalize_indices(list_of_index_or_t, ts):
  """Normalize a mix of indices and tensors into a list of indices in `ts`."""
  return [_finalize_index(entry, ts) for entry in list_of_index_or_t]
|
def _check_within_range(mapping, n, repetition):
  """Check that the mapping is valid.

  Args:
    mapping: an iterable of integers.
    n: defines the input domain as [0, n-1]. Note that the mapping can be
      under-complete, that is, it can only contain a subset of the integers
      in [0, n-1].
    repetition: if True repetitions are allowed (the function is surjective),
      otherwise repetitions are not allowed (the function is injective).
  Raises:
    ValueError: if the mapping is out of range or if repetition is False and
      the mapping has some repetition.
  """
  # Every entry must lie in the half-open interval [0, n).
  for value in mapping:
    if value < 0 or value >= n:
      raise ValueError('Out of [0, {}[ range: {}'.format(n, value))
  # Injectivity check: a set collapses duplicates, so a size mismatch
  # means at least one repeated entry.
  if not repetition and len(set(mapping)) != len(mapping):
    raise ValueError('Found repetition in mapping: {}'.format(mapping))
|
class SubGraphView(object):
  'A subgraph view on an existing `tf.Graph`.\n\n An instance of this class is a subgraph view on an existing `tf.Graph`.\n "subgraph" means that it can represent part of the whole `tf.Graph`.\n "view" means that it only provides a passive observation and do not to act\n on the `tf.Graph`. Note that in this documentation, the term "subgraph" is\n often used as substitute to "subgraph view".\n\n A subgraph contains:\n\n * a list of input tensors, accessible via the `inputs` property.\n * a list of output tensors, accessible via the `outputs` property.\n * and the operations in between, accessible via the "ops" property.\n\n An subgraph can be seen as a function F(i0, i1, ...) -> o0, o1, ... It is a\n function which takes as input some input tensors and returns as output some\n output tensors. The computation that the function performs is encoded in the\n operations of the subgraph.\n\n The tensors (input or output) can be of two kinds:\n\n - connected: a connected tensor connects to at least one operation contained\n in the subgraph. One example is a subgraph representing a single operation\n and its inputs and outputs: all the input and output tensors of the op\n are "connected".\n - passthrough: a passthrough tensor does not connect to any operation\n contained in the subgraph. One example is a subgraph representing a\n single tensor: this tensor is passthrough. By default a passthrough tensor is\n present both in the input and output tensors of the subgraph. It can however\n be remapped to only appear as an input (or output) only.\n\n The input and output tensors can be remapped. For instance, some input tensor\n can be omitted. For instance, a subgraph representing an operation with two\n inputs can be remapped to only take one input. Note that this does not change\n at all the underlying `tf.Graph` (remember, it is a view). It means that\n the other input is being ignored, or is being treated as "given".\n The analogy with functions can be extended like this: F(x,y) is the original\n function. Remapping the inputs from [x, y] to just [x] means that the subgraph\n now represent the function F_y(x) (y is "given").\n\n The output tensors can also be remapped. For instance, some output tensor can\n be omitted. Other output tensor can be duplicated as well. As mentioned\n before, this does not change at all the underlying `tf.Graph`.\n The analogy with functions can be extended like this: F(...)->x,y is the\n original function. Remapping the outputs from [x, y] to just [y,y] means that\n the subgraph now represent the function M(F(...)) where M is the function\n M(a,b)->b,b.\n\n It is useful to describe three other kind of tensors:\n\n * internal: an internal tensor is a tensor connecting operations contained\n in the subgraph. One example in the subgraph representing the two\n operations A and B connected sequentially: -> A -> B ->. The middle arrow\n is an internal tensor.\n * actual input: an input tensor of the subgraph, regardless of whether it is\n listed in "inputs" or not (masked-out).\n * actual output: an output tensor of the subgraph, regardless of whether it is\n listed in "outputs" or not (masked-out).\n * hidden input: an actual input which has been masked-out using an\n input remapping. In other word, a hidden input is a non-internal tensor\n not listed as a input tensor and one of whose consumers belongs to\n the subgraph.\n * hidden output: a actual output which has been masked-out using an output\n remapping. In other word, a hidden output is a non-internal tensor\n not listed as an output and one of whose generating operations belongs to\n the subgraph.\n\n Here are some useful guarantees about an instance of a SubGraphView:\n\n * the input (or output) tensors are not internal.\n * the input (or output) tensors are either "connected" or "passthrough".\n * the passthrough tensors are not connected to any of the operation of\n the subgraph.\n\n Note that there is no guarantee that an operation in a subgraph contributes\n at all to its inputs or outputs. For instance, remapping both the inputs and\n outputs to empty lists will produce a subgraph which still contains all the\n original operations. However, the remove_unused_ops function can be used to\n make a new subgraph view whose operations are connected to at least one of\n the input or output tensors.\n\n An instance of this class is meant to be a lightweight object which is not\n modified in-place by the user. Rather, the user can create new modified\n instances of a given subgraph. In that sense, the class SubGraphView is meant\n to be used like an immutable python object.\n\n A common problem when using views is that they can get out-of-sync with the\n data they observe (in this case, a `tf.Graph`). This is up to the user to\n ensure that this doesn\'t happen. To keep on the safe side, it is recommended\n that the life time of subgraph views are kept very short. One way to achieve\n this is to use subgraphs within a "with make_sgv(...) as sgv:" Python context.\n\n To alleviate the out-of-sync problem, some functions are granted the right to\n modified subgraph in place. This is typically the case of graph manipulation\n functions which, given some subgraphs as arguments, can modify the underlying\n `tf.Graph`. Since this modification is likely to render the subgraph view\n invalid, those functions can modify the argument in place to reflect the\n change. For instance, calling the function swap_inputs(svg0, svg1) will modify\n svg0 and svg1 in place to reflect the fact that their inputs have now being\n swapped.\n '

  def __init__(self, inside_ops=(), passthrough_ts=()):
    'Create a subgraph containing the given ops and the "passthrough" tensors.\n\n Args:\n inside_ops: an object convertible to a list of `tf.Operation`. This list\n defines all the operations in the subgraph.\n passthrough_ts: an object convertible to a list of `tf.Tensor`. This list\n define all the "passthrough" tensors. A passthrough tensor is a tensor\n which goes directly from the input of the subgraph to it output, without\n any intermediate operations. All the non passthrough tensors are\n silently ignored.\n Raises:\n TypeError: if inside_ops cannot be converted to a list of `tf.Operation`\n or if `passthrough_ts` cannot be converted to a list of `tf.Tensor`.\n '
    inside_ops = util.make_list_of_op(inside_ops)
    passthrough_ts = util.make_list_of_t(passthrough_ts)
    ops_and_ts = (inside_ops + passthrough_ts)
    if ops_and_ts:
      # All the ops and tensors must come from one single tf.Graph.
      self._graph = util.get_unique_graph(ops_and_ts)
      self._ops = inside_ops
      # Boundary of the op set: tensors entering, leaving, and internal.
      (inputs, outputs, insides) = select.compute_boundary_ts(inside_ops)
      all_tensors = frozenset(((inputs + outputs) + list(insides)))
      # Keep only the genuinely passthrough tensors (those not touching any
      # op of the subgraph); the others are silently dropped per the contract.
      self._passthrough_ts = [t for t in passthrough_ts if (t not in all_tensors)]
      # Passthrough tensors appear both as inputs and as outputs by default.
      self._input_ts = (inputs + self._passthrough_ts)
      self._output_ts = (outputs + self._passthrough_ts)
    else:
      # Empty view: no graph, no ops, no tensors.
      self._graph = None
      self._passthrough_ts = []
      self._input_ts = []
      self._output_ts = []
      self._ops = []

  def __copy__(self):
    'Create a copy of this subgraph.\n\n Note that this class is a "view", copying it only create another view and\n does not copy the underlying part of the `tf.Graph`.\n\n Returns:\n A new identical instance of the original subgraph view.\n '
    cls = self.__class__
    result = cls.__new__(cls)
    for (k, v) in iteritems(self.__dict__):
      if (k == '_graph'):
        # The underlying graph is shared between the views, not duplicated.
        setattr(result, k, v)
      else:
        # The op/tensor lists are shallow-copied so each view owns its own.
        setattr(result, k, list(v))
    return result

  def _assign_from(self, other):
    'Assign other to itself.\n\n Args:\n other: another subgraph-view.\n Returns:\n A new instance identical to the original one.\n Raises:\n TypeError: if other is not an SubGraphView.\n '
    if (not isinstance(other, SubGraphView)):
      raise TypeError('Expected SubGraphView, got: {}'.format(type(other)))
    # Share the graph, shallow-copy every op/tensor list.
    self._graph = other._graph
    self._ops = list(other._ops)
    self._passthrough_ts = list(other._passthrough_ts)
    self._input_ts = list(other._input_ts)
    self._output_ts = list(other._output_ts)

  def copy(self):
    'Return a copy of itself.\n\n Note that this class is a "view", copying it only create another view and\n does not copy the underlying part of the tf.Graph.\n\n Returns:\n A new instance identical to the original one.\n '
    # Delegates to __copy__ via the copy module.
    return copy.copy(self)

  def _remap_default(self, remove_input_map=True, remove_output_map=True):
    'Remap in the place the inputs and/or outputs to the default mapping.\n\n Args:\n remove_input_map: if True the input map is reset to the default one.\n remove_output_map: if True the output map is reset to the default one.\n '
    if ((not remove_input_map) and (not remove_output_map)):
      return
    # Recompute the boundary tensors and re-append the passthroughs.
    (inputs, outputs, _) = select.compute_boundary_ts(self._ops)
    if remove_input_map:
      self._input_ts = (list(inputs) + self._passthrough_ts)
    if remove_output_map:
      self._output_ts = (list(outputs) + self._passthrough_ts)

  def remap_default(self, remove_input_map=True, remove_output_map=True):
    'Remap the inputs and/or outputs to the default mapping.\n\n Args:\n remove_input_map: if True the input map is reset to the default one.\n remove_output_map: if True the output map is reset to the default one.\n Returns:\n A new modified instance of the original subgraph view with its\n input and/or output mapping reset to the default one.\n '
    res = self.copy()
    res._remap_default(remove_input_map, remove_output_map)
    return res

  def _remap_inputs(self, new_input_indices):
    'Remap the inputs of the subgraph in-place.'
    # Tensors in the mapping are translated into their index; repetitions
    # are forbidden for inputs.
    new_input_indices = _finalize_indices(new_input_indices, self._input_ts)
    _check_within_range(new_input_indices, len(self._input_ts), repetition=False)
    self._input_ts = [self._input_ts[i] for i in new_input_indices]

  def _remap_outputs(self, new_output_indices):
    'Remap the outputs of the subgraph in-place.'
    # Unlike inputs, output remappings may contain repetitions.
    new_output_indices = _finalize_indices(new_output_indices, self._output_ts)
    _check_within_range(new_output_indices, len(self._output_ts), repetition=True)
    self._output_ts = [self._output_ts[i] for i in new_output_indices]

  def _remap_outputs_make_unique(self):
    'Remap the outputs in place so that all the tensors appears only once.'
    output_ts = list(self._output_ts)
    self._output_ts = []
    # concatenate_unique appends into self._output_ts, skipping duplicates.
    util.concatenate_unique(self._output_ts, output_ts)

  def _remap_outputs_to_consumers(self):
    'Remap the outputs in place to match the number of consumers.'
    self._remap_outputs_make_unique()
    output_ts = list(self._output_ts)
    self._output_ts = []
    # Duplicate each output tensor once per consumer.
    for t in output_ts:
      self._output_ts += ([t] * len(t.consumers()))

  def remap_outputs_make_unique(self):
    'Remap the outputs so that all the tensors appears only once.'
    res = copy.copy(self)
    res._remap_outputs_make_unique()
    return res

  def remap_outputs_to_consumers(self):
    'Remap the outputs to match the number of consumers.'
    res = copy.copy(self)
    res._remap_outputs_to_consumers()
    return res

  def _remove_unused_ops(self, control_inputs=True):
    'Remove unused ops in place.\n\n Args:\n control_inputs: if True, control inputs are used to detect used ops.\n Returns:\n A new subgraph view which only contains used operations.\n '
    # An op is "used" iff it lies on a walk between the connected inputs
    # and the connected outputs of the view.
    ops = select.get_walks_union_ops(self.connected_inputs, self.connected_outputs, within_ops=self._ops, control_inputs=control_inputs)
    self._ops = [op for op in self._ops if (op in ops)]

  def remove_unused_ops(self, control_inputs=True):
    'Remove unused ops.\n\n Args:\n control_inputs: if True, control inputs are used to detect used ops.\n Returns:\n A new subgraph view which only contains used operations.\n '
    res = copy.copy(self)
    res._remove_unused_ops(control_inputs)
    return res

  def remap_inputs(self, new_input_indices):
    'Remap the inputs of the subgraph.\n\n If the inputs of the original subgraph are [t0, t1, t2], remapping to [2,0]\n will create a new instance whose inputs is [t2, t0].\n\n Note that this is only modifying the view: the underlying `tf.Graph` is not\n affected.\n\n Args:\n new_input_indices: an iterable of integers or tf.Tensors\n representing a mapping between the old inputs and the new ones.\n Integers must be positive and smaller than the number of old inputs.\n tf.Tensors must belong to the old list of inputs.\n This mapping can be under-complete and must be without repetitions.\n Returns:\n A new modified instance of the original subgraph view with remapped\n inputs.\n '
    res = self.copy()
    res._remap_inputs(new_input_indices)
    return res

  def remap_outputs(self, new_output_indices):
    'Remap the output of the subgraph.\n\n If the output of the original subgraph are [t0, t1, t2], remapping to\n [1,1,0] will create a new instance whose outputs is [t1, t1, t0].\n\n Note that this is only modifying the view: the underlying tf.Graph is not\n affected.\n\n Args:\n new_output_indices: an iterable of integers or tf.Tensors\n representing a mapping between the old outputs and the new ones.\n Integers must be positive and smaller than the number of old outputs.\n tf.Tensors must belong to the old list of outputs.\n This mapping can be under-complete and can have repetitions.\n Returns:\n A new modified instance of the original subgraph view with remapped\n outputs.\n '
    res = copy.copy(self)
    res._remap_outputs(new_output_indices)
    return res

  def remap(self, new_input_indices=None, new_output_indices=None):
    'Remap the inputs and outputs of the subgraph.\n\n Note that this is only modifying the view: the underlying tf.Graph is not\n affected.\n\n Args:\n new_input_indices: an iterable of integers or tf.Tensors\n representing a mapping between the old inputs and the new ones.\n Integers must be positive and smaller than the number of old inputs.\n tf.Tensors must belong to the old list of inputs.\n This mapping can be under-complete and must be without repetitions.\n new_output_indices: an iterable of integers or tf.Tensors\n representing a mapping between the old outputs and the new ones.\n Integers must be positive and smaller than the number of old outputs.\n tf.Tensors must belong to the old list of outputs.\n This mapping can be under-complete and can have repetitions.\n Returns:\n A new modified instance of the original subgraph view with remapped\n inputs and outputs.\n '
    res = copy.copy(self)
    if (new_input_indices is not None):
      res._remap_inputs(new_input_indices)
    if (new_output_indices is not None):
      res._remap_outputs(new_output_indices)
    return res

  def find_op_by_name(self, op_name):
    'Return the op named op_name.\n\n Args:\n op_name: the name to search for\n Returns:\n The op named op_name.\n Raises:\n ValueError: if the op_name could not be found.\n AssertionError: if the name was found multiple time.\n '
    res = [op for op in self._ops if (op.name == op_name)]
    if (not res):
      raise ValueError('{} not in subgraph.'.format(op_name))
    if (len(res) > 1):
      raise AssertionError('More than 1 op named: {}!'.format(op_name))
    return res[0]

  def __str__(self):
    # Human-readable dump of the view: ops, inputs, outputs.
    if (not self):
      return 'SubGraphView: empty'

    def op_name(op):
      return op.name

    def tensor_name(t):
      # Passthrough tensors are flagged with a trailing ' *'.
      if (t in self._passthrough_ts):
        return '{} *'.format(t.name)
      else:
        return t.name

    def print_list(name, iterable, get_name):
      # `res` is the StringIO bound below; these closures run only after it
      # exists.
      if iterable:
        print('** {}[{}]:'.format(name, len(iterable)), file=res)
        print('\n'.join([' {}'.format(get_name(elem)) for elem in iterable]), file=res)
      else:
        print('** {}: empty'.format(name), file=res)

    res = StringIO()
    print('SubGraphView (graphid={}):'.format(id(self.graph)), file=res)
    print_list('ops', self._ops, op_name)
    print_list('inputs', self._input_ts, tensor_name)
    print_list('outputs', self._output_ts, tensor_name)
    return res.getvalue()

  @property
  def graph(self):
    'The underlying `tf.Graph`.'
    return self._graph

  @property
  def ops(self):
    'The operations in this subgraph view.'
    return self._ops

  @property
  def inputs(self):
    'The input tensors of this subgraph view.'
    return util.ListView(self._input_ts)

  @property
  def connected_inputs(self):
    'The connected input tensors of this subgraph view.'
    return [t for t in self._input_ts if (t not in self._passthrough_ts)]

  @property
  def outputs(self):
    'The output tensors of this subgraph view.'
    return util.ListView(self._output_ts)

  @property
  def connected_outputs(self):
    'The connected output tensors of this subgraph view.'
    return [t for t in self._output_ts if (t not in self._passthrough_ts)]

  @property
  def passthroughs(self):
    'The passthrough tensors, going straight from input to output.'
    return util.ListView(self._passthrough_ts)

  def __bool__(self):
    'Allows for implicit boolean conversion.'
    return (self._graph is not None)

  # Python 2 compatibility for truthiness.
  __nonzero__ = __bool__

  def op(self, op_id):
    'Get an op by its index.'
    return self._ops[op_id]

  def is_passthrough(self, t):
    'Check whether a tensor is passthrough.'
    return (t in self._passthrough_ts)

  def __enter__(self):
    'Allow Python context to minimize the life time of a subgraph view.\n\n A subgraph view is meant to be a lightweight and transient object. A short\n lifetime will alleviate the "out-of-sync" issue mentioned earlier. For that\n reason, a SubGraphView instance can be used within a Python context. For\n example:\n\n from tensorflow.contrib import graph_editor as ge\n with ge.make_sgv(...) as sgv:\n print(sgv)\n\n Returns:\n Itself.\n '
    return self

  def __exit__(self, exc_type, exc_value, traceback):
    # Nothing to release: the context only scopes the view's lifetime.
    pass

  def input_index(self, t):
    "Find the input index corresponding to the given input tensor t.\n\n Args:\n t: the input tensor of this subgraph view.\n Returns:\n The index in the self.inputs list.\n Raises:\n Error: if t in not an input tensor.\n "
    try:
      subgraph_id = self._input_ts.index(t)
    except ValueError:
      # NOTE(review): no `name` attribute/property is defined on this class,
      # so formatting this message would likely raise AttributeError rather
      # than the intended ValueError -- TODO confirm.
      raise ValueError("Can't find {} in inputs of subgraph {}.".format(t.name, self.name))
    return subgraph_id

  def output_index(self, t):
    "Find the output index corresponding to given output tensor t.\n\n Args:\n t: the output tensor of this subgraph view.\n Returns:\n The index in the self.outputs list.\n Raises:\n Error: if t in not an output tensor.\n "
    try:
      subgraph_id = self._output_ts.index(t)
    except ValueError:
      # NOTE(review): same concern as in input_index -- `self.name` looks
      # undefined here too.
      raise ValueError("Can't find {} in outputs of subgraph {}.".format(t.name, self.name))
    return subgraph_id

  def consumers(self):
    'Return a Python set of all the consumers of this subgraph view.\n\n A consumer of a subgraph view is a tf.Operation which is a consumer\n of one of the output tensors and is not in the subgraph.\n\n Returns:\n A list of `tf.Operation` which are the consumers of this subgraph view.\n '
    ops_set = frozenset(self._ops)
    res = []
    for output in self._output_ts:
      # Keep only the external consumers (ops outside of the subgraph).
      consumers = [op for op in output.consumers() if (op not in ops_set)]
      util.concatenate_unique(res, consumers)
    return res
|
def _check_graph(sgv, graph):
  """Check if sgv belongs to the given graph.

  Args:
    sgv: a SubGraphView.
    graph: a graph or None.
  Returns:
    The SubGraphView sgv.
  Raises:
    TypeError: if sgv is not a SubGraphView or if graph is not None and not
      a tf.Graph.
    ValueError: if the graph of sgv and the given graph are not None and
      different.
  """
  if not isinstance(sgv, SubGraphView):
    # Bug fix: report the type of `sgv` (the object being validated), not
    # `graph` -- the old message printed the wrong object's type.
    raise TypeError('Expected a SubGraphView, got: {}'.format(type(sgv)))
  if graph is None or not sgv.graph:
    # Nothing to cross-check: either no reference graph or an empty view.
    return sgv
  if not isinstance(graph, tf_ops.Graph):
    raise TypeError('Expected a tf.Graph, got: {}'.format(type(graph)))
  if sgv.graph is not graph:
    raise ValueError('Graph mismatch.')
  return sgv
|
def make_view(*args, **kwargs):
  """Create a SubGraphView from selected operations and passthrough tensors.

  Args:
    *args: list of 1) regular expressions (compiled or not) or 2) (array of)
      `tf.Operation` 3) (array of) `tf.Tensor`. Those objects will be
      converted into a list of operations and a list of candidate for
      passthrough tensors.
    **kwargs: keyword graph is used 1) to check that the ops and ts are from
      the correct graph 2) for regular expression query
  Returns:
    A subgraph view.
  Raises:
    TypeError: if the optional keyword argument graph is not a `tf.Graph`
      or if an argument in args is not an (array of) `tf.Tensor`
      or an (array of) `tf.Operation` or a string or a regular expression.
    ValueError: if one of the keyword arguments is unexpected.
  """
  graph = kwargs.get('graph', None)
  # Passing an existing view through unchanged (after a graph check).
  if len(args) == 1 and isinstance(args[0], SubGraphView):
    return _check_graph(args[0], graph)
  # Otherwise select the ops and the passthrough tensor candidates.
  ops, ts = select.select_ops_and_ts(*args, **kwargs)
  return _check_graph(SubGraphView(ops, ts), graph)
|
def make_view_from_scope(scope, graph):
  """Make a subgraph from a name scope.

  Args:
    scope: the name of the scope.
    graph: the `tf.Graph`.
  Returns:
    A subgraph view representing the given scope.
  """
  scope_ops = select.get_name_scope_ops(graph, scope)
  return SubGraphView(scope_ops)
|
def replace_t_with_placeholder_handler(info, t):
  """Transform a tensor into a placeholder tensor.

  This handler is typically used to transform a subgraph input tensor into a
  placeholder.

  Args:
    info: Transform._TmpInfo instance.
    t: tensor whose input must be transformed into a place holder.
  Returns:
    The tensor generated by the newly created place holder.
  """
  # The placeholder must be created inside the destination graph.
  with info.graph_.as_default():
    placeholder_t = util.make_placeholder_from_tensor(t, scope=info.scope_)
  return placeholder_t
|
def keep_t_if_possible_handler(info, t):
  """Transform a tensor into itself (identity) if possible.

  This handler transform a tensor into itself if the source and destination
  graph are the same. Otherwise it will create a placeholder.
  This handler is typically used to transform a hidden input tensors.

  Args:
    info: Transform._TmpInfo instance.
    t: tensor whose input must be transformed into a place holder.
  Returns:
    The tensor generated by the newly created place holder.
  """
  # Different source/destination graphs: fall back to a placeholder.
  if info.graph is not info.graph_:
    return replace_t_with_placeholder_handler(info, t)
  return t
|
def assign_renamed_collections_handler(info, elem, elem_):
  """Add the transformed elem to the (renamed) collections of elem.

  A collection is renamed only if is not a known key, as described in
  `tf.compat.v1.GraphKeys`.

  Args:
    info: Transform._TmpInfo instance.
    elem: the original element (`tf.Tensor` or `tf.Operation`)
    elem_: the transformed element
  """
  known_collection_names = util.get_predefined_collection_names()
  for name, collection in iteritems(info.collections):
    if elem in collection:
      # Known (predefined) collection keys keep their name; custom ones
      # are renamed into the destination scope.
      new_name = name if name in known_collection_names else info.new_name(name)
      info.graph_.add_to_collection(new_name, elem_)
|
def transform_op_if_inside_handler(info, op, keep_if_possible=True):
  """Transform an optional op only if it is inside the subgraph.

  This handler is typically use to handle original op: it is fine to keep them
  if they are inside the subgraph, otherwise they are just ignored.

  Args:
    info: Transform._TmpInfo instance.
    op: the optional op to transform (or ignore).
    keep_if_possible: re-attach to the original op if possible, that is,
      if the source graph and the destination graph are the same.
  Returns:
    The transformed op or None.
  """
  # Inside the subgraph: use the already-transformed counterpart.
  if op in info.sgv.ops:
    return info.transformed_ops[op]
  # Outside: keep the original only when both graphs are the same object.
  if keep_if_possible and info.graph is info.graph_:
    return op
  return None
|
def copy_op_handler(info, op, new_inputs, copy_shape=False, nodedef_fn=None):
  """Copy a `tf.Operation`.

  Args:
    info: Transform._TmpInfo instance.
    op: the `tf.Operation` to be copied.
    new_inputs: The new inputs for this op.
    copy_shape: also copy the shape of the tensor
    nodedef_fn: If provided, a function that will be run on the NodeDef
      and should return a mutated NodeDef before a new Operation is created.
      This is useful as certain features cannot be set on the Operation and
      must be modified in NodeDef.

  Returns:
    A `(op, op_outputs)` tuple containing the transformed op and its outputs.
  """
  # A bool would silently iterate as an empty-ish value; reject it early.
  if isinstance(new_inputs, bool):
    raise TypeError('the `new_inputs` argument must be an iterable.')
  # Clone the NodeDef and give it a unique name inside the destination graph.
  node_def = deepcopy(op.node_def)
  node_def.name = info.graph_.unique_name(info.new_name(op.name))
  if nodedef_fn is not None:
    node_def = nodedef_fn(node_def)
  # NOTE(review): relies on private Operation attributes
  # (_output_types/_input_types/_original_op) to rebuild the op.
  copied_op = tf_ops.Operation(
      node_def,
      info.graph_,
      new_inputs,
      op._output_types[:],
      [],
      op._input_types[:],
      None,
      deepcopy(op.op_def))
  if copy_shape:
    for src_t, dst_t in zip(op.outputs, copied_op.outputs):
      dst_t.set_shape(src_t.get_shape())
  if op._original_op:
    copied_op._original_op = op._original_op
  return (copied_op, copied_op.outputs)
|
class TransformerInfo(object):
  """Contains information about the result of a transform operation."""

  def __init__(self, info):
    """Constructor.

    Args:
      info: an instance of Transformer._TmpInfo containing various internal
        information about the transform operation.
    """
    # Source graph/scope; the trailing-underscore attributes are the
    # destination (transformed) counterparts, per the module's convention.
    self._graph = info.graph
    self._scope = info.scope
    self._graph_ = info.graph_
    self._scope_ = info.scope_
    # Maps from original tf.Operation/tf.Tensor to their transformed copies.
    self._transformed_ops = info.transformed_ops
    self._transformed_ts = info.transformed_ts

  def _get_transformed_map(self, top):
    """Return the correct container depending on the type of `top`.

    Raises:
      TypeError: if `top` is neither a `tf.Operation` nor a `tf.Tensor`.
    """
    if isinstance(top, tf_ops.Operation):
      return self._transformed_ops
    elif isinstance(top, tf_ops.Tensor):
      return self._transformed_ts
    else:
      raise TypeError('Expected a tf.Tensor or a tf.Operation, got a {}'.format(type(top)))

  def _transformed_elem(self, original_top, missing_fn=None):
    """Return the transformed op/tensor corresponding to the original one.

    Args:
      original_top: the original tensor/operation.
      missing_fn: function handling the case where the counterpart
        cannot be found. By default, None is returned.
    Returns:
      the transformed tensor/operation (or None if no match is found).
    """
    # NOTE(review): the string branch below looks unreachable — a string
    # argument makes _get_transformed_map raise TypeError before the
    # name-based scan runs. Confirm whether name lookup was ever intended.
    transformed_map = self._get_transformed_map(original_top)
    if isinstance(original_top, string_types):
      for (original, transformed) in iteritems(transformed_map):
        if (original.name == original_top):
          return transformed
      return (None if (missing_fn is None) else missing_fn(original_top))
    else:
      if (original_top not in transformed_map):
        return (None if (missing_fn is None) else missing_fn(original_top))
      return transformed_map[original_top]

  def _original_elem(self, transformed_top, missing_fn=None):
    """Return the original op/tensor corresponding to the transformed one.

    Args:
      transformed_top: the transformed tensor/operation.
      missing_fn: function handling the case where the counterpart
        cannot be found. By default, None is returned.
    Returns:
      the original tensor/operation (or None if no match is found).
    """
    # Reverse lookup: linear scan over the map values.
    transformed_map = self._get_transformed_map(transformed_top)
    if isinstance(transformed_top, string_types):
      finder = (lambda transformed: (transformed.name == transformed_top))
    else:
      finder = (lambda transformed: (transformed == transformed_top))
    for (original, transformed) in iteritems(transformed_map):
      if finder(transformed):
        return original
    return (None if (missing_fn is None) else missing_fn(transformed_top))

  def transformed(self, original, missing_fn=None):
    """Return the transformed op/tensor corresponding to the original one.

    Note that the output of this function mimics the hierarchy
    of its input argument `original`.
    Given an iterable, it returns a list. Given an operation or a tensor,
    it will return an operation or a tensor.

    Args:
      original: the original tensor/operation.
      missing_fn: function handling the case where the counterpart
        cannot be found. By default, None is returned.
    Returns:
      the transformed tensor/operation (or None if no match is found).
    """
    transformed_elem = partial(self._transformed_elem, missing_fn=missing_fn)
    return util.transform_tree(original, transformed_elem)

  def original(self, transformed, missing_fn=None):
    """Return the original op/tensor corresponding to the transformed one.

    Note that the output of this function mimics the hierarchy
    of its input argument `transformed`.
    Given an iterable, it returns a list. Given an operation or a tensor,
    it will return an operation or a tensor.

    Args:
      transformed: the transformed tensor/operation.
      missing_fn: function handling the case where the counterpart
        cannot be found. By default, None is returned.
    Returns:
      the original tensor/operation (or None if no match is found).
    """
    original_elem = partial(self._original_elem, missing_fn=missing_fn)
    return util.transform_tree(transformed, original_elem)

  def __str__(self):
    # Build a human-readable summary of the transform in a string buffer.
    res = StringIO()
    print('Transform result info:', file=res)
    if (self._graph == self._graph_):
      in_place_str = ('' if self._scope_ else ' IN-PLACE')
      print(' Within graph[{}]{}'.format(id(self._graph), in_place_str), file=res)
    else:
      print(' graph[{}] => graph[{}]'.format(id(self._graph), id(self._graph_)), file=res)
    if self._scope:
      print(' Relative to source scope: {}'.format(self._scope), file=res)
    if self._scope_:
      print(' Scope destination: {}'.format(self._scope_), file=res)
    print('Operations mapping:', file=res)
    for (op, op_) in iteritems(self._transformed_ops):
      print(' {} => {}'.format(op.name, op_.name), file=res)
    return res.getvalue()
|
class _TmpInfo(object):
'Transformer temporary data.\n\n An instance of this class holds all the information relevant to a call\n to a transformer instance (that is, a call to __call__). An instance\n is created for the life-time of the __call__ function and is passed as\n argument to the handlers.\n '
def __init__(self, sgv, dst_graph, dst_scope, src_scope):
self.sgv = sgv
self.sgv_inputs_set = frozenset(sgv.inputs)
self.ops = frozenset(sgv.ops)
self.control_outputs = util.ControlOutputs(sgv.graph)
self.graph = sgv.graph
self.scope = src_scope
self.graph_ = dst_graph
self.scope_ = dst_scope
self.transformed_ops = {}
self.transformed_ts = {}
self.collections = dict(((key, self.graph.get_collection(key)) for key in self.graph.get_all_collection_keys()))
self.cyclic_ops = []
self.transform_original_op_handler = transform_op_if_inside_handler
self.tmp_cyclic_ts = []
def new_name(self, name):
'Compute a destination name from a source name.\n\n Args:\n name: the name to be "transformed".\n Returns:\n The transformed name.\n Raises:\n ValueError: if the source scope is used (that is, not an empty string)\n and the source name does not belong to the source scope.\n '
scope = self.scope
if (not name.startswith(scope)):
raise ValueError('{} does not belong to source scope: {}.'.format(name, scope))
rel_name = name[len(scope):]
name_ = (self.scope_ + rel_name)
return name_
|
class Transformer(object):
  """Transform a subgraph into another one.

  By default, the constructor create a transform which copy a subgraph and
  replaces inputs with placeholders. This behavior can be modified by changing
  the handlers.
  """

  def __init__(self):
    """Transformer constructor.

    The following members can be modified:
    transform_op_handler: handle the transformation of a `tf.Operation`.
      This handler defaults to a simple copy.
    assign_collections_handler: handle the assignment of collections.
      This handler defaults to assigning new collections created under the
      given name-scope.
    transform_external_input_handler: handle the transform of the inputs to
      the given subgraph. This handler defaults to creating placeholders
      instead of the ops just before the input tensors of the subgraph.
    transform_external_hidden_input_handler: handle the transform of the
      hidden inputs of the subgraph, that is, the inputs which are not listed
      in sgv.inputs. This handler defaults to a transform which keep the same
      input if the source and destination graphs are the same, otherwise
      use placeholders.
    transform_original_op_handler: handle the transform of original_op. This
      handler defaults to transforming original_op only if they are in the
      subgraph, otherwise they are ignored.
    """
    self.transform_op_handler = copy_op_handler
    self.transform_control_input_handler = transform_op_if_inside_handler
    self.assign_collections_handler = assign_renamed_collections_handler
    self.transform_external_input_handler = replace_t_with_placeholder_handler
    self.transform_external_hidden_input_handler = keep_t_if_possible_handler
    self.transform_original_op_handler = transform_op_if_inside_handler

  def __call__(self, sgv, dst_graph, dst_scope, src_scope='', reuse_dst_scope=False):
    """Execute the transformation.

    Args:
      sgv: the source subgraph-view.
      dst_graph: the destination graph.
      dst_scope: the destination scope.
      src_scope: the source scope, which specify the path from which the
        relative path of the transformed nodes are computed. For instance, if
        src_scope is a/ and dst_scoped is b/, then the node a/x/y will have a
        relative path of x/y and will be transformed into b/x/y.
      reuse_dst_scope: if True the dst_scope is re-used if it already exists.
        Otherwise, the scope is given a unique name based on the one given
        by appending an underscore followed by a digit (default).
    Returns:
      A tuple `(sgv, info)` where:
        `sgv` is the transformed subgraph view;
        `info` is an instance of TransformerInfo containing
        information about the transform, including mapping between
        original and transformed tensors and operations.
    Raises:
      ValueError: if the arguments are invalid.
    """
    sgv = subgraph.make_view(sgv)
    if (not isinstance(dst_graph, tf_ops.Graph)):
      raise TypeError('Expected a tf.Graph, got: {}'.format(type(dst_graph)))
    src_scope = util.scope_finalize(src_scope)
    dst_scope = util.scope_finalize(dst_scope)
    # Unless the caller wants to reuse it, make the destination scope unique.
    if (dst_scope and (not reuse_dst_scope)):
      dst_scope = util.scope_finalize(dst_graph.unique_name(dst_scope[:(- 1)]))
    # Per-call scratch state shared with all handlers.
    info = _TmpInfo(sgv, dst_graph, dst_scope, src_scope)
    # Phase 1: copy ops; phase 2: patch cyclic tensors; phase 3: control deps.
    self._copy_ops(info)
    self._finalize_cycles(info)
    self._connect_control_inputs(info)
    res_info = TransformerInfo(info)
    sgv_ = self._transform_sgv(info, sgv)
    return (sgv_, res_info)

  def _copy_ops(self, info):
    """Copy ops without connecting them."""
    # Sort by creation id so ops are copied in a deterministic order.
    sorted_ops = sorted(info.sgv.ops, key=(lambda op: op._id))
    for op in sorted_ops:
      # Transform (or create temporary stand-ins for) all inputs first.
      new_inputs = [self._transformed_t(info, t, op) for t in op.inputs]
      (op_, op_outputs_) = self.transform_op_handler(info, op, new_inputs)
      if (op is op_):
        raise ValueError('In-place transformation not allowed.')
      info.transformed_ops[op] = op_
      self.assign_collections_handler(info, op, op_)
      # Record the mapping for every output tensor as well.
      for (op_output, op_output_) in zip(op.outputs, op_outputs_):
        info.transformed_ts[op_output] = op_output_
        self.assign_collections_handler(info, op_output, op_output_)

  def _finalize_cycles(self, info):
    """Reconnects the cyclic tensors."""
    # Each entry is (original tensor, temporary stand-in, consumer op); the
    # stand-in input of the copied consumer is rerouted to the transformed
    # tensor, which is guaranteed to exist after _copy_ops has run.
    for (t, tmp_t_, consumer_op) in info.tmp_cyclic_ts:
      if (t not in info.transformed_ts):
        raise ValueError('The tensor {} should be transformed by now.'.format(t.name))
      if (consumer_op not in info.transformed_ops):
        raise ValueError('The op {} should be transformed by now.'.format(consumer_op.name))
      t_ = info.transformed_ts[t]
      consumer_op_ = info.transformed_ops[consumer_op]
      t_index_ = list(consumer_op_.inputs).index(tmp_t_)
      # NOTE: uses the private Operation._update_input API.
      consumer_op_._update_input(t_index_, t_)

  def _connect_control_inputs(self, info):
    """Connect the previously copied ops."""
    for op in info.sgv.ops:
      logging.debug('Connecting control inputs of op: %s', op.name)
      op_ = info.transformed_ops[op]
      # Re-attach original_op (gradient bookkeeping) when it can be mapped.
      if op._original_op:
        original_op = self.transform_original_op_handler(info, op._original_op)
        if (original_op is None):
          logging.debug('Could not find original op for: %s', op_.name)
        else:
          op_._original_op = original_op
      # Transform control inputs, dropping the ones mapped to None.
      control_inputs_ = [self.transform_control_input_handler(info, ci) for ci in op.control_inputs]
      control_inputs_ = [ci for ci in control_inputs_ if (ci is not None)]
      reroute.add_control_inputs(op_, control_inputs_)

  def _transform_sgv(self, info, sgv):
    """Transform a subgraph view.

    For convenience, a transform operation returns a subgraph view of the
    transformed graph.

    Args:
      info: Temporary information for this transform call.
      sgv: the subgraph to be transformed.
    Returns:
      The transformed subgraph.
    """
    ops_ = [op_ for (_, op_) in iteritems(info.transformed_ops)]
    sgv_ = subgraph.SubGraphView(ops_)
    sgv_inputs_ = sgv_.inputs
    sgv_outputs_ = sgv_.outputs
    # Remap the new view's inputs/outputs to mirror the original view's order.
    input_map_ = []
    for input_t in sgv.inputs:
      if (input_t not in info.transformed_ts):
        continue
      input_t_ = info.transformed_ts[input_t]
      if (input_t_ not in sgv_inputs_):
        continue
      input_t_index_ = sgv_.input_index(input_t_)
      input_map_.append(input_t_index_)
    output_map_ = []
    for output_t in sgv.outputs:
      if (output_t not in info.transformed_ts):
        continue
      output_t_ = info.transformed_ts[output_t]
      if (output_t_ not in sgv_outputs_):
        continue
      output_t_index_ = sgv_.output_index(output_t_)
      output_map_.append(output_t_index_)
    return sgv_.remap(input_map_, output_map_)

  def _transformed_t(self, info, t, consumer_op):
    """Return the transformed tensor of `t`."""
    if (t in info.transformed_ts):
      # Already transformed: reuse the mapping.
      return info.transformed_ts[t]
    if (t in info.sgv_inputs_set):
      # `t` is a declared input of the subgraph.
      return self.transform_external_input_handler(info, t)
    elif (t.op in info.ops):
      # `t` is internal to the subgraph but its producer has not been copied
      # yet: we are on a graph cycle. Create a temporary stand-in that
      # _finalize_cycles will reroute once `t` is transformed.
      logging.debug('Cyclic tensor: t.name = %s', t.name)
      if (consumer_op.type == 'Merge'):
        # Merge can temporarily take its (reachable) first input twice.
        first_input = consumer_op.inputs[0]
        tmp_t_ = self._transformed_t(info, first_input, consumer_op)
      elif (t.op.type == 'Enter'):
        # Enter's own input is outside the loop and already transformable.
        enter_input = t.op.inputs[0]
        tmp_t_ = self._transformed_t(info, enter_input, consumer_op)
      else:
        # Fallback: a throwaway placeholder in the destination graph.
        with info.graph_.as_default():
          tmp_t_ = util.make_placeholder_from_tensor(t, scope=info.scope_, prefix='geph_tmp')
        logging.debug('Created temporary placeholder: %s.', tmp_t_.name)
      info.tmp_cyclic_ts.append((t, tmp_t_, consumer_op))
      return tmp_t_
    else:
      # `t` is a hidden input (not listed in sgv.inputs).
      return self.transform_external_hidden_input_handler(info, t)
|
def copy(sgv, dst_graph=None, dst_scope='', src_scope='', reuse_dst_scope=False):
  """Copy a subgraph.

  Args:
    sgv: the source subgraph-view. This argument is converted to a subgraph
      using the same rules than the function subgraph.make_view.
    dst_graph: the destination graph. Defaults to the source graph.
    dst_scope: the destination scope.
    src_scope: the source scope.
    reuse_dst_scope: if True the dst_scope is re-used if it already exists.
      Otherwise, the scope is given a unique name based on the one given
      by appending an underscore followed by a digit (default).
  Returns:
    A tuple `(sgv, info)` where:
      `sgv` is the transformed subgraph view;
      `info` is an instance of TransformerInfo containing
      information about the transform, including mapping between
      original and transformed tensors and operations.
  Raises:
    TypeError: if `dst_graph` is not a `tf.Graph`.
    StandardError: if sgv cannot be converted to a SubGraphView using
      the same rules than the function subgraph.make_view.
  """
  sgv = subgraph.make_view(sgv)
  dst_graph = sgv.graph if dst_graph is None else dst_graph
  if not isinstance(dst_graph, tf_ops.Graph):
    raise TypeError('Expected a tf.Graph, got: {}'.format(type(dst_graph)))
  # Delegate the actual work to a default-configured Transformer.
  copier = Transformer()
  return copier(sgv, dst_graph, dst_scope, src_scope,
                reuse_dst_scope=reuse_dst_scope)
|
def copy_with_input_replacements(sgv, replacement_ts, dst_graph=None, dst_scope='', src_scope='', reuse_dst_scope=False):
  """Copy a subgraph, replacing some of its inputs.

  Note a replacement only happens if the tensor to be replaced
  is an input of the given subgraph. The inputs of a subgraph can
  be queried using sgv.inputs.

  Args:
    sgv: the source subgraph-view. This argument is converted to a subgraph
      using the same rules as the function subgraph.make_view.
    replacement_ts: dictionary mapping from original tensors to the
      replaced one.
    dst_graph: the destination graph. Defaults to the source graph.
    dst_scope: the destination scope.
    src_scope: the source scope.
    reuse_dst_scope: if True the dst_scope is re-used if it already exists.
      Otherwise, the scope is given a unique name based on the one given
      by appending an underscore followed by a digit (default).
  Returns:
    A tuple `(sgv, info)` where:
      `sgv` is the transformed subgraph view;
      `info` is an instance of TransformerInfo containing
      information about the transform, including mapping between
      original and transformed tensors and operations.
  Raises:
    TypeError: if dst_graph is not a tf.Graph.
    StandardError: if sgv cannot be converted to a SubGraphView using
      the same rules as the function subgraph.make_view.
  """
  sgv = subgraph.make_view(sgv)
  if dst_graph is None:
    dst_graph = sgv.graph
  if not isinstance(dst_graph, tf_ops.Graph):
    raise TypeError('Expected a tf.Graph, got: {}'.format(type(dst_graph)))

  def _replace_or_keep(info, t):
    # Substitute only tensors explicitly listed in `replacement_ts`;
    # everything else falls back to the identity-if-possible behavior.
    if t in replacement_ts:
      return replacement_ts[t]
    return keep_t_if_possible_handler(info, t)

  copier = Transformer()
  copier.transform_external_input_handler = _replace_or_keep
  return copier(sgv, dst_graph, dst_scope, src_scope,
                reuse_dst_scope=reuse_dst_scope)
|
def _add_control_flow_ops(ops, control_ios):
'Complete `ops` so that the transformed graph is valid.\n\n Partially copying a graph can lead to a malformed graph. For instance,\n copying half of a while construct is likely to result in an invalid graph.\n This function attempts to add missing ops so that the transformation result\n in a valid graph.\n\n Args:\n ops: list of ops (modifed in-place).\n control_ios: object created by a call to `util.ControlOutputs`.\n '
control_flow_contexts = set()
for op in ops:
cfc = op._control_flow_context
if cfc:
control_flow_contexts.add(cfc)
new_ops = []
for cfc in control_flow_contexts:
if cfc.IsWhileContext():
new_ops += select.get_walks_intersection_ops([enter_t.op for enter_t in cfc.loop_enters], [exit_t.op for exit_t in cfc.loop_exits], control_ios=control_ios)
new_ops_set = set(new_ops)
ops_set = frozenset(ops)
for op in new_ops_set:
if (op not in ops_set):
ops.append(op)
|
def graph_replace(target_ts, replacement_ts, dst_scope='', src_scope='', reuse_dst_scope=False):
  """Create a new graph which compute the targets from the replaced Tensors.

  Args:
    target_ts: a single tf.Tensor or an iterable of tf.Tensor.
    replacement_ts: dictionary mapping from original tensors to replaced tensors
    dst_scope: the destination scope.
    src_scope: the source scope.
    reuse_dst_scope: if True the dst_scope is re-used if it already exists.
      Otherwise, the scope is given a unique name based on the one given
      by appending an underscore followed by a digit (default).
  Returns:
    A single tf.Tensor or a list of target tf.Tensor, depending on
    the type of the input argument `target_ts`.
    The returned tensors are recomputed using the tensors from replacement_ts.
  Raises:
    ValueError: if the targets are not connected to replacement_ts.
  """
  # Identify the ops connecting the replacement tensors to the targets.
  targets = util.flatten_tree(target_ts)
  graph = util.get_unique_graph(targets, check_types=tf_ops.Tensor)
  control_ios = util.ControlOutputs(graph)
  connected_ops = select.get_walks_intersection_ops(
      list(replacement_ts), targets, control_ios=control_ios)
  if not connected_ops:
    raise ValueError('Targets and replacements are not connected!')
  # Pull in any control-flow ops required to keep the copied graph valid.
  _add_control_flow_ops(connected_ops, control_ios)
  # Copy the connected subgraph, rerouting its inputs through replacement_ts.
  _, info = copy_with_input_replacements(
      connected_ops, replacement_ts, None, dst_scope, src_scope,
      reuse_dst_scope)
  # Targets that were not part of the copy map to themselves.
  return info.transformed(target_ts, missing_fn=(lambda original_t: original_t))
|
def concatenate_unique(la, lb):
  """Add all the elements of `lb` to `la` if they are not there already.

  The elements added to `la` maintain ordering with respect to `lb`.

  Args:
    la: List of Python objects.
    lb: List of Python objects.
  Returns:
    `la`: The list `la` with missing elements from `lb`.
  """
  # Track membership in a set for O(1) duplicate checks.
  seen = set(la)
  for item in lb:
    if item in seen:
      continue
    seen.add(item)
    la.append(item)
  return la
|
class ListView(object):
  """Immutable list wrapper.

  This class is strongly inspired by the one in tf.Operation.
  """

  def __init__(self, list_):
    if not isinstance(list_, list):
      raise TypeError('Expected a list, got: {}.'.format(type(list_)))
    self._list = list_

  def __iter__(self):
    return iter(self._list)

  def __len__(self):
    return len(self._list)

  def __bool__(self):
    return bool(self._list)

  # Python 2 truthiness hook.
  __nonzero__ = __bool__

  def __getitem__(self, i):
    return self._list[i]

  def __add__(self, other):
    # Concatenation always yields a plain list copy.
    rhs = other if isinstance(other, list) else list(other)
    return list(self) + rhs
|
def is_iterable(obj):
  """Return true if the object is iterable."""
  # Tensors must not be treated as iterables by the tree helpers.
  if isinstance(obj, tf_ops.Tensor):
    return False
  try:
    iter(obj)
  except Exception:  # pylint: disable=broad-except
    return False
  return True
|
def flatten_tree(tree, leaves=None):
  """Flatten a tree into a list.

  Args:
    tree: iterable or not. If iterable, its elements (child) can also be
      iterable or not.
    leaves: list to which the tree leaves are appended (None by default).
  Returns:
    A list of all the leaves in the tree.
  """
  leaves = [] if leaves is None else leaves
  if isinstance(tree, dict):
    # Only the values of a dict are considered part of the tree.
    children = (child for _, child in iteritems(tree))
  elif is_iterable(tree):
    children = iter(tree)
  else:
    # Leaf node.
    leaves.append(tree)
    return leaves
  for child in children:
    flatten_tree(child, leaves)
  return leaves
|
def transform_tree(tree, fn, iterable_type=tuple):
  """Transform all the nodes of a tree.

  Args:
    tree: iterable or not. If iterable, its elements (child) can also be
      iterable or not.
    fn: function to apply to each leaves.
    iterable_type: type use to construct the resulting tree for unknown
      iterable, typically `list` or `tuple`.
  Returns:
    A tree whose leaves has been transformed by `fn`.
    The hierarchy of the output tree mimics the one of the input tree.
  """
  if is_iterable(tree):
    if isinstance(tree, dict):
      # Rebuild the same concrete dict type via __new__/__init__ so subclass
      # types are preserved in the output.
      res = tree.__new__(type(tree))
      res.__init__(((k, transform_tree(child, fn)) for (k, child) in iteritems(tree)))
      return res
    elif isinstance(tree, tuple):
      if hasattr(tree, '_asdict'):
        # Named tuples: rebuild from their transformed field dict.
        res = tree.__new__(type(tree), **transform_tree(tree._asdict(), fn))
      else:
        res = tree.__new__(type(tree), (transform_tree(child, fn) for child in tree))
      return res
    elif isinstance(tree, typing.Sequence):
      # Other sequences (e.g. lists), rebuilt via __new__/__init__.
      # NOTE(review): strings are Sequences too and this path would yield an
      # empty string — presumably leaves are never plain strings; verify.
      res = tree.__new__(type(tree))
      res.__init__((transform_tree(child, fn) for child in tree))
      return res
    else:
      # Unknown iterables (sets, generators, ...) become `iterable_type`.
      return iterable_type((transform_tree(child, fn) for child in tree))
  else:
    # Leaf node: apply the transformation.
    return fn(tree)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.