code
stringlengths
17
6.64M
def numpy_set_unused(v):
    """
    :param numpy.ndarray v: array which will be marked as not-used-anymore

    This will tell mechanisms like SharedNumpyArray that it can reuse the memory.
    On the client side, this will even unmap the memory, so any further access
    to it will cause a SEGFAULT.
    """
    if v is None:
        return
    assert isinstance(v, numpy.ndarray)
    base = v.base
    if not isinstance(base, SharedNumpyArray):
        return
    assert base.is_in_use()
    base.set_unused()
def numpy_copy_and_set_unused(v):
    """
    :param dict[str,numpy.ndarray|object] | numpy.ndarray | object v: object to be handled

    If v is a dict, we will return a new copied dict where every value is mapped through
    numpy_copy_and_set_unused.
    If v is a numpy.ndarray and its base is a SharedNumpyArray, we will copy it and
    call numpy_set_unused on the old value.
    If v is a numpy.ndarray and its base is not a SharedNumpyArray, we will just return it
    as it is and do nothing.
    In all other cases, we will also just return the object as it is and do nothing.
    """
    if isinstance(v, dict):
        copied_dict = {}
        for key, value in v.items():
            copied_dict[key] = numpy_copy_and_set_unused(value)
        return copied_dict
    if not isinstance(v, numpy.ndarray):
        return v
    if not isinstance(v.base, SharedNumpyArray):
        return v
    # Shared-memory backed array: take a private copy, then release the shared buffer.
    copied = v.copy(order='A')
    numpy_set_unused(v)
    return copied
def numpy_alloc(shape, dtype, fortran_for_shared=False):
    """
    If EnableAutoNumpySharedMemPickling is True, this will allocate a Numpy array
    in shared memory so we avoid a copy later on when this Numpy array would
    be transferred to another process via pickling.
    """
    if SharedMemNumpyConfig['enabled']:
        dtype = numpy.dtype(dtype)
        typestr = dtype.str
        if fortran_for_shared:
            strides = SharedNumpyArray.numpy_strides_for_fortran(shape=shape, typestr=typestr)
        else:
            strides = None
        try:
            return SharedNumpyArray.create_new(shape=shape, strides=strides, typestr=typestr)
        except SharedMem.ShmException as e:
            # Shared mem failed (e.g. exhausted); fall through to a normal allocation.
            print('numpy_alloc: SharedMem exception: %s' % e)
    return numpy.ndarray(shape, dtype=dtype)
class Pickler(_BasePickler):
    '''
    We extend the standard Pickler to be able to pickle some more types,
    such as lambdas and functions, code, func cells, buffer and more.
    '''

    def __init__(self, *args, **kwargs):
        # Default to the highest protocol unless the caller chose one explicitly.
        if (not ('protocol' in kwargs)):
            kwargs['protocol'] = pickle.HIGHEST_PROTOCOL
        _BasePickler.__init__(self, *args, **kwargs)
    # Private copy of the dispatch table so our extra handlers do not leak into _BasePickler.
    dispatch = _BasePickler.dispatch.copy()
    # Annotation only (inherited attribute): maps id(obj) -> (memo index, obj).
    memo: Dict[(int, Tuple[(int, Any)])]

    def save_func(self, obj):
        # Prefer pickling by global reference; fall back to reconstructing the function.
        try:
            self.save_global(obj)
            return
        except pickle.PicklingError:
            pass
        assert (type(obj) is types.FunctionType)
        self.save(types.FunctionType)
        self.save(get_func_tuple(obj))
        self.write(pickle.REDUCE)
        # get_func_tuple/save may already have memoized obj (recursive structures).
        if (id(obj) not in self.memo):
            self.memoize(obj)
    dispatch[types.FunctionType] = save_func

    def save_method(self, obj):
        # Bound methods: pickle as MethodType(func, self).
        try:
            self.save_global(obj)
            return
        except pickle.PicklingError:
            pass
        assert (type(obj) is types.MethodType)
        self.save(types.MethodType)
        self.save((obj.__func__, obj.__self__))
        self.write(pickle.REDUCE)
        self.memoize(obj)
    dispatch[types.MethodType] = save_method

    def save_code(self, obj):
        # Code objects via marshal round-trip (same-Python-version only by marshal's contract).
        assert (type(obj) is types.CodeType)
        self.save(marshal.loads)
        self.save((marshal.dumps(obj),))
        self.write(pickle.REDUCE)
        self.memoize(obj)
    dispatch[types.CodeType] = save_code

    def save_cell(self, obj):
        # Closure cells: rebuild via makeFuncCell(contents).
        assert (type(obj) is CellType)
        self.save(makeFuncCell)
        self.save((obj.cell_contents,))
        self.write(pickle.REDUCE)
        self.memoize(obj)
    dispatch[CellType] = save_cell

    def intellisave_dict(self, obj):
        # If this dict is some module's __dict__, pickle it as a module-dict reference
        # instead of by value.
        modname = getModNameForModDict(obj)
        if modname:
            self.save(getModuleDict)
            self.save((modname, sys.path))
            self.write(pickle.REDUCE)
            self.memoize(obj)
            return
        self.save_dict(obj)
    dispatch[DictType] = intellisave_dict

    def save_module(self, obj):
        # Modules are pickled as an import by name; anything unresolvable is an error.
        modname = getModNameForModDict(obj.__dict__)
        if modname:
            self.save(import_module)
            self.save((modname,))
            self.write(pickle.REDUCE)
            self.memoize(obj)
            return
        raise pickle.PicklingError(('cannot pickle module %r' % obj))
    dispatch[ModuleType] = save_module

    def save_string(self, obj, pack=struct.pack):
        # Emit a raw BINSTRING opcode.
        # NOTE(review): the length prefix uses len(obj) (character count) but the payload
        # is the utf8 encoding, whose byte length differs for non-ASCII input -- presumably
        # only ASCII strings are expected here; confirm against callers.
        self.write((pickle.BINSTRING + pack('<i', len(obj))))
        self.write(bytes(obj, 'utf8'))
    dispatch[str] = save_string

    def save_ndarray(self, obj):
        # Try the shared-memory fast path first, otherwise serialize the raw bytes.
        if use_shared_mem_for_numpy_array(obj):
            try:
                shared = SharedNumpyArray.as_shared(obj)
            except SharedMem.ShmException as e:
                print(('SharedNumpyArray exception: %s' % e))
                # fallback to default
            else:
                self.save(shared.create_numpy_array)
                self.save(())
                self.write(pickle.REDUCE)
                return
        self.save(make_numpy_ndarray_fromstring)
        self.save((obj.tostring(), str(obj.dtype), obj.shape))
        self.write(pickle.REDUCE)
    dispatch[numpy.ndarray] = save_ndarray

    def save_iobuffer_dummy(self, obj):
        # Buffered file objects cannot be transferred; pickle them as None.
        self.save_none(None)
    dispatch[io.BufferedReader] = save_iobuffer_dummy
    dispatch[io.BufferedWriter] = save_iobuffer_dummy

    def save_global(self, obj, name=None):
        # Like the stdlib save_global, but remaps __main__ references to this module
        # where possible, and verifies the lookup round-trips to the same object.
        assert obj
        assert (id(obj) not in self.memo)
        if (name is None):
            name = obj.__name__
        module = getattr(obj, '__module__', None)
        if ((module == '__main__') and (globals().get(name, None) is obj)):
            module = __name__  # The name is defined here as well; prefer this module.
        if ((module is None) or (module == '__main__')):
            module = pickle.whichmodule(obj, name)
        if ((module is None) or (module == '__main__')):
            raise pickle.PicklingError(("Can't pickle %r: module not found: %s" % (obj, module)))
        try:
            __import__(module)
            mod = sys.modules[module]
            klass = getattr(mod, name)
        except (ImportError, KeyError, AttributeError):
            raise pickle.PicklingError(("Can't pickle %r: it's not found as %s.%s" % (obj, module, name)))
        else:
            if (klass is not obj):
                raise pickle.PicklingError(("Can't pickle %r: it's not the same object as %s.%s" % (obj, module, name)))
        # GLOBAL opcode arguments are newline-terminated; newlines would corrupt the stream.
        assert ('\n' not in module)
        assert ('\n' not in name)
        self.write((pickle.GLOBAL + bytes((((module + '\n') + name) + '\n'), 'utf8')))
        self.memoize(obj)

    def save_type(self, obj):
        try:
            self.save_global(obj)
            return
        except pickle.PicklingError:
            pass
        # Some types cannot be addressed via their module name; search the `types` module.
        for modname in ['types']:
            moddict = sys.modules[modname].__dict__
            for (modobjname, modobj) in moddict.items():
                if (modobj is obj):
                    self.write((pickle.GLOBAL + bytes((((modname + '\n') + modobjname) + '\n'), 'utf8')))
                    self.memoize(obj)
                    return
        self._save_type_fallback(obj)

    def _save_type_fallback(self, obj):
        # Last resort: rebuild the class via type(name, bases, {}) and then
        # assign its attributes afterwards (handles self-referencing class dicts).
        self.save(type)
        self.save((obj.__name__, obj.__bases__, {}))
        self.write(pickle.REDUCE)
        self.memoize(obj)
        self.write(pickle.POP)
        self.save(assign_obj_attribs)
        self.save((obj, getNormalDict(obj.__dict__)))
        self.write(pickle.REDUCE)
    dispatch[NewStyleClass] = save_type

    def __getstate__(self):
        # The pickler itself carries no picklable state.
        return None

    def __setstate__(self, state):
        pass
def watch_memory():
    """
    Start thread which watches memory usage over time of the current process and all its children over time.
    """
    global _watch_memory_proc
    if _watch_memory_proc:
        return  # watcher already running
    ctx = multiprocessing.get_context('spawn')
    proc = ctx.Process(
        target=_watch_memory_main, args=(os.getpid(),), name='watch_memory', daemon=True)
    _watch_memory_proc = proc
    proc.start()
def _watch_memory_main(pid: int):
    # Entry point of the watcher subprocess: polls memory of the process `pid`
    # and all its (recursive) children every 5 seconds and prints changes.
    if (sys.platform == 'linux'):
        # Rename this process so it is identifiable in ps/top.
        with open('/proc/self/comm', 'w') as f:
            f.write(f'watch memory')

    def _print(*args):
        # All output prefixed so it can be grepped from mixed logs.
        print('MEMORY:', *args)
        sys.stdout.flush()
    cur_proc = psutil.Process(pid)
    procs = []  # processes seen in the previous iteration
    mem_per_pid = {}  # pid -> last reported mem info dict
    while True:
        change = False
        procs_ = ([cur_proc] + cur_proc.children(recursive=True))
        # Report processes which disappeared since the last poll.
        for p in procs:
            if (p not in procs_):
                _print(f'proc {_format_proc(p)} exited, old:', _format_mem_info(mem_per_pid[p.pid]))
                mem_per_pid.pop(p.pid, None)
                change = True
        procs = procs_
        for p in list(procs):
            old_mem_info = mem_per_pid.get(p.pid, None)
            try:
                mem_info = get_mem_info(p)
            except psutil.NoSuchProcess:
                # Process died between enumeration and inspection.
                if old_mem_info:
                    _print(f'proc {_format_proc(p)} exited, old:', _format_mem_info(old_mem_info))
                    mem_per_pid.pop(p.pid, None)
                    change = True
                procs.remove(p)
                continue
            proc_prefix = ('main' if (p == cur_proc) else 'sub')
            if (not old_mem_info):
                _print(f'{proc_prefix} proc {_format_proc(p)} initial:', _format_mem_info(mem_info))
                mem_per_pid[p.pid] = mem_info
                change = True
            # Only report growth, and only if it changes the human-readable size string.
            elif ((mem_info['rss'] > old_mem_info['rss'])
                  and (_format_mem_size(old_mem_info['rss']) != _format_mem_size(mem_info['rss']))):
                _print(f'{proc_prefix} proc {_format_proc(p)} increased RSS:', _format_mem_info(mem_info))
                mem_per_pid[p.pid] = mem_info
                change = True
        if change:
            # Aggregate pss/uss over all tracked processes.
            res = {'pss': 0, 'uss': 0}
            for mem_info in mem_per_pid.values():
                for k in res.keys():
                    res[k] += mem_info[k]
            _print(
                'total',
                f"(main {cur_proc.pid}, {datetime.now().strftime('%Y-%m-%d, %H:%M:%S')}, {len(mem_per_pid)} procs):",
                _format_mem_info(res))
        time.sleep(5)
def _format_proc(proc: psutil.Process) -> str:
    """
    :param psutil.Process proc:
    :return: "name(pid)", with fallbacks for dead or unnamed processes
    :rtype: str
    """
    try:
        name = proc.name()
    except psutil.NoSuchProcess:
        # Process already gone; use the cached internal name if psutil kept one.
        name = getattr(proc, '_name', None) or '<unknown-dead>'
    if not name:
        name = '<noname>'
    return '%s(%s)' % (name, proc.pid)
def _format_mem_info(info: Dict[str, int]) -> str:
    """
    :param dict[str,int] info: mem-info mapping, values in bytes
    :return: "key=size key=size ..." with human-readable sizes
    :rtype: str
    """
    parts = []
    for key, num_bytes in info.items():
        parts.append('%s=%s' % (key, _format_mem_size(num_bytes)))
    return ' '.join(parts)
def _format_mem_size(c: int) -> str: if (c < 1024): return ('%iB' % c) units = 'KMG' i = 0 while (i < (len(units) - 1)): if (c < (0.8 * (1024 ** (i + 2)))): break i += 1 f = (float(c) / (1024 ** (i + 1))) return ('%.1f%sB' % (f, units[i]))
def get_mem_info(proc: psutil.Process) -> Dict[str, int]:
    """
    Sum up rss/pss/uss/shared over all memory maps of the given process.

    Code from:
    https://ppwwyyxx.com/blog/2022/Demystify-RAM-Usage-in-Multiprocess-DataLoader/
    """
    totals = defaultdict(int)
    for mem_map in proc.memory_maps():
        totals['rss'] += mem_map.rss
        totals['pss'] += mem_map.pss
        totals['uss'] += mem_map.private_clean + mem_map.private_dirty
        totals['shared'] += mem_map.shared_clean + mem_map.shared_dirty
    return totals
def main():
    '''
    Setup main entry
    '''
    # Derive the PEP-440 version; long_version is "<version>+<local part>".
    long_version = get_version_str(verbose=True, fallback='1.0.0+setup-fallback-version', long=True)
    version = long_version[:long_version.index('+')]
    if (os.environ.get('DEBUG', '') == '1'):
        debug_print_file('.')
        debug_print_file('PKG-INFO')
        debug_print_file('pip-egg-info')
        debug_print_file('pip-egg-info/returnn.egg-info')
        debug_print_file('pip-egg-info/returnn.egg-info/SOURCES.txt')
    if os.path.exists('PKG-INFO'):
        # We are building from an sdist (PKG-INFO only exists in unpacked sdists).
        if os.path.exists('MANIFEST'):
            print('package_data, found PKG-INFO and MANIFEST')
            package_data = (open('MANIFEST').read().splitlines() + ['PKG-INFO'])
        else:
            print('package_data, found PKG-INFO, no MANIFEST, use *')
            # Include everything found in the tree as package data.
            shutil.copy('PKG-INFO', 'returnn/')
            shutil.copy('_setup_info_generated.py', 'returnn/')
            package_data = []
            for (root, dirs, files) in os.walk('.'):
                for file in files:
                    package_data.append(os.path.join(root, file))
    else:
        # Likely running sdist from a source checkout; generate the version info file.
        print('dummy package_data, does not matter, likely you are running sdist')
        with open('_setup_info_generated.py', 'w') as f:
            f.write(('version = %r\n' % version))
            f.write(('long_version = %r\n' % long_version))
        package_data = ['MANIFEST', '_setup_info_generated.py']
    from distutils.core import setup
    setup(
        name='returnn',
        version=version,
        packages=['returnn'],
        include_package_data=True,
        package_data={'returnn': package_data},
        description='The RWTH extensible training framework for universal recurrent neural networks',
        author='Albert Zeyer',
        author_email='albzey@gmail.com',
        url='https://github.com/rwth-i6/returnn/',
        license='RETURNN license',
        long_description=open('README.rst').read(),
        long_description_content_type='text/x-rst',
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'Environment :: Console',
            'Environment :: GPU',
            'Environment :: GPU :: NVIDIA CUDA',
            'Intended Audience :: Developers',
            'Intended Audience :: Education',
            'Intended Audience :: Science/Research',
            'License :: Other/Proprietary License',
            'Operating System :: MacOS :: MacOS X',
            'Operating System :: Microsoft :: Windows',
            'Operating System :: POSIX',
            'Operating System :: Unix',
            'Programming Language :: Python',
            'Programming Language :: Python :: 3',
            'Topic :: Scientific/Engineering',
            'Topic :: Scientific/Engineering :: Artificial Intelligence',
            'Topic :: Software Development :: Libraries :: Python Modules'])
class ArgParser():
    """
    Emulate the Sprint argument parser.
    """

    def __init__(self):
        self.args = {}

    def add(self, key, value):
        """
        :param str key:
        :param str value:
        """
        self.args[key] = value

    def get(self, key, default=None):
        """
        :param str key:
        :param str|T|None default:
        :rtype: str|T|None
        """
        return self.args.get(key, default)

    def parse(self, argv):
        """
        :type argv: list[str]
        """
        for arg in argv:
            if not arg.startswith('--'):
                continue  # non-option arguments are ignored
            key, value = arg[2:].split('=', 1)
            if key.startswith('*.'):
                # Sprint wildcard prefix; strip it.
                key = key[2:]
            self.add(key, value)
def main(argv):
    '''
    Main entry.
    '''
    print('DummySprintExec init', argv)
    args = ArgParser()
    args.parse(argv[1:])
    # Select the Sprint API implementation module.
    if args.get('pymod-name'):
        sprint_api = import_module(args.get('pymod-name'))
    else:
        import returnn.sprint.extern_interface as sprint_api
    input_dim = int(args.get('feature-dimension'))
    assert (input_dim > 0)
    output_dim = int(args.get('trainer-output-dimension'))
    assert (output_dim > 0)
    sprint_config = args.get('pymod-config', '')
    target_mode = args.get('target-mode', 'target-generic')
    sprint_api.init(inputDim=input_dim, outputDim=output_dim, config=sprint_config, targetMode=target_mode)
    if args.get('crnn-dataset'):
        # Dataset spec is evaluated as Python code against the generating-dataset registry.
        # NOTE(review): eval of a command-line string -- only safe for trusted invocations.
        dataset = eval(args.get('crnn-dataset'), {}, ObjAsDict(generating_dataset))
        assert isinstance(dataset, Dataset)
        assert (dataset.num_inputs == input_dim)
        assert (dataset.num_outputs == {'classes': (output_dim, 1), 'data': (input_dim, 2)})
        dataset.init_seq_order(epoch=1)
        seq_idx = 0
        while dataset.is_less_than_num_seqs(seq_idx):
            dataset.load_seqs(seq_idx, (seq_idx + 1))
            features = dataset.get_data(seq_idx, 'data')
            features = features.T  # Sprint expects (dim, time)
            kwargs = {'features': features}
            if (target_mode == 'target-generic'):
                if ('orth' in dataset.get_target_list()):
                    kwargs['orthography'] = dataset.get_targets('orth', seq_idx)
                if ('classes' in dataset.get_target_list()):
                    kwargs['alignment'] = dataset.get_targets('classes', seq_idx)
                print(('DummySprintExec seq_idx %i feedInputAndTarget(**%r)' % (seq_idx, kwargs)))
                sprint_api.feedInputAndTarget(**kwargs)
            else:
                raise NotImplementedError(('targetMode = %s' % target_mode))
            seq_idx += 1
    print('DummySprintExec exit')
    sprint_api.exit()
def _setup(): import sys import os orig_stdout = sys.stdout try: sys.stdout = sys.__stdout__ for env_var in ['OPENBLAS_NUM_THREADS', 'GOTO_NUM_THREADS', 'OMP_NUM_THREADS']: print(('Env %s = %s' % (env_var, os.environ.get(env_var, None)))) os.environ[env_var] = '1' finally: sys.stdout = orig_stdout
def setup():
    '''
    Calls necessary setups.
    '''
    import logging
    import os
    import sys
    # Mark the process as a RETURNN test run for code that checks this flag.
    os.environ['RETURNN_TEST'] = '1'
    logging.basicConfig(level=logging.DEBUG, format='%(message)s')
    import _setup_returnn_env
    import returnn.util.basic as util
    util.init_thread_join_hack()
    util.BehaviorVersion.set_min_behavior_version(util.BehaviorVersion._latest_behavior_version)
    from returnn.util import better_exchook
    if (sys.excepthook != sys.__excepthook__):
        # Some other hook (e.g. a test runner) is installed; chain ours before it.
        prev_sys_excepthook = sys.excepthook

        def _chained_excepthook(exctype, value, traceback):
            # Run better_exchook first, then the previously installed hook.
            better_exchook.better_exchook(exctype, value, traceback)
            prev_sys_excepthook(exctype, value, traceback)
        sys.excepthook = _chained_excepthook
    else:
        sys.excepthook = better_exchook.better_exchook
    better_exchook.replace_traceback_format_tb()
    from returnn.log import log
    log.initialize(verbosity=[5], propagate=False)
    import returnn.util.debug as debug
    debug.install_lib_sig_segfault()
    try:
        # Dump tracebacks on hard crashes (SIGSEGV etc.), if available.
        import faulthandler
        faulthandler.enable()
    except ImportError:
        print('no faulthandler')
    _try_hook_into_tests()
def _try_hook_into_tests():
    '''
    Hook into nosetests or other unittest based frameworks.

    The hook will throw exceptions such that a debugger like PyCharm can inspect them easily.
    This will only be done if there is just a single test case.

    This code might be a bit experimental.
    It should work though. But if it does not, we can also skip this.
    Currently any exception here would be fatal though, as we expect this to work.

    Also see: https://youtrack.jetbrains.com/issue/PY-9848
    '''
    import sys
    import types
    get_trace = getattr(sys, 'gettrace', None)
    in_debugger = False
    if (get_trace and (get_trace() is not None)):
        # A trace function is installed -- we are likely running under a debugger.
        in_debugger = True
    from unittest import TestProgram
    from returnn.util.better_exchook import get_current_frame
    from returnn.util.better_exchook import get_func_str_from_code_object
    top_frame = get_current_frame()
    if (not top_frame):
        # get_current_frame is not available on all Python implementations.
        return
    # Walk the call stack looking for a unittest TestProgram instance.
    test_program = None
    frame = top_frame
    while frame:
        local_self = frame.f_locals.get('self')
        if isinstance(local_self, TestProgram):
            test_program = local_self
            break
        frame = frame.f_back
    test_names = None
    if test_program:
        test_names = getattr(test_program, 'testNames')
    # Likewise look for a pytest Session on the stack.
    test_session = None
    try:
        import pytest
    except ImportError:
        pass
    else:
        frame = top_frame
        while frame:
            local_self = frame.f_locals.get('self')
            if isinstance(local_self, pytest.Session):
                test_session = local_self
                break
            frame = frame.f_back
        if (test_session and (not test_names)):
            test_names = test_session.config.args
    if (not test_names):
        # Not running under a recognized test runner.
        return
    if ((len(test_names) >= 2) or (':' not in test_names[0])):
        # Only hook in when exactly one specific test case (module:testname) was selected.
        return
    if (test_program and in_debugger):
        # nose-style hook: re-raise the exception so the debugger stops on it.

        class _ReraiseExceptionTestHookPlugin():

            @staticmethod
            def _reraise_exception(test, err):
                (exc_class, exc, tb) = err
                print(('Test %s, exception %s %s, reraise now.' % (test, exc_class.__name__, exc)))
                raise exc
            handleFailure = _reraise_exception
            handleError = _reraise_exception
        config = getattr(test_program, 'config')
        config.plugins.addPlugin(_ReraiseExceptionTestHookPlugin())
    if (test_session and in_debugger):
        # pytest: replace the run loop so the single test is executed outside
        # pytest's own exception handling, after pytest has torn down.
        items = []

        def _custom_pytest_runtestloop(session):
            # Collect the single test item instead of running it.
            print('test env hook pytest_runtestloop')
            assert (len(session.items) == len(test_names) == 1)
            items.extend(session.items)

        def _custom_pytest_sessionfinish(session, exitstatus):
            (session, exitstatus)  # unused
            print('test env hook pytest_sessionfinish')

        class _CustomPlugin():

            def pytest_unconfigure(self, config):
                'hook for pytest_unconfigure.'
                print('test env hook pytest_unconfigure')
                # If we are still inside pytest_cmdline_main, unregister all other
                # plugins and reconfigure, so that only our hooks remain.
                frame_ = get_current_frame()
                while frame_:
                    assert isinstance(frame_, types.FrameType)
                    if (get_func_str_from_code_object(frame_.f_code) == 'pytest_cmdline_main'):
                        for plugin in test_session.config.pluginmanager.get_plugins():
                            if (plugin != self):
                                test_session.config.pluginmanager.unregister(plugin)
                        config._do_configure()
                        return
                    frame_ = frame_.f_back
                print('test env hook pytest_unconfigure, final call')
                config.add_cleanup(self._custom_final_cleanup)

            @staticmethod
            def _custom_final_cleanup():
                # Runs after pytest is completely done: call the test directly,
                # so any exception propagates to the debugger.
                print('test env hook pytest config cleanup')
                print('Now calling the test:', items[0])
                items[0].obj()
        test_session.config.hook.pytest_runtestloop = _custom_pytest_runtestloop
        test_session.config.hook.pytest_sessionfinish = _custom_pytest_sessionfinish
        test_session.config.pluginmanager.register(_CustomPlugin())
def find_all_py_source_files():
    """
    :rtype: list[str]
    """
    result = []
    for dirpath, dirnames, filenames in os.walk(_root_dir):
        if dirpath == _root_dir:
            rel = ''
            # At the top level, only descend into these directories (in this order).
            dirnames[:] = ['returnn', 'demos', 'tools']
        else:
            assert dirpath.startswith(_root_dir + '/')
            rel = dirpath[len(_root_dir) + 1:] + '/'
            dirnames[:] = sorted(dirnames)
        # Skip nested checkouts (sub-dirs which have their own .git).
        dirnames[:] = [
            d for d in dirnames
            if not os.path.exists('%s/%s%s/.git' % (_root_dir, rel, d))]
        for fname in sorted(filenames):
            if not fname.endswith('.py'):
                continue
            if fname == '_setup_info_generated.py':
                continue
            result.append(rel + fname)
    return result
class _StdoutTextFold():
    """
    One open stdout log fold (GitHub Actions ``::group::`` / Travis ``travis_fold``
    sections). Instances are created and tracked via :func:`fold_start` / :func:`fold_end`.
    """

    def __init__(self, name):
        """
        :param str name: fold name, also used for the elapsed-time report
        """
        self.name = name
        self.start_time = time.time()
        if github_env:
            # GitHub Actions does not support nested groups; only open at top level.
            if (not folds):
                print(('::group::%s' % name))
        if travis_env:
            print(('travis_fold:start:%s' % name))
        sys.stdout.flush()

    def finish(self):
        """
        End fold.
        """
        elapsed_time = (time.time() - self.start_time)
        print(('%s: Elapsed time: %s' % (self.name, hms(elapsed_time))))
        if travis_env:
            # Bugfix: this previously formatted the fold OBJECT (folds[-1]) into the
            # marker, yielding "travis_fold:end:<_StdoutTextFold object at ...>",
            # which never matches the start marker. The end marker must repeat the
            # fold name used at start.
            print(('travis_fold:end:%s' % self.name))
        if github_env:
            if (len(folds) == 1):
                print('::endgroup::')
        sys.stdout.flush()
def fold_start(name):
    """
    Open a new stdout text fold and push it on the fold stack.

    :param str name:
    """
    new_fold = _StdoutTextFold(name)
    folds.append(new_fold)
def fold_end():
    """
    Ends the fold.
    """
    assert folds
    # Finish first: finish() inspects the fold stack, so pop only afterwards.
    folds[-1].finish()
    del folds[-1]
def check_pycharm_dir(pycharm_dir):
    """
    Sanity-check that the given directory looks like a PyCharm installation.

    :param str pycharm_dir:
    """
    assert os.path.isdir(pycharm_dir)
    inspect_script = os.path.join(pycharm_dir, 'bin', 'inspect.sh')
    assert os.path.exists(inspect_script)
def install_pycharm():
    """
    Download and unpack PyCharm Community into a fresh temp dir.

    :return: pycharm dir
    :rtype: str
    """
    fold_start('script.install')
    # Fix: removed a stray duplicate `print('travis_fold:start:script.install')` here.
    # fold_start() already emits the Travis fold marker when running on Travis;
    # the extra unconditional print produced an unbalanced fold marker on other CIs.
    install_dir = tempfile.mkdtemp()
    pycharm_dir = '%s/pycharm' % install_dir
    print('Install PyCharm into:', pycharm_dir)
    sys.stdout.flush()
    pycharm_version = (2020, 2)  # pinned version to download
    name = 'pycharm-community-%i.%i' % pycharm_version
    fn = '%s.tar.gz' % name
    # -c resumes partial downloads; dot:mega keeps CI logs short.
    subprocess.check_call(
        ['wget', '--progress=dot:mega', '-c', 'https://download.jetbrains.com/python/%s' % fn],
        cwd=install_dir, stderr=subprocess.STDOUT)
    tar_out = subprocess.check_output(['tar', '-xzvf', fn], cwd=install_dir, stderr=subprocess.STDOUT)
    # Only show the tail of the (very long) tar listing.
    print(b'\n'.join(tar_out.splitlines()[-10:]).decode('utf8'))
    assert os.path.isdir('%s/%s' % (install_dir, name))
    os.remove('%s/%s' % (install_dir, fn))
    os.rename('%s/%s' % (install_dir, name), pycharm_dir)
    check_pycharm_dir(pycharm_dir)
    fold_end()
    return pycharm_dir
def get_version_str_from_pycharm(pycharm_dir):
    """
    :param str pycharm_dir:
    :return: e.g. "CE2018.3"
    :rtype: str
    """
    import re
    import json
    info_fn = '%s/product-info.json' % pycharm_dir
    if os.path.exists(info_fn):
        # Newer PyCharm versions ship product-info.json with the data dir name.
        with open(info_fn) as f:
            info = json.load(f)
        data_dir_name = info['dataDirectoryName']
        assert isinstance(data_dir_name, str)
        assert data_dir_name.startswith('PyCharm')
        return data_dir_name[len('PyCharm'):]
    # Fallback: parse the version selector out of the launcher script.
    with open('%s/bin/pycharm.sh' % pycharm_dir) as f:
        launcher_code = f.read()
    m = re.search('-Didea\\.paths\\.selector=PyCharm(\\S+) ', launcher_code)
    assert m, 'pycharm %r not as expected' % pycharm_dir
    return m.group(1)
def parse_pycharm_version(version_str):
    """
    :param str version_str: e.g. "CE2018.3"
    :rtype: ((int,int),str)
    :return: e.g. (2018,3),"CE"
    """
    edition = ''
    if version_str.startswith('CE'):
        edition = 'CE'
        version_str = version_str[2:]
    # All supported versions are year-based ("2xxx.x").
    assert version_str.startswith('2')
    parts = version_str.split('.')
    assert len(parts) == 2, 'version %r' % version_str
    major, minor = (int(p) for p in parts)
    return (major, minor), edition
def create_stub_dir(pycharm_dir, stub_dir, pycharm_major_version):
    '''
    Generate PyCharm Python skeleton stubs into stub_dir, using the stub
    generator bundled with the given PyCharm installation.

    :param str pycharm_dir:
    :param str stub_dir:
    :param int pycharm_major_version: e.g. 2020; selects the generator layout
    '''
    fold_start('script.create_python_stubs')
    print('Generating Python stubs via helpers/generator3.py...')
    if (pycharm_major_version >= 2020):
        # 2020+ layout: generator3 is a package under plugins/python-ce.
        generator_path = ('%s/plugins/python-ce/helpers/generator3/__main__.py' % pycharm_dir)
        assert os.path.exists(generator_path)
        cmd = [sys.executable, generator_path, '-d', stub_dir]
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        (stdout, _) = proc.communicate()
        if (proc.returncode != 0):
            raise subprocess.CalledProcessError(returncode=proc.returncode, cmd=cmd, output=stdout)
        for line in stdout.splitlines():
            line = line.decode('utf8')
            # Truncate very long lines to keep the CI log readable.
            if (len(line) < 240):
                print(line)
            else:
                print((line[:240] + '...'))
    elif (pycharm_major_version <= 2019):
        # Older layout: single generator3.py script; needs explicit per-module runs.
        generator_path = ('%s/helpers/generator3.py' % pycharm_dir)
        assert os.path.exists(generator_path)
        subprocess.check_call([sys.executable, generator_path, '-d', stub_dir, '-b'])
        print('Collecting further native modules...')
        sys.stdout.flush()
        mod_names = []
        # `-L` lists available binary modules; first line is a header.
        for line in subprocess.check_output([sys.executable, generator_path, '-L']).decode('utf8').splitlines()[1:]:
            assert isinstance(line, str)
            mod_name = line.split()[0]
            if (mod_name not in mod_names):
                mod_names.append(mod_name)
        for mod_name in mod_names:
            print(('Generate for %r.' % mod_name))
            sys.stdout.flush()
            # Best-effort: some modules fail to generate; ignore their exit code.
            subprocess.call([sys.executable, generator_path, '-d', stub_dir, mod_name])
    fold_end()
def setup_pycharm_python_interpreter(pycharm_dir):
    '''
    Unfortunately, the headless PyCharm bin/inspect will use the global PyCharm settings,
    and requires that we have a Python interpreter set up,
    with the same name as we use in our `.idea` settings, which we will link in :func:`prepare_src_dir`.
    See here: https://youtrack.jetbrains.com/issue/PY-34864

    Our current way to work around this: We create (or extend) the file
    ``~/.PyCharm<VERSION>/config/options/jdk.table.xml`` such that it has the right Python interpreter.

    :param str pycharm_dir:
    '''
    # Optionally install packages whose presence improves inspection coverage.
    fold_start('script.opt_install_further_py_deps')
    if ((not pip_check_is_installed('tensorflow')) and (not pip_check_is_installed('tensorflow-gpu'))):
        pip_install('tensorflow')
    for pkg in ['typing', 'librosa==0.8.1', 'PySoundFile', 'nltk', 'matplotlib', 'mpi4py', 'pycodestyle']:
        if (not pip_check_is_installed(pkg)):
            try:
                pip_install(pkg)
            except subprocess.CalledProcessError as exc:
                # Best-effort: missing optional deps only reduce inspection quality.
                print('Pip install failed:', exc)
                print('Ignore...')
    fold_end()
    fold_start('script.setup_pycharm_python_interpreter')
    print('Setup PyCharm Python interpreter... (jdk.table.xml)')
    print('Current Python:', sys.executable, sys.version, sys.version_info)
    # Must match the interpreter name referenced by the project's .idea settings.
    name = 'Python 3 (.../bin/python3)'
    pycharm_version_str = get_version_str_from_pycharm(pycharm_dir)
    (pycharm_version, pycharm_version_name) = parse_pycharm_version(pycharm_version_str)
    # Config/system dir layout depends on platform and PyCharm version.
    if (sys.platform == 'darwin'):
        pycharm_config_dir = os.path.expanduser(('~/Library/Preferences/PyCharm%s' % pycharm_version_str))
        pycharm_system_dir = os.path.expanduser(('~/Library/Caches/PyCharm%s' % pycharm_version_str))
    elif (pycharm_version[0] >= 2020):
        pycharm_config_dir = os.path.expanduser(('~/.config/JetBrains/PyCharm%s' % pycharm_version_str))
        pycharm_system_dir = os.path.expanduser(('~/.cache/JetBrains/PyCharm%s' % pycharm_version_str))
    else:
        pycharm_config_dir = os.path.expanduser(('~/.PyCharm%s/config' % pycharm_version_str))
        pycharm_system_dir = os.path.expanduser(('~/.PyCharm%s/system' % pycharm_version_str))
    if _use_stub_zip:
        # Use a prebuilt stub zip downloaded from our mirror.
        stub_base_name = 'pycharm2018.3-python3.6-stubs'
        stub_fn = ('%s/python_stubs/%s.zip' % (pycharm_system_dir, stub_base_name))
        stub_dir = ('%s/python_stubs/%s' % (pycharm_system_dir, stub_base_name))
        os.makedirs(os.path.dirname(stub_fn), exist_ok=True)
        if os.path.exists(stub_dir):
            print('Python stubs dir exists already:', stub_dir)
        else:
            if (not os.path.exists(stub_fn)):
                subprocess.check_call(
                    ['wget',
                     ('https://www-i6.informatik.rwth-aachen.de/web/Software/returnn/%s.zip' % stub_base_name)],
                    cwd=os.path.dirname(stub_fn))
            assert os.path.exists(stub_fn)
            subprocess.check_call(
                ['unzip', ('%s.zip' % stub_base_name), '-d', stub_base_name],
                cwd=os.path.dirname(stub_fn))
        assert os.path.isdir(stub_dir)
    else:
        # Generate stubs for the currently running interpreter version.
        stub_dir = ('%s/python_stubs/python%s-generated' % (pycharm_system_dir, ('%i.%i.%i' % sys.version_info[:3])))
        if os.path.exists(stub_dir):
            print(('Python stubs already exists, not recreating (%s)' % stub_dir))
        else:
            print('Generate stub dir:', stub_dir)
            os.makedirs(stub_dir)
            create_stub_dir(pycharm_dir=pycharm_dir, stub_dir=stub_dir, pycharm_major_version=pycharm_version[0])
    jdk_table_fn = ('%s/options/jdk.table.xml' % pycharm_config_dir)
    print('Filename:', jdk_table_fn)
    os.makedirs(os.path.dirname(jdk_table_fn), exist_ok=True)
    if os.path.exists(jdk_table_fn):
        # Extend the existing interpreter table.
        print('Loading existing jdk.table.xml.')
        et = ElementTree.parse(jdk_table_fn)
        root = et.getroot()
        assert isinstance(root, ElementTree.Element)
        jdk_collection = root.find('./component')
        assert isinstance(jdk_collection, ElementTree.Element)
        assert ((jdk_collection.tag == 'component') and (jdk_collection.attrib['name'] == 'ProjectJdkTable'))
    else:
        # Create the table from scratch.
        print('Creating new jdk.table.xml.')
        root = ElementTree.Element('application')
        et = ElementTree.ElementTree(root)
        jdk_collection = ElementTree.SubElement(root, 'component', name='ProjectJdkTable')
        assert isinstance(jdk_collection, ElementTree.Element)
    # Replace any stale entry for the same interpreter name.
    existing_jdk = jdk_collection.find(("./jdk/name[@value='%s']/.." % name))
    if existing_jdk:
        print(('Found existing Python interpreter %r. Remove and recreate.' % name))
        assert isinstance(existing_jdk, ElementTree.Element)
        assert (existing_jdk.find('./name').attrib['value'] == name)
        jdk_collection.remove(existing_jdk)
    # Example of the XML structure we construct below:
    '\n    <application>\n      <component name="ProjectJdkTable">\n        <jdk version="2">\n          <name value="Python 2.7.3 (/usr/bin/python2.7)" />\n          <type value="Python SDK" />\n          <version value="Python 2.7.12" />\n          <homePath value="/usr/bin/python2.7" />\n          <roots>\n            <classPath>\n              <root type="composite">\n                <root url="file:///usr/bin" type="simple" />\n                ...\n              </root>\n            </classPath>\n            <sourcePath>\n              <root type="composite" />\n            </sourcePath>\n          </roots>\n          <additional />\n        </jdk>\n      </component>\n    </application>\n    '
    jdk_entry = ElementTree.SubElement(jdk_collection, 'jdk', version='2')
    ElementTree.SubElement(jdk_entry, 'name', value=name)
    ElementTree.SubElement(jdk_entry, 'type', value='Python SDK')
    ElementTree.SubElement(jdk_entry, 'version', value=('Python %i.%i.%i' % sys.version_info[:3]))
    ElementTree.SubElement(jdk_entry, 'homePath', value=sys.executable)
    paths_root = ElementTree.SubElement(jdk_entry, 'roots')
    classes_paths = ElementTree.SubElement(ElementTree.SubElement(paths_root, 'classPath'), 'root', type='composite')
    # Class path: current sys.path (minus our own dirs) plus stubs and PyCharm-bundled skeletons.
    relevant_paths = list(sys.path)
    if (root_dir in relevant_paths):
        relevant_paths.remove(root_dir)
    if (my_dir in relevant_paths):
        relevant_paths.remove(my_dir)
    relevant_paths.extend([
        stub_dir,
        '$APPLICATION_HOME_DIR$/helpers/python-skeletons',
        '$APPLICATION_HOME_DIR$/helpers/typeshed/stdlib/3',
        '$APPLICATION_HOME_DIR$/helpers/typeshed/stdlib/2and3',
        '$APPLICATION_HOME_DIR$/helpers/typeshed/third_party/3',
        '$APPLICATION_HOME_DIR$/helpers/typeshed/third_party/2and3'])
    for path in relevant_paths:
        ElementTree.SubElement(classes_paths, 'root', url=('file://%s' % path), type='simple')
    ElementTree.SubElement(ElementTree.SubElement(paths_root, 'sourcePath'), 'root', type='composite')
    ElementTree.SubElement(jdk_entry, 'additional')
    print('Save XML.')
    et.write(jdk_table_fn, encoding='UTF-8')
    # Dump the resulting XML (pretty-printed) into its own fold for debugging.
    fold_start('script.jdk_table')
    print('XML content:')
    rough_string = ElementTree.tostring(root, 'utf-8')
    print(minidom.parseString(rough_string).toprettyxml(indent=' '))
    fold_end()
    fold_end()
def read_spelling_dict():
    """
    :rtype: list[str]
    """
    with open('%s/spelling.dic' % my_dir) as f:
        return f.read().splitlines()
def create_spelling_dict_xml(src_dir):
    """
    Need to create this on-the-fly for the current user.

    Resulting layout::

        <component name="ProjectDictionaryState">
          <dictionary name="az">
            <words>
              <w>dtype</w>
              <w>idxs</w>
              <w>keepdims</w>
              ...
            </words>
          </dictionary>
        </component>
    """
    from returnn.util.basic import get_login_username
    user_name = get_login_username()
    component = ElementTree.Element('component', name='ProjectDictionaryState')
    dictionary = ElementTree.SubElement(component, 'dictionary', name=user_name)
    words_elem = ElementTree.SubElement(dictionary, 'words')
    for word in read_spelling_dict():
        ElementTree.SubElement(words_elem, 'w').text = word
    tree = ElementTree.ElementTree(component)
    print('Save XML.')
    xml_filename = '%s/.idea/dictionaries/%s.xml' % (src_dir, user_name)
    os.makedirs(os.path.dirname(xml_filename), exist_ok=True)
    tree.write(xml_filename, encoding='UTF-8')
def prepare_src_dir(files=None):
    """
    New clean source dir, where we symlink only the relevant src files.

    :param list[str]|None files:
    :return: src dir
    :rtype: str
    """
    fold_start('script.prepare')
    print('Prepare project source files...')
    if not files:
        files = ['returnn', 'tools', 'demos', 'rnn.py', 'setup.py', '__init__.py']
    src_tmp_dir = '%s/returnn' % tempfile.mkdtemp()
    os.mkdir(src_tmp_dir)
    # Project settings (.idea) come from our checked-in template.
    shutil.copytree('%s/PyCharm.idea' % my_dir, '%s/.idea' % src_tmp_dir, symlinks=True)
    for entry in files:
        src_path = '%s/%s' % (root_dir, entry)
        dst_path = '%s/%s' % (src_tmp_dir, os.path.basename(src_path))
        if os.path.isdir(src_path):
            shutil.copytree(src_path, dst_path, symlinks=True)
        else:
            shutil.copy(src_path, dst_path)
    create_spelling_dict_xml(src_tmp_dir)
    print('All source files:')
    sys.stdout.flush()
    subprocess.check_call(['ls', '-la', src_tmp_dir])
    fold_end()
    return src_tmp_dir
def run_inspect(pycharm_dir, src_dir, skip_pycharm_inspect=False):
    """
    Runs the PyCharm code inspection (optional) plus pycodestyle (PEP8)
    on all RETURNN Python source files, writing the results as
    PyCharm-style inspection XML files into a temp dir.

    :param str pycharm_dir: PyCharm installation dir (with ``bin/inspect.sh``)
    :param str src_dir: prepared source dir to inspect
    :param bool skip_pycharm_inspect: if True, only run pycodestyle
    :return: dir of xml files
    :rtype: str
    """
    out_tmp_dir = tempfile.mkdtemp()
    fold_start('script.inspect')
    if (not skip_pycharm_inspect):
        # PyCharm writes one XML file per inspection class into out_tmp_dir.
        cmd = [('%s/bin/inspect.sh' % pycharm_dir), src_dir, ('%s/PyCharm-inspection-profile.xml' % my_dir), out_tmp_dir, '-v2']
        print(('$ %s' % ' '.join(cmd)))
        subprocess.check_call(cmd, stderr=subprocess.STDOUT)
    # Collect pycodestyle findings into the same XML schema as PyCharm.
    root = ElementTree.Element('problems')
    from lint_common import find_all_py_source_files
    for py_src_file in find_all_py_source_files():
        # Codes ignored here; E203 added separately (whitespace before ':').
        ignore_codes = 'E121,E123,E126,E226,E24,E704,W503,W504'
        ignore_codes += ',E203'
        indent_size = 4
        cmd = [sys.executable, '-m', 'pycodestyle', py_src_file, ('--ignore=%s' % ignore_codes), ('--indent-size=%i' % indent_size), '--max-line-length=120']
        print(('$ %s' % ' '.join(cmd)))
        sys.stdout.flush()
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        (stdout, _) = proc.communicate()
        problem_count = 0
        for line in stdout.decode('utf8').splitlines():
            # pycodestyle output format: "<file>:<line>:<col>: <code> <description>"
            m = re.match('^(.*):([0-9]+):([0-9]+): ([EW][0-9]+) (.+)$', line)
            assert m, ('unexpected line %r' % line)
            (fn_, line_nr, col_nr, warn_id, description) = m.groups()
            assert (fn_ == py_src_file), ('unexpected line %r' % line)
            (line_nr, col_nr) = (int(line_nr), int(col_nr))
            description = ('%s: %s' % (warn_id, description))
            prob = ElementTree.SubElement(root, 'problem')
            ElementTree.SubElement(prob, 'file').text = ('file://$PROJECT_DIR$/%s' % py_src_file)
            ElementTree.SubElement(prob, 'line').text = str(line_nr)
            ElementTree.SubElement(prob, 'offset').text = str(col_nr)
            ElementTree.SubElement(prob, 'problem_class', severity='WEAK WARNING', id=warn_id).text = description
            ElementTree.SubElement(prob, 'description').text = description
            problem_count += 1
        if (proc.returncode != 0):
            # Non-zero exit without findings would mean pycodestyle itself broke.
            assert (problem_count > 0), 'pycodestyle returned error but did not list any problems'
    et = ElementTree.ElementTree(root)
    et.write(('%s/Pep8CodeStyle.xml' % out_tmp_dir), encoding='UTF-8')
    fold_end()
    return out_tmp_dir
def report_inspect_xml(fn):
    """
    Parses one PyCharm inspection result XML file.

    Example content::

        <problems is_local_tool="true">
        <problem>
          <file>file://$PROJECT_DIR$/TFUtil.py</file>
          <line>1</line>
          <module>returnn</module>
          <entry_point TYPE="file" FQNAME="file://$PROJECT_DIR$/TFUtil.py" />
          <problem_class severity="WARNING" attribute_key="WARNING_ATTRIBUTES">Package requirements</problem_class>
          <description>Package requirements 'h5py', 'theano==0.9' are not satisfied</description>
        </problem>
        </problems>

    :param str fn: XML file path; its basename (w/o ext) is the inspection class
    :return: list of (filename, line, problem_severity, inspect_class, description)
    :rtype: list[(str,int,str,str,str)]
    """
    # The inspection class is encoded in the filename, e.g. PyArgumentListInspection.xml.
    inspect_class = os.path.splitext(os.path.basename(fn))[0]
    # Known false positives are downgraded (marked, not dropped).
    false_positive_markers = {
        'PyArgumentListInspection': ("'d0' unfilled", "'d1' unfilled", "'self' unfilled"),
        'PyStringFormatInspection': ('Unexpected type None',),
    }
    markers = false_positive_markers.get(inspect_class, ())
    root = ElementTree.parse(fn).getroot()
    assert isinstance(root, ElementTree.Element)
    assert root.tag == 'problems'
    entries = []
    for problem in root.findall('./problem'):
        assert isinstance(problem, ElementTree.Element)
        assert problem.tag == 'problem'
        filename = problem.find('./file').text.strip()
        prefix = 'file://$PROJECT_DIR$/'
        if filename.startswith(prefix):
            filename = filename[len(prefix):]
        line = int(problem.find('./line').text.strip())
        severity = problem.find('./problem_class').attrib['severity']
        description = problem.find('./description').text.strip()
        if any(marker in description for marker in markers):
            severity = 'POSSIBLE-FALSE %s' % severity
        entries.append((filename, line, severity, inspect_class, description))
    return entries
def report_inspect_dir(inspect_xml_dir, inspect_class_blacklist=None, inspect_class_not_counted=None, ignore_count_for_files=()):
    """
    Reads all inspection XML reports from a dir (or a single XML file),
    prints them grouped per source file, and counts the relevant (fatal) ones.

    :param str inspect_xml_dir: dir with XML files, or a single XML file
    :param set[str]|None inspect_class_blacklist: inspection classes to skip entirely
    :param set[str]|None inspect_class_not_counted: still printed, but not counted as fatal
    :param set[str]|tuple[str]|None ignore_count_for_files: files whose reports are never counted
    :return: count of reports
    :rtype: int
    """
    if os.path.isfile(inspect_xml_dir):
        assert inspect_xml_dir.endswith('.xml')
        inspect_xml_files = [inspect_xml_dir]
    else:
        assert os.path.isdir(inspect_xml_dir)
        inspect_xml_files = list(glob((inspect_xml_dir + '/*.xml')))
        assert inspect_xml_files
    inspections = []
    for fn in inspect_xml_files:
        inspections.extend(report_inspect_xml(fn))
    inspections.sort()
    # Sentinel entry, so the loop below flushes the last file group.
    inspections.append((None, None, None, None, None))
    inspect_class_blacklist = set((inspect_class_blacklist or ()))
    inspect_class_not_counted = set((inspect_class_not_counted or ()))
    from lint_common import find_all_py_source_files
    returnn_py_source_files = set(find_all_py_source_files())
    # First pass: find which files have any counted (relevant) inspection at all.
    all_files = set()
    relevant_inspections_for_file = set()
    explicitly_ignored_files = ignore_count_for_files
    ignore_count_for_files = set(ignore_count_for_files)
    for (filename, line, problem_severity, inspect_class, description) in inspections:
        all_files.add(filename)
        if (filename not in returnn_py_source_files):
            continue
        if (inspect_class in inspect_class_blacklist):
            continue
        if (inspect_class in inspect_class_not_counted):
            continue
        if problem_severity.startswith('POSSIBLE-FALSE '):
            continue
        relevant_inspections_for_file.add(filename)
    # Files with only non-relevant reports are also treated as "ignored for counting".
    for filename in all_files:
        if (filename not in relevant_inspections_for_file):
            ignore_count_for_files.add(filename)
    print('Reporting individual files. We skip all files which have no warnings at all.')
    color = better_exchook.Color()
    total_relevant_count = 0
    file_count = None
    last_filename = None
    # Second pass: print per-file fold sections and count the fatal reports.
    for (filename, line, problem_severity, inspect_class, description) in inspections:
        if (filename and (filename not in returnn_py_source_files)):
            continue
        if (inspect_class in inspect_class_blacklist):
            continue
        if (filename != last_filename):
            if last_filename:
                # Close the previous file section with a summary line.
                if (last_filename in explicitly_ignored_files):
                    msg = color.color('This file is on the ignore list.', color=gray_color)
                elif (last_filename not in returnn_py_source_files):
                    msg = color.color('This file is not part of the official RETURNN Python source code.', color=gray_color)
                elif (last_filename in ignore_count_for_files):
                    msg = color.color('The inspection reports for this file are all non critical.', color=gray_color)
                else:
                    msg = color.color('The inspection reports for this file are fatal!', color='red')
                print(msg)
                fold_end()
            if filename:
                file_msg = color.color(('File: %s' % filename), color=(gray_color if (filename in ignore_count_for_files) else 'red'))
                if github_env:
                    # GitHub Actions shows the fold title itself, no extra print needed.
                    fold_start(file_msg)
                else:
                    fold_start(('inspect.%s' % filename))
                    print(file_msg)
                last_filename = filename
                file_count = 0
        if (not filename):
            continue  # sentinel entry
        # For ignored files, print at most 10 warnings.
        if ((filename in ignore_count_for_files) and (file_count >= 10)):
            if (file_count == 10):
                print('... (further warnings skipped)')
            file_count += 1
            continue
        msg = ('%s:%i: %s %s: %s' % (filename, line, problem_severity, inspect_class, description))
        msg_counted = True
        if (inspect_class in inspect_class_not_counted):
            msg_counted = False
        if problem_severity.startswith('POSSIBLE-FALSE '):
            msg_counted = False
        if msg_counted:
            print(color.color(msg, color='red'))
            if (filename not in ignore_count_for_files):
                total_relevant_count += 1
        else:
            print(color.color(msg, color=gray_color))
        file_count += 1
    print('Total relevant inspection reports:', total_relevant_count)
    return total_relevant_count
def main():
    """
    Main entry point for this script.

    Parses command line options, optionally installs/sets up PyCharm and its
    Python interpreter, runs the inspections, and reports them.
    Exits with code 1 if there are relevant (fatal) inspection reports.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--xml')
    arg_parser.add_argument('--pycharm')
    arg_parser.add_argument('--setup_pycharm_only', action='store_true')
    arg_parser.add_argument('--skip_setup_pycharm', action='store_true')
    arg_parser.add_argument('--skip_pycharm_inspect', action='store_true', help='only PEP8')
    arg_parser.add_argument('--files', nargs='*')
    args = arg_parser.parse_args()
    from lint_common import ignore_count_for_files
    # Fix: the blacklist was `{}` (an empty *dict*); `set()` states the intent.
    # Behavior is unchanged (report_inspect_dir converts via `set(... or ())`).
    inspect_kwargs = dict(
        inspect_class_blacklist=set(),
        inspect_class_not_counted={'PyTypeCheckerInspection', 'SpellCheckingInspection', 'PyClassHasNoInitInspection', 'PyMethodMayBeStaticInspection'},
        ignore_count_for_files=ignore_count_for_files)
    if args.xml:
        # Only report from existing XML files; do not run any inspection.
        if (report_inspect_dir(args.xml, **inspect_kwargs) > 0):
            sys.exit(1)
        return
    if args.pycharm:
        pycharm_dir = args.pycharm
        check_pycharm_dir(pycharm_dir)
    else:
        pycharm_dir = install_pycharm()
    if ((not args.skip_setup_pycharm) and (not args.skip_pycharm_inspect)):
        setup_pycharm_python_interpreter(pycharm_dir=pycharm_dir)
    if args.setup_pycharm_only:
        return
    src_dir = prepare_src_dir(files=args.files)
    res_dir = run_inspect(pycharm_dir=pycharm_dir, src_dir=src_dir, skip_pycharm_inspect=args.skip_pycharm_inspect)
    if (report_inspect_dir(res_dir, **inspect_kwargs) > 0):
        sys.exit(1)
def setup():
    """
    Some generic setup: make sure the required pip packages are installed.
    """
    print('travis_fold:start:script.install')
    required_pkgs = ('pylint', 'better-exchook')
    for required_pkg in required_pkgs:
        if pip_check_is_installed(required_pkg):
            continue
        pip_install(required_pkg)
    print('travis_fold:end:script.install')
def main():
    """
    Main entry point: runs PyLint on all RETURNN Python source files
    and exits with code 1 if any relevant (non-ignored) file has errors.
    """
    setup()
    from lint_common import ignore_count_for_files, find_all_py_source_files
    color = better_exchook.Color()
    num_relevant_files_with_errors = 0
    for rel_filename in find_all_py_source_files():
        print(('travis_fold:start:pylint.%s' % rel_filename))
        extra_args = []
        if (('/' in rel_filename) and (not rel_filename.startswith('returnn/'))):
            # Non-package files (tools/tests): allow dashes in module names,
            # and code before imports.
            extra_args += ['--module-rgx=([a-z_][a-z0-9_\\-]*)$']
            extra_args += ['--disable=wrong-import-position']
        proc = subprocess.Popen((['pylint', rel_filename] + extra_args), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        (stdout, _) = proc.communicate()
        stdout = stdout.decode('utf8')
        file_has_errors = (proc.returncode != 0)
        print(color.color(('File: %s' % rel_filename), color=('black' if ((rel_filename in ignore_count_for_files) or (not file_has_errors)) else 'red')))
        if (('EXCEPTION' in stdout) and ('RecursionError' in stdout)):
            # Known PyLint crash; do not treat it as a lint finding.
            print('PyLint issue #1452 triggered. https://github.com/PyCQA/pylint/issues/1452')
        elif (rel_filename in ignore_count_for_files):
            # Truncate output for files we do not count anyway.
            print(stdout[:1000])
            print('... (ignored further output; file is not relevant)')
        else:
            print(stdout)
        print('Return code:', proc.returncode)
        if file_has_errors:
            if (rel_filename in ignore_count_for_files):
                print('The inspection reports for this file are currently ignored.')
            else:
                print(color.color('The inspection reports for this file are fatal!', color='red'))
                num_relevant_files_with_errors += 1
        print(('travis_fold:end:pylint.%s' % rel_filename))
    if num_relevant_files_with_errors:
        print('Num relevant files with errors:', num_relevant_files_with_errors)
        sys.exit(1)
def setup(old_style=False, target_package_name='returnn'):
    """
    Setup env/sys.path such that ``import returnn`` works.

    :param bool old_style: make it an old-style setup (repo root is the package dir)
    :param str target_package_name: name under which the package becomes importable
    """
    print('Setup for importing RETURNN as framework/package.')
    link_parent_dir = tempfile.mkdtemp()
    print('Temp dir:', link_parent_dir)
    if old_style:
        print('Old-style setup!')
    # Old-style: symlink the repo root itself; new-style: the returnn subdir.
    link_target = _base_dir if old_style else ('%s/returnn' % _base_dir)
    os.symlink(link_target, '%s/%s' % (link_parent_dir, target_package_name))
    sys.path.insert(0, link_parent_dir)
    print('Import %s module/package.' % target_package_name)
    if target_package_name == 'returnn':
        import returnn
    else:
        __import__(target_package_name)
    print('Setup better_exchook.')
    if target_package_name != 'returnn':
        __import__(target_package_name).better_exchook.install()
    elif old_style:
        from returnn import better_exchook
        better_exchook.install()
    else:
        from returnn.util import better_exchook
        better_exchook.install()
def test_TaskSystem_Pickler():
    """Check that the extended task-system Pickler can dump a plain dict."""
    from io import BytesIO
    from returnn.util.task_system import Pickler
    out_stream = BytesIO()
    Pickler(out_stream).dump({'foo': 'bar'})
def test_old_style_import_crnn_TFUtil():
    """
    This assumes that you use ``--returnn-package-name "crnn"``.
    """
    import crnn.TFUtil as tf_util_mod
    print('TF:', tf_util_mod.tf_version_tuple())
def test_old_style_import_TFUtil():
    """Old-style ``import returnn.TFUtil`` must still work."""
    import returnn.TFUtil as tf_util_mod
    print('TF:', tf_util_mod.tf_version_tuple())
@contextlib.contextmanager
def tf_scope():
    """
    Context manager: creates a fresh TF graph and session,
    makes both the default, and yields the session.
    """
    with tf_compat.v1.Graph().as_default(), tf_compat.v1.Session().as_default() as session:
        (yield session)
def run_model(extern_data: TensorDict, get_model: rf.GetModelFunc, forward_step: rf.StepFunc, *, dyn_dim_max_sizes: Optional[Dict[(Dim, int)]]=None, dyn_dim_min_sizes: Optional[Dict[(Dim, int)]]=None, test_tensorflow: bool=True) -> TensorDict:
    """
    Runs the given forward step with random input, once with the PyTorch
    backend and (optionally) once with the TF-net-dict backend, and checks
    that both runs produce equal output templates and values.

    :param extern_data: template for the model inputs; filled with random data here
    :param get_model: callback to construct the model
    :param forward_step: callback which runs the model and marks outputs
    :param dyn_dim_max_sizes: max sizes for dynamic dims of the random data
    :param dyn_dim_min_sizes: min sizes for dynamic dims of the random data
    :param test_tensorflow: also run the TF backend and compare
    :return: model outputs (from the PyTorch run)
    """
    print(f'* run_model with dyn_dim_max_sizes={dyn_dim_max_sizes!r}')
    extern_data.reset_content()
    tensor_dict_fill_random_numpy_(extern_data, dyn_dim_max_sizes=dyn_dim_max_sizes, dyn_dim_min_sizes=dyn_dim_min_sizes)
    print('** run with PyTorch backend')
    # Record random draws so the TF run below can replay identical values.
    with rft.TorchBackend.random_journal_record() as random_journal:
        out_pt = _run_model_torch(extern_data, get_model, forward_step)
        _pad_mask_zeros(out_pt)  # zero the padding so raw comparisons are meaningful
        out_pt_raw = out_pt.as_raw_tensor_dict(include_const_sizes=True)
    if (not test_tensorflow):
        return out_pt
    print('** run with TensorFlow-net-dict backend')
    with rfl.ReturnnLayersBackend.random_journal_replay(random_journal):
        out_tf = _run_model_net_dict_tf(extern_data, get_model, forward_step)
        _pad_mask_zeros(out_tf)
        out_tf_raw = out_tf.as_raw_tensor_dict(include_const_sizes=True)
    random_journal: RandomJournal
    # The TF run must have consumed all recorded random values.
    assert random_journal.reached_end()
    print(out_pt, out_tf)
    # Compare the output templates (dims, dtypes, sparse dims) ...
    assert (set(out_pt.data.keys()) == set(out_tf.data.keys()))
    for (k, v_pt) in out_pt.data.items():
        v_tf = out_tf[k]
        assert (len(v_pt.dims) == len(v_tf.dims))
        assert (v_pt.feature_dim_axis == v_tf.feature_dim_axis)
        for (d_pt, d_tf) in zip(v_pt.dims, v_tf.dims):
            _check_dim(d_pt, d_tf)
        # Integer widths may differ between the backends; only the base kind must match.
        if v_pt.dtype.startswith('int'):
            assert v_tf.dtype.startswith('int')
        else:
            assert (v_pt.dtype == v_tf.dtype)
        assert (bool(v_pt.sparse_dim) == bool(v_tf.sparse_dim))
        if v_pt.sparse_dim:
            _check_dim(v_pt.sparse_dim, v_tf.sparse_dim)
    # ... and the raw numeric values.
    assert (set(out_pt_raw.keys()) == set(out_tf_raw.keys()))
    for (k, v_pt) in out_pt_raw.items():
        v_tf = out_tf_raw[k]
        numpy.testing.assert_allclose(v_pt, v_tf, atol=1e-05, rtol=1e-05, err_msg=f'output {k!r} differs')
    return out_pt
def _run_model_torch(extern_data: TensorDict, get_model: rf.GetModelFunc, forward_step: rf.StepFunc) -> TensorDict:
    """
    Runs one forward step with the PyTorch backend; returns outputs as numpy.
    ``extern_data`` is restored to its raw (numpy) content afterwards.
    """
    # Keep a copy of the raw numpy content so extern_data can be restored at the end.
    extern_data_raw = extern_data.as_raw_tensor_dict(expected_value_type=numpy.ndarray)
    rf.select_backend_torch()
    rf.set_random_seed(42)
    tensor_dict_numpy_to_torch_(extern_data)
    model = get_model(epoch=1, step=0)
    rf.init_forward_step_run_ctx(epoch=1, step=0)
    forward_step(model=model, extern_data=extern_data)
    outputs = rf.get_run_ctx().outputs
    assert outputs.data
    tensor_dict_torch_to_numpy_(outputs)
    extern_data.assign_from_raw_tensor_dict_(extern_data_raw)
    return outputs
def run_model_torch_train(extern_data: TensorDict, get_model: rf.GetModelFunc, train_step: rf.StepFunc, *, dyn_dim_max_sizes: Optional[Dict[(Dim, int)]]=None, dyn_dim_min_sizes: Optional[Dict[(Dim, int)]]=None) -> Dict[(str, float)]:
    """
    Runs one train step with the PyTorch backend on random input,
    including a full backprop pass, and collects all losses.

    :param extern_data: template for the model inputs; filled with random data here
    :param get_model: callback to construct the model
    :param train_step: callback which runs the model and marks losses
    :param dyn_dim_max_sizes: max sizes for dynamic dims of the random data
    :param dyn_dim_min_sizes: min sizes for dynamic dims of the random data
    :return: dict with 'total_loss' plus per-loss ':summed', ':mean', ':inv_norm_factor' entries
    """
    rf.select_backend_torch()
    rf.set_random_seed(42)
    extern_data.reset_content()
    tensor_dict_fill_random_numpy_(extern_data, dyn_dim_max_sizes=dyn_dim_max_sizes, dyn_dim_min_sizes=dyn_dim_min_sizes)
    tensor_dict_numpy_to_torch_(extern_data)
    for v in extern_data.data.values():
        v: Tensor
        # Enable grads on the inputs so backprop through the whole model is exercised.
        v.raw_tensor.requires_grad = True
    model = get_model(epoch=1, step=0)
    rf.init_train_step_run_ctx(train_flag=True, step=0, epoch=1)
    train_step(model=model, extern_data=extern_data)
    total_loss = rf.get_run_ctx().total_loss()
    assert (isinstance(total_loss, Tensor) and (not total_loss.dims) and total_loss.raw_tensor.dtype.is_floating_point)
    total_loss_v = total_loss.raw_tensor.detach().numpy().item()
    print('total loss (for backprop):', total_loss_v)
    # NOTE(review): 'total_loss' stores the Tensor itself, not the float,
    # although the return type hint says float — confirm callers rely on this.
    res = {'total_loss': total_loss}
    total_loss.raw_tensor.backward()
    for (k, loss) in rf.get_run_ctx().losses.items():
        loss_v = loss.get_summed_loss().raw_tensor.detach().cpu().numpy().item()
        res[f'{k}:summed'] = loss_v
        print(f'loss (summed) {k!r}: {loss_v}')
        loss_v = loss.get_mean_loss().raw_tensor.detach().cpu().numpy().item()
        print(f'loss (mean) {k!r}: {loss_v}')
        res[f'{k}:mean'] = loss_v
        inv_norm_factor = loss.get_inv_norm_factor()
        if isinstance(inv_norm_factor, Tensor):
            inv_norm_factor = inv_norm_factor.raw_tensor.detach().sum().cpu().numpy().item()
        print(f'inv_norm_factor {k!r}: {inv_norm_factor}')
        res[f'{k}:inv_norm_factor'] = inv_norm_factor
    return res
def _run_model_net_dict_tf(extern_data: TensorDict, get_model: rf.GetModelFunc, forward_step: rf.StepFunc) -> TensorDict:
    """
    Runs one forward step via the TF-net-dict (returnn-layers) backend;
    returns the outputs as numpy. ``extern_data`` is restored afterwards.
    """
    # Keep a copy of the raw numpy content to feed the TF placeholders and
    # to restore extern_data at the end.
    extern_data_raw = extern_data.as_raw_tensor_dict(expected_value_type=numpy.ndarray)
    extern_data.reset_content()
    rf.select_backend_returnn_layers_tf()
    rf.set_random_seed(42)
    from returnn.tf.frontend_layers.config_entry_points import get_net_dict
    config = Config({'debug_runtime_sanity_checks': True, 'extern_data': extern_data, 'get_model': get_model, 'task': 'forward', 'forward_step': forward_step})
    with tf_scope() as session, global_config_ctx(config):
        (net_dict, model) = get_net_dict(epoch=1, step=0)
        print('*** TF net dict:')
        pprint(net_dict)
        outputs_layers = rf.get_run_ctx().outputs
        print('*** outputs:', outputs_layers)
        net = TFNetwork(config=config, train_flag=False)
        net.construct_from_dict(net_dict)
        # The RF model and the constructed TF net must have exactly the same params.
        rf_params = {name.replace('.', '/'): p for (name, p) in model.named_parameters()}
        tf_params = {re.sub('/param:0$', '', p.name): p for p in net.get_params_list()}
        rf_params_not_in_tf = (set(rf_params.keys()) - set(tf_params.keys()))
        tf_params_not_in_rf = (set(tf_params.keys()) - set(rf_params.keys()))
        if (rf_params_not_in_tf or tf_params_not_in_rf):
            raise Exception(f'''params not equal:
RF params not in TF: {rf_params_not_in_tf}
TF params not in RF: {tf_params_not_in_rf}''')
        session.run(tf_compat.v1.global_variables_initializer())
        # Resolve the marked output layers to actual TF layer outputs.
        outputs_tf = TensorDict()
        for (k, v) in outputs_layers.data.items():
            v: Tensor[rfl.Layer]
            assert isinstance(v.raw_tensor, rfl.Layer)
            layer_name = v.raw_tensor.get_abs_name()
            layer = net.get_layer(layer_name)
            outputs_tf.data[k] = layer.output.copy()
        fetches = outputs_tf.as_raw_tensor_dict(expected_value_type=tf.Tensor)
        assert (set(extern_data.data.keys()) == set(net.extern_data.data.keys()))
        extern_data_tf_placeholders = net.extern_data.as_raw_tensor_dict(expected_value_type=tf.Tensor)
        assert (set(extern_data_tf_placeholders.keys()) == set(extern_data_raw.keys()))
        feed_dict = {extern_data_tf_placeholders[k]: v for (k, v) in extern_data_raw.items()}
        outputs_numpy_raw = session.run(fetches, feed_dict=feed_dict)

        def _make_numpy_array(x):
            """Converts scalars and other array-likes from session.run to numpy arrays."""
            if isinstance(x, numpy.ndarray):
                return x
            return numpy.array(x)
        outputs_numpy_raw = {k: _make_numpy_array(v) for (k, v) in outputs_numpy_raw.items()}
        outputs_numpy = outputs_tf.copy_template()
        outputs_numpy.reset_content()
        outputs_numpy.assign_from_raw_tensor_dict_(outputs_numpy_raw)
    extern_data.assign_from_raw_tensor_dict_(extern_data_raw)
    return outputs_numpy
def _dim_is_scalar_size(dim: Dim) -> bool:
    """Whether the dim has a single scalar size (static, or scalar dynamic size)."""
    if dim.size is not None:
        # Static dim.
        return True
    if not dim.dyn_size_ext:
        return False
    # Dynamic dim: scalar iff the size tensor has no dims.
    return dim.dyn_size_ext.dims == ()
def _dim_scalar_size(dim: Dim) -> int:
    """Scalar size of the dim; raises if the size is unknown."""
    if dim.size is not None:
        # Static dim.
        return dim.size
    if not dim.dyn_size_ext:
        raise Exception(f'dim {dim} has no known size')
    assert dim.dyn_size_ext.dims == ()
    return dim.dyn_size_ext.raw_tensor
def _pad_mask_zeros(x: Union[(TensorDict, Tensor, Dim)]):
    """
    Recursively zeroes out the masked (padding) regions of the given tensor(s),
    so that raw-tensor comparisons are not affected by values in the padding.
    """
    if isinstance(x, TensorDict):
        for value in x.data.values():
            _pad_mask_zeros(value)
        return
    if isinstance(x, Dim):
        if x.dyn_size_ext:
            _pad_mask_zeros(x.dyn_size_ext)
        return
    assert isinstance(x, Tensor)
    for axis, dim in enumerate(x.dims):
        _pad_mask_zeros(dim)
        if not dim.need_masking():
            continue
        mask = x.get_sequence_mask_tensor(axis)
        if not set(mask.dims).issubset(set(x.dims)):
            print(f'Warning: cannot apply mask {mask} for dim {dim} on tensor {x}.')
            continue
        mask_raw = mask.copy_compatible_to_dims_raw(x.dims)
        zero = numpy.zeros((), dtype=x.raw_tensor.dtype)
        x.raw_tensor = numpy.where(mask_raw, x.raw_tensor, zero)
def _check_dim(d_pt: Dim, d_tf: Dim):
    """Asserts that a PyTorch-side dim and a TF-side dim describe the same sizes."""
    assert isinstance(d_pt, Dim) and isinstance(d_tf, Dim)
    assert d_pt.size == d_tf.size
    scalar_pt = _dim_is_scalar_size(d_pt)
    scalar_tf = _dim_is_scalar_size(d_tf)
    assert scalar_pt == scalar_tf
    if scalar_pt:
        return
    # Both dynamic: the size tensors must have the same dims.
    assert d_pt.dyn_size_ext and d_tf.dyn_size_ext
    assert d_pt.dyn_size_ext.dims == d_tf.dyn_size_ext.dims
def _walk_dims(start: Dim, *, func=print):
    """
    Breadth-first walk over a dim and all dims reachable via its internals
    (``_extra``), calling ``func(path, dim)`` once per unique dim object.
    """
    seen_ids = set()
    pending = [((), start)]
    while pending:
        (path, dim) = pending.pop(0)
        path: Tuple[(Any, ...)]
        dim: Dim
        if id(dim) in seen_ids:
            continue
        seen_ids.add(id(dim))
        func(path, dim)
        extra = dim._extra  # intentionally walking internals here
        if not extra:
            continue
        if extra.cache_dim_math:
            for key, child in extra.cache_dim_math.items():
                key: Any
                pending.append((path + ('_extra.cache_dim_math', key), child))
        if extra.same_as:
            pending.append((path + ('_extra.same_as',), extra.same_as))
        if extra.derived_from_op:
            for idx, child in enumerate(extra.derived_from_op.inputs):
                pending.append((path + ('_extra.derived_from_op.inputs', idx), child))
        for key, child in extra.same_for_batch_ctx.items():
            key: Any
            pending.append((path + ('_extra.same_for_batch_ctx', key), child))
def test_old_format():
    """Parsing of the old-style line-based config format (untyped values)."""
    config = Config()
    config.load_file(StringIO('\n # comment\n num_inputs 3\n hidden_type forward,lstm\n '))
    for key in ('num_inputs', 'hidden_type'):
        assert_true(config.has(key))
        assert_false(config.is_typed(key))
    assert_equal(config.int('num_inputs', (- 1)), 3)
    assert_equal(config.value('hidden_type', 'x'), 'forward,lstm')
    assert_equal(config.value('hidden_type', 'x', index=0), 'forward')
    assert_equal(config.value('hidden_type', 'x', index=1), 'lstm')
    assert_equal(config.list('hidden_type', ['x']), ['forward', 'lstm'])
def test_json_format():
    """Parsing of the JSON config format (incl. ``//`` comments, typed values)."""
    config = Config()
    config.load_file(StringIO('\n {\n // comment\n "num_inputs": 3,\n "hidden_type": ["forward", "lstm"]\n }\n '))
    for key in ('num_inputs', 'hidden_type'):
        assert_true(config.has(key))
        assert_true(config.is_typed(key))
    assert_equal(config.int('num_inputs', (- 1)), 3)
    assert_equal(config.value('hidden_type', 'x'), 'forward,lstm')
    assert_equal(config.value('hidden_type', 'x', index=0), 'forward')
    assert_equal(config.value('hidden_type', 'x', index=1), 'lstm')
    assert_equal(config.list('hidden_type', ['x']), ['forward', 'lstm'])
    assert_is_instance(config.typed_value('num_inputs'), int)
    assert_is_instance(config.typed_value('hidden_type'), list)
    assert_equal(config.typed_value('hidden_type'), ['forward', 'lstm'])
def test_py_config():
    """Parsing of the ``#!rnn.py`` Python config format (typed values)."""
    config = Config()
    config.load_file(StringIO('#!rnn.py\n# comment\nnum_inputs = 3\nhidden_type = ["forward", "lstm"]\n '))
    for key in ('num_inputs', 'hidden_type'):
        assert_true(config.has(key))
        assert_true(config.is_typed(key))
    assert_equal(config.int('num_inputs', (- 1)), 3)
    assert_equal(config.value('hidden_type', 'x'), 'forward,lstm')
    assert_equal(config.value('hidden_type', 'x', index=0), 'forward')
    assert_equal(config.value('hidden_type', 'x', index=1), 'lstm')
    assert_equal(config.list('hidden_type', ['x']), ['forward', 'lstm'])
    assert_is_instance(config.typed_value('num_inputs'), int)
    assert_is_instance(config.typed_value('hidden_type'), list)
    assert_equal(config.typed_value('hidden_type'), ['forward', 'lstm'])
def test_rnn_init_config_py_global_var():
    """
    Functions defined in a config file must see config-derived globals
    (here ``task``), including values overridden on the command line.
    """
    import returnn.__main__ as rnn
    import tempfile
    with tempfile.NamedTemporaryFile(mode='w', suffix='.config', prefix='test_rnn_initConfig') as cfgfile:
        cfgfile.write('#!rnn.py\ntask = config.value("task", "train")\n\ntest_value = 42\n\ndef test_func():\n return task\n\n ')
        cfgfile.flush()
        # --task search overrides the default "train" read inside the config.
        rnn.init_config(command_line_options=[cfgfile.name, '--task', 'search'])
        assert isinstance(rnn.config, Config)
        rnn.config.typed_dict.pop('__builtins__', None)  # noise from exec'ing the config
        pprint(rnn.config.dict)
        pprint(rnn.config.typed_dict)
        assert rnn.config.has('task')
        assert rnn.config.has('test_value')
        assert rnn.config.has('test_func')
        assert_equal(rnn.config.value('task', None), 'search')
        assert rnn.config.is_typed('test_value')
        assert_equal(rnn.config.typed_value('test_value'), 42)
        assert rnn.config.is_typed('test_func')
        assert rnn.config.is_typed('task')
        test_func = rnn.config.typed_dict['test_func']
        assert callable(test_func)
        # The function must see the overridden global.
        assert_equal(test_func(), 'search')
def test_rnn_init_config_py_cmd_type():
    """
    A ``++``-style command line override must replace a typed config value,
    and config functions must observe the overridden value.
    """
    import returnn.__main__ as rnn
    import tempfile
    with tempfile.NamedTemporaryFile(mode='w', suffix='.config', prefix='test_rnn_initConfig') as cfgfile:
        cfgfile.write("#!rnn.py\nmax_seq_length = {'bpe': 75}\n\ndef test_func():\n return max_seq_length\n\n ")
        cfgfile.flush()
        # ++max_seq_length 0 replaces the dict value from the config file.
        rnn.init_config(command_line_options=[cfgfile.name, '++max_seq_length', '0'])
        assert isinstance(rnn.config, Config)
        assert rnn.config.has('max_seq_length')
        assert rnn.config.has('test_func')
        assert rnn.config.is_typed('max_seq_length')
        assert rnn.config.is_typed('test_func')
        test_func = rnn.config.typed_dict['test_func']
        assert callable(test_func)
        # The function sees the overridden (int) value, not the original dict.
        assert_equal(test_func(), 0)
def test_config_py_ext():
    """
    A pure ``.py`` config: functions defined there must see the ``config``
    object itself, and later ``config.set`` updates must be visible to them.
    """
    import tempfile
    with tempfile.NamedTemporaryFile(mode='w', suffix='.py', prefix='test_rnn_initConfig') as cfgfile:
        cfgfile.write('\ndef test_func():\n return config.value("task", "train")\n ')
        cfgfile.flush()
        config = Config()
        config.load_file(cfgfile.name)
        config.typed_dict.pop('__builtins__', None)  # noise from exec'ing the config
        pprint(config.dict)
        pprint(config.typed_dict)
        assert config.has('test_func')
        assert config.is_typed('test_func')
        test_func = config.typed_dict['test_func']
        assert callable(test_func)
        assert_equal(test_func(), 'train')
        # Later updates to the config must be visible inside the function.
        config.set('task', 'search')
        assert_equal(test_func(), 'search')
def test_config_py_old_returnn_imports():
    """
    Old config files import RETURNN modules under their pre-package names
    (``TFUtil``, ``Pretrain``, ``Util``, ...); the compat import layer must
    map them onto the new ``returnn.*`` modules.
    """
    import tempfile
    with tempfile.NamedTemporaryFile(mode='w', suffix='.py', prefix='test_rnn_initConfig') as cfgfile:
        cfgfile.write('\n# These are some common imports found in older config files.\nfrom Pretrain import WrapEpochValue\nfrom TFUtil import where_bc\nimport TFUtil\nimport returnn.TFUtil\n\nimport TFHorovod\nhvd = TFHorovod.get_ctx(config=config) # should return None if no Horovod context\n\nfrom returnn.Util import describe_returnn_version\nreturnn_version = describe_returnn_version()\n\ntf_version_tuple = returnn.TFUtil.tf_version_tuple()\n\n ')
        cfgfile.flush()
        config = Config()
        config.load_file(cfgfile.name)
        config.typed_dict.pop('__builtins__', None)  # noise from exec'ing the config
        pprint(config.typed_dict)
        import returnn.util.basic as util
        import returnn.tf.util.basic as tf_util
        # The old names must resolve to the very same objects as the new modules.
        assert (config.typed_dict['where_bc'] is tf_util.where_bc)
        assert (config.typed_dict['TFUtil'].where_bc is tf_util.where_bc)
        assert (config.typed_dict['hvd'] is None)
        assert (config.typed_dict['tf_version_tuple'] == tf_util.tf_version_tuple())
        assert (config.typed_dict['returnn_version'] == util.describe_returnn_version())
        assert (config.typed_dict['returnn'].TFUtil is tf_util)
def test_pickle_config():
    """
    A whole Config object, including functions and classes defined inside
    the config file, must be picklable and unpickle to working equivalents.
    """
    import pickle
    import io
    config = Config()
    config.load_file(StringIO(textwrap.dedent(' #!returnn.py\n\n def my_custom_func():\n return 42\n\n class CustomClass:\n x = 43\n\n def __init__(self):\n super().__init__()\n CustomClass.x = 44\n\n def get_value(self):\n return CustomClass.x\n ')))
    f = config.typed_dict['my_custom_func']
    # Instantiating bumps CustomClass.x from 43 to 44 (see config code above).
    obj = config.typed_dict['CustomClass']()
    sio = io.BytesIO()
    # NOTE(review): uses the pure-Python pickler (pickle._Pickler), presumably
    # because the custom reducers need it — confirm against the Config pickling code.
    p = pickle._Pickler(sio)
    with global_config_ctx(config):
        p.dump(config)
    config_ = pickle.loads(sio.getvalue())
    f_ = config_.typed_dict['my_custom_func']
    # The restored function is a distinct object with the same behavior.
    assert (f is not f_)
    assert (f() == f_() == 42)
    obj_ = config_.typed_dict['CustomClass']()
    # The restored class is a distinct type; class state is reproduced.
    assert (type(obj) is not type(obj_))
    assert (obj.get_value() == obj_.get_value() == 44)
def test_config_pickle_function():
    """
    Within the global-config context, a config-defined function pickles by
    reference and unpickles to the very same object.
    """
    import pickle
    config = Config()
    config.load_file(StringIO(textwrap.dedent(' #!returnn.py\n\n def my_custom_func():\n return 42\n ')))
    with global_config_ctx(config):
        func = config.typed_dict['my_custom_func']
        func_restored = pickle.loads(pickle.dumps(func))
        assert func_restored is func
        assert func_restored() == 42
def _config_pickle_proc_main(config, f):
    """
    Subprocess entry point: checks the unpickled config became the global
    config, then runs the given callable.
    """
    assert isinstance(config, Config)
    assert callable(f)
    assert get_global_config() is config
    f()
def test_config_pickle_function_multi_proc():
    """
    A config function pickled to a spawned subprocess must still work there;
    the exit code 42 (set via sys.exit inside the function) proves it ran.
    """
    import multiprocessing
    # "spawn" forces actual pickling of the args (no fork-inherited state).
    _mp = multiprocessing.get_context('spawn')
    config = Config()
    config.load_file(StringIO(textwrap.dedent(' #!returnn.py\n\n def my_custom_func():\n import sys\n sys.exit(42)\n ')))
    with global_config_ctx(config):
        f = config.typed_dict['my_custom_func']
        proc = _mp.Process(target=_config_pickle_proc_main, args=(config, f))
        proc.start()
        proc.join()
        assert (proc.exitcode == 42)
def dummy_iter_dataset(dataset: Dataset) -> List[DatasetSeq]:
    """
    Iterates through the whole dataset (epoch 1) and collects all seqs.

    :param Dataset dataset:
    :return: seqs
    """
    dataset.init_seq_order(epoch=1)
    data_keys = dataset.get_data_keys()
    collected = []
    seq_idx = 0
    while dataset.is_less_than_num_seqs(seq_idx):
        dataset.load_seqs(seq_idx, seq_idx + 1)
        features = {key: dataset.get_data(seq_idx=seq_idx, key=key) for key in data_keys}
        tag = dataset.get_tag(seq_idx)
        collected.append(DatasetSeq(seq_idx=seq_idx, seq_tag=tag, features=features))
        seq_idx += 1
    print('Iterated through %r, num seqs %i' % (dataset, seq_idx))
    return collected
def compare_dataset_seqs(seqs1: List[DatasetSeq], seqs2: List[DatasetSeq]):
    """
    Asserts that both seq lists are equal: same idx, tag, and feature arrays.

    :param list[DatasetSeq] seqs1:
    :param list[DatasetSeq] seqs2:
    """
    assert len(seqs1) == len(seqs2)
    for i, (seq_a, seq_b) in enumerate(zip(seqs1, seqs2)):
        assert seq_a.seq_idx == seq_b.seq_idx == i
        assert seq_a.seq_tag == seq_b.seq_tag, f'seq1 tag {seq_a.seq_tag!r} != seq2 tag {seq_b.seq_tag!r} for seq idx {i}'
        assert set(seq_a.features.keys()) == set(seq_b.features.keys())
        for key, feat_a in seq_a.features.items():
            feat_b = seq_b.features[key]
            assert feat_a.shape == feat_b.shape
            assert feat_a.dtype == feat_b.dtype
            assert (feat_a == feat_b).all()
def test_Task12AXDataset_deepcopy():
    """The dataset must survive deepcopy and still iterate exactly all seqs."""
    from copy import deepcopy
    dataset = deepcopy(Task12AXDataset(num_seqs=10))
    dataset.init_seq_order(1)
    num = dataset.num_seqs
    for seq_idx in range(num):
        dataset.load_seqs(seq_idx, seq_idx + 1)
        print(dataset.get_data(seq_idx, 'classes'))
    assert not dataset.is_less_than_num_seqs(num)
def test_Task12AXDataset_inf():
    """With num_seqs=inf, the dataset never runs out of seqs."""
    dataset = Task12AXDataset(num_seqs=float('inf'))
    dataset.init_seq_order(1)
    num = 10
    for seq_idx in range(num):
        dataset.load_seqs(seq_idx, seq_idx + 1)
        print(dataset.get_data(seq_idx, 'classes'))
    assert dataset.is_less_than_num_seqs(num)
def test_Task12AXDataset_random():
    """Random seq ordering must still yield exactly num_seqs seqs."""
    dataset = Task12AXDataset(num_seqs=10, seq_ordering='random')
    dataset.init_seq_order(1)
    num = dataset.num_seqs
    for seq_idx in range(num):
        dataset.load_seqs(seq_idx, seq_idx + 1)
        print(dataset.get_data(seq_idx, 'classes'))
    assert not dataset.is_less_than_num_seqs(num)
def test_generate_batches():
    """Batch generation for a feed-forward (non-recurrent) net must terminate."""
    dataset = DummyDataset(input_dim=2, output_dim=3, num_seqs=20)
    dataset.init_seq_order(1)
    batches = dataset.generate_batches(recurrent_net=False, max_seqs=2, batch_size=5)
    while True:
        if not batches.has_more():
            break
        batches.peek_next_n(1)
        batches.advance(1)
def test_generate_batches_recurrent():
    """Batch generation for a recurrent net must terminate."""
    dataset = DummyDataset(input_dim=2, output_dim=3, num_seqs=20)
    dataset.init_seq_order(1)
    batches = dataset.generate_batches(recurrent_net=True, max_seqs=2, batch_size=5)
    while True:
        if not batches.has_more():
            break
        batches.peek_next_n(1)
        batches.advance(1)
def test_iterate_seqs_no_chunking_1():
    """Without chunking, iterate_seqs yields one full-length entry per seq."""
    dataset = DummyDataset(input_dim=2, output_dim=3, num_seqs=2, seq_len=11)
    dataset.chunk_step = 0
    dataset.chunk_size = 0
    dataset.init_seq_order(1)
    chunks = list(dataset.iterate_seqs())
    expected = [(0, 0, 11), (1, 0, 11)]
    assert_equal(len(chunks), len(expected))
    for got, exp in zip(chunks, expected):
        assert_equal(got, exp)
def test_iterate_seqs_chunking_1():
    """Chunking with size 10 / step 5 over two seqs of length 11."""
    dataset = DummyDataset(input_dim=2, output_dim=3, num_seqs=2, seq_len=11)
    dataset.chunk_step = 5
    dataset.chunk_size = 10
    dataset.init_seq_order(1)
    chunks = list(dataset.iterate_seqs())
    for chunk in chunks:
        print(chunk)
    expected = [(0, 0, 10), (0, 5, 11), (0, 10, 11), (1, 0, 10), (1, 5, 11), (1, 10, 11)]
    assert_equal(len(chunks), len(expected))
    for got, exp in zip(chunks, expected):
        assert_equal(got, exp)
def test_iterate_seqs_chunking_varying_sequence_length():
    """Per-key chunk size/step keeps 'data' and 'classes' chunks in sync (2:1 length ratio)."""
    dataset = DummyDatasetMultipleSequenceLength(
        input_dim=2, output_dim=3, num_seqs=2, seq_len={'data': 24, 'classes': 12})
    dataset.chunk_size = {'data': 12, 'classes': 6}
    dataset.chunk_step = {'data': 6, 'classes': 3}
    dataset.init_seq_order(1)
    chunks = list(dataset.iterate_seqs())
    for chunk in chunks:
        print(chunk)
    # Per seq: (data_start, classes_start, data_end, classes_end); step 6/3, clipped at 24/12.
    per_seq = [(0, 0, 12, 6), (6, 3, 18, 9), (12, 6, 24, 12), (18, 9, 24, 12)]
    expected = []
    for seq_idx in (0, 1):
        for d0, c0, d1, c1 in per_seq:
            expected.append((
                seq_idx,
                NumbersDict({'data': d0, 'classes': c0}),
                NumbersDict({'data': d1, 'classes': c1})))
    assert_equal(len(chunks), len(expected))
    for got, exp in zip(chunks, expected):
        assert_equal(got, exp)
def test_iterate_seqs_custom_chunking():
    """A custom chunking func reproducing chunk_size=10 / chunk_step=5 behavior."""
    default_key = 'data'
    chunk_step = 5
    chunk_size = 10

    def _custom_chunking_func(dataset, seq_idx_start, **_kwargs):
        # Mirrors the default chunking: fixed-size windows advanced by chunk_step,
        # with the chunk end clipped at the seq length.
        assert isinstance(dataset, Dataset)
        idx = seq_idx_start
        while dataset.is_less_than_num_seqs(idx):
            seq_len = dataset.get_seq_length(idx)
            pos = NumbersDict.constant_like(0, numbers_dict=seq_len)
            while seq_len[default_key] > pos[default_key]:
                yield (idx, NumbersDict(pos), NumbersDict.min([pos + chunk_size, seq_len]))
                pos += chunk_step
            idx += 1

    ds = DummyDataset(input_dim=2, output_dim=3, num_seqs=2, seq_len=11)
    ds.custom_chunking_func = _custom_chunking_func
    ds.init_seq_order(1)
    chunks = list(ds.iterate_seqs())
    for chunk in chunks:
        print(chunk)
    expected = [
        (0, 0, 10), (0, 5, 11), (0, 10, 11),
        (1, 0, 10), (1, 5, 11), (1, 10, 11),
    ]
    assert_equal(len(chunks), len(expected))
    for got, exp in zip(chunks, expected):
        assert_equal(got, exp)
def test_batches_recurrent_1():
    """Recurrent batches with chunking 10/5 and max_seqs=1: one chunk per batch."""
    ds = DummyDataset(input_dim=2, output_dim=3, num_seqs=2, seq_len=11)
    ds.init_seq_order(1)
    ds.chunk_size = 10
    ds.chunk_step = 5
    gen = ds.generate_batches(recurrent_net=True, max_seqs=1, batch_size=20)
    all_batches = []  # type: list[Batch]
    while gen.has_more():
        (batch,) = gen.peek_next_n(1)
        assert_is_instance(batch, Batch)
        print('batch:', batch)
        print('batch seqs:', batch.seqs)
        all_batches.append(batch)
        gen.advance(1)
    assert_equal(len(all_batches), 6)
    # Only the first 4 batches are checked in detail:
    # (start_seq, end_seq, seq_idx, seq_start_frame, seq_end_frame, frame_length)
    expected = [
        (0, 1, 0, 0, 10, 10),
        (0, 1, 0, 5, 11, 6),
        (0, 1, 0, 10, 11, 1),
        (1, 2, 1, 0, 10, 10),
    ]
    for batch, (start_seq, end_seq, seq_idx, start_frame, end_frame, frame_len) in zip(all_batches, expected):
        assert_equal(batch.start_seq, start_seq)
        assert_equal(batch.end_seq, end_seq)
        assert_equal(len(batch.seqs), 1)
        seq = batch.seqs[0]
        assert_equal(seq.seq_idx, seq_idx)
        assert_equal(seq.seq_start_frame, start_frame)
        assert_equal(seq.seq_end_frame, end_frame)
        assert_equal(seq.frame_length, frame_len)
        assert_equal(seq.batch_slice, 0)
        assert_equal(seq.batch_frame_offset, 0)
def test_batches_non_recurrent_1():
    """Non-recurrent batching (frame-based, batch_size=5) can pack two seqs into one batch."""
    ds = DummyDataset(input_dim=2, output_dim=3, num_seqs=2, seq_len=11)
    ds.init_seq_order(1)
    gen = ds.generate_batches(recurrent_net=False, max_seqs=2, batch_size=5)
    all_batches = []
    while gen.has_more():
        (batch,) = gen.peek_next_n(1)
        assert_is_instance(batch, Batch)
        print('batch:', batch)
        print('batch seqs:', batch.seqs)
        all_batches.append(batch)
        gen.advance(1)
    # Per batch: (start_seq, end_seq, [(seq_idx, start, end, length, slice, offset), ...]).
    # Batch 2 spans the tail of seq 0 plus the head of seq 1.
    expected = [
        (0, 1, [(0, 0, 5, 5, 0, 0)]),
        (0, 1, [(0, 5, 10, 5, 0, 0)]),
        (0, 2, [(0, 10, 11, 1, 0, 0), (1, 0, 4, 4, 0, 1)]),
        (1, 2, [(1, 4, 9, 5, 0, 0)]),
        (1, 2, [(1, 9, 11, 2, 0, 0)]),
    ]
    assert_equal(len(all_batches), len(expected))
    for batch, (start_seq, end_seq, exp_seqs) in zip(all_batches, expected):
        assert_equal(batch.start_seq, start_seq)
        assert_equal(batch.end_seq, end_seq)
        assert_equal(len(batch.seqs), len(exp_seqs))
        for seq, (seq_idx, start, end, length, slice_, offset) in zip(batch.seqs, exp_seqs):
            assert_equal(seq.seq_idx, seq_idx)
            assert_equal(seq.seq_start_frame, start)
            assert_equal(seq.seq_end_frame, end)
            assert_equal(seq.frame_length, length)
            assert_equal(seq.batch_slice, slice_)
            assert_equal(seq.batch_frame_offset, offset)
def test_batches_context_window():
    """With a context window, 'data' frames get left/right context while 'classes' do not."""
    context_window = 2
    ctx_lr = context_window - 1
    ctx_left = ctx_lr // 2
    ctx_right = ctx_lr - ctx_left
    ds = DummyDataset(input_dim=2, output_dim=3, num_seqs=1, seq_len=11, context_window=context_window)
    ds.init_seq_order(1)
    ds.chunk_size = 5
    ds.chunk_step = 5
    gen = ds.generate_batches(recurrent_net=True, max_seqs=1, batch_size=20)
    all_batches = []
    while gen.has_more():
        (batch,) = gen.peek_next_n(1)
        assert_is_instance(batch, Batch)
        print('batch:', batch)
        print('batch seqs:', batch.seqs)
        all_batches.append(batch)
        gen.advance(1)
    assert_equal(len(all_batches), 3)
    # (classes_start, classes_end) per chunk; 'data' additionally carries the context padding.
    expected = [(0, 5), (5, 10), (10, 11)]
    for batch, (cls_start, cls_end) in zip(all_batches, expected):
        assert isinstance(batch, Batch)
        assert_equal(batch.start_seq, 0)
        assert_equal(batch.end_seq, 1)
        assert_equal(len(batch.seqs), 1)
        seq = batch.seqs[0]
        assert_equal(seq.seq_idx, 0)
        assert_equal(seq.seq_start_frame['classes'], cls_start)
        assert_equal(seq.seq_end_frame['classes'], cls_end)
        assert_equal(seq.frame_length['classes'], cls_end - cls_start)
        assert_equal(seq.seq_start_frame['data'], cls_start - ctx_left)
        assert_equal(seq.seq_end_frame['data'], cls_end + ctx_right)
        assert_equal(seq.frame_length['data'], (cls_end - cls_start) + ctx_lr)
        assert_equal(seq.batch_slice, 0)
        assert_equal(seq.batch_frame_offset, 0)
def test_task12ax_window():
    """window=3 concatenates each frame with its neighbors, zero-padded at the edges."""
    from returnn.datasets.generating import Task12AXDataset

    window = 3
    dataset_kwargs = dict(num_seqs=10)
    ds_plain = Task12AXDataset(**dataset_kwargs)
    ds_window = Task12AXDataset(window=window, **dataset_kwargs)
    input_dim = ds_plain.num_inputs
    for ds in (ds_plain, ds_window):
        ds.initialize()
        ds.init_seq_order(epoch=1)
        ds.load_seqs(0, 1)
    assert_equal(ds_plain.get_data_dim('data'), input_dim)
    assert_equal(ds_window.get_data_dim('data'), input_dim * window)
    data1 = ds_plain.get_data(0, 'data')
    data2 = ds_window.get_data(0, 'data')
    seq_len = data1.shape[0]
    assert_equal(data1.shape, (seq_len, input_dim))
    assert_equal(data2.shape, (seq_len, window * input_dim))
    data2a = data2.reshape(seq_len, window, input_dim)
    print('data1:')
    print(data1)
    print('data2:')
    print(data2)
    print('data1[0]:')
    print(data1[0])
    print('data2[0]:')
    print(data2[0])
    print('data2a[0,0]:')
    print(data2a[0, 0])
    zero_frame = [0] * input_dim
    assert_equal(list(data2a[0, 0]), zero_frame)  # left zero-padding at seq start
    assert_equal(list(data2a[0, 1]), list(data1[0]))
    assert_equal(list(data2a[0, 2]), list(data1[1]))
    assert_equal(list(data2a[1, 0]), list(data1[0]))
    assert_equal(list(data2a[1, 1]), list(data1[1]))
    assert_equal(list(data2a[1, 2]), list(data1[2]))
    assert_equal(list(data2a[-1, 2]), zero_frame)  # right zero-padding at seq end
def test_get_seq_order():
    """Every ordering yields a permutation; partitions concatenate to the same set."""
    dataset = Dataset()
    num_seqs = 30

    def get_seq_len(i):
        return (i ** 2) % 17

    orderings = [
        'default', 'default_every_n:5', 'sorted', 'sorted_reverse', 'random:3',
        'laplace:3', 'laplace:.10', 'sort_bin_shuffle:3', 'sort_bin_shuffle_x2:.10']
    for seq_ordering in orderings:
        dataset.seq_ordering = seq_ordering
        # Without partitioning: one epoch covers each seq exactly once.
        dataset.partition_epoch = 1
        epoch = 3
        seq_index = dataset.get_seq_order_for_epoch(epoch, num_seqs, get_seq_len)
        assert isinstance(seq_index, (list, range, numpy.ndarray))
        assert len(set(seq_index)) == num_seqs
        # With partitioning: the union over all partitions covers the same seqs.
        partition_epoch = 4
        dataset.partition_epoch = partition_epoch
        all_partitions_seq_index = []
        for epoch in range(1, partition_epoch + 1):
            all_partitions_seq_index += list(dataset.get_seq_order_for_epoch(epoch, num_seqs, get_seq_len))
        assert set(all_partitions_seq_index) == set(seq_index)
@contextlib.contextmanager
def create_ogg_zip_txt_only_dataset_opts(*, text: str = 'hello world', seq_tag: str = 'sequence0.wav'):
    """create OggZipDataset"""
    import zipfile

    with tempfile.NamedTemporaryFile(suffix='.zip') as tmp_zip_file, \
            tempfile.NamedTemporaryFile(suffix='.txt') as tmp_vocab_file:
        # The zip must contain a meta txt named like the zip itself (minus ".zip").
        meta = [{'text': text, 'duration': 2.3, 'file': seq_tag}]
        with zipfile.ZipFile(tmp_zip_file.name, 'w') as zip_file:
            zip_file.writestr(os.path.basename(tmp_zip_file.name)[:-4] + '.txt', repr(meta))
        # Char vocab: '.', ' ', '@' plus a..z mapped to 3..28.
        vocab = {'@': 2, ' ': 1, '.': 0}
        for k in range(26):
            vocab[chr(ord('a') + k)] = k + 3
        tmp_vocab_file.write(repr(vocab).encode('utf8'))
        tmp_vocab_file.flush()
        yield {
            'class': 'OggZipDataset',
            'path': tmp_zip_file.name,
            'audio': None,
            'targets': {'class': 'CharacterTargets', 'vocab_file': tmp_vocab_file.name, 'seq_postfix': [0]},
        }
@contextlib.contextmanager
def create_ogg_zip_txt_only_dataset(*, text: str = 'hello world', seq_tag: str = 'sequence0.wav', num_seqs: int = 1):
    """create OggZipDataset"""
    # NOTE(review): num_seqs is currently unused here; kept for interface compatibility.
    from returnn.datasets.audio import OggZipDataset

    with create_ogg_zip_txt_only_dataset_opts(text=text, seq_tag=seq_tag) as opts:
        ds = init_dataset(opts)
        assert isinstance(ds, OggZipDataset)
        yield ds
@contextlib.contextmanager
def create_ogg_zip_txt_only_dataset_mult_seqs(*, seed: int = 1, num_seqs: int = 100, max_seq_len: int = 100):
    """create OggZipDataset"""
    import zipfile
    from returnn.datasets.audio import OggZipDataset

    rnd = numpy.random.RandomState(seed)
    with tempfile.NamedTemporaryFile(suffix='.zip') as tmp_zip_file, \
            tempfile.NamedTemporaryFile(suffix='.txt') as tmp_vocab_file:
        # Char vocab: '.', ' ', '@' plus a..z mapped to 3..28.
        vocab = {'@': 2, ' ': 1, '.': 0}
        for k in range(26):
            vocab[chr(ord('a') + k)] = k + 3
        tmp_vocab_file.write(repr(vocab).encode('utf8'))
        tmp_vocab_file.flush()
        # Random texts over the vocab; RNG call order matches the original (randint, choices, uniform).
        seqs = []
        for i in range(num_seqs):
            text = ''.join(rnd.choice(list(vocab.keys())) for _ in range(rnd.randint(1, max_seq_len + 1)))
            seqs.append({'text': text, 'duration': rnd.uniform(1.0, 5.0), 'file': f'seq{i}.wav'})
        with zipfile.ZipFile(tmp_zip_file.name, 'w') as zip_file:
            zip_file.writestr(os.path.basename(tmp_zip_file.name)[:-4] + '.txt', repr(seqs))
        opts = {
            'class': 'OggZipDataset',
            'path': tmp_zip_file.name,
            'audio': None,
            'targets': {'class': 'CharacterTargets', 'vocab_file': tmp_vocab_file.name, 'seq_postfix': []},
        }
        ds = init_dataset(opts)
        assert isinstance(ds, OggZipDataset)
        yield ds
def test_OggZipDataset():
    """Txt-only OggZipDataset exposes 'raw' (str scalar), 'orth' (utf8 bytes) and 'classes' (label ids)."""
    from returnn.datasets.audio import OggZipDataset

    _demo_txt = 'some utterance text'
    with create_ogg_zip_txt_only_dataset(text=_demo_txt) as dataset:
        assert isinstance(dataset, OggZipDataset)
        assert dataset.have_seqs()
        dataset.init_seq_order(epoch=1)
        dataset.load_seqs(0, 1)
        raw = dataset.get_data(0, 'raw')
        orth = dataset.get_data(0, 'orth')
        classes = dataset.get_data(0, 'classes')
        print('raw:', raw)
        print('orth:', orth)
        print('classes:', classes)
        # 'raw' is a 0-dim numpy str array holding the original text.
        assert isinstance(raw, numpy.ndarray) and raw.dtype.name.startswith('str') and (raw.shape == ())
        raw_ = raw.item()
        assert isinstance(raw_, str) and (raw_ == _demo_txt)
        # 'orth' is the utf8 byte sequence of the text.
        assert isinstance(orth, numpy.ndarray) and (orth.dtype == numpy.uint8) and (orth.ndim == 1)
        # Fix: ndarray.tostring() was deprecated and removed in NumPy 2.0; tobytes() is the replacement.
        orth_ = orth.tobytes()
        assert orth_.decode('utf8') == _demo_txt
        # 'classes' are the vocab label ids; seq_postfix=[0] appends '.'.
        assert isinstance(classes, numpy.ndarray) and (classes.dtype == numpy.int32) and (classes.ndim == 1)
        classes_ = ''.join([dataset.targets.id_to_label(c) for c in classes])
        assert classes_ == (_demo_txt + '.')
def test_MetaDataset():
    """MetaDataset maps 'classes' from a sub OggZip dataset via data_map."""
    _demo_txt = 'some utterance text'
    with create_ogg_zip_txt_only_dataset_opts(text=_demo_txt) as sub_ds_opts:
        meta_ds_opts = {
            'class': 'MetaDataset',
            'datasets': {'sub': sub_ds_opts},
            'data_map': {'classes': ('sub', 'classes')},
            'seq_order_control_dataset': 'sub',
        }
        ds = init_dataset(meta_ds_opts)
        assert ds.have_seqs()
        ds.init_seq_order(epoch=1)
        ds.load_seqs(0, 1)
        classes = ds.get_data(0, 'classes')
        print('classes:', classes)
        assert isinstance(classes, numpy.ndarray)
        assert classes.dtype == numpy.int32
        assert classes.ndim == 1
        # +1: the sub dataset's seq_postfix appends one label after the text.
        assert len(classes) == len(_demo_txt) + 1
def test_MapDatasetWrapper():
    """MapDatasetWrapper exposes a MapDatasetBase subclass as a regular Dataset."""
    from returnn.datasets.map import MapDatasetBase

    class _MyCustomMapDataset(MapDatasetBase):
        def __init__(self):
            super().__init__(data_types={'data': {'shape': (None, 3)}})

        def __len__(self):
            # A single sequence.
            return 1

        def __getitem__(self, item):
            return {'data': numpy.zeros((5, 3))}

    ds = init_dataset({'class': 'MapDatasetWrapper', 'map_dataset': _MyCustomMapDataset})
    (res,) = dummy_iter_dataset(ds)
    assert isinstance(res, DatasetSeq)
    assert res.features['data'].shape == (5, 3)
def test_init():
    """DummyDataset reports its dims and num_seqs as configured."""
    ds = DummyDataset(input_dim=2, output_dim=3, num_seqs=4)
    assert_equal(ds.num_inputs, 2)
    assert_equal(ds.num_outputs, {'classes': (3, 1), 'data': (2, 2)})
    assert_equal(ds.num_seqs, 4)
def test_load_seqs():
    """Successive load_seqs ranges work after init_seq_order."""
    ds = DummyDataset(input_dim=2, output_dim=3, num_seqs=4)
    ds.init_seq_order(epoch=1)
    ds.load_seqs(0, 1)
    ds.load_seqs(1, 3)
@unittest.skipIf(not os.path.exists('/tmp/enwik8.zip'), 'we will not trigger the download')
def test_Enwik8Corpus_batch_num_seqs():
    """With batch_num_seqs, the first batch slice reproduces the plain data stream."""
    ds = Enwik8Corpus(path='/tmp', subset='validation', seq_len=13)
    ds.init_seq_order(epoch=17)
    data = b''
    idx = 0
    while ds.is_less_than_num_seqs(idx) and idx < 100:
        ds.load_seqs(idx, idx + 1)
        data += bytes(ds.get_data(idx, 'data'))
        idx += 1
    batch_size = 23
    batch_data = [b''] * batch_size
    ds = Enwik8Corpus(path='/tmp', subset='validation', seq_len=9, batch_num_seqs=batch_size)
    ds.init_seq_order(epoch=31)
    idx = 0
    while ds.is_less_than_num_seqs(idx) and idx < 100:
        ds.load_seqs(idx, idx + 1)
        batch_data[idx % batch_size] += bytes(ds.get_data(idx, 'data'))
        idx += 1
    # Slice 0 of the batched stream must be a prefix of the unbatched stream.
    assert data.startswith(batch_data[0])
def test_StaticDataset_custom_keys():
    """StaticDataset infers data keys and dims from the given dict entries."""
    dataset = StaticDataset([{'source': numpy.array([1, 2, 3]), 'target': numpy.array([3, 4, 5, 6, 7])}])
    dataset.init_seq_order(epoch=1)
    assert dataset.num_seqs == 1
    assert_equal(dataset.get_data_keys(), ['source', 'target'])
    for key in ('source', 'target'):
        assert_equal(dataset.num_outputs[key][1], 1)
    dataset.load_seqs(0, 1)
    assert_equal(list(dataset.get_data(0, 'source')), [1, 2, 3])
    assert_equal(list(dataset.get_data(0, 'target')), [3, 4, 5, 6, 7])
def test_StaticDataset_custom_keys_with_dims():
    """StaticDataset accepts explicit output_dim per custom data key."""
    dataset = StaticDataset(
        data=[{'source': numpy.array([1, 2, 3]), 'target': numpy.array([3, 4, 5, 6, 7])}],
        output_dim={'source': [5, 1], 'target': [10, 1]})
    dataset.init_seq_order(epoch=1)
    assert dataset.num_seqs == 1
    assert_equal(dataset.get_data_keys(), ['source', 'target'])
    for key in ('source', 'target'):
        assert_equal(dataset.num_outputs[key][1], 1)
    dataset.load_seqs(0, 1)
    assert_equal(list(dataset.get_data(0, 'source')), [1, 2, 3])
    assert_equal(list(dataset.get_data(0, 'target')), [3, 4, 5, 6, 7])
def test_StaticDataset_utf8():
    """StaticDataset round-trips a UTF-8 byte sequence and serializes it back to str."""
    s = 'wër'
    print('some unicode str:', s, 'repr:', repr(s), 'type:', type(s), 'len:', len(s))
    assert len(s) == 3
    # Fix: removed the dead Python-2 branch (PY3 check / `unicode`); this file is Python-3 only
    # (it already uses f-strings elsewhere). Iterating utf8 bytes yields ints directly on py3.
    s_byte_list = list(s.encode('utf8'))
    print('utf8 byte list:', s_byte_list)
    # 'ë' encodes to 2 bytes, so 3 chars -> 4 bytes.
    assert len(s_byte_list) == 4 > 3
    raw = numpy.array(s_byte_list, dtype='uint8')
    assert_equal(raw.tolist(), [119, 195, 171, 114])
    data = StaticDataset([{'data': raw}], output_dim={'data': (255, 1)})
    if 'data' not in data.labels:
        data.labels['data'] = [chr(i) for i in range(255)]
    data.init_seq_order(epoch=1)
    data.load_seqs(0, 1)
    raw_ = data.get_data(seq_idx=0, key='data')
    assert_equal(raw.tolist(), raw_.tolist())
    assert data.can_serialize_data(key='data')
    s_serialized = data.serialize_data(key='data', data=raw)
    print('serialized:', s_serialized, 'repr:', repr(s_serialized), 'type:', type(s_serialized))
    assert_equal(s, s_serialized)
def test_ConcatSeqsDataset():
    """ConcatSeqsDataset concatenates sub-dataset seqs per a ';'-joined seq list."""
    num_seqs = 2
    seq_len = 3
    sub_dataset = StaticDataset([{'data': numpy.array(range(1, seq_len + 1))}] * num_seqs)
    from returnn.datasets.meta import ConcatSeqsDataset
    import tempfile

    seq_list_f = tempfile.NamedTemporaryFile(mode='w', prefix='seq-list', suffix='.txt')
    seq_len_f = tempfile.NamedTemporaryFile(mode='w', prefix='seq-len', suffix='.txt')
    with seq_list_f, seq_len_f:
        # seq-len file is a Python dict literal; seq-list has singles then pairs.
        seq_len_f.write('{\n')
        for i in range(num_seqs):
            seq_list_f.write('seq-%i\n' % i)
            seq_len_f.write("'seq-%i': %i,\n" % (i, seq_len))
        seq_len_f.write('}\n')
        for i in range(0, num_seqs, 2):
            seq_list_f.write('seq-%i;seq-%i\n' % (i, i + 1))
        seq_list_f.flush()
        seq_len_f.flush()
        dataset = ConcatSeqsDataset(dataset=sub_dataset, seq_list_file=seq_list_f.name, seq_len_file=seq_len_f.name)
        dataset.init_seq_order(epoch=1)
        concat_num_seqs = num_seqs + num_seqs // 2
        dataset.load_seqs(0, concat_num_seqs)
        assert dataset.num_seqs == concat_num_seqs == 3
        for seq_idx, expected in enumerate([[1, 2, 3], [1, 2, 3], [1, 2, 3, 1, 2, 3]]):
            assert_equal(dataset.get_data(seq_idx, 'data').tolist(), expected)
def test_ConcatSeqsDataset_repeat_in_between_last_frame_up_to_multiple_of():
    """Each in-between sub-seq's last frame is repeated up to a multiple of 5."""
    sub_dataset = StaticDataset([{'data': numpy.array([1, 2])}, {'data': numpy.array([1, 2, 3])}])
    from returnn.datasets.meta import ConcatSeqsDataset
    import tempfile

    seq_list_f = tempfile.NamedTemporaryFile(mode='w', prefix='seq-list', suffix='.txt')
    seq_len_f = tempfile.NamedTemporaryFile(mode='w', prefix='seq-len', suffix='.txt')
    with seq_list_f, seq_len_f:
        seq_len_f.write('%r\n' % ({'seq-0': 2, 'seq-1': 3},))
        seq_len_f.flush()
        for line in ('seq-0', 'seq-1', 'seq-0;seq-1;seq-1'):
            seq_list_f.write(line + '\n')
        seq_list_f.flush()
        concat_num_seqs = 3
        dataset = ConcatSeqsDataset(
            dataset=sub_dataset,
            repeat_in_between_last_frame_up_to_multiple_of={'data': 5},
            seq_list_file=seq_list_f.name,
            seq_len_file=seq_len_f.name)
        dataset.init_seq_order(epoch=1)
        dataset.load_seqs(0, concat_num_seqs)
        assert dataset.num_seqs == concat_num_seqs == 3
        assert_equal(dataset.get_data(0, 'data').tolist(), [1, 2])
        assert_equal(dataset.get_data(1, 'data').tolist(), [1, 2, 3])
        # seq-0 padded 2->5 by repeating '2', first seq-1 padded 3->5 by repeating '3'.
        assert_equal(dataset.get_data(2, 'data').tolist(), [1, 2, 2, 2, 2, 1, 2, 3, 3, 3, 1, 2, 3])
def test_BytePairEncoding_unicode():
    """BPE with a non-ASCII vocab: label lookups and merges work for unicode symbols."""
    bpe = BytePairEncoding(
        bpe_file=('%s/bpe-unicode-demo.codes' % my_dir),
        vocab_file=('%s/bpe-unicode-demo.vocab' % my_dir),
        unknown_label='<unk>')
    assert_equal(bpe.num_labels, 189)
    for label_id, label in [(5, 'z'), (6, 'å')]:
        assert_equal(bpe.id_to_label(label_id), label)
        assert_equal(bpe.label_to_id(label), label_id)
    assert_equal(bpe.bpe._bpe_codes[('n', 'd</w>')], 1)
    assert_equal(bpe.bpe._bpe_codes[('à', 'nd</w>')], 2)

    def get_bpe_seq(text):
        """
        :param str text:
        :rtype: str
        """
        label_ids = bpe.get_seq(text)
        res = ' '.join(bpe.id_to_label(i) for i in label_ids)
        print('%r -> %r' % (text, res))
        return res

    assert_equal(get_bpe_seq('kod'), 'k@@ o@@ d')
    # Applying BPE twice on the same input must be deterministic.
    assert_equal(get_bpe_seq('kod'), 'k@@ o@@ d')
    assert_equal(get_bpe_seq('råt'), 'råt')
    assert_equal(
        get_bpe_seq('råt råt iz ďër iz ďër ám àn iz ďër ë låk ë kod áv dres wër yù wêk dù ďë àsk'),
        'råt råt iz ďër iz ďër ám à@@ n iz ďër ë låk ë k@@ o@@ d áv d@@ r@@ e@@ s w@@ ër yù w@@ ê@@ k dù ďë à@@ s@@ k')
def test_save_load():
    """Saving epoch data and re-loading from file restores learning rate and errors."""
    import tempfile

    with tempfile.NamedTemporaryFile(mode='w') as f:
        filename = f.name
    # NamedTemporaryFile deletes on close, so the path is free again here.
    assert not os.path.exists(filename)
    try:
        control = LearningRateControl(default_learning_rate=1.0, filename=filename)
        assert 2 not in control.epoch_data
        control.epoch_data[2] = LearningRateControl.EpochData(
            learning_rate=0.0008,
            error={
                'dev_error_ctc': 0.22486433815293946,
                'dev_error_decision': 0.0,
                'dev_error_output/output_prob': 0.16270349413262444,
                'dev_score_ctc': 1.0732941136466485,
                'dev_score_output/output_prob': 0.7378438060027533,
                'train_error_ctc': 0.13954045252681482,
                'train_error_decision': 0.0,
                'train_error_output/output_prob': 0.106904268810835,
                'train_score_ctc': 0.5132869609859635,
                'train_score_output/output_prob': 0.5098970897590558,
            })
        control.save()
        assert os.path.exists(filename)
        control = LearningRateControl(default_learning_rate=1.0, filename=filename)
        assert 2 in control.epoch_data
        data = control.epoch_data[2]
        numpy.testing.assert_allclose(data.learning_rate, 0.0008)
        assert 'dev_error_output/output_prob' in data.error
        numpy.testing.assert_allclose(data.error['dev_error_output/output_prob'], 0.16270349413262444)
    finally:
        # Fix: the file written by control.save() was leaked before; clean it up.
        if os.path.exists(filename):
            os.remove(filename)
def test_load():
    # LearningRateControl parses an existing learning-rate file, which is a Python
    # dict literal mapping epoch -> EpochData(learningRate=..., error={...}).
    import tempfile
    with tempfile.NamedTemporaryFile(mode='w') as f:
        # Two epochs in the on-disk repr format (as written by LearningRateControl.save()).
        f.write("{\n 1: EpochData(learningRate=0.0008, error={\n 'dev_error_ctc': 0.21992561489090365,\n 'dev_error_decision': 0.0,\n 'dev_error_output/output_prob': 0.1597158714185534,\n 'dev_score_ctc': 1.0742086989480388,\n 'dev_score_output/output_prob': 0.7316125233255415,\n 'train_error_ctc': 0.11740542462939381,\n 'train_error_decision': 0.0,\n 'train_error_output/output_prob': 0.10000902651529825,\n 'train_score_ctc': 0.42154947919396146,\n 'train_score_output/output_prob': 0.4958179737218142,\n }),\n 2: EpochData(learningRate=0.0008, error={\n 'dev_error_ctc': 0.22486433815293946,\n 'dev_error_decision': 0.0,\n 'dev_error_output/output_prob': 0.16270349413262444,\n 'dev_score_ctc': 1.0732941136466485,\n 'dev_score_output/output_prob': 0.7378438060027533,\n 'train_error_ctc': 0.13954045252681482,\n 'train_error_decision': 0.0,\n 'train_error_output/output_prob': 0.106904268810835,\n 'train_score_ctc': 0.5132869609859635,\n 'train_score_output/output_prob': 0.5098970897590558,\n }),\n }")
        f.flush()
        # Constructing with an existing filename loads the epoch data from it.
        control = LearningRateControl(default_learning_rate=1.0, filename=f.name)
        assert (set(control.epoch_data.keys()) == {1, 2})
        data = control.epoch_data[2]
        numpy.testing.assert_allclose(data.learning_rate, 0.0008)
        assert ('dev_error_output/output_prob' in data.error)
        numpy.testing.assert_allclose(data.error['dev_error_output/output_prob'], 0.16270349413262444)
def test_init_error_old():
    """Old-style flat-float epoch errors are accepted; dev_score becomes the error key."""
    config = Config()
    config.update({'learning_rate_control': 'newbob', 'learning_rate_control_error_measure': 'dev_score'})
    lrc = load_learning_rate_control_from_config(config)
    assert isinstance(lrc, NewbobRelative)
    lrc.get_learning_rate_for_epoch(1)
    lrc.set_epoch_error(1, {'train_score': 1.9344199658230012})
    lrc.set_epoch_error(1, {'dev_score': 1.99, 'dev_error': 0.6})
    error = lrc.get_epoch_error_dict(1)
    for key in ('train_score', 'dev_score', 'dev_error'):
        assert key in error
    assert_equal(lrc.get_error_key(1), 'dev_score')
    lrc.get_learning_rate_for_epoch(2)
    lrc.set_epoch_error(2, {'train_score': 1.8})
    lrc.set_epoch_error(2, {'dev_score': 1.9, 'dev_error': 0.5})
    lrc.get_learning_rate_for_epoch(3)
def test_init_error_new():
    """New-style per-layer dict errors collapse to plain keys for a single output."""
    config = Config()
    config.update({'learning_rate_control': 'newbob', 'learning_rate_control_error_measure': 'dev_score'})
    lrc = load_learning_rate_control_from_config(config)
    assert isinstance(lrc, NewbobRelative)
    lrc.get_learning_rate_for_epoch(1)
    lrc.set_epoch_error(1, {'train_score': {'cost:output': 1.9344199658230012}})
    lrc.set_epoch_error(1, {'dev_score': {'cost:output': 1.99}, 'dev_error': {'error:output': 0.6}})
    error = lrc.get_epoch_error_dict(1)
    for key in ('train_score', 'dev_score', 'dev_error'):
        assert key in error
    assert_equal(lrc.get_error_key(1), 'dev_score')
    lrc.get_learning_rate_for_epoch(2)
    lrc.set_epoch_error(2, {'train_score': {'cost:output': 1.8}})
    lrc.set_epoch_error(2, {'dev_score': {'cost:output': 1.9}, 'dev_error': {'error:output': 0.5}})
    lrc.get_learning_rate_for_epoch(3)
def test_init_error_muliple_out():
    """With multiple outputs, per-layer errors get suffixed keys; the primary output wins."""
    config = Config()
    config.update({'learning_rate_control': 'newbob', 'learning_rate_control_error_measure': 'dev_score'})
    lrc = load_learning_rate_control_from_config(config)
    assert isinstance(lrc, NewbobRelative)
    lrc.get_learning_rate_for_epoch(1)
    lrc.set_epoch_error(1, {'train_score': {'cost:output': 1.95, 'cost:out2': 2.95}})
    lrc.set_epoch_error(1, {
        'dev_score': {'cost:output': 1.99, 'cost:out2': 2.99},
        'dev_error': {'error:output': 0.6, 'error:out2': 0.7}})
    error = lrc.get_epoch_error_dict(1)
    for key in ('train_score_output', 'train_score_out2',
                'dev_score_output', 'dev_score_out2',
                'dev_error_output', 'dev_error_out2'):
        assert key in error
    assert_equal(lrc.get_error_key(1), 'dev_score_output')
    lrc.get_learning_rate_for_epoch(2)
    lrc.set_epoch_error(2, {'train_score': {'cost:output': 1.8, 'cost:out2': 2.8}})
    lrc.set_epoch_error(2, {
        'dev_score': {'cost:output': 1.9, 'cost:out2': 2.9},
        'dev_error': {'error:output': 0.5, 'error:out2': 0.6}})
    lrc.get_learning_rate_for_epoch(3)
def test_newbob():
    """Newbob keeps the configured LR through the first two epochs."""
    lr = 0.01
    config = Config()
    config.update({'learning_rate_control': 'newbob', 'learning_rate': lr})
    lrc = load_learning_rate_control_from_config(config)
    assert isinstance(lrc, NewbobRelative)
    assert_equal(lrc.get_learning_rate_for_epoch(1), lr)
    lrc.set_epoch_error(1, {'train_score': {'cost:output': 1.9344199658230012}})
    lrc.set_epoch_error(1, {'dev_score': {'cost:output': 1.99}, 'dev_error': {'error:output': 0.6}})
    error = lrc.get_epoch_error_dict(1)
    for key in ('train_score', 'dev_score', 'dev_error'):
        assert key in error
    assert_equal(lrc.get_error_key(1), 'dev_score')
    assert_equal(lrc.get_learning_rate_for_epoch(2), lr)
    lrc.set_epoch_error(2, {'train_score': {'cost:output': 1.8}})
    lrc.set_epoch_error(2, {'dev_score': {'cost:output': 1.9}, 'dev_error': {'error:output': 0.5}})
    lrc.get_learning_rate_for_epoch(3)
def test_newbob_multi_epoch():
    """NewbobMultiEpoch keeps the initial LR after the first epoch."""
    lr = 0.0005
    config = Config()
    config.update({
        'learning_rate_control': 'newbob_multi_epoch',
        'learning_rate_control_relative_error_relative_lr': True,
        'newbob_multi_num_epochs': 6,
        'newbob_multi_update_interval': 1,
        'learning_rate': lr,
    })
    lrc = load_learning_rate_control_from_config(config)
    assert isinstance(lrc, NewbobMultiEpoch)
    assert_equal(lrc.get_learning_rate_for_epoch(1), lr)
    lrc.set_epoch_error(1, {
        'dev_error': 0.5028317604690472,
        'dev_score': 2.3209858321263455,
        'train_score': 3.095824052426714,
    })
    assert_equal(lrc.get_learning_rate_for_epoch(2), lr)
def test_later_default_lr():
    """An explicit LR for a later epoch overrides the newbob-controlled schedule."""
    import tempfile

    lr = 0.0005
    learning_rates = list(numpy.linspace(0.0003, lr, num=10))
    # Fix: tempfile.mktemp() is deprecated and racy, and the LR file was leaked.
    # A TemporaryDirectory gives a safe path and cleans up automatically.
    with tempfile.TemporaryDirectory() as tmp_dir:
        tmp_file = os.path.join(tmp_dir, 'learning_rates')
        config = Config()
        config.update({
            'learning_rate_file': tmp_file,
            'learning_rate_control': 'newbob_multi_epoch',
            'learning_rate_control_relative_error_relative_lr': True,
            'learning_rate_control_min_num_epochs_per_new_lr': 3,
            'newbob_multi_num_epochs': 6,
            'newbob_multi_update_interval': 1,
            'learning_rate': lr,
            'learning_rates': learning_rates,
            'min_learning_rate': lr / 50.0,
        })
        lrc = load_learning_rate_control_from_config(config)
        assert isinstance(lrc, NewbobMultiEpoch)
        # Run many epochs with constant scores, then persist the LR file.
        num_epochs = 250
        for epoch in range(1, num_epochs + 1):
            lrc.get_learning_rate_for_epoch(epoch)
            lrc.set_epoch_error(epoch, {'train_score': 0.5, 'train_error': 0.5})
            lrc.set_epoch_error(epoch, {'dev_score': 0.5, 'dev_error': 0.5})
        print('Learning rates:')
        print(lrc)
        lrc.save()
        print('Saved to:', lrc.filename)
        # Reload with an extra explicit LR for epoch 251; it must take precedence.
        learning_rates = {i + 1: v for (i, v) in enumerate(learning_rates)}
        later_epoch = num_epochs + 1
        learning_rates[later_epoch] = lr * 0.5
        config.update({'learning_rates': learning_rates})
        lrc = load_learning_rate_control_from_config(config)
        assert later_epoch in lrc.epoch_data
        lr251 = lrc.get_learning_rate_for_epoch(later_epoch)
        numpy.testing.assert_almost_equal(lr251, learning_rates[later_epoch])
def build_env():
    """build env"""
    # Return an independent dict snapshot of the current process environment.
    return dict(os.environ)