| body (string, 26 to 98.2k chars) | body_hash (int64) | docstring (string, 1 to 16.8k chars) | path (string, 5 to 230 chars) | name (string, 1 to 96 chars) | repository_name (string, 7 to 89 chars) | lang (1 value: python) | body_without_docstring (string, 20 to 98.2k chars) |
|---|---|---|---|---|---|---|---|
def test_get_filepaths_by_extensions(self):
'Test get_filepaths_by_extensions only returns filepaths in\n directory with given extensions.\n '
filepaths = []
build.ensure_directory_exists(MOCK_ASSETS_DEV_DIR)
extensions = ('.json', '.svg')
self.assertEqual(len(filepaths), 0)
filepaths = build.get_filepaths_by_extensions(MOCK_ASSETS_DEV_DIR, extensions)
for filepath in filepaths:
self.assertTrue(any((filepath.endswith(p) for p in extensions)))
file_count = 0
for (_, _, filenames) in os.walk(MOCK_ASSETS_DEV_DIR):
for filename in filenames:
if any((filename.endswith(p) for p in extensions)):
file_count += 1
self.assertEqual(len(filepaths), file_count)
filepaths = []
extensions = ('.pdf', '.viminfo', '.idea')
self.assertEqual(len(filepaths), 0)
filepaths = build.get_filepaths_by_extensions(MOCK_ASSETS_DEV_DIR, extensions)
self.assertEqual(len(filepaths), 0)
| -3,231,396,549,521,197,600
|
Test get_filepaths_by_extensions only returns filepaths in
directory with given extensions.
|
scripts/build_test.py
|
test_get_filepaths_by_extensions
|
muarachmann/oppia
|
python
|
def test_get_filepaths_by_extensions(self):
'Test get_filepaths_by_extensions only returns filepaths in\n directory with given extensions.\n '
filepaths = []
build.ensure_directory_exists(MOCK_ASSETS_DEV_DIR)
extensions = ('.json', '.svg')
self.assertEqual(len(filepaths), 0)
filepaths = build.get_filepaths_by_extensions(MOCK_ASSETS_DEV_DIR, extensions)
for filepath in filepaths:
self.assertTrue(any((filepath.endswith(p) for p in extensions)))
file_count = 0
for (_, _, filenames) in os.walk(MOCK_ASSETS_DEV_DIR):
for filename in filenames:
if any((filename.endswith(p) for p in extensions)):
file_count += 1
self.assertEqual(len(filepaths), file_count)
filepaths = []
extensions = ('.pdf', '.viminfo', '.idea')
self.assertEqual(len(filepaths), 0)
filepaths = build.get_filepaths_by_extensions(MOCK_ASSETS_DEV_DIR, extensions)
self.assertEqual(len(filepaths), 0)
|
def test_get_file_hashes(self):
'Test get_file_hashes gets hashes of all files in directory,\n excluding file with extensions in FILE_EXTENSIONS_TO_IGNORE.\n '
with self.swap(build, 'FILE_EXTENSIONS_TO_IGNORE', ('.html',)):
file_hashes = dict()
self.assertEqual(len(file_hashes), 0)
file_hashes = build.get_file_hashes(MOCK_EXTENSIONS_DEV_DIR)
self.assertGreater(len(file_hashes), 0)
for filepath in file_hashes:
abs_filepath = os.path.join(MOCK_EXTENSIONS_DEV_DIR, filepath)
self.assertTrue(os.path.isfile(abs_filepath))
self.assertFalse(filepath.endswith('.html'))
| -5,967,998,651,860,690,000
|
Test get_file_hashes gets hashes of all files in directory,
excluding file with extensions in FILE_EXTENSIONS_TO_IGNORE.
|
scripts/build_test.py
|
test_get_file_hashes
|
muarachmann/oppia
|
python
|
def test_get_file_hashes(self):
'Test get_file_hashes gets hashes of all files in directory,\n excluding file with extensions in FILE_EXTENSIONS_TO_IGNORE.\n '
with self.swap(build, 'FILE_EXTENSIONS_TO_IGNORE', ('.html',)):
file_hashes = dict()
self.assertEqual(len(file_hashes), 0)
file_hashes = build.get_file_hashes(MOCK_EXTENSIONS_DEV_DIR)
self.assertGreater(len(file_hashes), 0)
for filepath in file_hashes:
abs_filepath = os.path.join(MOCK_EXTENSIONS_DEV_DIR, filepath)
self.assertTrue(os.path.isfile(abs_filepath))
self.assertFalse(filepath.endswith('.html'))
|
def test_filter_hashes(self):
'Test filter_hashes filters the provided hash correctly.'
with self.swap(build, 'FILEPATHS_PROVIDED_TO_FRONTEND', ('*',)):
hashes = {'path/to/file.js': '123456', 'path/file.min.js': '123456'}
filtered_hashes = build.filter_hashes(hashes)
self.assertEqual(filtered_hashes['/path/to/file.js'], hashes['path/to/file.js'])
self.assertEqual(filtered_hashes['/path/file.min.js'], hashes['path/file.min.js'])
with self.swap(build, 'FILEPATHS_PROVIDED_TO_FRONTEND', ('test_path/*', 'path/to/file.js')):
hashes = {'path/to/file.js': '123456', 'test_path/to/file.html': '123456', 'test_path/to/file.js': 'abcdef', 'path/path/file.js': 'zyx123', 'file.html': '321xyz'}
filtered_hashes = build.filter_hashes(hashes)
self.assertTrue(filtered_hashes.has_key('/path/to/file.js'))
self.assertTrue(filtered_hashes.has_key('/test_path/to/file.html'))
self.assertTrue(filtered_hashes.has_key('/test_path/to/file.js'))
self.assertFalse(filtered_hashes.has_key('/path/path/file.js'))
self.assertFalse(filtered_hashes.has_key('/file.html'))
| -8,685,788,288,741,124,000
|
Test filter_hashes filters the provided hash correctly.
|
scripts/build_test.py
|
test_filter_hashes
|
muarachmann/oppia
|
python
|
def test_filter_hashes(self):
with self.swap(build, 'FILEPATHS_PROVIDED_TO_FRONTEND', ('*',)):
hashes = {'path/to/file.js': '123456', 'path/file.min.js': '123456'}
filtered_hashes = build.filter_hashes(hashes)
self.assertEqual(filtered_hashes['/path/to/file.js'], hashes['path/to/file.js'])
self.assertEqual(filtered_hashes['/path/file.min.js'], hashes['path/file.min.js'])
with self.swap(build, 'FILEPATHS_PROVIDED_TO_FRONTEND', ('test_path/*', 'path/to/file.js')):
hashes = {'path/to/file.js': '123456', 'test_path/to/file.html': '123456', 'test_path/to/file.js': 'abcdef', 'path/path/file.js': 'zyx123', 'file.html': '321xyz'}
filtered_hashes = build.filter_hashes(hashes)
self.assertTrue(filtered_hashes.has_key('/path/to/file.js'))
self.assertTrue(filtered_hashes.has_key('/test_path/to/file.html'))
self.assertTrue(filtered_hashes.has_key('/test_path/to/file.js'))
self.assertFalse(filtered_hashes.has_key('/path/path/file.js'))
self.assertFalse(filtered_hashes.has_key('/file.html'))
|
def test_get_hashes_json_file_contents(self):
'Test get_hashes_json_file_contents parses provided hash dict\n correctly to JSON format.\n '
with self.swap(build, 'FILEPATHS_PROVIDED_TO_FRONTEND', ('*',)):
hashes = {'path/file.js': '123456'}
self.assertEqual(build.get_hashes_json_file_contents(hashes), 'var hashes = JSON.parse(\'{"/path/file.js": "123456"}\');')
hashes = {'file.js': '123456', 'file.min.js': '654321'}
self.assertEqual(build.get_hashes_json_file_contents(hashes), 'var hashes = JSON.parse(\'{"/file.min.js": "654321", "/file.js": "123456"}\');')
| 6,738,413,142,785,860,000
|
Test get_hashes_json_file_contents parses provided hash dict
correctly to JSON format.
|
scripts/build_test.py
|
test_get_hashes_json_file_contents
|
muarachmann/oppia
|
python
|
def test_get_hashes_json_file_contents(self):
'Test get_hashes_json_file_contents parses provided hash dict\n correctly to JSON format.\n '
with self.swap(build, 'FILEPATHS_PROVIDED_TO_FRONTEND', ('*',)):
hashes = {'path/file.js': '123456'}
self.assertEqual(build.get_hashes_json_file_contents(hashes), 'var hashes = JSON.parse(\'{"/path/file.js": "123456"}\');')
hashes = {'file.js': '123456', 'file.min.js': '654321'}
self.assertEqual(build.get_hashes_json_file_contents(hashes), 'var hashes = JSON.parse(\'{"/file.min.js": "654321", "/file.js": "123456"}\');')
|
def test_execute_tasks(self):
'Test _execute_tasks joins all threads after executing all tasks.'
build_tasks = collections.deque()
TASK_COUNT = 2
count = TASK_COUNT
while count:
task = threading.Thread(target=build._minify, args=(INVALID_INPUT_FILEPATH, INVALID_OUTPUT_FILEPATH))
build_tasks.append(task)
count -= 1
self.assertEqual(threading.active_count(), 1)
build._execute_tasks(build_tasks)
with self.assertRaisesRegexp(OSError, 'threads can only be started once'):
build._execute_tasks(build_tasks)
self.assertEqual(threading.active_count(), 1)
| -1,951,065,411,219,942,100
|
Test _execute_tasks joins all threads after executing all tasks.
|
scripts/build_test.py
|
test_execute_tasks
|
muarachmann/oppia
|
python
|
def test_execute_tasks(self):
build_tasks = collections.deque()
TASK_COUNT = 2
count = TASK_COUNT
while count:
task = threading.Thread(target=build._minify, args=(INVALID_INPUT_FILEPATH, INVALID_OUTPUT_FILEPATH))
build_tasks.append(task)
count -= 1
self.assertEqual(threading.active_count(), 1)
build._execute_tasks(build_tasks)
with self.assertRaisesRegexp(OSError, 'threads can only be started once'):
build._execute_tasks(build_tasks)
self.assertEqual(threading.active_count(), 1)
|
def test_generate_build_tasks_to_build_all_files_in_directory(self):
'Test generate_build_tasks_to_build_all_files_in_directory queues up\n the same number of build tasks as the number of files in the source\n directory.\n '
asset_hashes = build.get_file_hashes(MOCK_ASSETS_DEV_DIR)
tasks = collections.deque()
self.assertEqual(len(tasks), 0)
tasks = build.generate_build_tasks_to_build_all_files_in_directory(MOCK_ASSETS_DEV_DIR, MOCK_ASSETS_OUT_DIR, asset_hashes)
total_file_count = build.get_file_count(MOCK_ASSETS_DEV_DIR)
self.assertEqual(len(tasks), total_file_count)
| -4,802,834,169,793,278,000
|
Test generate_build_tasks_to_build_all_files_in_directory queues up
the same number of build tasks as the number of files in the source
directory.
|
scripts/build_test.py
|
test_generate_build_tasks_to_build_all_files_in_directory
|
muarachmann/oppia
|
python
|
def test_generate_build_tasks_to_build_all_files_in_directory(self):
'Test generate_build_tasks_to_build_all_files_in_directory queues up\n the same number of build tasks as the number of files in the source\n directory.\n '
asset_hashes = build.get_file_hashes(MOCK_ASSETS_DEV_DIR)
tasks = collections.deque()
self.assertEqual(len(tasks), 0)
tasks = build.generate_build_tasks_to_build_all_files_in_directory(MOCK_ASSETS_DEV_DIR, MOCK_ASSETS_OUT_DIR, asset_hashes)
total_file_count = build.get_file_count(MOCK_ASSETS_DEV_DIR)
self.assertEqual(len(tasks), total_file_count)
|
def test_generate_build_tasks_to_build_files_from_filepaths(self):
'Test generate_build_tasks_to_build_files_from_filepaths queues up a\n corresponding number of build tasks to the number of file changes.\n '
new_filename = 'manifest.json'
recently_changed_filenames = [os.path.join(MOCK_ASSETS_DEV_DIR, new_filename)]
asset_hashes = build.get_file_hashes(MOCK_ASSETS_DEV_DIR)
build_tasks = collections.deque()
self.assertEqual(len(build_tasks), 0)
build_tasks += build.generate_build_tasks_to_build_files_from_filepaths(MOCK_ASSETS_DEV_DIR, MOCK_ASSETS_OUT_DIR, recently_changed_filenames, asset_hashes)
self.assertEqual(len(build_tasks), len(recently_changed_filenames))
build_tasks.clear()
svg_filepaths = build.get_filepaths_by_extensions(MOCK_ASSETS_DEV_DIR, ('.svg',))
self.assertGreater(len(svg_filepaths), 0)
self.assertEqual(len(build_tasks), 0)
build_tasks += build.generate_build_tasks_to_build_files_from_filepaths(MOCK_ASSETS_DEV_DIR, MOCK_ASSETS_OUT_DIR, svg_filepaths, asset_hashes)
self.assertEqual(len(build_tasks), len(svg_filepaths))
| -7,522,509,054,264,482,000
|
Test generate_build_tasks_to_build_files_from_filepaths queues up a
corresponding number of build tasks to the number of file changes.
|
scripts/build_test.py
|
test_generate_build_tasks_to_build_files_from_filepaths
|
muarachmann/oppia
|
python
|
def test_generate_build_tasks_to_build_files_from_filepaths(self):
'Test generate_build_tasks_to_build_files_from_filepaths queues up a\n corresponding number of build tasks to the number of file changes.\n '
new_filename = 'manifest.json'
recently_changed_filenames = [os.path.join(MOCK_ASSETS_DEV_DIR, new_filename)]
asset_hashes = build.get_file_hashes(MOCK_ASSETS_DEV_DIR)
build_tasks = collections.deque()
self.assertEqual(len(build_tasks), 0)
build_tasks += build.generate_build_tasks_to_build_files_from_filepaths(MOCK_ASSETS_DEV_DIR, MOCK_ASSETS_OUT_DIR, recently_changed_filenames, asset_hashes)
self.assertEqual(len(build_tasks), len(recently_changed_filenames))
build_tasks.clear()
svg_filepaths = build.get_filepaths_by_extensions(MOCK_ASSETS_DEV_DIR, ('.svg',))
self.assertGreater(len(svg_filepaths), 0)
self.assertEqual(len(build_tasks), 0)
build_tasks += build.generate_build_tasks_to_build_files_from_filepaths(MOCK_ASSETS_DEV_DIR, MOCK_ASSETS_OUT_DIR, svg_filepaths, asset_hashes)
self.assertEqual(len(build_tasks), len(svg_filepaths))
|
def test_generate_build_tasks_to_build_directory(self):
'Test generate_build_tasks_to_build_directory queues up a\n corresponding number of build tasks according to the given scenario.\n '
EXTENSIONS_DIRNAMES_TO_DIRPATHS = {'dev_dir': MOCK_EXTENSIONS_DEV_DIR, 'compiled_js_dir': MOCK_EXTENSIONS_COMPILED_JS_DIR, 'staging_dir': os.path.join(TEST_DIR, 'backend_prod_files', 'extensions', ''), 'out_dir': os.path.join(TEST_DIR, 'build', 'extensions', '')}
file_hashes = build.get_file_hashes(MOCK_EXTENSIONS_DEV_DIR)
compiled_js_file_hashes = build.get_file_hashes(MOCK_EXTENSIONS_COMPILED_JS_DIR)
build_dir_tasks = collections.deque()
build_all_files_tasks = build.generate_build_tasks_to_build_all_files_in_directory(MOCK_EXTENSIONS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'], file_hashes)
build_all_files_tasks += build.generate_build_tasks_to_build_all_files_in_directory(MOCK_EXTENSIONS_COMPILED_JS_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'], compiled_js_file_hashes)
self.assertGreater(len(build_all_files_tasks), 0)
self.assertEqual(len(build_dir_tasks), 0)
build_dir_tasks += build.generate_build_tasks_to_build_directory(EXTENSIONS_DIRNAMES_TO_DIRPATHS, file_hashes)
self.assertEqual(len(build_dir_tasks), len(build_all_files_tasks))
build.safe_delete_directory_tree(TEST_DIR)
build_dir_tasks.clear()
build.ensure_directory_exists(EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'])
self.assertEqual(len(build_dir_tasks), 0)
source_hashes = file_hashes
source_hashes.update(compiled_js_file_hashes)
build_dir_tasks += build.generate_build_tasks_to_build_directory(EXTENSIONS_DIRNAMES_TO_DIRPATHS, source_hashes)
self.assertEqual(len(build_dir_tasks), len(build_all_files_tasks))
build.safe_delete_directory_tree(TEST_DIR)
build.ensure_directory_exists(EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'])
build._execute_tasks(build_dir_tasks)
self.assertEqual(threading.active_count(), 1)
build._execute_tasks(build.generate_copy_tasks_to_copy_from_source_to_target(EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'], EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'], file_hashes))
build_dir_tasks.clear()
self.assertEqual(len(build_dir_tasks), 0)
build_dir_tasks += build.generate_build_tasks_to_build_directory(EXTENSIONS_DIRNAMES_TO_DIRPATHS, build_dir_tasks)
file_extensions_to_always_rebuild = ('.html', '.py')
always_rebuilt_filepaths = build.get_filepaths_by_extensions(MOCK_EXTENSIONS_DEV_DIR, file_extensions_to_always_rebuild)
self.assertGreater(len(always_rebuilt_filepaths), 0)
self.assertEqual(len(build_dir_tasks), len(always_rebuilt_filepaths))
build.safe_delete_directory_tree(TEST_DIR)
| -4,306,466,468,645,775,000
|
Test generate_build_tasks_to_build_directory queues up a
corresponding number of build tasks according to the given scenario.
|
scripts/build_test.py
|
test_generate_build_tasks_to_build_directory
|
muarachmann/oppia
|
python
|
def test_generate_build_tasks_to_build_directory(self):
'Test generate_build_tasks_to_build_directory queues up a\n corresponding number of build tasks according to the given scenario.\n '
EXTENSIONS_DIRNAMES_TO_DIRPATHS = {'dev_dir': MOCK_EXTENSIONS_DEV_DIR, 'compiled_js_dir': MOCK_EXTENSIONS_COMPILED_JS_DIR, 'staging_dir': os.path.join(TEST_DIR, 'backend_prod_files', 'extensions', ''), 'out_dir': os.path.join(TEST_DIR, 'build', 'extensions', '')}
file_hashes = build.get_file_hashes(MOCK_EXTENSIONS_DEV_DIR)
compiled_js_file_hashes = build.get_file_hashes(MOCK_EXTENSIONS_COMPILED_JS_DIR)
build_dir_tasks = collections.deque()
build_all_files_tasks = build.generate_build_tasks_to_build_all_files_in_directory(MOCK_EXTENSIONS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'], file_hashes)
build_all_files_tasks += build.generate_build_tasks_to_build_all_files_in_directory(MOCK_EXTENSIONS_COMPILED_JS_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'], compiled_js_file_hashes)
self.assertGreater(len(build_all_files_tasks), 0)
self.assertEqual(len(build_dir_tasks), 0)
build_dir_tasks += build.generate_build_tasks_to_build_directory(EXTENSIONS_DIRNAMES_TO_DIRPATHS, file_hashes)
self.assertEqual(len(build_dir_tasks), len(build_all_files_tasks))
build.safe_delete_directory_tree(TEST_DIR)
build_dir_tasks.clear()
build.ensure_directory_exists(EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'])
self.assertEqual(len(build_dir_tasks), 0)
source_hashes = file_hashes
source_hashes.update(compiled_js_file_hashes)
build_dir_tasks += build.generate_build_tasks_to_build_directory(EXTENSIONS_DIRNAMES_TO_DIRPATHS, source_hashes)
self.assertEqual(len(build_dir_tasks), len(build_all_files_tasks))
build.safe_delete_directory_tree(TEST_DIR)
build.ensure_directory_exists(EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'])
build._execute_tasks(build_dir_tasks)
self.assertEqual(threading.active_count(), 1)
build._execute_tasks(build.generate_copy_tasks_to_copy_from_source_to_target(EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'], EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'], file_hashes))
build_dir_tasks.clear()
self.assertEqual(len(build_dir_tasks), 0)
build_dir_tasks += build.generate_build_tasks_to_build_directory(EXTENSIONS_DIRNAMES_TO_DIRPATHS, build_dir_tasks)
file_extensions_to_always_rebuild = ('.html', '.py')
always_rebuilt_filepaths = build.get_filepaths_by_extensions(MOCK_EXTENSIONS_DEV_DIR, file_extensions_to_always_rebuild)
self.assertGreater(len(always_rebuilt_filepaths), 0)
self.assertEqual(len(build_dir_tasks), len(always_rebuilt_filepaths))
build.safe_delete_directory_tree(TEST_DIR)
|
def test_get_recently_changed_filenames(self):
'Test get_recently_changed_filenames detects file recently added.'
build.ensure_directory_exists(EMPTY_DIR)
assets_hashes = build.get_file_hashes(MOCK_ASSETS_DEV_DIR)
recently_changed_filenames = []
self.assertEqual(len(recently_changed_filenames), 0)
recently_changed_filenames = build.get_recently_changed_filenames(assets_hashes, EMPTY_DIR)
with self.swap(build, 'FILE_EXTENSIONS_TO_IGNORE', ('.html', '.py')):
self.assertEqual(len(recently_changed_filenames), build.get_file_count(MOCK_ASSETS_DEV_DIR))
build.safe_delete_directory_tree(EMPTY_DIR)
| -888,671,311,690,525,000
|
Test get_recently_changed_filenames detects file recently added.
|
scripts/build_test.py
|
test_get_recently_changed_filenames
|
muarachmann/oppia
|
python
|
def test_get_recently_changed_filenames(self):
build.ensure_directory_exists(EMPTY_DIR)
assets_hashes = build.get_file_hashes(MOCK_ASSETS_DEV_DIR)
recently_changed_filenames = []
self.assertEqual(len(recently_changed_filenames), 0)
recently_changed_filenames = build.get_recently_changed_filenames(assets_hashes, EMPTY_DIR)
with self.swap(build, 'FILE_EXTENSIONS_TO_IGNORE', ('.html', '.py')):
self.assertEqual(len(recently_changed_filenames), build.get_file_count(MOCK_ASSETS_DEV_DIR))
build.safe_delete_directory_tree(EMPTY_DIR)
|
def test_generate_delete_tasks_to_remove_deleted_files(self):
'Test generate_delete_tasks_to_remove_deleted_files queues up the\n same number of deletion task as the number of deleted files.\n '
delete_tasks = collections.deque()
file_hashes = dict()
self.assertEqual(len(delete_tasks), 0)
delete_tasks += build.generate_delete_tasks_to_remove_deleted_files(file_hashes, MOCK_TEMPLATES_DEV_DIR)
self.assertEqual(len(delete_tasks), build.get_file_count(MOCK_TEMPLATES_DEV_DIR))
| -5,156,963,052,401,631,000
|
Test generate_delete_tasks_to_remove_deleted_files queues up the
same number of deletion task as the number of deleted files.
|
scripts/build_test.py
|
test_generate_delete_tasks_to_remove_deleted_files
|
muarachmann/oppia
|
python
|
def test_generate_delete_tasks_to_remove_deleted_files(self):
'Test generate_delete_tasks_to_remove_deleted_files queues up the\n same number of deletion task as the number of deleted files.\n '
delete_tasks = collections.deque()
file_hashes = dict()
self.assertEqual(len(delete_tasks), 0)
delete_tasks += build.generate_delete_tasks_to_remove_deleted_files(file_hashes, MOCK_TEMPLATES_DEV_DIR)
self.assertEqual(len(delete_tasks), build.get_file_count(MOCK_TEMPLATES_DEV_DIR))
|
def test_compiled_js_dir_validation(self):
'Test that build.COMPILED_JS_DIR is validated correctly with\n outDir in build.TSCONFIG_FILEPATH.\n '
build.require_compiled_js_dir_to_be_valid()
out_dir = ''
with open(build.TSCONFIG_FILEPATH) as f:
config_data = json.load(f)
out_dir = os.path.join(config_data['compilerOptions']['outDir'], '')
with self.assertRaisesRegexp(Exception, ('COMPILED_JS_DIR: %s does not match the output directory in %s: %s' % (MOCK_COMPILED_JS_DIR, build.TSCONFIG_FILEPATH, out_dir))), self.swap(build, 'COMPILED_JS_DIR', MOCK_COMPILED_JS_DIR):
build.require_compiled_js_dir_to_be_valid()
| 3,159,340,658,849,626,600
|
Test that build.COMPILED_JS_DIR is validated correctly with
outDir in build.TSCONFIG_FILEPATH.
|
scripts/build_test.py
|
test_compiled_js_dir_validation
|
muarachmann/oppia
|
python
|
def test_compiled_js_dir_validation(self):
'Test that build.COMPILED_JS_DIR is validated correctly with\n outDir in build.TSCONFIG_FILEPATH.\n '
build.require_compiled_js_dir_to_be_valid()
out_dir = ''
with open(build.TSCONFIG_FILEPATH) as f:
config_data = json.load(f)
out_dir = os.path.join(config_data['compilerOptions']['outDir'], '')
with self.assertRaisesRegexp(Exception, ('COMPILED_JS_DIR: %s does not match the output directory in %s: %s' % (MOCK_COMPILED_JS_DIR, build.TSCONFIG_FILEPATH, out_dir))), self.swap(build, 'COMPILED_JS_DIR', MOCK_COMPILED_JS_DIR):
build.require_compiled_js_dir_to_be_valid()
|
def test_compiled_js_dir_is_deleted_before_compilation(self):
'Test that compiled_js_dir is deleted before a fresh compilation.'
def mock_check_call(unused_cmd):
pass
def mock_require_compiled_js_dir_to_be_valid():
pass
with self.swap(build, 'COMPILED_JS_DIR', MOCK_COMPILED_JS_DIR), self.swap(build, 'require_compiled_js_dir_to_be_valid', mock_require_compiled_js_dir_to_be_valid):
if (not os.path.exists(os.path.dirname(MOCK_COMPILED_JS_DIR))):
os.mkdir(os.path.dirname(MOCK_COMPILED_JS_DIR))
with self.swap(subprocess, 'check_call', mock_check_call):
build.compile_typescript_files('.')
self.assertFalse(os.path.exists(os.path.dirname(MOCK_COMPILED_JS_DIR)))
| -8,542,689,104,886,041,000
|
Test that compiled_js_dir is deleted before a fresh compilation.
|
scripts/build_test.py
|
test_compiled_js_dir_is_deleted_before_compilation
|
muarachmann/oppia
|
python
|
def test_compiled_js_dir_is_deleted_before_compilation(self):
def mock_check_call(unused_cmd):
pass
def mock_require_compiled_js_dir_to_be_valid():
pass
with self.swap(build, 'COMPILED_JS_DIR', MOCK_COMPILED_JS_DIR), self.swap(build, 'require_compiled_js_dir_to_be_valid', mock_require_compiled_js_dir_to_be_valid):
if (not os.path.exists(os.path.dirname(MOCK_COMPILED_JS_DIR))):
os.mkdir(os.path.dirname(MOCK_COMPILED_JS_DIR))
with self.swap(subprocess, 'check_call', mock_check_call):
build.compile_typescript_files('.')
self.assertFalse(os.path.exists(os.path.dirname(MOCK_COMPILED_JS_DIR)))
|
def test_compiled_js_dir_is_deleted_before_watch_mode_compilation(self):
'Test that compiled_js_dir is deleted before a fresh watch mode\n compilation.\n '
def mock_call(unused_cmd, shell, stdout):
pass
def mock_popen(unused_cmd, stdout):
pass
def mock_require_compiled_js_dir_to_be_valid():
pass
with self.swap(build, 'COMPILED_JS_DIR', MOCK_COMPILED_JS_DIR), self.swap(build, 'require_compiled_js_dir_to_be_valid', mock_require_compiled_js_dir_to_be_valid):
if (not os.path.exists(os.path.dirname(MOCK_COMPILED_JS_DIR))):
os.mkdir(os.path.dirname(MOCK_COMPILED_JS_DIR))
with self.swap(subprocess, 'Popen', mock_popen), self.swap(subprocess, 'call', mock_call), self.swap(build, 'TSC_OUTPUT_LOG_FILEPATH', MOCK_TSC_OUTPUT_LOG_FILEPATH):
build.compile_typescript_files_continuously('.')
self.assertFalse(os.path.exists(os.path.dirname(MOCK_COMPILED_JS_DIR)))
| -4,450,239,991,488,878,000
|
Test that compiled_js_dir is deleted before a fresh watch mode
compilation.
|
scripts/build_test.py
|
test_compiled_js_dir_is_deleted_before_watch_mode_compilation
|
muarachmann/oppia
|
python
|
def test_compiled_js_dir_is_deleted_before_watch_mode_compilation(self):
'Test that compiled_js_dir is deleted before a fresh watch mode\n compilation.\n '
def mock_call(unused_cmd, shell, stdout):
pass
def mock_popen(unused_cmd, stdout):
pass
def mock_require_compiled_js_dir_to_be_valid():
pass
with self.swap(build, 'COMPILED_JS_DIR', MOCK_COMPILED_JS_DIR), self.swap(build, 'require_compiled_js_dir_to_be_valid', mock_require_compiled_js_dir_to_be_valid):
if (not os.path.exists(os.path.dirname(MOCK_COMPILED_JS_DIR))):
os.mkdir(os.path.dirname(MOCK_COMPILED_JS_DIR))
with self.swap(subprocess, 'Popen', mock_popen), self.swap(subprocess, 'call', mock_call), self.swap(build, 'TSC_OUTPUT_LOG_FILEPATH', MOCK_TSC_OUTPUT_LOG_FILEPATH):
build.compile_typescript_files_continuously('.')
self.assertFalse(os.path.exists(os.path.dirname(MOCK_COMPILED_JS_DIR)))
|
def _mock_safe_delete_file(unused_filepath):
'Mocks build.safe_delete_file().'
pass
| -2,236,168,809,398,343,000
|
Mocks build.safe_delete_file().
|
scripts/build_test.py
|
_mock_safe_delete_file
|
muarachmann/oppia
|
python
|
def _mock_safe_delete_file(unused_filepath):
pass
|
@pytest.mark.usefixtures('os', 'instance')
def test_existing_hosted_zone(hosted_zone_factory, pcluster_config_reader, clusters_factory, vpc_stack, cfn_stacks_factory, key_name, scheduler, region, instance):
'Test hosted_zone_id is provided in the config file.'
num_computes = 2
(hosted_zone_id, domain_name) = hosted_zone_factory()
cluster_config = pcluster_config_reader(existing_hosted_zone=hosted_zone_id, queue_size=num_computes)
cluster = clusters_factory(cluster_config, upper_case_cluster_name=True)
remote_command_executor = RemoteCommandExecutor(cluster)
scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
_test_mpi(remote_command_executor, slots_per_instance=fetch_instance_slots(region, instance), scheduler=scheduler, region=region, stack_name=cluster.cfn_name, scaledown_idletime=3, verify_scaling=False)
compute_nodes = scheduler_commands.get_compute_nodes()
_test_hostname_same_as_nodename(scheduler_commands, remote_command_executor, compute_nodes)
resolv_conf = remote_command_executor.run_remote_command('cat /etc/resolv.conf').stdout
assert_that(resolv_conf).contains(((cluster.cfn_name.lower() + '.') + domain_name))
| -1,448,538,545,670,695,400
|
Test hosted_zone_id is provided in the config file.
|
tests/integration-tests/tests/dns/test_dns.py
|
test_existing_hosted_zone
|
Chen188/aws-parallelcluster
|
python
|
@pytest.mark.usefixtures('os', 'instance')
def test_existing_hosted_zone(hosted_zone_factory, pcluster_config_reader, clusters_factory, vpc_stack, cfn_stacks_factory, key_name, scheduler, region, instance):
num_computes = 2
(hosted_zone_id, domain_name) = hosted_zone_factory()
cluster_config = pcluster_config_reader(existing_hosted_zone=hosted_zone_id, queue_size=num_computes)
cluster = clusters_factory(cluster_config, upper_case_cluster_name=True)
remote_command_executor = RemoteCommandExecutor(cluster)
scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
_test_mpi(remote_command_executor, slots_per_instance=fetch_instance_slots(region, instance), scheduler=scheduler, region=region, stack_name=cluster.cfn_name, scaledown_idletime=3, verify_scaling=False)
compute_nodes = scheduler_commands.get_compute_nodes()
_test_hostname_same_as_nodename(scheduler_commands, remote_command_executor, compute_nodes)
resolv_conf = remote_command_executor.run_remote_command('cat /etc/resolv.conf').stdout
assert_that(resolv_conf).contains(((cluster.cfn_name.lower() + '.') + domain_name))
|
@pytest.fixture(scope='class')
def hosted_zone_factory(vpc_stack, cfn_stacks_factory, request, region):
'Create a hosted zone stack.'
hosted_zone_stack_name = generate_stack_name('integ-tests-hosted-zone', request.config.getoption('stackname_suffix'))
domain_name = (hosted_zone_stack_name + '.com')
def create_hosted_zone():
hosted_zone_template = Template()
hosted_zone_template.set_version('2010-09-09')
hosted_zone_template.set_description('Hosted zone stack created for testing existing DNS')
hosted_zone_template.add_resource(HostedZone('HostedZoneResource', Name=domain_name, VPCs=[HostedZoneVPCs(VPCId=vpc_stack.cfn_outputs['VpcId'], VPCRegion=region)]))
hosted_zone_stack = CfnStack(name=hosted_zone_stack_name, region=region, template=hosted_zone_template.to_json())
cfn_stacks_factory.create_stack(hosted_zone_stack)
return (hosted_zone_stack.cfn_resources['HostedZoneResource'], domain_name)
(yield create_hosted_zone)
if (not request.config.getoption('no_delete')):
cfn_stacks_factory.delete_stack(hosted_zone_stack_name, region)
| -8,856,291,509,646,637,000
|
Create a hosted zone stack.
|
tests/integration-tests/tests/dns/test_dns.py
|
hosted_zone_factory
|
Chen188/aws-parallelcluster
|
python
|
@pytest.fixture(scope='class')
def hosted_zone_factory(vpc_stack, cfn_stacks_factory, request, region):
hosted_zone_stack_name = generate_stack_name('integ-tests-hosted-zone', request.config.getoption('stackname_suffix'))
domain_name = (hosted_zone_stack_name + '.com')
def create_hosted_zone():
hosted_zone_template = Template()
hosted_zone_template.set_version('2010-09-09')
hosted_zone_template.set_description('Hosted zone stack created for testing existing DNS')
hosted_zone_template.add_resource(HostedZone('HostedZoneResource', Name=domain_name, VPCs=[HostedZoneVPCs(VPCId=vpc_stack.cfn_outputs['VpcId'], VPCRegion=region)]))
hosted_zone_stack = CfnStack(name=hosted_zone_stack_name, region=region, template=hosted_zone_template.to_json())
cfn_stacks_factory.create_stack(hosted_zone_stack)
return (hosted_zone_stack.cfn_resources['HostedZoneResource'], domain_name)
(yield create_hosted_zone)
if (not request.config.getoption('no_delete')):
cfn_stacks_factory.delete_stack(hosted_zone_stack_name, region)
|
def build_run_config():
'Return RunConfig for TPU estimator.'
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
eval_steps = (FLAGS.num_eval_images // FLAGS.eval_batch_size)
iterations_per_loop = (eval_steps if (FLAGS.mode == 'eval') else FLAGS.iterations_per_loop)
save_checkpoints_steps = (FLAGS.save_checkpoints_steps or iterations_per_loop)
run_config = tf.contrib.tpu.RunConfig(cluster=tpu_cluster_resolver, model_dir=FLAGS.model_dir, save_checkpoints_steps=save_checkpoints_steps, keep_checkpoint_max=None, tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=iterations_per_loop, num_shards=FLAGS.num_shards, per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2))
return run_config
| 4,576,793,555,163,632,600
|
Return RunConfig for TPU estimator.
|
models/official/amoeba_net/amoeba_net.py
|
build_run_config
|
boristown/tpu
|
python
|
def build_run_config():
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
eval_steps = (FLAGS.num_eval_images // FLAGS.eval_batch_size)
iterations_per_loop = (eval_steps if (FLAGS.mode == 'eval') else FLAGS.iterations_per_loop)
save_checkpoints_steps = (FLAGS.save_checkpoints_steps or iterations_per_loop)
run_config = tf.contrib.tpu.RunConfig(cluster=tpu_cluster_resolver, model_dir=FLAGS.model_dir, save_checkpoints_steps=save_checkpoints_steps, keep_checkpoint_max=None, tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=iterations_per_loop, num_shards=FLAGS.num_shards, per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2))
return run_config
|
def build_image_serving_input_receiver_fn(shape, dtype=tf.float32):
'Returns a input_receiver_fn for raw images during serving.'
def _preprocess_image(encoded_image):
'Preprocess a single raw image.'
image = tf.image.decode_image(encoded_image, channels=shape[(- 1)])
image.set_shape(shape)
return tf.cast(image, dtype)
def serving_input_receiver_fn():
image_bytes_list = tf.placeholder(shape=[None], dtype=tf.string)
images = tf.map_fn(_preprocess_image, image_bytes_list, back_prop=False, dtype=dtype)
return tf.estimator.export.TensorServingInputReceiver(features=images, receiver_tensors=image_bytes_list)
return serving_input_receiver_fn
| 3,808,841,042,814,280,700
|
Returns a input_receiver_fn for raw images during serving.
|
models/official/amoeba_net/amoeba_net.py
|
build_image_serving_input_receiver_fn
|
boristown/tpu
|
python
|
def build_image_serving_input_receiver_fn(shape, dtype=tf.float32):
def _preprocess_image(encoded_image):
'Preprocess a single raw image.'
image = tf.image.decode_image(encoded_image, channels=shape[(- 1)])
image.set_shape(shape)
return tf.cast(image, dtype)
def serving_input_receiver_fn():
image_bytes_list = tf.placeholder(shape=[None], dtype=tf.string)
images = tf.map_fn(_preprocess_image, image_bytes_list, back_prop=False, dtype=dtype)
return tf.estimator.export.TensorServingInputReceiver(features=images, receiver_tensors=image_bytes_list)
return serving_input_receiver_fn
|
def _encode_image(image_array, fmt='PNG'):
'encodes an (numpy) image array to string.\n\n Args:\n image_array: (numpy) image array\n fmt: image format to use\n\n Returns:\n encoded image string\n '
pil_image = Image.fromarray(image_array)
image_io = io.BytesIO()
pil_image.save(image_io, format=fmt)
return image_io.getvalue()
| -2,579,279,950,762,076,700
|
encodes an (numpy) image array to string.
Args:
image_array: (numpy) image array
fmt: image format to use
Returns:
encoded image string
|
models/official/amoeba_net/amoeba_net.py
|
_encode_image
|
boristown/tpu
|
python
|
def _encode_image(image_array, fmt='PNG'):
'encodes an (numpy) image array to string.\n\n Args:\n image_array: (numpy) image array\n fmt: image format to use\n\n Returns:\n encoded image string\n '
pil_image = Image.fromarray(image_array)
image_io = io.BytesIO()
pil_image.save(image_io, format=fmt)
return image_io.getvalue()
|
def write_warmup_requests(savedmodel_dir, model_name, image_size, batch_sizes=None, num_requests=8):
'Writes warmup requests for inference into a tfrecord file.\n\n Args:\n savedmodel_dir: string, the file to the exported model folder.\n model_name: string, a model name used inside the model server.\n image_size: int, size of image, assuming image height and width.\n batch_sizes: list, a list of batch sizes to create different input requests.\n num_requests: int, number of requests per batch size.\n\n Raises:\n ValueError: if batch_sizes is not a valid integer list.\n '
if ((not isinstance(batch_sizes, list)) or (not batch_sizes)):
raise ValueError('batch sizes should be a valid non-empty list.')
extra_assets_dir = os.path.join(savedmodel_dir, 'assets.extra')
tf.gfile.MkDir(extra_assets_dir)
with tf.python_io.TFRecordWriter(os.path.join(extra_assets_dir, 'tf_serving_warmup_requests')) as writer:
for batch_size in batch_sizes:
for _ in range(num_requests):
request = predict_pb2.PredictRequest()
image = np.uint8((np.random.rand(image_size, image_size, 3) * 255))
request.inputs['input'].CopyFrom(tf.make_tensor_proto(([_encode_image(image)] * batch_size), shape=[batch_size]))
request.model_spec.name = model_name
request.model_spec.signature_name = 'serving_default'
log = prediction_log_pb2.PredictionLog(predict_log=prediction_log_pb2.PredictLog(request=request))
writer.write(log.SerializeToString())
| 2,654,014,410,891,139,600
|
Writes warmup requests for inference into a tfrecord file.
Args:
savedmodel_dir: string, the file to the exported model folder.
model_name: string, a model name used inside the model server.
image_size: int, size of image, assuming image height and width.
batch_sizes: list, a list of batch sizes to create different input requests.
num_requests: int, number of requests per batch size.
Raises:
ValueError: if batch_sizes is not a valid integer list.
|
models/official/amoeba_net/amoeba_net.py
|
write_warmup_requests
|
boristown/tpu
|
python
|
def write_warmup_requests(savedmodel_dir, model_name, image_size, batch_sizes=None, num_requests=8):
'Writes warmup requests for inference into a tfrecord file.\n\n Args:\n savedmodel_dir: string, the file to the exported model folder.\n model_name: string, a model name used inside the model server.\n image_size: int, size of image, assuming image height and width.\n batch_sizes: list, a list of batch sizes to create different input requests.\n num_requests: int, number of requests per batch size.\n\n Raises:\n ValueError: if batch_sizes is not a valid integer list.\n '
if ((not isinstance(batch_sizes, list)) or (not batch_sizes)):
raise ValueError('batch sizes should be a valid non-empty list.')
extra_assets_dir = os.path.join(savedmodel_dir, 'assets.extra')
tf.gfile.MkDir(extra_assets_dir)
with tf.python_io.TFRecordWriter(os.path.join(extra_assets_dir, 'tf_serving_warmup_requests')) as writer:
for batch_size in batch_sizes:
for _ in range(num_requests):
request = predict_pb2.PredictRequest()
image = np.uint8((np.random.rand(image_size, image_size, 3) * 255))
request.inputs['input'].CopyFrom(tf.make_tensor_proto(([_encode_image(image)] * batch_size), shape=[batch_size]))
request.model_spec.name = model_name
request.model_spec.signature_name = 'serving_default'
log = prediction_log_pb2.PredictionLog(predict_log=prediction_log_pb2.PredictLog(request=request))
writer.write(log.SerializeToString())
|
def override_with_flags(hparams):
'Overrides parameters with flag values.'
override_flag_names = ['aux_scaling', 'train_batch_size', 'batch_norm_decay', 'batch_norm_epsilon', 'dense_dropout_keep_prob', 'drop_connect_keep_prob', 'drop_connect_version', 'eval_batch_size', 'gradient_clipping_by_global_norm', 'lr', 'lr_decay_method', 'lr_decay_value', 'lr_num_epochs_per_decay', 'moving_average_decay', 'image_size', 'num_cells', 'reduction_size', 'stem_reduction_size', 'num_epochs', 'num_epochs_per_eval', 'optimizer', 'enable_hostcall', 'use_aux_head', 'use_bp16', 'use_tpu', 'lr_warmup_epochs', 'weight_decay', 'num_shards', 'distributed_group_size', 'num_train_images', 'num_eval_images', 'num_label_classes']
for flag_name in override_flag_names:
flag_value = getattr(FLAGS, flag_name, 'INVALID')
if (flag_value == 'INVALID'):
tf.logging.fatal(('Unknown flag %s.' % str(flag_name)))
if (flag_value is not None):
_set_or_add_hparam(hparams, flag_name, flag_value)
| 4,258,256,473,116,058,600
|
Overrides parameters with flag values.
|
models/official/amoeba_net/amoeba_net.py
|
override_with_flags
|
boristown/tpu
|
python
|
def override_with_flags(hparams):
override_flag_names = ['aux_scaling', 'train_batch_size', 'batch_norm_decay', 'batch_norm_epsilon', 'dense_dropout_keep_prob', 'drop_connect_keep_prob', 'drop_connect_version', 'eval_batch_size', 'gradient_clipping_by_global_norm', 'lr', 'lr_decay_method', 'lr_decay_value', 'lr_num_epochs_per_decay', 'moving_average_decay', 'image_size', 'num_cells', 'reduction_size', 'stem_reduction_size', 'num_epochs', 'num_epochs_per_eval', 'optimizer', 'enable_hostcall', 'use_aux_head', 'use_bp16', 'use_tpu', 'lr_warmup_epochs', 'weight_decay', 'num_shards', 'distributed_group_size', 'num_train_images', 'num_eval_images', 'num_label_classes']
for flag_name in override_flag_names:
flag_value = getattr(FLAGS, flag_name, 'INVALID')
if (flag_value == 'INVALID'):
tf.logging.fatal(('Unknown flag %s.' % str(flag_name)))
if (flag_value is not None):
_set_or_add_hparam(hparams, flag_name, flag_value)
|
def build_hparams():
'Build tf.Hparams for training Amoeba Net.'
hparams = model_lib.build_hparams(FLAGS.cell_name)
override_with_flags(hparams)
return hparams
| 7,598,903,149,163,873,000
|
Build tf.Hparams for training Amoeba Net.
|
models/official/amoeba_net/amoeba_net.py
|
build_hparams
|
boristown/tpu
|
python
|
def build_hparams():
hparams = model_lib.build_hparams(FLAGS.cell_name)
override_with_flags(hparams)
return hparams
|
def _preprocess_image(encoded_image):
'Preprocess a single raw image.'
image = tf.image.decode_image(encoded_image, channels=shape[(- 1)])
image.set_shape(shape)
return tf.cast(image, dtype)
| -2,410,232,163,323,720,000
|
Preprocess a single raw image.
|
models/official/amoeba_net/amoeba_net.py
|
_preprocess_image
|
boristown/tpu
|
python
|
def _preprocess_image(encoded_image):
image = tf.image.decode_image(encoded_image, channels=shape[(- 1)])
image.set_shape(shape)
return tf.cast(image, dtype)
|
def add_port(component: Component, **kwargs) -> Component:
'Return Component with a new port.'
component.add_port(**kwargs)
return component
| -5,908,829,619,112,604,000
|
Return Component with a new port.
|
gdsfactory/functions.py
|
add_port
|
jorgepadilla19/gdsfactory
|
python
|
def add_port(component: Component, **kwargs) -> Component:
component.add_port(**kwargs)
return component
|
@cell
def add_text(component: ComponentOrFactory, text: str='', text_offset: Float2=(0, 0), text_anchor: Anchor='cc', text_factory: ComponentFactory=text_rectangular_multi_layer) -> Component:
'Return component inside a new component with text geometry.\n\n Args:\n component:\n text: text string.\n text_offset: relative to component anchor. Defaults to center (cc).\n text_anchor: relative to component (ce cw nc ne nw sc se sw center cc).\n text_factory: function to add text labels.\n '
component = (component() if callable(component) else component)
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
t = (component_new << text_factory(text))
t.move((np.array(text_offset) + getattr(ref.size_info, text_anchor)))
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
| 6,078,697,613,539,204,000
|
Return component inside a new component with text geometry.
Args:
component:
text: text string.
text_offset: relative to component anchor. Defaults to center (cc).
text_anchor: relative to component (ce cw nc ne nw sc se sw center cc).
text_factory: function to add text labels.
|
gdsfactory/functions.py
|
add_text
|
jorgepadilla19/gdsfactory
|
python
|
@cell
def add_text(component: ComponentOrFactory, text: str='', text_offset: Float2=(0, 0), text_anchor: Anchor='cc', text_factory: ComponentFactory=text_rectangular_multi_layer) -> Component:
'Return component inside a new component with text geometry.\n\n Args:\n component:\n text: text string.\n text_offset: relative to component anchor. Defaults to center (cc).\n text_anchor: relative to component (ce cw nc ne nw sc se sw center cc).\n text_factory: function to add text labels.\n '
component = (component() if callable(component) else component)
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
t = (component_new << text_factory(text))
t.move((np.array(text_offset) + getattr(ref.size_info, text_anchor)))
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
|
def add_texts(components: List[ComponentOrFactory], prefix: str='', index0: int=0, **kwargs) -> List[Component]:
'Return a list of Component with text labels.\n\n Args:\n components: list of components\n prefix: Optional prefix for the labels\n index0: defaults to 0 (0, for first component, 1 for second ...)\n\n keyword Args:\n text_offset: relative to component size info anchor. Defaults to center.\n text_anchor: relative to component (ce cw nc ne nw sc se sw center cc).\n text_factory: function to add text labels.\n '
return [add_text(component, text=f'{prefix}{(i + index0)}', **kwargs) for (i, component) in enumerate(components)]
| 2,259,754,371,796,914,400
|
Return a list of Component with text labels.
Args:
components: list of components
prefix: Optional prefix for the labels
index0: defaults to 0 (0, for first component, 1 for second ...)
keyword Args:
text_offset: relative to component size info anchor. Defaults to center.
text_anchor: relative to component (ce cw nc ne nw sc se sw center cc).
text_factory: function to add text labels.
|
gdsfactory/functions.py
|
add_texts
|
jorgepadilla19/gdsfactory
|
python
|
def add_texts(components: List[ComponentOrFactory], prefix: str='', index0: int=0, **kwargs) -> List[Component]:
'Return a list of Component with text labels.\n\n Args:\n components: list of components\n prefix: Optional prefix for the labels\n index0: defaults to 0 (0, for first component, 1 for second ...)\n\n keyword Args:\n text_offset: relative to component size info anchor. Defaults to center.\n text_anchor: relative to component (ce cw nc ne nw sc se sw center cc).\n text_factory: function to add text labels.\n '
return [add_text(component, text=f'{prefix}{(i + index0)}', **kwargs) for (i, component) in enumerate(components)]
|
@cell
def rotate(component: ComponentOrFactory, angle: float=90) -> Component:
'Return rotated component inside a new component.\n\n Most times you just need to place a reference and rotate it.\n This rotate function just encapsulates the rotated reference into a new component.\n\n Args:\n component:\n angle: in degrees\n '
component = (component() if callable(component) else component)
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
ref.rotate(angle)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
| 3,448,322,324,605,236,700
|
Return rotated component inside a new component.
Most times you just need to place a reference and rotate it.
This rotate function just encapsulates the rotated reference into a new component.
Args:
component:
angle: in degrees
|
gdsfactory/functions.py
|
rotate
|
jorgepadilla19/gdsfactory
|
python
|
@cell
def rotate(component: ComponentOrFactory, angle: float=90) -> Component:
'Return rotated component inside a new component.\n\n Most times you just need to place a reference and rotate it.\n This rotate function just encapsulates the rotated reference into a new component.\n\n Args:\n component:\n angle: in degrees\n '
component = (component() if callable(component) else component)
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
ref.rotate(angle)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
|
@cell
def mirror(component: Component, p1: Float2=(0, 1), p2: Float2=(0, 0)) -> Component:
'Return new Component with a mirrored reference.\n\n Args:\n p1: first point to define mirror axis\n p2: second point to define mirror axis\n '
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
ref.mirror(p1=p1, p2=p2)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
| 2,300,571,083,734,599,700
|
Return new Component with a mirrored reference.
Args:
p1: first point to define mirror axis
p2: second point to define mirror axis
|
gdsfactory/functions.py
|
mirror
|
jorgepadilla19/gdsfactory
|
python
|
@cell
def mirror(component: Component, p1: Float2=(0, 1), p2: Float2=(0, 0)) -> Component:
'Return new Component with a mirrored reference.\n\n Args:\n p1: first point to define mirror axis\n p2: second point to define mirror axis\n '
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
ref.mirror(p1=p1, p2=p2)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
|
@cell
def move(component: Component, origin=(0, 0), destination=None, axis: Optional[Axis]=None) -> Component:
'Return new Component with a moved reference to the original component.\n\n Args:\n origin: of component\n destination:\n axis: x or y axis\n '
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
ref.move(origin=origin, destination=destination, axis=axis)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
| -3,906,964,808,911,511,000
|
Return new Component with a moved reference to the original component.
Args:
origin: of component
destination:
axis: x or y axis
|
gdsfactory/functions.py
|
move
|
jorgepadilla19/gdsfactory
|
python
|
@cell
def move(component: Component, origin=(0, 0), destination=None, axis: Optional[Axis]=None) -> Component:
'Return new Component with a moved reference to the original component.\n\n Args:\n origin: of component\n destination:\n axis: x or y axis\n '
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
ref.move(origin=origin, destination=destination, axis=axis)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
|
def move_port_to_zero(component: Component, port_name: str='o1'):
'Return a container that contains a reference to the original component.\n where the new component has port_name in (0, 0)\n '
if (port_name not in component.ports):
raise ValueError(f'port_name = {port_name!r} not in {list(component.ports.keys())}')
return move(component, (- component.ports[port_name].midpoint))
| 3,064,900,530,110,951,000
|
Return a container that contains a reference to the original component.
where the new component has port_name in (0, 0)
|
gdsfactory/functions.py
|
move_port_to_zero
|
jorgepadilla19/gdsfactory
|
python
|
def move_port_to_zero(component: Component, port_name: str='o1'):
'Return a container that contains a reference to the original component.\n where the new component has port_name in (0, 0)\n '
if (port_name not in component.ports):
raise ValueError(f'port_name = {port_name!r} not in {list(component.ports.keys())}')
return move(component, (- component.ports[port_name].midpoint))
|
def update_info(component: Component, **kwargs) -> Component:
'Return Component with updated info.'
component.info.update(**kwargs)
return component
| 2,849,792,957,458,223,000
|
Return Component with updated info.
|
gdsfactory/functions.py
|
update_info
|
jorgepadilla19/gdsfactory
|
python
|
def update_info(component: Component, **kwargs) -> Component:
component.info.update(**kwargs)
return component
|
@validate_arguments
def add_settings_label(component: Component, layer_label: Layer=(66, 0), settings: Optional[Strs]=None) -> Component:
'Add a settings label to a component.\n\n Args:\n component:\n layer_label:\n settings: tuple or list of settings. if None, adds all changed settings\n\n '
d = ({setting: component.get_setting(setting) for setting in settings} if settings else component.info.changed)
component.add_label(text=OmegaConf.to_yaml(d), layer=layer_label)
return component
| -811,722,326,502,638,200
|
Add a settings label to a component.
Args:
component:
layer_label:
settings: tuple or list of settings. if None, adds all changed settings
|
gdsfactory/functions.py
|
add_settings_label
|
jorgepadilla19/gdsfactory
|
python
|
@validate_arguments
def add_settings_label(component: Component, layer_label: Layer=(66, 0), settings: Optional[Strs]=None) -> Component:
'Add a settings label to a component.\n\n Args:\n component:\n layer_label:\n settings: tuple or list of settings. if None, adds all changed settings\n\n '
d = ({setting: component.get_setting(setting) for setting in settings} if settings else component.info.changed)
component.add_label(text=OmegaConf.to_yaml(d), layer=layer_label)
return component
|
def _summarize_str(st):
'Aux function'
return (st[:56][::(- 1)].split(',', 1)[(- 1)][::(- 1)] + ', ...')
| 30,060,154,108,599,572
|
Aux function
|
mne/fiff/meas_info.py
|
_summarize_str
|
Anevar/mne-python
|
python
|
def _summarize_str(st):
return (st[:56][::(- 1)].split(',', 1)[(- 1)][::(- 1)] + ', ...')
|
def read_fiducials(fname):
'Read fiducials from a fiff file\n\n Returns\n -------\n pts : list of dicts\n List of digitizer points (each point in a dict).\n coord_frame : int\n The coordinate frame of the points (one of\n mne.fiff.FIFF.FIFFV_COORD_...)\n '
(fid, tree, _) = fiff_open(fname)
with fid:
isotrak = dir_tree_find(tree, FIFF.FIFFB_ISOTRAK)
isotrak = isotrak[0]
pts = []
coord_frame = FIFF.FIFFV_COORD_UNKNOWN
for k in range(isotrak['nent']):
kind = isotrak['directory'][k].kind
pos = isotrak['directory'][k].pos
if (kind == FIFF.FIFF_DIG_POINT):
tag = read_tag(fid, pos)
pts.append(tag.data)
elif (kind == FIFF.FIFF_MNE_COORD_FRAME):
tag = read_tag(fid, pos)
coord_frame = tag.data[0]
if (coord_frame == FIFF.FIFFV_COORD_UNKNOWN):
err = ('No coordinate frame was found in the file %r, it is probably not a valid fiducials file.' % fname)
raise ValueError(err)
for pt in pts:
pt['coord_frame'] = coord_frame
return (pts, coord_frame)
| -6,872,709,896,282,553,000
|
Read fiducials from a fiff file
Returns
-------
pts : list of dicts
List of digitizer points (each point in a dict).
coord_frame : int
The coordinate frame of the points (one of
mne.fiff.FIFF.FIFFV_COORD_...)
|
mne/fiff/meas_info.py
|
read_fiducials
|
Anevar/mne-python
|
python
|
def read_fiducials(fname):
'Read fiducials from a fiff file\n\n Returns\n -------\n pts : list of dicts\n List of digitizer points (each point in a dict).\n coord_frame : int\n The coordinate frame of the points (one of\n mne.fiff.FIFF.FIFFV_COORD_...)\n '
(fid, tree, _) = fiff_open(fname)
with fid:
isotrak = dir_tree_find(tree, FIFF.FIFFB_ISOTRAK)
isotrak = isotrak[0]
pts = []
coord_frame = FIFF.FIFFV_COORD_UNKNOWN
for k in range(isotrak['nent']):
kind = isotrak['directory'][k].kind
pos = isotrak['directory'][k].pos
if (kind == FIFF.FIFF_DIG_POINT):
tag = read_tag(fid, pos)
pts.append(tag.data)
elif (kind == FIFF.FIFF_MNE_COORD_FRAME):
tag = read_tag(fid, pos)
coord_frame = tag.data[0]
if (coord_frame == FIFF.FIFFV_COORD_UNKNOWN):
err = ('No coordinate frame was found in the file %r, it is probably not a valid fiducials file.' % fname)
raise ValueError(err)
for pt in pts:
pt['coord_frame'] = coord_frame
return (pts, coord_frame)
|
def write_fiducials(fname, pts, coord_frame=0):
"Write fiducials to a fiff file\n\n Parameters\n ----------\n fname : str\n Destination file name.\n pts : iterator of dict\n Iterator through digitizer points. Each point is a dictionary with\n the keys 'kind', 'ident' and 'r'.\n coord_frame : int\n The coordinate frame of the points (one of\n mne.fiff.FIFF.FIFFV_COORD_...)\n "
pts_frames = set((pt.get('coord_frame', coord_frame) for pt in pts))
bad_frames = (pts_frames - set((coord_frame,)))
if (len(bad_frames) > 0):
err = ('Points have coord_frame entries that are incompatible with coord_frame=%i: %s.' % (coord_frame, str(tuple(bad_frames))))
raise ValueError(err)
fid = start_file(fname)
start_block(fid, FIFF.FIFFB_ISOTRAK)
write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, coord_frame)
for pt in pts:
write_dig_point(fid, pt)
end_block(fid, FIFF.FIFFB_ISOTRAK)
end_file(fid)
| -5,395,714,530,013,654,000
|
Write fiducials to a fiff file
Parameters
----------
fname : str
Destination file name.
pts : iterator of dict
Iterator through digitizer points. Each point is a dictionary with
the keys 'kind', 'ident' and 'r'.
coord_frame : int
The coordinate frame of the points (one of
mne.fiff.FIFF.FIFFV_COORD_...)
|
mne/fiff/meas_info.py
|
write_fiducials
|
Anevar/mne-python
|
python
|
def write_fiducials(fname, pts, coord_frame=0):
"Write fiducials to a fiff file\n\n Parameters\n ----------\n fname : str\n Destination file name.\n pts : iterator of dict\n Iterator through digitizer points. Each point is a dictionary with\n the keys 'kind', 'ident' and 'r'.\n coord_frame : int\n The coordinate frame of the points (one of\n mne.fiff.FIFF.FIFFV_COORD_...)\n "
pts_frames = set((pt.get('coord_frame', coord_frame) for pt in pts))
bad_frames = (pts_frames - set((coord_frame,)))
if (len(bad_frames) > 0):
err = ('Points have coord_frame entries that are incompatible with coord_frame=%i: %s.' % (coord_frame, str(tuple(bad_frames))))
raise ValueError(err)
fid = start_file(fname)
start_block(fid, FIFF.FIFFB_ISOTRAK)
write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, coord_frame)
for pt in pts:
write_dig_point(fid, pt)
end_block(fid, FIFF.FIFFB_ISOTRAK)
end_file(fid)
|
@verbose
def read_info(fname, verbose=None):
'Read measurement info from a file\n\n Parameters\n ----------\n fname : str\n File name.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n info : instance of mne.fiff.meas_info.Info\n Info on dataset.\n '
(f, tree, _) = fiff_open(fname)
with f as fid:
info = read_meas_info(fid, tree)[0]
return info
| 8,250,280,954,245,872,000
|
Read measurement info from a file
Parameters
----------
fname : str
File name.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
info : instance of mne.fiff.meas_info.Info
Info on dataset.
|
mne/fiff/meas_info.py
|
read_info
|
Anevar/mne-python
|
python
|
@verbose
def read_info(fname, verbose=None):
'Read measurement info from a file\n\n Parameters\n ----------\n fname : str\n File name.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n info : instance of mne.fiff.meas_info.Info\n Info on dataset.\n '
(f, tree, _) = fiff_open(fname)
with f as fid:
info = read_meas_info(fid, tree)[0]
return info
|
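A usage sketch for read_info; the file name is hypothetical, and the printed fields are a few of those populated by read_meas_info below.
from mne.fiff.meas_info import read_info

info = read_info('sample_audvis_raw.fif')    # hypothetical file name
print(info['nchan'], info['sfreq'])          # channel count and sampling rate
print(info['ch_names'][:5])                  # first few channel names
|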
@verbose
def read_meas_info(fid, tree, verbose=None):
'Read the measurement info\n\n Parameters\n ----------\n fid : file\n Open file descriptor.\n tree : tree\n FIF tree structure.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n info : instance of mne.fiff.meas_info.Info\n Info on dataset.\n meas : dict\n Node in tree that contains the info.\n '
meas = dir_tree_find(tree, FIFF.FIFFB_MEAS)
if (len(meas) == 0):
raise ValueError('Could not find measurement data')
if (len(meas) > 1):
raise ValueError('Cannot read more than 1 measurement data')
meas = meas[0]
meas_info = dir_tree_find(meas, FIFF.FIFFB_MEAS_INFO)
if (len(meas_info) == 0):
raise ValueError('Could not find measurement info')
if (len(meas_info) > 1):
raise ValueError('Cannot read more than 1 measurement info')
meas_info = meas_info[0]
dev_head_t = None
ctf_head_t = None
meas_date = None
highpass = None
lowpass = None
nchan = None
sfreq = None
chs = []
experimenter = None
description = None
proj_id = None
proj_name = None
line_freq = None
p = 0
for k in range(meas_info['nent']):
kind = meas_info['directory'][k].kind
pos = meas_info['directory'][k].pos
if (kind == FIFF.FIFF_NCHAN):
tag = read_tag(fid, pos)
nchan = int(tag.data)
elif (kind == FIFF.FIFF_SFREQ):
tag = read_tag(fid, pos)
sfreq = float(tag.data)
elif (kind == FIFF.FIFF_CH_INFO):
tag = read_tag(fid, pos)
chs.append(tag.data)
p += 1
elif (kind == FIFF.FIFF_LOWPASS):
tag = read_tag(fid, pos)
lowpass = float(tag.data)
elif (kind == FIFF.FIFF_HIGHPASS):
tag = read_tag(fid, pos)
highpass = float(tag.data)
elif (kind == FIFF.FIFF_MEAS_DATE):
tag = read_tag(fid, pos)
meas_date = tag.data
elif (kind == FIFF.FIFF_COORD_TRANS):
tag = read_tag(fid, pos)
cand = tag.data
if ((cand['from'] == FIFF.FIFFV_COORD_DEVICE) and (cand['to'] == FIFF.FIFFV_COORD_HEAD)):
dev_head_t = cand
elif ((cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD) and (cand['to'] == FIFF.FIFFV_COORD_HEAD)):
ctf_head_t = cand
elif (kind == FIFF.FIFF_EXPERIMENTER):
tag = read_tag(fid, pos)
experimenter = tag.data
elif (kind == FIFF.FIFF_DESCRIPTION):
tag = read_tag(fid, pos)
description = tag.data
elif (kind == FIFF.FIFF_PROJ_ID):
tag = read_tag(fid, pos)
proj_id = tag.data
elif (kind == FIFF.FIFF_PROJ_NAME):
tag = read_tag(fid, pos)
proj_name = tag.data
elif (kind == FIFF.FIFF_LINE_FREQ):
tag = read_tag(fid, pos)
line_freq = float(tag.data)
if (nchan is None):
raise ValueError('Number of channels is not defined')
if (sfreq is None):
raise ValueError('Sampling frequency is not defined')
if (len(chs) == 0):
raise ValueError('Channel information not defined')
if (len(chs) != nchan):
raise ValueError('Incorrect number of channel definitions found')
if ((dev_head_t is None) or (ctf_head_t is None)):
hpi_result = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT)
if (len(hpi_result) == 1):
hpi_result = hpi_result[0]
for k in range(hpi_result['nent']):
kind = hpi_result['directory'][k].kind
pos = hpi_result['directory'][k].pos
if (kind == FIFF.FIFF_COORD_TRANS):
tag = read_tag(fid, pos)
cand = tag.data
if ((cand['from'] == FIFF.FIFFV_COORD_DEVICE) and (cand['to'] == FIFF.FIFFV_COORD_HEAD)):
dev_head_t = cand
elif ((cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD) and (cand['to'] == FIFF.FIFFV_COORD_HEAD)):
ctf_head_t = cand
isotrak = dir_tree_find(meas_info, FIFF.FIFFB_ISOTRAK)
dig = None
if (len(isotrak) == 0):
logger.info('Isotrak not found')
elif (len(isotrak) > 1):
warn('Multiple Isotrak found')
else:
isotrak = isotrak[0]
dig = []
for k in range(isotrak['nent']):
kind = isotrak['directory'][k].kind
pos = isotrak['directory'][k].pos
if (kind == FIFF.FIFF_DIG_POINT):
tag = read_tag(fid, pos)
dig.append(tag.data)
dig[(- 1)]['coord_frame'] = FIFF.FIFFV_COORD_HEAD
acqpars = dir_tree_find(meas_info, FIFF.FIFFB_DACQ_PARS)
acq_pars = None
acq_stim = None
if (len(acqpars) == 1):
acqpars = acqpars[0]
for k in range(acqpars['nent']):
kind = acqpars['directory'][k].kind
pos = acqpars['directory'][k].pos
if (kind == FIFF.FIFF_DACQ_PARS):
tag = read_tag(fid, pos)
acq_pars = tag.data
elif (kind == FIFF.FIFF_DACQ_STIM):
tag = read_tag(fid, pos)
acq_stim = tag.data
projs = read_proj(fid, meas_info)
comps = read_ctf_comp(fid, meas_info, chs)
bads = read_bad_channels(fid, meas_info)
if (tree['id'] is not None):
info = Info(file_id=tree['id'])
else:
info = Info(file_id=None)
subject_info = dir_tree_find(meas_info, FIFF.FIFFB_SUBJECT)
if (len(subject_info) == 1):
subject_info = subject_info[0]
si = dict()
for k in range(subject_info['nent']):
kind = subject_info['directory'][k].kind
pos = subject_info['directory'][k].pos
if (kind == FIFF.FIFF_SUBJ_ID):
tag = read_tag(fid, pos)
si['id'] = int(tag.data)
elif (kind == FIFF.FIFF_SUBJ_HIS_ID):
tag = read_tag(fid, pos)
si['his_id'] = str(tag.data)
elif (kind == FIFF.FIFF_SUBJ_LAST_NAME):
tag = read_tag(fid, pos)
si['last_name'] = str(tag.data)
elif (kind == FIFF.FIFF_SUBJ_FIRST_NAME):
tag = read_tag(fid, pos)
si['first_name'] = str(tag.data)
elif (kind == FIFF.FIFF_SUBJ_BIRTH_DAY):
tag = read_tag(fid, pos)
si['birthday'] = tag.data
elif (kind == FIFF.FIFF_SUBJ_SEX):
tag = read_tag(fid, pos)
si['sex'] = int(tag.data)
elif (kind == FIFF.FIFF_SUBJ_HAND):
tag = read_tag(fid, pos)
si['hand'] = int(tag.data)
else:
si = None
info['subject_info'] = si
read_extra_meas_info(fid, tree, info)
if (meas_info['parent_id'] is None):
if (meas_info['id'] is None):
if (meas['id'] is None):
if (meas['parent_id'] is None):
info['meas_id'] = info['file_id']
else:
info['meas_id'] = meas['parent_id']
else:
info['meas_id'] = meas['id']
else:
info['meas_id'] = meas_info['id']
else:
info['meas_id'] = meas_info['parent_id']
info['experimenter'] = experimenter
info['description'] = description
info['proj_id'] = proj_id
info['proj_name'] = proj_name
if (meas_date is None):
info['meas_date'] = [info['meas_id']['secs'], info['meas_id']['usecs']]
else:
info['meas_date'] = meas_date
info['nchan'] = nchan
info['sfreq'] = sfreq
info['highpass'] = (highpass if (highpass is not None) else 0)
info['lowpass'] = (lowpass if (lowpass is not None) else (info['sfreq'] / 2.0))
info['line_freq'] = line_freq
info['chs'] = chs
info['ch_names'] = [ch['ch_name'] for ch in chs]
info['dev_head_t'] = dev_head_t
info['ctf_head_t'] = ctf_head_t
if ((dev_head_t is not None) and (ctf_head_t is not None)):
head_ctf_trans = linalg.inv(ctf_head_t['trans'])
dev_ctf_trans = np.dot(head_ctf_trans, info['dev_head_t']['trans'])
info['dev_ctf_t'] = {'from': FIFF.FIFFV_COORD_DEVICE, 'to': FIFF.FIFFV_MNE_COORD_CTF_HEAD, 'trans': dev_ctf_trans}
else:
info['dev_ctf_t'] = None
info['dig'] = dig
info['bads'] = bads
info['projs'] = projs
info['comps'] = comps
info['acq_pars'] = acq_pars
info['acq_stim'] = acq_stim
return (info, meas)
| -1,168,243,709,760,774,000
|
Read the measurement info
Parameters
----------
fid : file
Open file descriptor.
tree : tree
FIF tree structure.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
info : instance of mne.fiff.meas_info.Info
Info on dataset.
meas : dict
Node in tree that contains the info.
|
mne/fiff/meas_info.py
|
read_meas_info
|
Anevar/mne-python
|
python
|
@verbose
def read_meas_info(fid, tree, verbose=None):
'Read the measurement info\n\n Parameters\n ----------\n fid : file\n Open file descriptor.\n tree : tree\n FIF tree structure.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n info : instance of mne.fiff.meas_info.Info\n Info on dataset.\n meas : dict\n Node in tree that contains the info.\n '
meas = dir_tree_find(tree, FIFF.FIFFB_MEAS)
if (len(meas) == 0):
raise ValueError('Could not find measurement data')
if (len(meas) > 1):
raise ValueError('Cannot read more than 1 measurement data')
meas = meas[0]
meas_info = dir_tree_find(meas, FIFF.FIFFB_MEAS_INFO)
if (len(meas_info) == 0):
raise ValueError('Could not find measurement info')
if (len(meas_info) > 1):
raise ValueError('Cannot read more than 1 measurement info')
meas_info = meas_info[0]
dev_head_t = None
ctf_head_t = None
meas_date = None
highpass = None
lowpass = None
nchan = None
sfreq = None
chs = []
experimenter = None
description = None
proj_id = None
proj_name = None
line_freq = None
p = 0
for k in range(meas_info['nent']):
kind = meas_info['directory'][k].kind
pos = meas_info['directory'][k].pos
if (kind == FIFF.FIFF_NCHAN):
tag = read_tag(fid, pos)
nchan = int(tag.data)
elif (kind == FIFF.FIFF_SFREQ):
tag = read_tag(fid, pos)
sfreq = float(tag.data)
elif (kind == FIFF.FIFF_CH_INFO):
tag = read_tag(fid, pos)
chs.append(tag.data)
p += 1
elif (kind == FIFF.FIFF_LOWPASS):
tag = read_tag(fid, pos)
lowpass = float(tag.data)
elif (kind == FIFF.FIFF_HIGHPASS):
tag = read_tag(fid, pos)
highpass = float(tag.data)
elif (kind == FIFF.FIFF_MEAS_DATE):
tag = read_tag(fid, pos)
meas_date = tag.data
elif (kind == FIFF.FIFF_COORD_TRANS):
tag = read_tag(fid, pos)
cand = tag.data
if ((cand['from'] == FIFF.FIFFV_COORD_DEVICE) and (cand['to'] == FIFF.FIFFV_COORD_HEAD)):
dev_head_t = cand
elif ((cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD) and (cand['to'] == FIFF.FIFFV_COORD_HEAD)):
ctf_head_t = cand
elif (kind == FIFF.FIFF_EXPERIMENTER):
tag = read_tag(fid, pos)
experimenter = tag.data
elif (kind == FIFF.FIFF_DESCRIPTION):
tag = read_tag(fid, pos)
description = tag.data
elif (kind == FIFF.FIFF_PROJ_ID):
tag = read_tag(fid, pos)
proj_id = tag.data
elif (kind == FIFF.FIFF_PROJ_NAME):
tag = read_tag(fid, pos)
proj_name = tag.data
elif (kind == FIFF.FIFF_LINE_FREQ):
tag = read_tag(fid, pos)
line_freq = float(tag.data)
if (nchan is None):
raise ValueError('Number of channels is not defined')
if (sfreq is None):
raise ValueError('Sampling frequency is not defined')
if (len(chs) == 0):
raise ValueError('Channel information not defined')
if (len(chs) != nchan):
raise ValueError('Incorrect number of channel definitions found')
if ((dev_head_t is None) or (ctf_head_t is None)):
hpi_result = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT)
if (len(hpi_result) == 1):
hpi_result = hpi_result[0]
for k in range(hpi_result['nent']):
kind = hpi_result['directory'][k].kind
pos = hpi_result['directory'][k].pos
if (kind == FIFF.FIFF_COORD_TRANS):
tag = read_tag(fid, pos)
cand = tag.data
if ((cand['from'] == FIFF.FIFFV_COORD_DEVICE) and (cand['to'] == FIFF.FIFFV_COORD_HEAD)):
dev_head_t = cand
elif ((cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD) and (cand['to'] == FIFF.FIFFV_COORD_HEAD)):
ctf_head_t = cand
isotrak = dir_tree_find(meas_info, FIFF.FIFFB_ISOTRAK)
dig = None
if (len(isotrak) == 0):
logger.info('Isotrak not found')
elif (len(isotrak) > 1):
warn('Multiple Isotrak found')
else:
isotrak = isotrak[0]
dig = []
for k in range(isotrak['nent']):
kind = isotrak['directory'][k].kind
pos = isotrak['directory'][k].pos
if (kind == FIFF.FIFF_DIG_POINT):
tag = read_tag(fid, pos)
dig.append(tag.data)
dig[(- 1)]['coord_frame'] = FIFF.FIFFV_COORD_HEAD
acqpars = dir_tree_find(meas_info, FIFF.FIFFB_DACQ_PARS)
acq_pars = None
acq_stim = None
if (len(acqpars) == 1):
acqpars = acqpars[0]
for k in range(acqpars['nent']):
kind = acqpars['directory'][k].kind
pos = acqpars['directory'][k].pos
if (kind == FIFF.FIFF_DACQ_PARS):
tag = read_tag(fid, pos)
acq_pars = tag.data
elif (kind == FIFF.FIFF_DACQ_STIM):
tag = read_tag(fid, pos)
acq_stim = tag.data
projs = read_proj(fid, meas_info)
comps = read_ctf_comp(fid, meas_info, chs)
bads = read_bad_channels(fid, meas_info)
if (tree['id'] is not None):
info = Info(file_id=tree['id'])
else:
info = Info(file_id=None)
subject_info = dir_tree_find(meas_info, FIFF.FIFFB_SUBJECT)
if (len(subject_info) == 1):
subject_info = subject_info[0]
si = dict()
for k in range(subject_info['nent']):
kind = subject_info['directory'][k].kind
pos = subject_info['directory'][k].pos
if (kind == FIFF.FIFF_SUBJ_ID):
tag = read_tag(fid, pos)
si['id'] = int(tag.data)
elif (kind == FIFF.FIFF_SUBJ_HIS_ID):
tag = read_tag(fid, pos)
si['his_id'] = str(tag.data)
elif (kind == FIFF.FIFF_SUBJ_LAST_NAME):
tag = read_tag(fid, pos)
si['last_name'] = str(tag.data)
elif (kind == FIFF.FIFF_SUBJ_FIRST_NAME):
tag = read_tag(fid, pos)
si['first_name'] = str(tag.data)
elif (kind == FIFF.FIFF_SUBJ_BIRTH_DAY):
tag = read_tag(fid, pos)
si['birthday'] = tag.data
elif (kind == FIFF.FIFF_SUBJ_SEX):
tag = read_tag(fid, pos)
si['sex'] = int(tag.data)
elif (kind == FIFF.FIFF_SUBJ_HAND):
tag = read_tag(fid, pos)
si['hand'] = int(tag.data)
else:
si = None
info['subject_info'] = si
read_extra_meas_info(fid, tree, info)
if (meas_info['parent_id'] is None):
if (meas_info['id'] is None):
if (meas['id'] is None):
if (meas['parent_id'] is None):
info['meas_id'] = info['file_id']
else:
info['meas_id'] = meas['parent_id']
else:
info['meas_id'] = meas['id']
else:
info['meas_id'] = meas_info['id']
else:
info['meas_id'] = meas_info['parent_id']
info['experimenter'] = experimenter
info['description'] = description
info['proj_id'] = proj_id
info['proj_name'] = proj_name
if (meas_date is None):
info['meas_date'] = [info['meas_id']['secs'], info['meas_id']['usecs']]
else:
info['meas_date'] = meas_date
info['nchan'] = nchan
info['sfreq'] = sfreq
info['highpass'] = (highpass if (highpass is not None) else 0)
info['lowpass'] = (lowpass if (lowpass is not None) else (info['sfreq'] / 2.0))
info['line_freq'] = line_freq
info['chs'] = chs
info['ch_names'] = [ch['ch_name'] for ch in chs]
info['dev_head_t'] = dev_head_t
info['ctf_head_t'] = ctf_head_t
if ((dev_head_t is not None) and (ctf_head_t is not None)):
head_ctf_trans = linalg.inv(ctf_head_t['trans'])
dev_ctf_trans = np.dot(head_ctf_trans, info['dev_head_t']['trans'])
info['dev_ctf_t'] = {'from': FIFF.FIFFV_COORD_DEVICE, 'to': FIFF.FIFFV_MNE_COORD_CTF_HEAD, 'trans': dev_ctf_trans}
else:
info['dev_ctf_t'] = None
info['dig'] = dig
info['bads'] = bads
info['projs'] = projs
info['comps'] = comps
info['acq_pars'] = acq_pars
info['acq_stim'] = acq_stim
return (info, meas)
|
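For completeness, a sketch of the lower-level pattern that read_info wraps; the fiff_open import path and the file name are assumptions based on this module's vintage of mne.
from mne.fiff.open import fiff_open                  # assumed module path
from mne.fiff.meas_info import read_meas_info

fid, tree, _ = fiff_open('sample_audvis_raw.fif')    # hypothetical file name
with fid:
    info, meas = read_meas_info(fid, tree)
print(info['meas_date'], len(info['chs']))
|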
def read_extra_meas_info(fid, tree, info):
'Read extra blocks from fid'
blocks = [FIFF.FIFFB_EVENTS, FIFF.FIFFB_HPI_RESULT, FIFF.FIFFB_HPI_MEAS, FIFF.FIFFB_PROCESSING_HISTORY]
info['orig_blocks'] = blocks
fid_str = BytesIO()
fid_str = start_file(fid_str)
start_block(fid_str, FIFF.FIFFB_MEAS_INFO)
for block in blocks:
nodes = dir_tree_find(tree, block)
copy_tree(fid, tree['id'], nodes, fid_str)
info['orig_fid_str'] = fid_str
| -7,852,157,372,996,325,000
|
Read extra blocks from fid
|
mne/fiff/meas_info.py
|
read_extra_meas_info
|
Anevar/mne-python
|
python
|
def read_extra_meas_info(fid, tree, info):
blocks = [FIFF.FIFFB_EVENTS, FIFF.FIFFB_HPI_RESULT, FIFF.FIFFB_HPI_MEAS, FIFF.FIFFB_PROCESSING_HISTORY]
info['orig_blocks'] = blocks
fid_str = BytesIO()
fid_str = start_file(fid_str)
start_block(fid_str, FIFF.FIFFB_MEAS_INFO)
for block in blocks:
nodes = dir_tree_find(tree, block)
copy_tree(fid, tree['id'], nodes, fid_str)
info['orig_fid_str'] = fid_str
|
def write_extra_meas_info(fid, info):
'Write otherwise left out blocks of data'
if (('orig_blocks' in info) and (info['orig_blocks'] is not None)):
blocks = info['orig_blocks']
(fid_str, tree, _) = fiff_open(info['orig_fid_str'])
for block in blocks:
nodes = dir_tree_find(tree, block)
copy_tree(fid_str, tree['id'], nodes, fid)
| 1,894,005,886,610,068,200
|
Write otherwise left out blocks of data
|
mne/fiff/meas_info.py
|
write_extra_meas_info
|
Anevar/mne-python
|
python
|
def write_extra_meas_info(fid, info):
if (('orig_blocks' in info) and (info['orig_blocks'] is not None)):
blocks = info['orig_blocks']
(fid_str, tree, _) = fiff_open(info['orig_fid_str'])
for block in blocks:
nodes = dir_tree_find(tree, block)
copy_tree(fid_str, tree['id'], nodes, fid)
|
def write_meas_info(fid, info, data_type=None, reset_range=True):
"Write measurement info into a file id (from a fif file)\n\n Parameters\n ----------\n fid : file\n Open file descriptor\n info : instance of mne.fiff.meas_info.Info\n The measurement info structure\n data_type : int\n The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),\n 5 (FIFFT_DOUBLE), or 16 (mne.fiff.FIFF.FIFFT_DAU_PACK16) for\n raw data.\n reset_range : bool\n If True, info['chs'][k]['range'] will be set to unity.\n\n Note\n ----\n Tags are written in a particular order for compatibility with maxfilter\n "
start_block(fid, FIFF.FIFFB_MEAS_INFO)
write_extra_meas_info(fid, info)
if (info['dig'] is not None):
start_block(fid, FIFF.FIFFB_ISOTRAK)
for d in info['dig']:
write_dig_point(fid, d)
end_block(fid, FIFF.FIFFB_ISOTRAK)
if ((info['acq_pars'] is not None) or (info['acq_stim'] is not None)):
start_block(fid, FIFF.FIFFB_DACQ_PARS)
if (info['acq_pars'] is not None):
write_string(fid, FIFF.FIFF_DACQ_PARS, info['acq_pars'])
if (info['acq_stim'] is not None):
write_string(fid, FIFF.FIFF_DACQ_STIM, info['acq_stim'])
end_block(fid, FIFF.FIFFB_DACQ_PARS)
if (info['dev_head_t'] is not None):
write_coord_trans(fid, info['dev_head_t'])
if (info['ctf_head_t'] is not None):
write_coord_trans(fid, info['ctf_head_t'])
write_proj(fid, info['projs'])
write_ctf_comp(fid, info['comps'])
if (len(info['bads']) > 0):
start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads'])
end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
if (info.get('experimenter') is not None):
write_string(fid, FIFF.FIFF_EXPERIMENTER, info['experimenter'])
if (info.get('description') is not None):
write_string(fid, FIFF.FIFF_DESCRIPTION, info['description'])
if (info.get('proj_id') is not None):
write_int(fid, FIFF.FIFF_PROJ_ID, info['proj_id'])
if (info.get('proj_name') is not None):
write_string(fid, FIFF.FIFF_PROJ_NAME, info['proj_name'])
if (info.get('meas_date') is not None):
write_int(fid, FIFF.FIFF_MEAS_DATE, info['meas_date'])
write_int(fid, FIFF.FIFF_NCHAN, info['nchan'])
write_float(fid, FIFF.FIFF_SFREQ, info['sfreq'])
write_float(fid, FIFF.FIFF_LOWPASS, info['lowpass'])
write_float(fid, FIFF.FIFF_HIGHPASS, info['highpass'])
if (info.get('line_freq') is not None):
write_float(fid, FIFF.FIFF_LINE_FREQ, info['line_freq'])
if (data_type is not None):
write_int(fid, FIFF.FIFF_DATA_PACK, data_type)
for (k, c) in enumerate(info['chs']):
c = deepcopy(c)
c['scanno'] = (k + 1)
if (reset_range is True):
c['range'] = 1.0
write_ch_info(fid, c)
if (info.get('subject_info') is not None):
start_block(fid, FIFF.FIFFB_SUBJECT)
si = info['subject_info']
if (si.get('id') is not None):
write_int(fid, FIFF.FIFF_SUBJ_ID, si['id'])
if (si.get('his_id') is not None):
write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, si['his_id'])
if (si.get('last_name') is not None):
write_string(fid, FIFF.FIFF_SUBJ_LAST_NAME, si['last_name'])
if (si.get('first_name') is not None):
write_string(fid, FIFF.FIFF_SUBJ_FIRST_NAME, si['first_name'])
if (si.get('birthday') is not None):
write_julian(fid, FIFF.FIFF_SUBJ_BIRTH_DAY, si['birthday'])
if (si.get('sex') is not None):
write_int(fid, FIFF.FIFF_SUBJ_SEX, si['sex'])
if (si.get('hand') is not None):
write_int(fid, FIFF.FIFF_SUBJ_HAND, si['hand'])
end_block(fid, FIFF.FIFFB_SUBJECT)
end_block(fid, FIFF.FIFFB_MEAS_INFO)
| -3,615,014,654,560,701,400
|
Write measurement info into a file id (from a fif file)
Parameters
----------
fid : file
Open file descriptor
info : instance of mne.fiff.meas_info.Info
The measurement info structure
data_type : int
The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
5 (FIFFT_DOUBLE), or 16 (mne.fiff.FIFF.FIFFT_DAU_PACK16) for
raw data.
reset_range : bool
If True, info['chs'][k]['range'] will be set to unity.
Note
----
Tags are written in a particular order for compatibility with maxfilter
|
mne/fiff/meas_info.py
|
write_meas_info
|
Anevar/mne-python
|
python
|
def write_meas_info(fid, info, data_type=None, reset_range=True):
"Write measurement info into a file id (from a fif file)\n\n Parameters\n ----------\n fid : file\n Open file descriptor\n info : instance of mne.fiff.meas_info.Info\n The measurement info structure\n data_type : int\n The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),\n 5 (FIFFT_DOUBLE), or 16 (mne.fiff.FIFF.FIFFT_DAU_PACK16) for\n raw data.\n reset_range : bool\n If True, info['chs'][k]['range'] will be set to unity.\n\n Note\n ----\n Tags are written in a particular order for compatibility with maxfilter\n "
start_block(fid, FIFF.FIFFB_MEAS_INFO)
write_extra_meas_info(fid, info)
if (info['dig'] is not None):
start_block(fid, FIFF.FIFFB_ISOTRAK)
for d in info['dig']:
write_dig_point(fid, d)
end_block(fid, FIFF.FIFFB_ISOTRAK)
if ((info['acq_pars'] is not None) or (info['acq_stim'] is not None)):
start_block(fid, FIFF.FIFFB_DACQ_PARS)
if (info['acq_pars'] is not None):
write_string(fid, FIFF.FIFF_DACQ_PARS, info['acq_pars'])
if (info['acq_stim'] is not None):
write_string(fid, FIFF.FIFF_DACQ_STIM, info['acq_stim'])
end_block(fid, FIFF.FIFFB_DACQ_PARS)
if (info['dev_head_t'] is not None):
write_coord_trans(fid, info['dev_head_t'])
if (info['ctf_head_t'] is not None):
write_coord_trans(fid, info['ctf_head_t'])
write_proj(fid, info['projs'])
write_ctf_comp(fid, info['comps'])
if (len(info['bads']) > 0):
start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads'])
end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
if (info.get('experimenter') is not None):
write_string(fid, FIFF.FIFF_EXPERIMENTER, info['experimenter'])
if (info.get('description') is not None):
write_string(fid, FIFF.FIFF_DESCRIPTION, info['description'])
if (info.get('proj_id') is not None):
write_int(fid, FIFF.FIFF_PROJ_ID, info['proj_id'])
if (info.get('proj_name') is not None):
write_string(fid, FIFF.FIFF_PROJ_NAME, info['proj_name'])
if (info.get('meas_date') is not None):
write_int(fid, FIFF.FIFF_MEAS_DATE, info['meas_date'])
write_int(fid, FIFF.FIFF_NCHAN, info['nchan'])
write_float(fid, FIFF.FIFF_SFREQ, info['sfreq'])
write_float(fid, FIFF.FIFF_LOWPASS, info['lowpass'])
write_float(fid, FIFF.FIFF_HIGHPASS, info['highpass'])
if (info.get('line_freq') is not None):
write_float(fid, FIFF.FIFF_LINE_FREQ, info['line_freq'])
if (data_type is not None):
write_int(fid, FIFF.FIFF_DATA_PACK, data_type)
for (k, c) in enumerate(info['chs']):
c = deepcopy(c)
c['scanno'] = (k + 1)
if (reset_range is True):
c['range'] = 1.0
write_ch_info(fid, c)
if (info.get('subject_info') is not None):
start_block(fid, FIFF.FIFFB_SUBJECT)
si = info['subject_info']
if (si.get('id') is not None):
write_int(fid, FIFF.FIFF_SUBJ_ID, si['id'])
if (si.get('his_id') is not None):
write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, si['his_id'])
if (si.get('last_name') is not None):
write_string(fid, FIFF.FIFF_SUBJ_LAST_NAME, si['last_name'])
if (si.get('first_name') is not None):
write_string(fid, FIFF.FIFF_SUBJ_FIRST_NAME, si['first_name'])
if (si.get('birthday') is not None):
write_julian(fid, FIFF.FIFF_SUBJ_BIRTH_DAY, si['birthday'])
if (si.get('sex') is not None):
write_int(fid, FIFF.FIFF_SUBJ_SEX, si['sex'])
if (si.get('hand') is not None):
write_int(fid, FIFF.FIFF_SUBJ_HAND, si['hand'])
end_block(fid, FIFF.FIFFB_SUBJECT)
end_block(fid, FIFF.FIFFB_MEAS_INFO)
|
def write_info(fname, info, data_type=None, reset_range=True):
"Write measurement info in fif file.\n\n Parameters\n ----------\n fname : str\n The name of the file. Should end by -info.fif.\n info : instance of mne.fiff.meas_info.Info\n The measurement info structure\n data_type : int\n The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),\n 5 (FIFFT_DOUBLE), or 16 (mne.fiff.FIFF.FIFFT_DAU_PACK16) for\n raw data.\n reset_range : bool\n If True, info['chs'][k]['range'] will be set to unity.\n "
fid = start_file(fname)
start_block(fid, FIFF.FIFFB_MEAS)
write_meas_info(fid, info, data_type, reset_range)
end_block(fid, FIFF.FIFFB_MEAS)
end_file(fid)
| -2,834,309,715,596,339,700
|
Write measurement info in fif file.
Parameters
----------
fname : str
The name of the file. Should end by -info.fif.
info : instance of mne.fiff.meas_info.Info
The measurement info structure
data_type : int
The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
5 (FIFFT_DOUBLE), or 16 (mne.fiff.FIFF.FIFFT_DAU_PACK16) for
raw data.
reset_range : bool
If True, info['chs'][k]['range'] will be set to unity.
|
mne/fiff/meas_info.py
|
write_info
|
Anevar/mne-python
|
python
|
def write_info(fname, info, data_type=None, reset_range=True):
"Write measurement info in fif file.\n\n Parameters\n ----------\n fname : str\n The name of the file. Should end by -info.fif.\n info : instance of mne.fiff.meas_info.Info\n The measurement info structure\n data_type : int\n The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),\n 5 (FIFFT_DOUBLE), or 16 (mne.fiff.FIFF.FIFFT_DAU_PACK16) for\n raw data.\n reset_range : bool\n If True, info['chs'][k]['range'] will be set to unity.\n "
fid = start_file(fname)
start_block(fid, FIFF.FIFFB_MEAS)
write_meas_info(fid, info, data_type, reset_range)
end_block(fid, FIFF.FIFFB_MEAS)
end_file(fid)
|
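A round-trip sketch combining read_info and write_info; the file names are illustrative.
from mne.fiff.meas_info import read_info, write_info

info = read_info('sample_audvis_raw.fif')       # hypothetical source file
write_info('sample_audvis-info.fif', info)      # destination should end with -info.fif
|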
def __repr__(self):
'Summarize info instead of printing all'
strs = ['<Info | %s non-empty fields']
non_empty = 0
for (k, v) in self.items():
if (k in ['bads', 'ch_names']):
entr = (', '.join((b for (ii, b) in enumerate(v) if (ii < 10))) if v else '0 items')
if (len(entr) >= 56):
entr = _summarize_str(entr)
elif ((k == 'filename') and v):
(path, fname) = op.split(v)
entr = ((path[:10] + '.../') + fname)
elif ((k == 'projs') and v):
entr = ', '.join(((p['desc'] + (': o%s' % {0: 'ff', 1: 'n'}[p['active']])) for p in v))
if (len(entr) >= 56):
entr = _summarize_str(entr)
elif ((k == 'meas_date') and np.iterable(v)):
entr = dt.fromtimestamp(v[0]).strftime('%Y-%m-%d %H:%M:%S')
else:
this_len = (len(v) if hasattr(v, '__len__') else (('%s' % v) if (v is not None) else None))
entr = (('%d items' % this_len) if isinstance(this_len, int) else (('%s' % this_len) if this_len else ''))
if entr:
non_empty += 1
entr = (' | ' + entr)
strs.append(('%s : %s%s' % (k, str(type(v))[7:(- 2)], entr)))
strs_non_empty = sorted((s for s in strs if ('|' in s)))
strs_empty = sorted((s for s in strs if ('|' not in s)))
st = '\n '.join((strs_non_empty + strs_empty))
st += '\n>'
st %= non_empty
return st
| -5,143,204,878,215,623,000
|
Summarize info instead of printing all
|
mne/fiff/meas_info.py
|
__repr__
|
Anevar/mne-python
|
python
|
def __repr__(self):
strs = ['<Info | %s non-empty fields']
non_empty = 0
for (k, v) in self.items():
if (k in ['bads', 'ch_names']):
entr = (', '.join((b for (ii, b) in enumerate(v) if (ii < 10))) if v else '0 items')
if (len(entr) >= 56):
entr = _summarize_str(entr)
elif ((k == 'filename') and v):
(path, fname) = op.split(v)
entr = ((path[:10] + '.../') + fname)
elif ((k == 'projs') and v):
entr = ', '.join(((p['desc'] + (': o%s' % {0: 'ff', 1: 'n'}[p['active']])) for p in v))
if (len(entr) >= 56):
entr = _summarize_str(entr)
elif ((k == 'meas_date') and np.iterable(v)):
entr = dt.fromtimestamp(v[0]).strftime('%Y-%m-%d %H:%M:%S')
else:
this_len = (len(v) if hasattr(v, '__len__') else (('%s' % v) if (v is not None) else None))
entr = (('%d items' % this_len) if isinstance(this_len, int) else (('%s' % this_len) if this_len else ''))
if entr:
non_empty += 1
entr = (' | ' + entr)
strs.append(('%s : %s%s' % (k, str(type(v))[7:(- 2)], entr)))
strs_non_empty = sorted((s for s in strs if ('|' in s)))
strs_empty = sorted((s for s in strs if ('|' not in s)))
st = '\n '.join((strs_non_empty + strs_empty))
st += '\n>'
st %= non_empty
return st
|
def generate_code(root_path, gen_dict=None):
'Generate pyleecan Classes code according to doc in root_path\n\n Parameters\n ----------\n root_path : str\n Path to the main folder of Pyleecan\n gen_dict : dict\n Generation dictionnary (contains all the csv data)\n Returns\n -------\n None\n '
CLASS_DIR = join(root_path, 'Classes')
FUNC_DIR = join(root_path, 'Functions')
DOC_DIR = join(root_path, 'Generator', 'ClassesRef')
print(('Reading classes csv in: ' + DOC_DIR))
print(('Saving generated files in: ' + CLASS_DIR))
path = __file__[__file__.index(package_name):]
path = path.replace('\\', '/')
print('Deleting old class files...')
for file_name in listdir(CLASS_DIR):
if (file_name[0] != '_'):
remove(join(CLASS_DIR, file_name))
import_file = open(join(CLASS_DIR, 'import_all.py'), 'w')
import_file.write('# -*- coding: utf-8 -*-\n\n')
import_file.write('"""File generated by generate_code() - \n')
import_file.write('WARNING! All changes made in this file will be lost!\n"""\n\n')
load_file = open(join(FUNC_DIR, 'load_switch.py'), 'w')
load_file.write('# -*- coding: utf-8 -*-\n')
load_file.write('"""File generated by generate_code() - \n')
load_file.write('WARNING! All changes made in this file will be lost!\n"""\n\n')
load_file.write('from ..Classes.import_all import *\n\n')
load_file.write('load_switch = {\n')
if (gen_dict is None):
gen_dict = read_all(DOC_DIR)
for (class_name, _) in iter(sorted(list(gen_dict.items()))):
import_file.write((((('from ..Classes.' + class_name) + ' import ') + class_name) + '\n'))
load_file.write(((((' "' + class_name) + '": ') + class_name) + ',\n'))
print((('Generation of ' + class_name) + ' class'))
generate_class(gen_dict, class_name, CLASS_DIR)
import_file.close()
load_file.write('}\n')
load_file.close()
print('Generation of load_switch.py')
print('Generation of import_all.py')
class_dict_file = join(CLASS_DIR, 'Class_Dict.json')
with open(class_dict_file, 'w') as json_file:
json.dump(gen_dict, json_file, sort_keys=True, indent=4, separators=(',', ': '))
| -3,105,398,278,533,187,000
|
Generate pyleecan Classes code according to doc in root_path
Parameters
----------
root_path : str
Path to the main folder of Pyleecan
gen_dict : dict
Generation dictionary (contains all the csv data)
Returns
-------
None
|
pyleecan/Generator/run_generate_classes.py
|
generate_code
|
IrakozeFD/pyleecan
|
python
|
def generate_code(root_path, gen_dict=None):
'Generate pyleecan Classes code according to doc in root_path\n\n Parameters\n ----------\n root_path : str\n Path to the main folder of Pyleecan\n gen_dict : dict\n Generation dictionnary (contains all the csv data)\n Returns\n -------\n None\n '
CLASS_DIR = join(root_path, 'Classes')
FUNC_DIR = join(root_path, 'Functions')
DOC_DIR = join(root_path, 'Generator', 'ClassesRef')
print(('Reading classes csv in: ' + DOC_DIR))
print(('Saving generated files in: ' + CLASS_DIR))
path = __file__[__file__.index(package_name):]
path = path.replace('\\', '/')
print('Deleting old class files...')
for file_name in listdir(CLASS_DIR):
if (file_name[0] != '_'):
remove(join(CLASS_DIR, file_name))
import_file = open(join(CLASS_DIR, 'import_all.py'), 'w')
import_file.write('# -*- coding: utf-8 -*-\n\n')
import_file.write('"File generated by generate_code() - \n')
import_file.write('WARNING! All changes made in this file will be lost!\n"\n\n')
load_file = open(join(FUNC_DIR, 'load_switch.py'), 'w')
load_file.write('# -*- coding: utf-8 -*-\n')
load_file.write('"File generated by generate_code() - \n')
load_file.write('WARNING! All changes made in this file will be lost!\n"\n\n')
load_file.write('from ..Classes.import_all import *\n\n')
load_file.write('load_switch = {\n')
if (gen_dict is None):
gen_dict = read_all(DOC_DIR)
for (class_name, _) in iter(sorted(list(gen_dict.items()))):
import_file.write((((('from ..Classes.' + class_name) + ' import ') + class_name) + '\n'))
load_file.write(((((' "' + class_name) + '": ') + class_name) + ',\n'))
print((('Generation of ' + class_name) + ' class'))
generate_class(gen_dict, class_name, CLASS_DIR)
import_file.close()
load_file.write('}\n')
load_file.close()
print('Generation of load_switch.py')
print('Generation of import_all.py')
class_dict_file = join(CLASS_DIR, 'Class_Dict.json')
with open(class_dict_file, 'w') as json_file:
json.dump(gen_dict, json_file, sort_keys=True, indent=4, separators=(',', ': '))
|
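A driver sketch for generate_code; the import follows the module path listed in this record, and the root path argument is illustrative.
from pyleecan.Generator.run_generate_classes import generate_code

# Regenerates Classes/, Functions/load_switch.py and Classes/import_all.py
# from the csv files under Generator/ClassesRef.
generate_code('/path/to/pyleecan')
|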
@property
def action_space(self):
'See class definition.'
max_decel = self.env_params.additional_params['max_decel']
max_accel = self.env_params.additional_params['max_accel']
lb = ([1, (- 0.2)] * self.num_rl)
ub = ([2, 0.2] * self.num_rl)
return Box(np.array(lb), np.array(ub), dtype=np.float32)
| 54,758,527,748,066,650
|
See class definition.
|
traci_pedestrian_crossing/movexy_ped.py
|
action_space
|
KarlRong/Safe-RL-for-Driving
|
python
|
@property
def action_space(self):
max_decel = self.env_params.additional_params['max_decel']
max_accel = self.env_params.additional_params['max_accel']
lb = ([1, (- 0.2)] * self.num_rl)
ub = ([2, 0.2] * self.num_rl)
return Box(np.array(lb), np.array(ub), dtype=np.float32)
|
@property
def observation_space(self):
'See class definition.'
return Box(low=(- 1000), high=3000, shape=((((4 * self.num_rl) * self.num_lanes) + (2 * self.num_rl)),), dtype=np.float32)
| 5,053,630,444,488,890,000
|
See class definition.
|
traci_pedestrian_crossing/movexy_ped.py
|
observation_space
|
KarlRong/Safe-RL-for-Driving
|
python
|
@property
def observation_space(self):
return Box(low=(- 1000), high=3000, shape=((((4 * self.num_rl) * self.num_lanes) + (2 * self.num_rl)),), dtype=np.float32)
|
def compute_reward(self, rl_actions, **kwargs):
'See class definition.'
reward = 0
rl_velocity = np.array(self.k.vehicle.get_speed(self.rl_veh))
target_vel = self.env_params.additional_params['target_velocity']
max_cost = np.array(([target_vel] * self.num_rl))
max_cost = np.linalg.norm(max_cost)
cost = (rl_velocity - target_vel)
cost = np.linalg.norm(cost)
eps = np.finfo(np.float32).eps
reward += (max((max_cost - cost), 0) / (max_cost + eps))
gain = 0.5
thresh = 0.3
penalize = len(rl_velocity[(rl_velocity < thresh)])
reward -= (gain * penalize)
for veh_id in self.rl_veh:
if (self.k.vehicle.get_last_lc(veh_id) == self.time_counter):
reward -= 10
if self.stuck:
reward -= 100
return reward
| 589,851,366,946,258,200
|
See class definition.
|
traci_pedestrian_crossing/movexy_ped.py
|
compute_reward
|
KarlRong/Safe-RL-for-Driving
|
python
|
def compute_reward(self, rl_actions, **kwargs):
reward = 0
rl_velocity = np.array(self.k.vehicle.get_speed(self.rl_veh))
target_vel = self.env_params.additional_params['target_velocity']
max_cost = np.array(([target_vel] * self.num_rl))
max_cost = np.linalg.norm(max_cost)
cost = (rl_velocity - target_vel)
cost = np.linalg.norm(cost)
eps = np.finfo(np.float32).eps
reward += (max((max_cost - cost), 0) / (max_cost + eps))
gain = 0.5
thresh = 0.3
penalize = len(rl_velocity[(rl_velocity < thresh)])
reward -= (gain * penalize)
for veh_id in self.rl_veh:
if (self.k.vehicle.get_last_lc(veh_id) == self.time_counter):
reward -= 10
if self.stuck:
reward -= 100
return reward
|
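To make the reward arithmetic concrete, a worked example of the speed term with assumed values (two RL vehicles, target velocity 10 m/s):
import numpy as np

target_vel = 10.0
rl_velocity = np.array([8.0, 9.0])                   # speeds of two RL vehicles
max_cost = np.linalg.norm([target_vel] * 2)          # ~14.142
cost = np.linalg.norm(rl_velocity - target_vel)      # ~2.236
eps = np.finfo(np.float32).eps
speed_term = max(max_cost - cost, 0) / (max_cost + eps)
print(speed_term)                                    # ~0.842
# From this, 0.5 is subtracted per RL vehicle slower than 0.3 m/s,
# 10 per lane change this step, and 100 if the episode is stuck.
|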
def _apply_rl_actions(self, actions):
'See class definition.'
acceleration = actions[::2]
direction = actions[1::2]
for (i, veh_id) in enumerate(self.rl_veh):
if (self.time_counter <= (self.env_params.additional_params['lane_change_duration'] + self.k.vehicle.get_last_lc(veh_id))):
direction[i] = 0
(x, y) = self.k.vehicle.kernel_api.vehicle.getPosition(veh_id)
print(x, y)
print('edgeID', self.k.vehicle.get_edge(veh_id))
print('lane', self.k.vehicle.get_lane(veh_id))
self.k.vehicle.kernel_api.vehicle.moveToXY(vehID=veh_id, edgeID='highway_1', lane=1, x=(x + acceleration[i]), y=(y + direction[i]), keepRoute=2)
for x in np.nditer(direction, op_flags=['readwrite']):
if (x > 0.7):
x[...] = 1
elif (x < (- 0.7)):
x[...] = (- 1)
else:
x[...] = 0
| 3,311,372,121,974,978,600
|
See class definition.
|
traci_pedestrian_crossing/movexy_ped.py
|
_apply_rl_actions
|
KarlRong/Safe-RL-for-Driving
|
python
|
def _apply_rl_actions(self, actions):
acceleration = actions[::2]
direction = actions[1::2]
for (i, veh_id) in enumerate(self.rl_veh):
if (self.time_counter <= (self.env_params.additional_params['lane_change_duration'] + self.k.vehicle.get_last_lc(veh_id))):
direction[i] = 0
(x, y) = self.k.vehicle.kernel_api.vehicle.getPosition(veh_id)
print(x, y)
print('edgeID', self.k.vehicle.get_edge(veh_id))
print('lane', self.k.vehicle.get_lane(veh_id))
self.k.vehicle.kernel_api.vehicle.moveToXY(vehID=veh_id, edgeID='highway_1', lane=1, x=(x + acceleration[i]), y=(y + direction[i]), keepRoute=2)
for x in np.nditer(direction, op_flags=['readwrite']):
if (x > 0.7):
x[...] = 1
elif (x < (- 0.7)):
x[...] = (- 1)
else:
x[...] = 0
|
def get_state(self):
'See class definition.'
obs = [0 for _ in range((((4 * self.num_rl) * self.num_lanes) + (2 * self.num_rl)))]
self.visible = []
self.update_veh_id()
speeds = []
for (i, rl_id) in enumerate(self.rl_veh):
x = self.k.vehicle.get_x_by_id(rl_id)
if (x == (- 1001)):
continue
speed = self.k.vehicle.get_speed(rl_id)
obs[(((- 2) * i) - 1)] = speed
speeds.append(speed)
obs[(((- 2) * i) - 2)] = x
max_length = self.k.network.length()
max_speed = self.k.network.max_speed()
headway = ([1] * self.num_lanes)
tailway = ([1] * self.num_lanes)
vel_in_front = ([0] * self.num_lanes)
vel_behind = ([0] * self.num_lanes)
lane_leaders = self.k.vehicle.get_lane_leaders(rl_id)
lane_followers = self.k.vehicle.get_lane_followers(rl_id)
lane_headways = self.k.vehicle.get_lane_headways(rl_id)
lane_tailways = self.k.vehicle.get_lane_tailways(rl_id)
headway[0:len(lane_headways)] = lane_headways
tailway[0:len(lane_tailways)] = lane_tailways
for (j, lane_leader) in enumerate(lane_leaders):
if (lane_leader != ''):
lane_headways[j] /= max_length
vel_in_front[j] = (self.k.vehicle.get_speed(lane_leader) / max_speed)
self.visible.extend([lane_leader])
for (j, lane_follower) in enumerate(lane_followers):
if (lane_follower != ''):
lane_headways[j] /= max_length
vel_behind[j] = (self.k.vehicle.get_speed(lane_follower) / max_speed)
self.visible.extend([lane_follower])
obs[((4 * self.num_lanes) * i):((4 * self.num_lanes) * (i + 1))] = np.concatenate((headway, tailway, vel_in_front, vel_behind))
obs = np.array(obs)
np.clip(obs, (- 1000), 3000, out=obs)
return obs
| -5,605,300,636,699,024,000
|
See class definition.
|
traci_pedestrian_crossing/movexy_ped.py
|
get_state
|
KarlRong/Safe-RL-for-Driving
|
python
|
def get_state(self):
obs = [0 for _ in range((((4 * self.num_rl) * self.num_lanes) + (2 * self.num_rl)))]
self.visible = []
self.update_veh_id()
speeds = []
for (i, rl_id) in enumerate(self.rl_veh):
x = self.k.vehicle.get_x_by_id(rl_id)
if (x == (- 1001)):
continue
speed = self.k.vehicle.get_speed(rl_id)
obs[(((- 2) * i) - 1)] = speed
speeds.append(speed)
obs[(((- 2) * i) - 2)] = x
max_length = self.k.network.length()
max_speed = self.k.network.max_speed()
headway = ([1] * self.num_lanes)
tailway = ([1] * self.num_lanes)
vel_in_front = ([0] * self.num_lanes)
vel_behind = ([0] * self.num_lanes)
lane_leaders = self.k.vehicle.get_lane_leaders(rl_id)
lane_followers = self.k.vehicle.get_lane_followers(rl_id)
lane_headways = self.k.vehicle.get_lane_headways(rl_id)
lane_tailways = self.k.vehicle.get_lane_tailways(rl_id)
headway[0:len(lane_headways)] = lane_headways
tailway[0:len(lane_tailways)] = lane_tailways
for (j, lane_leader) in enumerate(lane_leaders):
if (lane_leader != ''):
lane_headways[j] /= max_length
vel_in_front[j] = (self.k.vehicle.get_speed(lane_leader) / max_speed)
self.visible.extend([lane_leader])
for (j, lane_follower) in enumerate(lane_followers):
if (lane_follower != ''):
lane_headways[j] /= max_length
vel_behind[j] = (self.k.vehicle.get_speed(lane_follower) / max_speed)
self.visible.extend([lane_follower])
obs[((4 * self.num_lanes) * i):((4 * self.num_lanes) * (i + 1))] = np.concatenate((headway, tailway, vel_in_front, vel_behind))
obs = np.array(obs)
np.clip(obs, (- 1000), 3000, out=obs)
return obs
|
def checkWaitingPersons(self):
'check whether a person has requested to cross the street'
for edge in self.WALKINGAREAS:
peds = self.k.kernel_api.edge.getLastStepPersonIDs(edge)
for ped in peds:
if ((self.k.kernel_api.person.getWaitingTime(ped) == 1) and (self.k.kernel_api.person.getNextEdge(ped) in self.CROSSINGS)):
numWaiting = self.k.kernel_api.trafficlight.getServedPersonCount(self.TLSID, self.PEDESTRIAN_GREEN_PHASE)
print(('%s: pedestrian %s pushes the button (waiting: %s)' % (self.k.kernel_api.simulation.getTime(), ped, numWaiting)))
return True
return False
| 3,743,804,071,605,469,000
|
check whether a person has requested to cross the street
|
traci_pedestrian_crossing/movexy_ped.py
|
checkWaitingPersons
|
KarlRong/Safe-RL-for-Driving
|
python
|
def checkWaitingPersons(self):
for edge in self.WALKINGAREAS:
peds = self.k.kernel_api.edge.getLastStepPersonIDs(edge)
for ped in peds:
if ((self.k.kernel_api.person.getWaitingTime(ped) == 1) and (self.k.kernel_api.person.getNextEdge(ped) in self.CROSSINGS)):
numWaiting = self.k.kernel_api.trafficlight.getServedPersonCount(self.TLSID, self.PEDESTRIAN_GREEN_PHASE)
print(('%s: pedestrian %s pushes the button (waiting: %s)' % (self.k.kernel_api.simulation.getTime(), ped, numWaiting)))
return True
return False
|
def step(self, rl_actions):
"Advance the environment by one step.\n\n Assigns actions to autonomous and human-driven agents (i.e. vehicles,\n traffic lights, etc...). Actions that are not assigned are left to the\n control of the simulator. The actions are then used to advance the\n simulator by the number of time steps requested per environment step.\n\n Results from the simulations are processed through various classes,\n such as the Vehicle and TrafficLight kernels, to produce standardized\n methods for identifying specific network state features. Finally,\n results from the simulator are used to generate appropriate\n observations.\n\n Parameters\n ----------\n rl_actions : array_like\n an list of actions provided by the rl algorithm\n\n Returns\n -------\n observation : array_like\n agent's observation of the current environment\n reward : float\n amount of reward associated with the previous state/action pair\n done : bool\n indicates whether the episode has ended\n info : dict\n contains other diagnostic information from the previous action\n "
for _ in range(self.env_params.sims_per_step):
self.time_counter += 1
self.step_counter += 1
if (len(self.k.vehicle.get_controlled_ids()) > 0):
accel = []
for veh_id in self.k.vehicle.get_controlled_ids():
action = self.k.vehicle.get_acc_controller(veh_id).get_action(self)
accel.append(action)
self.k.vehicle.apply_acceleration(self.k.vehicle.get_controlled_ids(), accel)
if (len(self.k.vehicle.get_controlled_lc_ids()) > 0):
direction = []
for veh_id in self.k.vehicle.get_controlled_lc_ids():
target_lane = self.k.vehicle.get_lane_changing_controller(veh_id).get_action(self)
direction.append(target_lane)
self.k.vehicle.apply_lane_change(self.k.vehicle.get_controlled_lc_ids(), direction=direction)
routing_ids = []
routing_actions = []
for veh_id in self.k.vehicle.get_ids():
if (self.k.vehicle.get_routing_controller(veh_id) is not None):
routing_ids.append(veh_id)
route_contr = self.k.vehicle.get_routing_controller(veh_id)
routing_actions.append(route_contr.choose_route(self))
self.k.vehicle.choose_routes(routing_ids, routing_actions)
self.apply_rl_actions(rl_actions)
self.additional_command()
self.k.simulation.simulation_step()
self.k.update(reset=False)
if self.sim_params.render:
self.k.vehicle.update_vehicle_colors()
crash = self.k.simulation.check_collision()
if crash:
break
self.render()
states = self.get_state()
self.state = np.asarray(states).T
next_observation = np.copy(states)
done = ((self.time_counter >= (self.env_params.warmup_steps + self.env_params.horizon)) or self.stuck)
if done:
print('done')
if self.stuck:
print('stuck')
else:
print('time up')
infos = {}
if self.env_params.clip_actions:
rl_clipped = self.clip_actions(rl_actions)
reward = self.compute_reward(rl_clipped, fail=crash)
else:
reward = self.compute_reward(rl_actions, fail=crash)
return (next_observation, reward, done, infos)
| 2,799,618,293,451,251,000
|
Advance the environment by one step.
Assigns actions to autonomous and human-driven agents (i.e. vehicles,
traffic lights, etc...). Actions that are not assigned are left to the
control of the simulator. The actions are then used to advance the
simulator by the number of time steps requested per environment step.
Results from the simulations are processed through various classes,
such as the Vehicle and TrafficLight kernels, to produce standardized
methods for identifying specific network state features. Finally,
results from the simulator are used to generate appropriate
observations.
Parameters
----------
rl_actions : array_like
a list of actions provided by the rl algorithm
Returns
-------
observation : array_like
agent's observation of the current environment
reward : float
amount of reward associated with the previous state/action pair
done : bool
indicates whether the episode has ended
info : dict
contains other diagnostic information from the previous action
|
traci_pedestrian_crossing/movexy_ped.py
|
step
|
KarlRong/Safe-RL-for-Driving
|
python
|
def step(self, rl_actions):
"Advance the environment by one step.\n\n Assigns actions to autonomous and human-driven agents (i.e. vehicles,\n traffic lights, etc...). Actions that are not assigned are left to the\n control of the simulator. The actions are then used to advance the\n simulator by the number of time steps requested per environment step.\n\n Results from the simulations are processed through various classes,\n such as the Vehicle and TrafficLight kernels, to produce standardized\n methods for identifying specific network state features. Finally,\n results from the simulator are used to generate appropriate\n observations.\n\n Parameters\n ----------\n rl_actions : array_like\n an list of actions provided by the rl algorithm\n\n Returns\n -------\n observation : array_like\n agent's observation of the current environment\n reward : float\n amount of reward associated with the previous state/action pair\n done : bool\n indicates whether the episode has ended\n info : dict\n contains other diagnostic information from the previous action\n "
for _ in range(self.env_params.sims_per_step):
self.time_counter += 1
self.step_counter += 1
if (len(self.k.vehicle.get_controlled_ids()) > 0):
accel = []
for veh_id in self.k.vehicle.get_controlled_ids():
action = self.k.vehicle.get_acc_controller(veh_id).get_action(self)
accel.append(action)
self.k.vehicle.apply_acceleration(self.k.vehicle.get_controlled_ids(), accel)
if (len(self.k.vehicle.get_controlled_lc_ids()) > 0):
direction = []
for veh_id in self.k.vehicle.get_controlled_lc_ids():
target_lane = self.k.vehicle.get_lane_changing_controller(veh_id).get_action(self)
direction.append(target_lane)
self.k.vehicle.apply_lane_change(self.k.vehicle.get_controlled_lc_ids(), direction=direction)
routing_ids = []
routing_actions = []
for veh_id in self.k.vehicle.get_ids():
if (self.k.vehicle.get_routing_controller(veh_id) is not None):
routing_ids.append(veh_id)
route_contr = self.k.vehicle.get_routing_controller(veh_id)
routing_actions.append(route_contr.choose_route(self))
self.k.vehicle.choose_routes(routing_ids, routing_actions)
self.apply_rl_actions(rl_actions)
self.additional_command()
self.k.simulation.simulation_step()
self.k.update(reset=False)
if self.sim_params.render:
self.k.vehicle.update_vehicle_colors()
crash = self.k.simulation.check_collision()
if crash:
break
self.render()
states = self.get_state()
self.state = np.asarray(states).T
next_observation = np.copy(states)
done = ((self.time_counter >= (self.env_params.warmup_steps + self.env_params.horizon)) or self.stuck)
if done:
print('done')
if self.stuck:
print('stuck')
else:
print('time up')
infos = {}
if self.env_params.clip_actions:
rl_clipped = self.clip_actions(rl_actions)
reward = self.compute_reward(rl_clipped, fail=crash)
else:
reward = self.compute_reward(rl_actions, fail=crash)
return (next_observation, reward, done, infos)
|
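A minimal interaction-loop sketch for the step method; building the environment is omitted, so `env` is assumed to be an already-constructed instance of this class.
obs = env.reset()                                    # env: assumed existing instance
for _ in range(env.env_params.horizon):
    actions = env.action_space.sample()              # random [accel, direction] pairs
    obs, reward, done, info = env.step(actions)
    if done:
        break
|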
def reset(self):
'See parent class.\n\n This also includes updating the initial absolute position and previous\n position.\n '
self.rl_queue.clear()
self.rl_veh.clear()
obs = super().reset()
print('reset')
for veh_id in self.k.vehicle.get_ids():
self.absolute_position[veh_id] = self.k.vehicle.get_x_by_id(veh_id)
self.prev_pos[veh_id] = self.k.vehicle.get_x_by_id(veh_id)
self.leader = []
self.follower = []
return obs
| -2,498,678,424,320,711,000
|
See parent class.
This also includes updating the initial absolute position and previous
position.
|
traci_pedestrian_crossing/movexy_ped.py
|
reset
|
KarlRong/Safe-RL-for-Driving
|
python
|
def reset(self):
'See parent class.\n\n This also includes updating the initial absolute position and previous\n position.\n '
self.rl_queue.clear()
self.rl_veh.clear()
obs = super().reset()
print('reset')
for veh_id in self.k.vehicle.get_ids():
self.absolute_position[veh_id] = self.k.vehicle.get_x_by_id(veh_id)
self.prev_pos[veh_id] = self.k.vehicle.get_x_by_id(veh_id)
self.leader = []
self.follower = []
return obs
|
def loss_fn(outputs, labels):
'\n Compute the cross entropy loss given outputs and labels.\n\n Args:\n outputs: (Variable) dimension batch_size x 6 - output of the model\n labels: (Variable) dimension batch_size, where each element is a value in [0, 1, 2, 3, 4, 5]\n\n Returns:\n loss (Variable): cross entropy loss for all images in the batch\n\n Note: you may use a standard loss function from http://pytorch.org/docs/master/nn.html#loss-functions. This example\n demonstrates how you can easily define a custom loss function.\n '
return nn.CrossEntropyLoss()(outputs, labels)
| -8,691,466,486,941,953,000
|
Compute the cross entropy loss given outputs and labels.
Args:
outputs: (Variable) dimension batch_size x 6 - output of the model
labels: (Variable) dimension batch_size, where each element is a value in [0, 1, 2, 3, 4, 5]
Returns:
loss (Variable): cross entropy loss for all images in the batch
Note: you may use a standard loss function from http://pytorch.org/docs/master/nn.html#loss-functions. This example
demonstrates how you can easily define a custom loss function.
|
model/studentB.py
|
loss_fn
|
eungbean/knowledge-distillation-cifar10
|
python
|
def loss_fn(outputs, labels):
'\n Compute the cross entropy loss given outputs and labels.\n\n Args:\n outputs: (Variable) dimension batch_size x 6 - output of the model\n labels: (Variable) dimension batch_size, where each element is a value in [0, 1, 2, 3, 4, 5]\n\n Returns:\n loss (Variable): cross entropy loss for all images in the batch\n\n Note: you may use a standard loss function from http://pytorch.org/docs/master/nn.html#loss-functions. This example\n demonstrates how you can easily define a custom loss function.\n '
return nn.CrossEntropyLoss()(outputs, labels)
|
def loss_fn_kd(outputs, labels, teacher_outputs, params):
'\n Compute the knowledge-distillation (KD) loss given outputs, labels.\n "Hyperparameters": temperature and alpha\n\n NOTE: the KL Divergence for PyTorch comparing the softmaxs of teacher\n and student expects the input tensor to be log probabilities! See Issue #2\n '
alpha = params.alpha
T = params.temperature
KD_loss = ((nn.KLDivLoss()(F.log_softmax((outputs / T), dim=1), F.softmax((teacher_outputs / T), dim=1)) * ((alpha * T) * T)) + (F.cross_entropy(outputs, labels) * (1.0 - alpha)))
return KD_loss
| 3,821,292,463,632,088,000
|
Compute the knowledge-distillation (KD) loss given outputs, labels.
"Hyperparameters": temperature and alpha
NOTE: the KL Divergence for PyTorch comparing the softmaxs of teacher
and student expects the input tensor to be log probabilities! See Issue #2
|
model/studentB.py
|
loss_fn_kd
|
eungbean/knowledge-distillation-cifar10
|
python
|
def loss_fn_kd(outputs, labels, teacher_outputs, params):
'\n Compute the knowledge-distillation (KD) loss given outputs, labels.\n "Hyperparameters": temperature and alpha\n\n NOTE: the KL Divergence for PyTorch comparing the softmaxs of teacher\n and student expects the input tensor to be log probabilities! See Issue #2\n '
alpha = params.alpha
T = params.temperature
KD_loss = ((nn.KLDivLoss()(F.log_softmax((outputs / T), dim=1), F.softmax((teacher_outputs / T), dim=1)) * ((alpha * T) * T)) + (F.cross_entropy(outputs, labels) * (1.0 - alpha)))
return KD_loss
|
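A sketch of calling loss_fn_kd with dummy tensors; the params object is a stand-in namespace rather than the project's Params class, and the hyperparameter values are arbitrary.
import torch
from types import SimpleNamespace
from model.studentB import loss_fn_kd        # path from this record

params = SimpleNamespace(alpha=0.9, temperature=4.0)
outputs = torch.randn(8, 10)                 # student logits, batch of 8
teacher_outputs = torch.randn(8, 10)         # teacher logits
labels = torch.randint(0, 10, (8,))
print(loss_fn_kd(outputs, labels, teacher_outputs, params).item())
|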
def accuracy(outputs, labels):
'\n Compute the accuracy, given the outputs and labels for all images.\n\n Args:\n outputs: (np.ndarray) output of the model\n labels: (np.ndarray) [0, 1, ..., num_classes-1]\n\n Returns: (float) accuracy in [0,1]\n '
outputs = np.argmax(outputs, axis=1)
return (np.sum((outputs == labels)) / float(labels.size))
| -2,892,165,881,102,442,500
|
Compute the accuracy, given the outputs and labels for all images.
Args:
outputs: (np.ndarray) output of the model
labels: (np.ndarray) [0, 1, ..., num_classes-1]
Returns: (float) accuracy in [0,1]
|
model/studentB.py
|
accuracy
|
eungbean/knowledge-distillation-cifar10
|
python
|
def accuracy(outputs, labels):
'\n Compute the accuracy, given the outputs and labels for all images.\n\n Args:\n outputs: (np.ndarray) output of the model\n labels: (np.ndarray) [0, 1, ..., num_classes-1]\n\n Returns: (float) accuracy in [0,1]\n '
outputs = np.argmax(outputs, axis=1)
return (np.sum((outputs == labels)) / float(labels.size))
|
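A worked example for accuracy: three of the four argmax predictions match the labels, so the result is 0.75.
import numpy as np
from model.studentB import accuracy          # path from this record

outputs = np.array([[0.1, 0.9],              # predicts class 1
                    [0.8, 0.2],              # predicts class 0
                    [0.3, 0.7],              # predicts class 1
                    [0.6, 0.4]])             # predicts class 0
labels = np.array([1, 0, 1, 1])
print(accuracy(outputs, labels))             # 0.75
|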
def __init__(self, params):
'\n We define an convolutional network that predicts the sign from an image. The components\n required are:\n\n Args:\n params: (Params) contains num_channels\n '
super(studentB, self).__init__()
self.num_channels = params.num_channels
self.conv1 = nn.Conv2d(3, 32, 5, stride=1, padding=2)
self.bn1 = nn.BatchNorm2d(32)
self.conv2_1 = nn.Conv2d(32, 32, 1, stride=1, padding=0)
self.conv2_2 = nn.Conv2d(32, 32, 3, stride=1, padding=1)
self.conv2_3 = nn.Conv2d(32, 64, 1, stride=1, padding=0)
self.bn2 = nn.BatchNorm2d(64)
self.conv3_1 = nn.Conv2d(64, 64, 1, stride=1, padding=0)
self.conv3_2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.conv3_3 = nn.Conv2d(64, 128, 1, stride=1, padding=0)
self.bn3 = nn.BatchNorm2d(128)
self.fc1 = nn.Linear(((4 * 4) * 128), 500)
self.fcbn1 = nn.BatchNorm1d(500)
self.fc2 = nn.Linear(500, 10)
self.dropout_rate = params.dropout_rate
| 7,160,409,673,777,569,000
|
We define a convolutional network that predicts the sign from an image. The components
required are:
Args:
params: (Params) contains num_channels
|
model/studentB.py
|
__init__
|
eungbean/knowledge-distillation-cifar10
|
python
|
def __init__(self, params):
'\n We define an convolutional network that predicts the sign from an image. The components\n required are:\n\n Args:\n params: (Params) contains num_channels\n '
super(studentB, self).__init__()
self.num_channels = params.num_channels
self.conv1 = nn.Conv2d(3, 32, 5, stride=1, padding=2)
self.bn1 = nn.BatchNorm2d(32)
self.conv2_1 = nn.Conv2d(32, 32, 1, stride=1, padding=0)
self.conv2_2 = nn.Conv2d(32, 32, 3, stride=1, padding=1)
self.conv2_3 = nn.Conv2d(32, 64, 1, stride=1, padding=0)
self.bn2 = nn.BatchNorm2d(64)
self.conv3_1 = nn.Conv2d(64, 64, 1, stride=1, padding=0)
self.conv3_2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.conv3_3 = nn.Conv2d(64, 128, 1, stride=1, padding=0)
self.bn3 = nn.BatchNorm2d(128)
self.fc1 = nn.Linear(((4 * 4) * 128), 500)
self.fcbn1 = nn.BatchNorm1d(500)
self.fc2 = nn.Linear(500, 10)
self.dropout_rate = params.dropout_rate
|
def forward(self, s):
'\n This function defines how we use the components of our network to operate on an input batch.\n\n Args:\n s: (Variable) contains a batch of images, of dimension batch_size x 3 x 32 x 32 .\n\n Returns:\n out: (Variable) dimension batch_size x 6 with the log probabilities for the labels of each image.\n\n Note: the dimensions after each step are provided\n '
s = self.bn1(self.conv1(s))
s = F.relu(F.max_pool2d(s, 2))
s = self.conv2_1(s)
s = self.conv2_2(s)
s = self.conv2_3(s)
s = self.bn2(s)
s = F.relu(F.max_pool2d(s, 2))
s = self.conv3_1(s)
s = self.conv3_2(s)
s = self.conv3_3(s)
s = self.bn3(s)
s = F.relu(F.max_pool2d(s, 2))
s = s.view((- 1), ((4 * 4) * 128))
s = F.dropout(F.relu(self.fcbn1(self.fc1(s))), p=self.dropout_rate, training=self.training)
s = self.fc2(s)
return s
| -3,429,025,557,422,772,700
|
This function defines how we use the components of our network to operate on an input batch.
Args:
s: (Variable) contains a batch of images, of dimension batch_size x 3 x 32 x 32 .
Returns:
out: (Variable) dimension batch_size x 6 with the log probabilities for the labels of each image.
Note: the dimensions after each step are provided
|
model/studentB.py
|
forward
|
eungbean/knowledge-distillation-cifar10
|
python
|
def forward(self, s):
'\n This function defines how we use the components of our network to operate on an input batch.\n\n Args:\n s: (Variable) contains a batch of images, of dimension batch_size x 3 x 32 x 32 .\n\n Returns:\n out: (Variable) dimension batch_size x 6 with the log probabilities for the labels of each image.\n\n Note: the dimensions after each step are provided\n '
s = self.bn1(self.conv1(s))
s = F.relu(F.max_pool2d(s, 2))
s = self.conv2_1(s)
s = self.conv2_2(s)
s = self.conv2_3(s)
s = self.bn2(s)
s = F.relu(F.max_pool2d(s, 2))
s = self.conv3_1(s)
s = self.conv3_2(s)
s = self.conv3_3(s)
s = self.bn3(s)
s = F.relu(F.max_pool2d(s, 2))
s = s.view((- 1), ((4 * 4) * 128))
s = F.dropout(F.relu(self.fcbn1(self.fc1(s))), p=self.dropout_rate, training=self.training)
s = self.fc2(s)
return s
|
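A minimal usage sketch for the studentB network above; the import path, the params object and the batch size are assumptions for illustration (params only needs the num_channels and dropout_rate attributes the constructor reads), and the input shape matches the 3 x 32 x 32 images the forward pass expects:
import torch
from types import SimpleNamespace
from model.studentB import studentB  # assumed import path, mirrors model/studentB.py

params = SimpleNamespace(num_channels=32, dropout_rate=0.5)  # illustrative values
model = studentB(params)
model.eval()  # keep batch norm and dropout in inference mode
images = torch.randn(8, 3, 32, 32)  # dummy CIFAR-10-sized batch
with torch.no_grad():
    logits = model(images)
print(logits.shape)  # torch.Size([8, 10]): one score per CIFAR-10 class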
def create_or_get_cache_dir(self, module=''):
'create (if not exists) or return cache dir path for module'
cache_dir = '{}/{}'.format(self.__cache_dir, module)
if (not os.path.exists(cache_dir)):
os.makedirs(cache_dir)
return cache_dir
| -3,946,185,517,127,907,300
|
create (if not exists) or return cache dir path for module
|
ods/ods.py
|
create_or_get_cache_dir
|
open-datastudio/ods
|
python
|
def create_or_get_cache_dir(self, module=''):
cache_dir = '{}/{}'.format(self.__cache_dir, module)
if (not os.path.exists(cache_dir)):
os.makedirs(cache_dir)
return cache_dir
|
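An equivalent standalone sketch of the pattern above, assuming cache_root stands in for the instance's private __cache_dir attribute; os.makedirs with exist_ok=True folds the existence check and the directory creation into one call:
import os

def create_or_get_cache_dir(cache_root, module=''):
    # Build the per-module cache path and create it if it does not exist yet.
    cache_dir = os.path.join(cache_root, module)
    os.makedirs(cache_dir, exist_ok=True)
    return cache_dir

print(create_or_get_cache_dir('/tmp/ods-cache', 'spark'))  # /tmp/ods-cache/spark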
def main():
'Run the simulation that infers an embedding for three groups.'
n_stimuli = 30
n_dim = 4
n_group = 3
n_restart = 1
epochs = 1000
n_trial = 2000
batch_size = 128
model_true = ground_truth(n_stimuli, n_dim, n_group)
generator = psiz.trials.RandomRank(n_stimuli, n_reference=8, n_select=2)
docket = generator.generate(n_trial)
agent_novice = psiz.agents.RankAgent(model_true, groups=[0])
agent_interm = psiz.agents.RankAgent(model_true, groups=[1])
agent_expert = psiz.agents.RankAgent(model_true, groups=[2])
obs_novice = agent_novice.simulate(docket)
obs_interm = agent_interm.simulate(docket)
obs_expert = agent_expert.simulate(docket)
obs = psiz.trials.stack((obs_novice, obs_interm, obs_expert))
(obs_train, obs_val, obs_test) = psiz.utils.standard_split(obs)
ds_obs_train = obs_train.as_dataset().shuffle(buffer_size=obs_train.n_trial, reshuffle_each_iteration=True).batch(batch_size, drop_remainder=False)
ds_obs_val = obs_val.as_dataset().batch(batch_size, drop_remainder=False)
ds_obs_test = obs_test.as_dataset().batch(batch_size, drop_remainder=False)
early_stop = psiz.keras.callbacks.EarlyStoppingRe('val_cce', patience=15, mode='min', restore_best_weights=True)
callbacks = [early_stop]
compile_kwargs = {'loss': tf.keras.losses.CategoricalCrossentropy(), 'optimizer': tf.keras.optimizers.Adam(lr=0.001), 'weighted_metrics': [tf.keras.metrics.CategoricalCrossentropy(name='cce')]}
model_inferred = build_model(n_stimuli, n_dim, n_group)
restarter = psiz.keras.Restarter(model_inferred, compile_kwargs=compile_kwargs, monitor='val_loss', n_restart=n_restart)
restart_record = restarter.fit(x=ds_obs_train, validation_data=ds_obs_val, epochs=epochs, callbacks=callbacks, verbose=0)
model_inferred = restarter.model
simmat_truth = (model_similarity(model_true, groups=[0]), model_similarity(model_true, groups=[1]), model_similarity(model_true, groups=[2]))
simmat_inferred = (model_similarity(model_inferred, groups=[0]), model_similarity(model_inferred, groups=[1]), model_similarity(model_inferred, groups=[2]))
r_squared = np.empty((n_group, n_group))
for i_truth in range(n_group):
for j_infer in range(n_group):
(rho, _) = pearsonr(simmat_truth[i_truth], simmat_inferred[j_infer])
r_squared[(i_truth, j_infer)] = (rho ** 2)
attention_weight = tf.stack([model_inferred.kernel.subnets[0].distance.w, model_inferred.kernel.subnets[1].distance.w, model_inferred.kernel.subnets[2].distance.w], axis=0).numpy()
idx_sorted = np.argsort((- attention_weight[0, :]))
attention_weight = attention_weight[:, idx_sorted]
group_labels = ['Novice', 'Intermediate', 'Expert']
print('\n Attention weights:')
for i_group in range(attention_weight.shape[0]):
print(' {0:>12} | {1}'.format(group_labels[i_group], np.array2string(attention_weight[i_group, :], formatter={'float_kind': (lambda x: ('%.2f' % x))})))
print('\n Model Comparison (R^2)')
print(' ================================')
print(' True | Inferred')
print(' | Novice Interm Expert')
print(' --------+-----------------------')
print(' Novice | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format(r_squared[(0, 0)], r_squared[(0, 1)], r_squared[(0, 2)]))
print(' Interm | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format(r_squared[(1, 0)], r_squared[(1, 1)], r_squared[(1, 2)]))
print(' Expert | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format(r_squared[(2, 0)], r_squared[(2, 1)], r_squared[(2, 2)]))
print('\n')
| -4,177,223,168,496,596,500
|
Run the simulation that infers an embedding for three groups.
|
examples/rank/mle_3g.py
|
main
|
rgerkin/psiz
|
python
|
def main():
n_stimuli = 30
n_dim = 4
n_group = 3
n_restart = 1
epochs = 1000
n_trial = 2000
batch_size = 128
model_true = ground_truth(n_stimuli, n_dim, n_group)
generator = psiz.trials.RandomRank(n_stimuli, n_reference=8, n_select=2)
docket = generator.generate(n_trial)
agent_novice = psiz.agents.RankAgent(model_true, groups=[0])
agent_interm = psiz.agents.RankAgent(model_true, groups=[1])
agent_expert = psiz.agents.RankAgent(model_true, groups=[2])
obs_novice = agent_novice.simulate(docket)
obs_interm = agent_interm.simulate(docket)
obs_expert = agent_expert.simulate(docket)
obs = psiz.trials.stack((obs_novice, obs_interm, obs_expert))
(obs_train, obs_val, obs_test) = psiz.utils.standard_split(obs)
ds_obs_train = obs_train.as_dataset().shuffle(buffer_size=obs_train.n_trial, reshuffle_each_iteration=True).batch(batch_size, drop_remainder=False)
ds_obs_val = obs_val.as_dataset().batch(batch_size, drop_remainder=False)
ds_obs_test = obs_test.as_dataset().batch(batch_size, drop_remainder=False)
early_stop = psiz.keras.callbacks.EarlyStoppingRe('val_cce', patience=15, mode='min', restore_best_weights=True)
callbacks = [early_stop]
compile_kwargs = {'loss': tf.keras.losses.CategoricalCrossentropy(), 'optimizer': tf.keras.optimizers.Adam(lr=0.001), 'weighted_metrics': [tf.keras.metrics.CategoricalCrossentropy(name='cce')]}
model_inferred = build_model(n_stimuli, n_dim, n_group)
restarter = psiz.keras.Restarter(model_inferred, compile_kwargs=compile_kwargs, monitor='val_loss', n_restart=n_restart)
restart_record = restarter.fit(x=ds_obs_train, validation_data=ds_obs_val, epochs=epochs, callbacks=callbacks, verbose=0)
model_inferred = restarter.model
simmat_truth = (model_similarity(model_true, groups=[0]), model_similarity(model_true, groups=[1]), model_similarity(model_true, groups=[2]))
simmat_inferred = (model_similarity(model_inferred, groups=[0]), model_similarity(model_inferred, groups=[1]), model_similarity(model_inferred, groups=[2]))
r_squared = np.empty((n_group, n_group))
for i_truth in range(n_group):
for j_infer in range(n_group):
(rho, _) = pearsonr(simmat_truth[i_truth], simmat_inferred[j_infer])
r_squared[(i_truth, j_infer)] = (rho ** 2)
attention_weight = tf.stack([model_inferred.kernel.subnets[0].distance.w, model_inferred.kernel.subnets[1].distance.w, model_inferred.kernel.subnets[2].distance.w], axis=0).numpy()
idx_sorted = np.argsort((- attention_weight[0, :]))
attention_weight = attention_weight[:, idx_sorted]
group_labels = ['Novice', 'Intermediate', 'Expert']
print('\n Attention weights:')
for i_group in range(attention_weight.shape[0]):
print(' {0:>12} | {1}'.format(group_labels[i_group], np.array2string(attention_weight[i_group, :], formatter={'float_kind': (lambda x: ('%.2f' % x))})))
print('\n Model Comparison (R^2)')
print(' ================================')
print(' True | Inferred')
print(' | Novice Interm Expert')
print(' --------+-----------------------')
print(' Novice | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format(r_squared[(0, 0)], r_squared[(0, 1)], r_squared[(0, 2)]))
print(' Interm | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format(r_squared[(1, 0)], r_squared[(1, 1)], r_squared[(1, 2)]))
print(' Expert | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format(r_squared[(2, 0)], r_squared[(2, 1)], r_squared[(2, 2)]))
print('\n')
|
def ground_truth(n_stimuli, n_dim, n_group):
'Return a ground truth embedding.'
stimuli = tf.keras.layers.Embedding((n_stimuli + 1), n_dim, mask_zero=True, embeddings_initializer=tf.keras.initializers.RandomNormal(stddev=0.17))
shared_similarity = psiz.keras.layers.ExponentialSimilarity(trainable=False, beta_initializer=tf.keras.initializers.Constant(10.0), tau_initializer=tf.keras.initializers.Constant(1.0), gamma_initializer=tf.keras.initializers.Constant(0.0))
kernel_0 = psiz.keras.layers.DistanceBased(distance=psiz.keras.layers.Minkowski(rho_trainable=False, rho_initializer=tf.keras.initializers.Constant(2.0), w_initializer=tf.keras.initializers.Constant([1.8, 1.8, 0.2, 0.2]), w_constraint=psiz.keras.constraints.NonNegNorm(scale=n_dim, p=1.0)), similarity=shared_similarity)
kernel_1 = psiz.keras.layers.DistanceBased(distance=psiz.keras.layers.Minkowski(rho_trainable=False, rho_initializer=tf.keras.initializers.Constant(2.0), w_initializer=tf.keras.initializers.Constant([1.0, 1.0, 1.0, 1.0]), w_constraint=psiz.keras.constraints.NonNegNorm(scale=n_dim, p=1.0)), similarity=shared_similarity)
kernel_2 = psiz.keras.layers.DistanceBased(distance=psiz.keras.layers.Minkowski(rho_trainable=False, rho_initializer=tf.keras.initializers.Constant(2.0), w_initializer=tf.keras.initializers.Constant([0.2, 0.2, 1.8, 1.8]), w_constraint=psiz.keras.constraints.NonNegNorm(scale=n_dim, p=1.0)), similarity=shared_similarity)
kernel_group = psiz.keras.layers.GateMulti(subnets=[kernel_0, kernel_1, kernel_2], group_col=0)
model = psiz.keras.models.Rank(stimuli=stimuli, kernel=kernel_group, use_group_kernel=True)
return model
| 3,894,005,208,590,680,600
|
Return a ground truth embedding.
|
examples/rank/mle_3g.py
|
ground_truth
|
rgerkin/psiz
|
python
|
def ground_truth(n_stimuli, n_dim, n_group):
stimuli = tf.keras.layers.Embedding((n_stimuli + 1), n_dim, mask_zero=True, embeddings_initializer=tf.keras.initializers.RandomNormal(stddev=0.17))
shared_similarity = psiz.keras.layers.ExponentialSimilarity(trainable=False, beta_initializer=tf.keras.initializers.Constant(10.0), tau_initializer=tf.keras.initializers.Constant(1.0), gamma_initializer=tf.keras.initializers.Constant(0.0))
kernel_0 = psiz.keras.layers.DistanceBased(distance=psiz.keras.layers.Minkowski(rho_trainable=False, rho_initializer=tf.keras.initializers.Constant(2.0), w_initializer=tf.keras.initializers.Constant([1.8, 1.8, 0.2, 0.2]), w_constraint=psiz.keras.constraints.NonNegNorm(scale=n_dim, p=1.0)), similarity=shared_similarity)
kernel_1 = psiz.keras.layers.DistanceBased(distance=psiz.keras.layers.Minkowski(rho_trainable=False, rho_initializer=tf.keras.initializers.Constant(2.0), w_initializer=tf.keras.initializers.Constant([1.0, 1.0, 1.0, 1.0]), w_constraint=psiz.keras.constraints.NonNegNorm(scale=n_dim, p=1.0)), similarity=shared_similarity)
kernel_2 = psiz.keras.layers.DistanceBased(distance=psiz.keras.layers.Minkowski(rho_trainable=False, rho_initializer=tf.keras.initializers.Constant(2.0), w_initializer=tf.keras.initializers.Constant([0.2, 0.2, 1.8, 1.8]), w_constraint=psiz.keras.constraints.NonNegNorm(scale=n_dim, p=1.0)), similarity=shared_similarity)
kernel_group = psiz.keras.layers.GateMulti(subnets=[kernel_0, kernel_1, kernel_2], group_col=0)
model = psiz.keras.models.Rank(stimuli=stimuli, kernel=kernel_group, use_group_kernel=True)
return model
|
def build_model(n_stimuli, n_dim, n_group):
'Build model.\n\n Arguments:\n n_stimuli: Integer indicating the number of stimuli in the\n embedding.\n n_dim: Integer indicating the dimensionality of the embedding.\n\n Returns:\n model: A TensorFlow Keras model.\n\n '
stimuli = tf.keras.layers.Embedding((n_stimuli + 1), n_dim, mask_zero=True)
shared_similarity = psiz.keras.layers.ExponentialSimilarity(trainable=False, beta_initializer=tf.keras.initializers.Constant(10.0), tau_initializer=tf.keras.initializers.Constant(1.0), gamma_initializer=tf.keras.initializers.Constant(0.0))
kernel_0 = build_kernel(shared_similarity, n_dim)
kernel_1 = build_kernel(shared_similarity, n_dim)
kernel_2 = build_kernel(shared_similarity, n_dim)
kernel_group = psiz.keras.layers.GateMulti(subnets=[kernel_0, kernel_1, kernel_2], group_col=0)
model = psiz.keras.models.Rank(stimuli=stimuli, kernel=kernel_group, use_group_kernel=True)
return model
| 3,748,000,712,402,987,500
|
Build model.
Arguments:
n_stimuli: Integer indicating the number of stimuli in the
embedding.
n_dim: Integer indicating the dimensionality of the embedding.
Returns:
model: A TensorFlow Keras model.
|
examples/rank/mle_3g.py
|
build_model
|
rgerkin/psiz
|
python
|
def build_model(n_stimuli, n_dim, n_group):
'Build model.\n\n Arguments:\n n_stimuli: Integer indicating the number of stimuli in the\n embedding.\n n_dim: Integer indicating the dimensionality of the embedding.\n\n Returns:\n model: A TensorFlow Keras model.\n\n '
stimuli = tf.keras.layers.Embedding((n_stimuli + 1), n_dim, mask_zero=True)
shared_similarity = psiz.keras.layers.ExponentialSimilarity(trainable=False, beta_initializer=tf.keras.initializers.Constant(10.0), tau_initializer=tf.keras.initializers.Constant(1.0), gamma_initializer=tf.keras.initializers.Constant(0.0))
kernel_0 = build_kernel(shared_similarity, n_dim)
kernel_1 = build_kernel(shared_similarity, n_dim)
kernel_2 = build_kernel(shared_similarity, n_dim)
kernel_group = psiz.keras.layers.GateMulti(subnets=[kernel_0, kernel_1, kernel_2], group_col=0)
model = psiz.keras.models.Rank(stimuli=stimuli, kernel=kernel_group, use_group_kernel=True)
return model
|
def build_kernel(similarity, n_dim):
'Build kernel for single group.'
mink = psiz.keras.layers.Minkowski(rho_trainable=False, rho_initializer=tf.keras.initializers.Constant(2.0), w_constraint=psiz.keras.constraints.NonNegNorm(scale=n_dim, p=1.0))
kernel = psiz.keras.layers.DistanceBased(distance=mink, similarity=similarity)
return kernel
| -5,725,182,606,263,217,000
|
Build kernel for single group.
|
examples/rank/mle_3g.py
|
build_kernel
|
rgerkin/psiz
|
python
|
def build_kernel(similarity, n_dim):
mink = psiz.keras.layers.Minkowski(rho_trainable=False, rho_initializer=tf.keras.initializers.Constant(2.0), w_constraint=psiz.keras.constraints.NonNegNorm(scale=n_dim, p=1.0))
kernel = psiz.keras.layers.DistanceBased(distance=mink, similarity=similarity)
return kernel
|
def __repr__(self):
'Return a string representation of the device.'
return '<WeMo LightSwitch "{name}">'.format(name=self.name)
| -6,814,544,005,257,611,000
|
Return a string representation of the device.
|
pywemo/ouimeaux_device/lightswitch.py
|
__repr__
|
GarlicToum/pywemo
|
python
|
def __repr__(self):
return '<WeMo LightSwitch "{name}">'.format(name=self.name)
|
@property
def device_type(self):
'Return what kind of WeMo this device is.'
return 'LightSwitch'
| 1,603,105,175,854,432,300
|
Return what kind of WeMo this device is.
|
pywemo/ouimeaux_device/lightswitch.py
|
device_type
|
GarlicToum/pywemo
|
python
|
@property
def device_type(self):
return 'LightSwitch'
|
def send_single_ans(self, ID, name: str):
'\n Send a single message to specific id with a specific name.\n\n :params ID: User quiz id.\n :type ID: int\n :params name: Name you want on the message.\n :type name: str\n '
self.data = {'userFullName': name, 'userQuizId': 1}
self.data.update(userQuizId=ID)
self.payloadf.update(userQuizId=ID)
try:
req = requests.request('GET', self.url, params=self.payloadf)
questions = json.loads(req.text).get('data').get('questions')
for (j, q) in enumerate(questions):
qval = q.get('choosenOption')
self.data.update({(('questions[' + str(j)) + '][choosenOption]'): qval})
reqi = requests.post(self.url, params=self.payload, data=self.data)
print(('sending post to userQuizId: ' + str(ID)))
except:
print('User not found')
| -2,713,328,840,263,826,000
|
Send a single message to a specific id with a specific name.
:param ID: User quiz id.
:type ID: int
:param name: Name you want on the message.
:type name: str
|
buddymojoAPI/BuddyMojoAPI.py
|
send_single_ans
|
jasonjustin/BuddymojoAPI
|
python
|
def send_single_ans(self, ID, name: str):
'\n Send a single message to specific id with a specific name.\n\n :params ID: User quiz id.\n :type ID: int\n :params name: Name you want on the message.\n :type name: str\n '
self.data = {'userFullName': name, 'userQuizId': 1}
self.data.update(userQuizId=ID)
self.payloadf.update(userQuizId=ID)
try:
req = requests.request('GET', self.url, params=self.payloadf)
questions = json.loads(req.text).get('data').get('questions')
for (j, q) in enumerate(questions):
qval = q.get('choosenOption')
self.data.update({(('questions[' + str(j)) + '][choosenOption]'): qval})
reqi = requests.post(self.url, params=self.payload, data=self.data)
print(('sending post to userQuizId: ' + str(ID)))
except:
print('User not found')
|
def send_range_ans(self, start, end, name: str):
'\n Send messages to a range of users id.\n\n :params start: The start user id.\n :type start: int\n :params end: The end user id.\n :type end: int\n :params name: The name you want.\n :type name: str\n '
for i in range(start, end):
data = {'userFullName': name, 'userQuizId': 1}
data.update(userQuizId=i)
self.payloadf.update(userQuizId=i)
try:
req = requests.request('GET', self.url, params=self.payloadf)
questions = json.loads(req.text).get('data').get('questions')
for (j, q) in enumerate(questions):
qval = q.get('choosenOption')
data.update({(('questions[' + str(j)) + '][choosenOption]'): qval})
reqi = requests.post(self.url, params=self.payload, data=data)
print(('sending post to userQuizId: ' + str(i)))
except:
continue
| -2,403,878,059,931,526,700
|
Send messages to a range of user ids.
:param start: The start user id.
:type start: int
:param end: The end user id.
:type end: int
:param name: The name you want.
:type name: str
|
buddymojoAPI/BuddyMojoAPI.py
|
send_range_ans
|
jasonjustin/BuddymojoAPI
|
python
|
def send_range_ans(self, start, end, name: str):
'\n Send messages to a range of users id.\n\n :params start: The start user id.\n :type start: int\n :params end: The end user id.\n :type end: int\n :params name: The name you want.\n :type name: str\n '
for i in range(start, end):
data = {'userFullName': name, 'userQuizId': 1}
data.update(userQuizId=i)
self.payloadf.update(userQuizId=i)
try:
req = requests.request('GET', self.url, params=self.payloadf)
questions = json.loads(req.text).get('data').get('questions')
for (j, q) in enumerate(questions):
qval = q.get('choosenOption')
data.update({(('questions[' + str(j)) + '][choosenOption]'): qval})
reqi = requests.post(self.url, params=self.payload, data=data)
print(('sending post to userQuizId: ' + str(i)))
except:
continue
|
def get_userQuizId(self, encUserQuizId):
'\n Returns a user id string of the encUserQuizId.\n '
try:
req = requests.request('GET', str((match + encUserQuizId)))
data = json.loads(req.text)
print(data)
except:
return 'User not found'
| -5,446,436,008,461,802,000
|
Returns a user id string of the encUserQuizId.
|
buddymojoAPI/BuddyMojoAPI.py
|
get_userQuizId
|
jasonjustin/BuddymojoAPI
|
python
|
def get_userQuizId(self, encUserQuizId):
'\n \n '
try:
req = requests.request('GET', str((match + encUserQuizId)))
data = json.loads(req.text)
print(data)
except:
return 'User not found'
|
def get_link(self, ID):
'\n Returns a url string of the id.\n\n :params ID: The id to get the url from.\n :type ID: int\n :returns: A url string.\n :rtype: String\n '
self.payloadf.update(userQuizId=ID)
try:
req = requests.request('GET', self.url, params=self.payloadf)
data = json.loads(req.text).get('data').get('encUserQuizId')
return (self.match + data)
except:
return 'User not found'
| 8,604,263,190,504,289,000
|
Returns a url string of the id.
:param ID: The id to get the url from.
:type ID: int
:returns: A url string.
:rtype: String
|
buddymojoAPI/BuddyMojoAPI.py
|
get_link
|
jasonjustin/BuddymojoAPI
|
python
|
def get_link(self, ID):
'\n Returns a url string of the id.\n\n :params ID: The id to get the url from.\n :type ID: int\n :returns: A url string.\n :rtype: String\n '
self.payloadf.update(userQuizId=ID)
try:
req = requests.request('GET', self.url, params=self.payloadf)
data = json.loads(req.text).get('data').get('encUserQuizId')
return (self.match + data)
except:
return 'User not found'
|
def _detect_thread_group(self, executor):
'\n Detect preferred thread group\n :param executor:\n :return:\n '
tg = self.TG
if (not self.force_ctg):
return tg
msg = 'Thread group detection: %s, regular ThreadGroup will be used'
if (not self.load.duration):
self.log.debug(msg, 'duration not found')
elif self.load.iterations:
self.log.debug(msg, 'iterations are found')
elif (not executor.tool):
msg = 'You must set executor tool (%s) for choosing of ConcurrencyThreadGroup'
raise TaurusInternalException((msg % executor.tool_name))
elif (not executor.tool.ctg_plugin_installed()):
self.log.warning((msg % 'plugin for ConcurrentThreadGroup not found'))
else:
tg = self.CTG
return tg
| -4,644,660,773,016,732,000
|
Detect preferred thread group
:param executor:
:return:
|
bzt/jmx/tools.py
|
_detect_thread_group
|
greyfenrir/taurus
|
python
|
def _detect_thread_group(self, executor):
'\n Detect preferred thread group\n :param executor:\n :return:\n '
tg = self.TG
if (not self.force_ctg):
return tg
msg = 'Thread group detection: %s, regular ThreadGroup will be used'
if (not self.load.duration):
self.log.debug(msg, 'duration not found')
elif self.load.iterations:
self.log.debug(msg, 'iterations are found')
elif (not executor.tool):
msg = 'You must set executor tool (%s) for choosing of ConcurrencyThreadGroup'
raise TaurusInternalException((msg % executor.tool_name))
elif (not executor.tool.ctg_plugin_installed()):
self.log.warning((msg % 'plugin for ConcurrentThreadGroup not found'))
else:
tg = self.CTG
return tg
|
def _divide_concurrency(self, concurrency_list):
'\n calculate target concurrency for every thread group\n '
total_old_concurrency = sum(concurrency_list)
for (idx, concurrency) in enumerate(concurrency_list):
if (total_old_concurrency and (concurrency_list[idx] != 0)):
part_of_load = (((1.0 * self.load.concurrency) * concurrency) / total_old_concurrency)
concurrency_list[idx] = int(round(part_of_load))
if (concurrency_list[idx] == 0):
concurrency_list[idx] = 1
else:
concurrency_list[idx] = 0
total_new_concurrency = sum(concurrency_list)
leftover = (self.load.concurrency - total_new_concurrency)
if (leftover < 0):
msg = 'Had to add %s more threads to maintain thread group proportion'
self.log.warning(msg, (- leftover))
elif (leftover > 0):
msg = '%s threads left undistributed due to thread group proportion'
self.log.warning(msg, leftover)
| 209,768,109,835,262,200
|
calculate target concurrency for every thread group
|
bzt/jmx/tools.py
|
_divide_concurrency
|
greyfenrir/taurus
|
python
|
def _divide_concurrency(self, concurrency_list):
'\n \n '
total_old_concurrency = sum(concurrency_list)
for (idx, concurrency) in enumerate(concurrency_list):
if (total_old_concurrency and (concurrency_list[idx] != 0)):
part_of_load = (((1.0 * self.load.concurrency) * concurrency) / total_old_concurrency)
concurrency_list[idx] = int(round(part_of_load))
if (concurrency_list[idx] == 0):
concurrency_list[idx] = 1
else:
concurrency_list[idx] = 0
total_new_concurrency = sum(concurrency_list)
leftover = (self.load.concurrency - total_new_concurrency)
if (leftover < 0):
msg = 'Had to add %s more threads to maintain thread group proportion'
self.log.warning(msg, (- leftover))
elif (leftover > 0):
msg = '%s threads left undistributed due to thread group proportion'
self.log.warning(msg, leftover)
|
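The proportional redistribution above can be exercised in isolation; this sketch reproduces the core rounding logic for a target concurrency and a list of existing per-group values (the function name and the sample numbers are illustrative, not part of the Taurus API):
def divide_concurrency(target, concurrency_list):
    # Scale every group's old share to the new target, rounding to whole
    # threads and keeping at least one thread for any group that had a
    # non-zero share; the second return value is the undistributed leftover.
    total_old = sum(concurrency_list)
    scaled = []
    for value in concurrency_list:
        if total_old and value != 0:
            scaled.append(max(int(round(1.0 * target * value / total_old)), 1))
        else:
            scaled.append(0)
    return scaled, target - sum(scaled)

print(divide_concurrency(10, [3, 6, 1]))    # ([3, 6, 1], 0)
print(divide_concurrency(10, [1, 1, 100]))  # ([1, 1, 10], -2): extra threads were added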
def _add_shaper(self, jmx):
'\n Add shaper\n :param jmx: JMX\n :return:\n '
if (not self.load.duration):
self.log.warning("You must set 'ramp-up' and/or 'hold-for' when using 'throughput' option")
return
etree_shaper = jmx.get_rps_shaper()
if self.load.ramp_up:
if (isinstance(self.load.throughput, numeric_types) and self.load.duration):
start_rps = (self.load.throughput / float(self.load.duration))
start_rps = max(start_rps, 0.001)
start_rps = min(start_rps, 1.0)
else:
start_rps = 1
if (not self.load.steps):
jmx.add_rps_shaper_schedule(etree_shaper, start_rps, self.load.throughput, self.load.ramp_up)
else:
step_h = (self.load.throughput / self.load.steps)
step_w = (float(self.load.ramp_up) / self.load.steps)
accum_time = 0
for step in range(1, (self.load.steps + 1)):
jmx.add_rps_shaper_schedule(etree_shaper, (step_h * step), (step_h * step), ((step_w * step) - accum_time))
accum_time += cond_int(((step_w * step) - accum_time))
if self.load.hold:
jmx.add_rps_shaper_schedule(etree_shaper, self.load.throughput, self.load.throughput, self.load.hold)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree_shaper)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element('hashTree'))
| 5,178,974,408,345,509,000
|
Add shaper
:param jmx: JMX
:return:
|
bzt/jmx/tools.py
|
_add_shaper
|
greyfenrir/taurus
|
python
|
def _add_shaper(self, jmx):
'\n Add shaper\n :param jmx: JMX\n :return:\n '
if (not self.load.duration):
self.log.warning("You must set 'ramp-up' and/or 'hold-for' when using 'throughput' option")
return
etree_shaper = jmx.get_rps_shaper()
if self.load.ramp_up:
if (isinstance(self.load.throughput, numeric_types) and self.load.duration):
start_rps = (self.load.throughput / float(self.load.duration))
start_rps = max(start_rps, 0.001)
start_rps = min(start_rps, 1.0)
else:
start_rps = 1
if (not self.load.steps):
jmx.add_rps_shaper_schedule(etree_shaper, start_rps, self.load.throughput, self.load.ramp_up)
else:
step_h = (self.load.throughput / self.load.steps)
step_w = (float(self.load.ramp_up) / self.load.steps)
accum_time = 0
for step in range(1, (self.load.steps + 1)):
jmx.add_rps_shaper_schedule(etree_shaper, (step_h * step), (step_h * step), ((step_w * step) - accum_time))
accum_time += cond_int(((step_w * step) - accum_time))
if self.load.hold:
jmx.add_rps_shaper_schedule(etree_shaper, self.load.throughput, self.load.throughput, self.load.hold)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree_shaper)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element('hashTree'))
|
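A small worked example of the stepped branch above: with a throughput of 100 req/s, 4 steps and a 60 s ramp-up, the shaper receives four constant-rate segments of 25, 50, 75 and 100 req/s, each about 15 s long (the numbers are illustrative, and plain int() stands in for cond_int() from the original):
throughput, steps, ramp_up = 100, 4, 60.0
step_h = throughput / steps        # rate added per step
step_w = float(ramp_up) / steps    # nominal width of each step in seconds
accum_time = 0
for step in range(1, steps + 1):
    duration = step_w * step - accum_time
    print('step %d: %.0f req/s for %.0f s' % (step, step_h * step, duration))
    accum_time += int(step_w * step - accum_time)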
def __init__(self, executor, original=None):
'\n :type executor: ScenarioExecutor\n :type original: JMX\n '
super(JMeterScenarioBuilder, self).__init__(original)
self.executor = executor
self.scenario = executor.get_scenario()
self.engine = executor.engine
self.system_props = BetterDict()
self.request_compiler = None
self.default_protocol = self.executor.settings.get('default-protocol', 'http')
self.protocol_handlers = {}
for (protocol, cls_name) in iteritems(self.executor.settings.get('protocol-handlers')):
cls_obj = load_class(cls_name)
instance = cls_obj(self.system_props, self.engine)
self.protocol_handlers[protocol] = instance
self.FIELD_KEYSTORE_CONFIG = 'keystore-config'
| 11,199,671,135,209,920
|
:type executor: ScenarioExecutor
:type original: JMX
|
bzt/jmx/tools.py
|
__init__
|
greyfenrir/taurus
|
python
|
def __init__(self, executor, original=None):
'\n :type executor: ScenarioExecutor\n :type original: JMX\n '
super(JMeterScenarioBuilder, self).__init__(original)
self.executor = executor
self.scenario = executor.get_scenario()
self.engine = executor.engine
self.system_props = BetterDict()
self.request_compiler = None
self.default_protocol = self.executor.settings.get('default-protocol', 'http')
self.protocol_handlers = {}
for (protocol, cls_name) in iteritems(self.executor.settings.get('protocol-handlers')):
cls_obj = load_class(cls_name)
instance = cls_obj(self.system_props, self.engine)
self.protocol_handlers[protocol] = instance
self.FIELD_KEYSTORE_CONFIG = 'keystore-config'
|
@staticmethod
def __add_jsr_elements(children, req, get_from_config=True):
'\n :type children: etree.Element\n :type req: Request\n '
jsrs = []
if get_from_config:
jsrs = req.config.get('jsr223', [])
else:
jsrs = req.get('jsr223', [])
if (not isinstance(jsrs, list)):
jsrs = [jsrs]
for (idx, _) in enumerate(jsrs):
jsr = ensure_is_dict(jsrs, idx, sub_key='script-text')
lang = jsr.get('language', 'groovy')
script_file = jsr.get('script-file', None)
script_text = jsr.get('script-text', None)
if ((not script_file) and (not script_text)):
raise TaurusConfigError("jsr223 element must specify one of 'script-file' or 'script-text'")
parameters = jsr.get('parameters', '')
execute = jsr.get('execute', 'after')
cache_key = str(jsr.get('compile-cache', True)).lower()
children.append(JMX._get_jsr223_element(lang, script_file, parameters, execute, script_text, cache_key))
children.append(etree.Element('hashTree'))
| -3,542,814,545,030,569,500
|
:type children: etree.Element
:type req: Request
|
bzt/jmx/tools.py
|
__add_jsr_elements
|
greyfenrir/taurus
|
python
|
@staticmethod
def __add_jsr_elements(children, req, get_from_config=True):
'\n :type children: etree.Element\n :type req: Request\n '
jsrs = []
if get_from_config:
jsrs = req.config.get('jsr223', [])
else:
jsrs = req.get('jsr223', [])
if (not isinstance(jsrs, list)):
jsrs = [jsrs]
for (idx, _) in enumerate(jsrs):
jsr = ensure_is_dict(jsrs, idx, sub_key='script-text')
lang = jsr.get('language', 'groovy')
script_file = jsr.get('script-file', None)
script_text = jsr.get('script-text', None)
if ((not script_file) and (not script_text)):
raise TaurusConfigError("jsr223 element must specify one of 'script-file' or 'script-text'")
parameters = jsr.get('parameters', '')
execute = jsr.get('execute', 'after')
cache_key = str(jsr.get('compile-cache', True)).lower()
children.append(JMX._get_jsr223_element(lang, script_file, parameters, execute, script_text, cache_key))
children.append(etree.Element('hashTree'))
|
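For reference, a jsr223 entry that satisfies the validation above could look like the following dict; the keys are exactly the ones the method reads, while the values are illustrative:
jsr223_entry = {
    'language': 'groovy',                           # defaults to 'groovy' when omitted
    'script-text': 'log.info("sample iteration")',  # either script-text or script-file is required
    'parameters': '',
    'execute': 'before',                            # defaults to 'after' when omitted
    'compile-cache': True,
}
# Omitting both 'script-file' and 'script-text' raises a TaurusConfigError.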
def compile_request(self, request):
'\n\n :type request: HierarchicHTTPRequest\n :return:\n '
sampler = children = None
protocol_name = request.priority_option('protocol', default=self.default_protocol)
if (protocol_name in self.protocol_handlers):
protocol = self.protocol_handlers[protocol_name]
(sampler, children) = protocol.get_sampler_pair(request)
if (sampler is None):
self.log.warning('Problematic request: %s', request.config)
raise TaurusInternalException('Unable to handle request, please review missing options')
children.extend(self._get_timer(request))
self.__add_assertions(children, request)
self.__add_extractors(children, request)
self.__add_jsr_elements(children, request)
return [sampler, children]
| -1,291,728,201,988,147,500
|
:type request: HierarchicHTTPRequest
:return:
|
bzt/jmx/tools.py
|
compile_request
|
greyfenrir/taurus
|
python
|
def compile_request(self, request):
'\n\n :type request: HierarchicHTTPRequest\n :return:\n '
sampler = children = None
protocol_name = request.priority_option('protocol', default=self.default_protocol)
if (protocol_name in self.protocol_handlers):
protocol = self.protocol_handlers[protocol_name]
(sampler, children) = protocol.get_sampler_pair(request)
if (sampler is None):
self.log.warning('Problematic request: %s', request.config)
raise TaurusInternalException('Unable to handle request, please review missing options')
children.extend(self._get_timer(request))
self.__add_assertions(children, request)
self.__add_extractors(children, request)
self.__add_jsr_elements(children, request)
return [sampler, children]
|
def compile_foreach_block(self, block):
'\n :type block: ForEachBlock\n '
elements = []
controller = JMX._get_foreach_controller(block.input_var, block.loop_var)
children = etree.Element('hashTree')
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([controller, children])
return elements
| 3,921,619,715,577,166,300
|
:type block: ForEachBlock
|
bzt/jmx/tools.py
|
compile_foreach_block
|
greyfenrir/taurus
|
python
|
def compile_foreach_block(self, block):
'\n \n '
elements = []
controller = JMX._get_foreach_controller(block.input_var, block.loop_var)
children = etree.Element('hashTree')
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([controller, children])
return elements
|
def compile_action_block(self, block):
'\n :type block: ActionBlock\n :return:\n '
actions = {'stop': 0, 'pause': 1, 'stop-now': 2, 'continue': 3}
targets = {'current-thread': 0, 'all-threads': 2}
action = actions[block.action]
target = targets[block.target]
duration = 0
if (block.duration is not None):
duration = int((block.duration * 1000))
test_action = JMX._get_action_block(action, target, duration)
children = etree.Element('hashTree')
self.__add_jsr_elements(children, block)
return [test_action, children]
| 7,389,238,544,759,741,000
|
:type block: ActionBlock
:return:
|
bzt/jmx/tools.py
|
compile_action_block
|
greyfenrir/taurus
|
python
|
def compile_action_block(self, block):
'\n :type block: ActionBlock\n :return:\n '
actions = {'stop': 0, 'pause': 1, 'stop-now': 2, 'continue': 3}
targets = {'current-thread': 0, 'all-threads': 2}
action = actions[block.action]
target = targets[block.target]
duration = 0
if (block.duration is not None):
duration = int((block.duration * 1000))
test_action = JMX._get_action_block(action, target, duration)
children = etree.Element('hashTree')
self.__add_jsr_elements(children, block)
return [test_action, children]
|
def __generate(self):
'\n Generate the test plan\n '
thread_group = JMX.get_thread_group(testname=self.executor.label)
thread_group_ht = etree.Element('hashTree', type='tg')
self.request_compiler = RequestCompiler(self)
for element in self.compile_scenario(self.scenario):
thread_group_ht.append(element)
results_tree = self._get_results_tree()
results_tree_ht = etree.Element('hashTree')
self.append(self.TEST_PLAN_SEL, thread_group)
self.append(self.TEST_PLAN_SEL, thread_group_ht)
self.append(self.TEST_PLAN_SEL, results_tree)
self.append(self.TEST_PLAN_SEL, results_tree_ht)
| -7,969,458,648,921,240,000
|
Generate the test plan
|
bzt/jmx/tools.py
|
__generate
|
greyfenrir/taurus
|
python
|
def __generate(self):
'\n \n '
thread_group = JMX.get_thread_group(testname=self.executor.label)
thread_group_ht = etree.Element('hashTree', type='tg')
self.request_compiler = RequestCompiler(self)
for element in self.compile_scenario(self.scenario):
thread_group_ht.append(element)
results_tree = self._get_results_tree()
results_tree_ht = etree.Element('hashTree')
self.append(self.TEST_PLAN_SEL, thread_group)
self.append(self.TEST_PLAN_SEL, thread_group_ht)
self.append(self.TEST_PLAN_SEL, results_tree)
self.append(self.TEST_PLAN_SEL, results_tree_ht)
|
def save(self, filename):
'\n Generate test plan and save\n\n :type filename: str\n '
self.__generate()
super(JMeterScenarioBuilder, self).save(filename)
| 861,738,620,378,334,000
|
Generate test plan and save
:type filename: str
|
bzt/jmx/tools.py
|
save
|
greyfenrir/taurus
|
python
|
def save(self, filename):
'\n Generate test plan and save\n\n :type filename: str\n '
self.__generate()
super(JMeterScenarioBuilder, self).save(filename)
|
@staticmethod
def __gen_authorization(scenario):
'\n Generates HTTP Authorization Manager\n\n '
elements = []
authorizations = scenario.get('authorization')
if authorizations:
clear_flag = False
if isinstance(authorizations, dict):
if (('clear' in authorizations) or ('list' in authorizations)):
clear_flag = authorizations.get('clear', False)
authorizations = authorizations.get('list', [])
else:
authorizations = [authorizations]
if (not isinstance(authorizations, list)):
raise TaurusConfigError(('Wrong authorization format: %s' % authorizations))
auth_manager = JMX.get_auth_manager(authorizations, clear_flag)
elements.append(auth_manager)
elements.append(etree.Element('hashTree'))
return elements
| 4,335,678,651,450,887,000
|
Generates HTTP Authorization Manager
|
bzt/jmx/tools.py
|
__gen_authorization
|
greyfenrir/taurus
|
python
|
@staticmethod
def __gen_authorization(scenario):
'\n \n\n '
elements = []
authorizations = scenario.get('authorization')
if authorizations:
clear_flag = False
if isinstance(authorizations, dict):
if (('clear' in authorizations) or ('list' in authorizations)):
clear_flag = authorizations.get('clear', False)
authorizations = authorizations.get('list', [])
else:
authorizations = [authorizations]
if (not isinstance(authorizations, list)):
raise TaurusConfigError(('Wrong authorization format: %s' % authorizations))
auth_manager = JMX.get_auth_manager(authorizations, clear_flag)
elements.append(auth_manager)
elements.append(etree.Element('hashTree'))
return elements
|
def __init__(self, label=None, display_order=None, local_vars_configuration=None):
'PropertyGroupUpdate - a model defined in OpenAPI'
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._label = None
self._display_order = None
self.discriminator = None
if (label is not None):
self.label = label
if (display_order is not None):
self.display_order = display_order
| 5,236,590,290,660,018,000
|
PropertyGroupUpdate - a model defined in OpenAPI
|
hubspot/crm/properties/models/property_group_update.py
|
__init__
|
cclauss/hubspot-api-python
|
python
|
def __init__(self, label=None, display_order=None, local_vars_configuration=None):
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._label = None
self._display_order = None
self.discriminator = None
if (label is not None):
self.label = label
if (display_order is not None):
self.display_order = display_order
|
@property
def label(self):
'Gets the label of this PropertyGroupUpdate. # noqa: E501\n\n A human-readable label that will be shown in HubSpot. # noqa: E501\n\n :return: The label of this PropertyGroupUpdate. # noqa: E501\n :rtype: str\n '
return self._label
| 2,917,488,915,512,171,500
|
Gets the label of this PropertyGroupUpdate. # noqa: E501
A human-readable label that will be shown in HubSpot. # noqa: E501
:return: The label of this PropertyGroupUpdate. # noqa: E501
:rtype: str
|
hubspot/crm/properties/models/property_group_update.py
|
label
|
cclauss/hubspot-api-python
|
python
|
@property
def label(self):
'Gets the label of this PropertyGroupUpdate. # noqa: E501\n\n A human-readable label that will be shown in HubSpot. # noqa: E501\n\n :return: The label of this PropertyGroupUpdate. # noqa: E501\n :rtype: str\n '
return self._label
|
@label.setter
def label(self, label):
'Sets the label of this PropertyGroupUpdate.\n\n A human-readable label that will be shown in HubSpot. # noqa: E501\n\n :param label: The label of this PropertyGroupUpdate. # noqa: E501\n :type: str\n '
self._label = label
| 3,503,763,217,207,940,000
|
Sets the label of this PropertyGroupUpdate.
A human-readable label that will be shown in HubSpot. # noqa: E501
:param label: The label of this PropertyGroupUpdate. # noqa: E501
:type: str
|
hubspot/crm/properties/models/property_group_update.py
|
label
|
cclauss/hubspot-api-python
|
python
|
@label.setter
def label(self, label):
'Sets the label of this PropertyGroupUpdate.\n\n A human-readable label that will be shown in HubSpot. # noqa: E501\n\n :param label: The label of this PropertyGroupUpdate. # noqa: E501\n :type: str\n '
self._label = label
|
@property
def display_order(self):
'Gets the display_order of this PropertyGroupUpdate. # noqa: E501\n\n Property groups are displayed in order starting with the lowest positive integer value. Values of -1 will cause the property group to be displayed after any positive values. # noqa: E501\n\n :return: The display_order of this PropertyGroupUpdate. # noqa: E501\n :rtype: int\n '
return self._display_order
| 5,386,896,482,861,787,000
|
Gets the display_order of this PropertyGroupUpdate. # noqa: E501
Property groups are displayed in order starting with the lowest positive integer value. Values of -1 will cause the property group to be displayed after any positive values. # noqa: E501
:return: The display_order of this PropertyGroupUpdate. # noqa: E501
:rtype: int
|
hubspot/crm/properties/models/property_group_update.py
|
display_order
|
cclauss/hubspot-api-python
|
python
|
@property
def display_order(self):
'Gets the display_order of this PropertyGroupUpdate. # noqa: E501\n\n Property groups are displayed in order starting with the lowest positive integer value. Values of -1 will cause the property group to be displayed after any positive values. # noqa: E501\n\n :return: The display_order of this PropertyGroupUpdate. # noqa: E501\n :rtype: int\n '
return self._display_order
|
@display_order.setter
def display_order(self, display_order):
'Sets the display_order of this PropertyGroupUpdate.\n\n Property groups are displayed in order starting with the lowest positive integer value. Values of -1 will cause the property group to be displayed after any positive values. # noqa: E501\n\n :param display_order: The display_order of this PropertyGroupUpdate. # noqa: E501\n :type: int\n '
self._display_order = display_order
| -5,371,300,951,071,094,000
|
Sets the display_order of this PropertyGroupUpdate.
Property groups are displayed in order starting with the lowest positive integer value. Values of -1 will cause the property group to be displayed after any positive values. # noqa: E501
:param display_order: The display_order of this PropertyGroupUpdate. # noqa: E501
:type: int
|
hubspot/crm/properties/models/property_group_update.py
|
display_order
|
cclauss/hubspot-api-python
|
python
|
@display_order.setter
def display_order(self, display_order):
'Sets the display_order of this PropertyGroupUpdate.\n\n Property groups are displayed in order starting with the lowest positive integer value. Values of -1 will cause the property group to be displayed after any positive values. # noqa: E501\n\n :param display_order: The display_order of this PropertyGroupUpdate. # noqa: E501\n :type: int\n '
self._display_order = display_order
|
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
| 8,442,519,487,048,767,000
|
Returns the model properties as a dict
|
hubspot/crm/properties/models/property_group_update.py
|
to_dict
|
cclauss/hubspot-api-python
|
python
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
|
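A brief usage sketch for the PropertyGroupUpdate model defined above; the import path mirrors hubspot/crm/properties/models/property_group_update.py and is an assumption:
from hubspot.crm.properties.models.property_group_update import PropertyGroupUpdate  # assumed path

group_update = PropertyGroupUpdate(label='Contact details', display_order=2)
print(group_update.label)      # 'Contact details'
print(group_update.to_dict())  # roughly {'label': 'Contact details', 'display_order': 2}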
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict())
| 5,849,158,643,760,736,000
|
Returns the string representation of the model
|
hubspot/crm/properties/models/property_group_update.py
|
to_str
|
cclauss/hubspot-api-python
|
python
|
def to_str(self):
return pprint.pformat(self.to_dict())
|
def __repr__(self):
'For `print` and `pprint`'
return self.to_str()
| -8,960,031,694,814,905,000
|
For `print` and `pprint`
|
hubspot/crm/properties/models/property_group_update.py
|
__repr__
|
cclauss/hubspot-api-python
|
python
|
def __repr__(self):
return self.to_str()
|
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, PropertyGroupUpdate)):
return False
return (self.to_dict() == other.to_dict())
| -2,793,007,724,244,214,000
|
Returns true if both objects are equal
|
hubspot/crm/properties/models/property_group_update.py
|
__eq__
|
cclauss/hubspot-api-python
|
python
|
def __eq__(self, other):
if (not isinstance(other, PropertyGroupUpdate)):
return False
return (self.to_dict() == other.to_dict())
|
def __ne__(self, other):
'Returns true if both objects are not equal'
if (not isinstance(other, PropertyGroupUpdate)):
return True
return (self.to_dict() != other.to_dict())
| -8,805,428,320,412,282,000
|
Returns true if both objects are not equal
|
hubspot/crm/properties/models/property_group_update.py
|
__ne__
|
cclauss/hubspot-api-python
|
python
|
def __ne__(self, other):
if (not isinstance(other, PropertyGroupUpdate)):
return True
return (self.to_dict() != other.to_dict())
|
@property
def hexagonal_edges(self):
'Gets the three half-edges on the hexagonal boundary incident to a black node and point in ccw direction.'
first = self.half_edge
res = [first]
second = first.opposite.next.opposite.next
res.append(second)
third = second.opposite.next.opposite.next
res.append(third)
for he in res:
assert (he.is_hexagonal and (he.color is 'black'))
return res
| 6,849,385,400,819,632,000
|
Gets the three half-edges on the hexagonal boundary that are incident to a black node and point in the ccw direction.
|
planar_graph_sampler/combinatorial_classes/dissection.py
|
hexagonal_edges
|
petrovp/networkx-related
|
python
|
@property
def hexagonal_edges(self):
first = self.half_edge
res = [first]
second = first.opposite.next.opposite.next
res.append(second)
third = second.opposite.next.opposite.next
res.append(third)
for he in res:
assert (he.is_hexagonal and (he.color is 'black'))
return res
|
def root_at_random_hexagonal_edge(self):
'Selects a random hexagonal half-edge and makes it the root.'
self._half_edge = rnd.choice(self.hexagonal_edges)
| 8,306,759,444,568,594,000
|
Selects a random hexagonal half-edge and makes it the root.
|
planar_graph_sampler/combinatorial_classes/dissection.py
|
root_at_random_hexagonal_edge
|
petrovp/networkx-related
|
python
|
def root_at_random_hexagonal_edge(self):
self._half_edge = rnd.choice(self.hexagonal_edges)
|
@property
def is_admissible_slow(self):
'Checks if there is a path of length 3 with an inner edge from the root to the opposite outer vertex.'
start_node = self.half_edge
assert (start_node.color is 'black')
end_node = self.half_edge.opposite.next.opposite.next.opposite
assert (end_node.color is 'white')
start_node = start_node.node_nr
end_node = end_node.node_nr
g = self.to_networkx_graph()
paths = nx.shortest_simple_paths(g, start_node, end_node)
path_1 = next(paths)
assert (len(path_1) == 4)
path_2 = next(paths)
assert (len(path_2) == 4)
path_3 = next(paths)
return (len(path_3) > 4)
| 5,505,801,646,970,087,000
|
Checks if there is a path of length 3 with an inner edge from the root to the opposite outer vertex.
|
planar_graph_sampler/combinatorial_classes/dissection.py
|
is_admissible_slow
|
petrovp/networkx-related
|
python
|
@property
def is_admissible_slow(self):
start_node = self.half_edge
assert (start_node.color is 'black')
end_node = self.half_edge.opposite.next.opposite.next.opposite
assert (end_node.color is 'white')
start_node = start_node.node_nr
end_node = end_node.node_nr
g = self.to_networkx_graph()
paths = nx.shortest_simple_paths(g, start_node, end_node)
path_1 = next(paths)
assert (len(path_1) == 4)
path_2 = next(paths)
assert (len(path_2) == 4)
path_3 = next(paths)
return (len(path_3) > 4)
|
@property
def is_admissible(self):
'Checks if there is a path of length 3 with an inner edge from the root to the opposite outer vertex.'
start_node = self.half_edge
assert (start_node.color is 'black')
end_node = self.half_edge.opposite.next.opposite.next.opposite
assert (end_node.color is 'white')
queue = deque(list())
queue.append((self.half_edge, 0, False, set()))
while (len(queue) != 0):
top_element = queue.popleft()
top_half_edge = top_element[0]
distance = top_element[1]
has_been_inner_edge_included = top_element[2]
visited_nodes = top_element[3]
visited_nodes.add(top_half_edge.node_nr)
incident_half_edges = top_half_edge.incident_half_edges()
for walker_half_edge in incident_half_edges:
opposite = walker_half_edge.opposite
if (opposite in visited_nodes):
continue
updated_distance = (distance + 1)
new_visited_nodes = set()
new_visited_nodes.update(visited_nodes)
inner_edge_included = (has_been_inner_edge_included or (opposite.is_hexagonal is False))
if (updated_distance < 3):
queue.append((opposite, updated_distance, inner_edge_included, new_visited_nodes))
elif ((opposite.node_nr == end_node.node_nr) and inner_edge_included):
return False
return True
| 4,637,412,663,304,804,000
|
Checks if there is a path of length 3 with an inner edge from the root to the opposite outer vertex.
|
planar_graph_sampler/combinatorial_classes/dissection.py
|
is_admissible
|
petrovp/networkx-related
|
python
|
@property
def is_admissible(self):
start_node = self.half_edge
assert (start_node.color is 'black')
end_node = self.half_edge.opposite.next.opposite.next.opposite
assert (end_node.color is 'white')
queue = deque(list())
queue.append((self.half_edge, 0, False, set()))
while (len(queue) != 0):
top_element = queue.popleft()
top_half_edge = top_element[0]
distance = top_element[1]
has_been_inner_edge_included = top_element[2]
visited_nodes = top_element[3]
visited_nodes.add(top_half_edge.node_nr)
incident_half_edges = top_half_edge.incident_half_edges()
for walker_half_edge in incident_half_edges:
opposite = walker_half_edge.opposite
if (opposite in visited_nodes):
continue
updated_distance = (distance + 1)
new_visited_nodes = set()
new_visited_nodes.update(visited_nodes)
inner_edge_included = (has_been_inner_edge_included or (opposite.is_hexagonal is False))
if (updated_distance < 3):
queue.append((opposite, updated_distance, inner_edge_included, new_visited_nodes))
elif ((opposite.node_nr == end_node.node_nr) and inner_edge_included):
return False
return True
|
@property
def u_size(self):
'The u-size is the number of inner faces.'
return ((self.number_of_half_edges - 6) / 4)
| 7,695,950,379,170,463,000
|
The u-size is the number of inner faces.
|
planar_graph_sampler/combinatorial_classes/dissection.py
|
u_size
|
petrovp/networkx-related
|
python
|
@property
def u_size(self):
return ((self.number_of_half_edges - 6) / 4)
|
@property
def l_size(self):
'The l-size is the number of black inner vertices.'
node_dict = self.half_edge.node_dict()
black_vertices = len([node_nr for node_nr in node_dict if (node_dict[node_nr][0].color is 'black')])
return (black_vertices - 3)
| -6,482,723,490,751,050,000
|
The l-size is the number of black inner vertices.
|
planar_graph_sampler/combinatorial_classes/dissection.py
|
l_size
|
petrovp/networkx-related
|
python
|
@property
def l_size(self):
node_dict = self.half_edge.node_dict()
black_vertices = len([node_nr for node_nr in node_dict if (node_dict[node_nr][0].color is 'black')])
return (black_vertices - 3)
|
def to_networkx_graph(self, include_unpaired=None):
'Converts to networkx graph, encodes hexagonal nodes with colors.'
from planar_graph_sampler.combinatorial_classes.half_edge_graph import color_scale
nodes = self.half_edge.node_dict()
G = super(IrreducibleDissection, self).to_networkx_graph(include_unpaired=False)
for v in G:
if any([he.is_hexagonal for he in nodes[v]]):
G.nodes[v]['color'] = '#e8f442'
else:
G.nodes[v]['color'] = '#aaaaaa'
if (nodes[v][0].color is 'black'):
G.nodes[v]['color'] = color_scale(G.nodes[v]['color'], 0.5)
return G
| -7,118,483,803,622,384,000
|
Converts to networkx graph, encodes hexagonal nodes with colors.
|
planar_graph_sampler/combinatorial_classes/dissection.py
|
to_networkx_graph
|
petrovp/networkx-related
|
python
|
def to_networkx_graph(self, include_unpaired=None):
from planar_graph_sampler.combinatorial_classes.half_edge_graph import color_scale
nodes = self.half_edge.node_dict()
G = super(IrreducibleDissection, self).to_networkx_graph(include_unpaired=False)
for v in G:
if any([he.is_hexagonal for he in nodes[v]]):
G.nodes[v]['color'] = '#e8f442'
else:
G.nodes[v]['color'] = '#aaaaaa'
if (nodes[v][0].color is 'black'):
G.nodes[v]['color'] = color_scale(G.nodes[v]['color'], 0.5)
return G
|
def read_json(json_file: str, debug=False) -> List[Dict]:
'\n reads the json files, and formats the description that\n is associated with each of the json dictionaries that are read in.\n\n :param json_file: json file to parse from\n :param debug: if set to true, will print the json dictionaries as\n they are read in\n :return: list of all of the json dictionaries\n '
with open(json_file, 'r') as json_desc:
project_list: List[Dict] = json.load(json_desc)
for project in project_list:
project['description'] = ' '.join(project['description'])
if debug:
print(project)
return project_list
| -2,822,950,693,851,630,000
|
reads the json file and formats the description that
is associated with each of the json dictionaries that are read in.
:param json_file: json file to parse from
:param debug: if set to true, will print the json dictionaries as
they are read in
:return: list of all of the json dictionaries
|
app.py
|
read_json
|
Jim-Shaddix/Personal-Website
|
python
|
def read_json(json_file: str, debug=False) -> List[Dict]:
'\n reads the json files, and formats the description that\n is associated with each of the json dictionaries that are read in.\n\n :param json_file: json file to parse from\n :param debug: if set to true, will print the json dictionaries as\n they are read in\n :return: list of all of the json dictionaries\n '
with open(json_file, 'r') as json_desc:
project_list: List[Dict] = json.load(json_desc)
for project in project_list:
project['description'] = ' '.join(project['description'])
if debug:
print(project)
return project_list
|
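A quick illustration of read_json above: given a file whose entries store the description as a list of strings, the helper joins each list into one string (the file name and contents are made up for the example):
import json

sample = [{'title': 'Demo project', 'description': ['First sentence.', 'Second sentence.']}]
with open('projects_sample.json', 'w') as fh:
    json.dump(sample, fh)

projects = read_json('projects_sample.json', debug=True)
print(projects[0]['description'])  # 'First sentence. Second sentence.'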
def translate_batch(self, batch, fast=False):
"\n Translate a batch of sentences.\n\n Mostly a wrapper around :obj:`Beam`.\n\n Args:\n batch (:obj:`Batch`): a batch from a dataset object\n data (:obj:`Dataset`): the dataset object\n fast (bool): enables fast beam search (may not support all features)\n\n Todo:\n Shouldn't need the original dataset.\n "
with torch.no_grad():
return self._fast_translate_batch(batch, self.max_length, min_length=self.min_length)
| -2,044,400,624,652,274,400
|
Translate a batch of sentences.
Mostly a wrapper around :obj:`Beam`.
Args:
batch (:obj:`Batch`): a batch from a dataset object
data (:obj:`Dataset`): the dataset object
fast (bool): enables fast beam search (may not support all features)
Todo:
Shouldn't need the original dataset.
|
src/models/predictor.py
|
translate_batch
|
SebastianVeile/PreSumm
|
python
|
def translate_batch(self, batch, fast=False):
"\n Translate a batch of sentences.\n\n Mostly a wrapper around :obj:`Beam`.\n\n Args:\n batch (:obj:`Batch`): a batch from a dataset object\n data (:obj:`Dataset`): the dataset object\n fast (bool): enables fast beam search (may not support all features)\n\n Todo:\n Shouldn't need the original dataset.\n "
with torch.no_grad():
return self._fast_translate_batch(batch, self.max_length, min_length=self.min_length)
|
def log(self, sent_number):
'\n Log translation.\n '
output = '\nSENT {}: {}\n'.format(sent_number, self.src_raw)
best_pred = self.pred_sents[0]
best_score = self.pred_scores[0]
pred_sent = ' '.join(best_pred)
output += 'PRED {}: {}\n'.format(sent_number, pred_sent)
output += 'PRED SCORE: {:.4f}\n'.format(best_score)
if (self.gold_sent is not None):
tgt_sent = ' '.join(self.gold_sent)
output += 'GOLD {}: {}\n'.format(sent_number, tgt_sent)
output += 'GOLD SCORE: {:.4f}\n'.format(self.gold_score)
if (len(self.pred_sents) > 1):
output += '\nBEST HYP:\n'
for (score, sent) in zip(self.pred_scores, self.pred_sents):
output += '[{:.4f}] {}\n'.format(score, sent)
return output
| 6,652,500,622,530,272,000
|
Log translation.
|
src/models/predictor.py
|
log
|
SebastianVeile/PreSumm
|
python
|
def log(self, sent_number):
'\n \n '
output = '\nSENT {}: {}\n'.format(sent_number, self.src_raw)
best_pred = self.pred_sents[0]
best_score = self.pred_scores[0]
pred_sent = ' '.join(best_pred)
output += 'PRED {}: {}\n'.format(sent_number, pred_sent)
output += 'PRED SCORE: {:.4f}\n'.format(best_score)
if (self.gold_sent is not None):
tgt_sent = ' '.join(self.gold_sent)
output += 'GOLD {}: {}\n'.format(sent_number, tgt_sent)
output += 'GOLD SCORE: {:.4f}\n'.format(self.gold_score)
if (len(self.pred_sents) > 1):
output += '\nBEST HYP:\n'
for (score, sent) in zip(self.pred_scores, self.pred_sents):
output += '[{:.4f}] {}\n'.format(score, sent)
return output
|
def test_get_scalars_with_actual_inf_and_nan(self):
'Test for get_scalars() call that involve inf and nan in user data.'
mock_api_client = mock.Mock()
def stream_experiment_data(request, **kwargs):
self.assertEqual(request.experiment_id, '789')
self.assertEqual(kwargs['metadata'], grpc_util.version_metadata())
response = export_service_pb2.StreamExperimentDataResponse()
response.run_name = 'train'
response.tag_name = 'batch_loss'
response.points.steps.append(0)
response.points.values.append(np.nan)
response.points.wall_times.add(seconds=0, nanos=0)
response.points.steps.append(1)
response.points.values.append(np.inf)
response.points.wall_times.add(seconds=10, nanos=0)
(yield response)
mock_api_client.StreamExperimentData = mock.Mock(wraps=stream_experiment_data)
with mock.patch.object(experiment_from_dev, 'get_api_client', (lambda api_endpoint: mock_api_client)):
experiment = experiment_from_dev.ExperimentFromDev('789')
dataframe = experiment.get_scalars(pivot=True)
expected = pandas.DataFrame({'run': (['train'] * 2), 'step': [0, 1], 'batch_loss': [np.nan, np.inf]})
pandas.testing.assert_frame_equal(dataframe, expected, check_names=True)
| 2,623,809,314,212,357,600
|
Test for get_scalars() call that involve inf and nan in user data.
|
tensorboard/data/experimental/experiment_from_dev_test.py
|
test_get_scalars_with_actual_inf_and_nan
|
AseiSugiyama/tensorboard
|
python
|
def test_get_scalars_with_actual_inf_and_nan(self):
mock_api_client = mock.Mock()
def stream_experiment_data(request, **kwargs):
self.assertEqual(request.experiment_id, '789')
self.assertEqual(kwargs['metadata'], grpc_util.version_metadata())
response = export_service_pb2.StreamExperimentDataResponse()
response.run_name = 'train'
response.tag_name = 'batch_loss'
response.points.steps.append(0)
response.points.values.append(np.nan)
response.points.wall_times.add(seconds=0, nanos=0)
response.points.steps.append(1)
response.points.values.append(np.inf)
response.points.wall_times.add(seconds=10, nanos=0)
(yield response)
mock_api_client.StreamExperimentData = mock.Mock(wraps=stream_experiment_data)
with mock.patch.object(experiment_from_dev, 'get_api_client', (lambda api_endpoint: mock_api_client)):
experiment = experiment_from_dev.ExperimentFromDev('789')
dataframe = experiment.get_scalars(pivot=True)
expected = pandas.DataFrame({'run': (['train'] * 2), 'step': [0, 1], 'batch_loss': [np.nan, np.inf]})
pandas.testing.assert_frame_equal(dataframe, expected, check_names=True)
|
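The test above checks that non-finite values survive the round trip; the comparison works because pandas.testing.assert_frame_equal treats matching NaNs as equal even though NaN != NaN elementwise. A self-contained sketch of that idiom:

import numpy as np
import pandas as pd

expected = pd.DataFrame({'run': ['train'] * 2, 'step': [0, 1], 'batch_loss': [np.nan, np.inf]})
actual = expected.copy()
# Passes: the NaN/inf positions match, so the frames compare as equal.
pd.testing.assert_frame_equal(actual, expected, check_names=True)
print(expected)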
def class_from_module_path(module_path: Text, lookup_path: Optional[Text]=None) -> Type:
'Given the module name and path of a class, tries to retrieve the class.\n\n The loaded class can be used to instantiate new objects.\n\n Args:\n module_path: either an absolute path to a Python class,\n or the name of the class in the local / global scope.\n lookup_path: a path where to load the class from, if it cannot\n be found in the local / global scope.\n\n Returns:\n a Python class\n\n Raises:\n ImportError, in case the Python class cannot be found.\n RasaException, in case the imported result is something other than a class\n '
klass = None
if ('.' in module_path):
(module_name, _, class_name) = module_path.rpartition('.')
m = importlib.import_module(module_name)
klass = getattr(m, class_name, None)
elif lookup_path:
m = importlib.import_module(lookup_path)
klass = getattr(m, module_path, None)
if (klass is None):
raise ImportError(f'Cannot retrieve class from path {module_path}.')
if (not inspect.isclass(klass)):
raise RasaException(f'`class_from_module_path()` is expected to return a class, but for {module_path} we got a {type(klass)}.')
return klass
| -4,786,117,763,749,435,000
|
Given the module name and path of a class, tries to retrieve the class.
The loaded class can be used to instantiate new objects.
Args:
module_path: either an absolute path to a Python class,
or the name of the class in the local / global scope.
lookup_path: a path where to load the class from, if it cannot
be found in the local / global scope.
Returns:
a Python class
Raises:
ImportError, in case the Python class cannot be found.
RasaException, in case the imported result is something other than a class
|
rasa/shared/utils/common.py
|
class_from_module_path
|
GCES-2021-1/rasa
|
python
|
def class_from_module_path(module_path: Text, lookup_path: Optional[Text]=None) -> Type:
'Given the module name and path of a class, tries to retrieve the class.\n\n The loaded class can be used to instantiate new objects.\n\n Args:\n module_path: either an absolute path to a Python class,\n or the name of the class in the local / global scope.\n lookup_path: a path where to load the class from, if it cannot\n be found in the local / global scope.\n\n Returns:\n a Python class\n\n Raises:\n ImportError, in case the Python class cannot be found.\n RasaException, in case the imported result is something other than a class\n '
klass = None
if ('.' in module_path):
(module_name, _, class_name) = module_path.rpartition('.')
m = importlib.import_module(module_name)
klass = getattr(m, class_name, None)
elif lookup_path:
m = importlib.import_module(lookup_path)
klass = getattr(m, module_path, None)
if (klass is None):
raise ImportError(f'Cannot retrieve class from path {module_path}.')
if (not inspect.isclass(klass)):
raise RasaException(f'`class_from_module_path()` is expected to return a class, but for {module_path} we got a {type(klass)}.')
return klass
|
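A hedged usage sketch for class_from_module_path(), assuming the function above is importable together with its own imports; it resolves standard-library classes so no Rasa-specific modules are needed:

import collections
import pathlib

# Dotted path: the module part is imported, the class part is looked up on it.
ordered_dict_cls = class_from_module_path('collections.OrderedDict')
assert ordered_dict_cls is collections.OrderedDict

# Bare name plus lookup_path: the name is resolved inside that module.
path_cls = class_from_module_path('Path', lookup_path='pathlib')
assert path_cls is pathlib.Path

# A missing name raises ImportError; a non-class attribute raises RasaException.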