body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
138846f0268a2dbda83d106242e04c86ac869a7ca75c2d4a42b6a358833825f5 | def test_get_sampled_trajectory(self) -> None:
'\n Tests the get sampled method\n '
scene_simple_trajectory = self.scene_simple_trajectory
result = scene_simple_trajectory.get_sampled_trajectory()
self.assertEqual(len(result), 2)
self.assertEqual(result[0].x, 1)
self.assertEqual(result[1].x, 3) | Tests the get sampled method | nuplan/planning/utils/serialization/test/test_scene_simple_trajectory.py | test_get_sampled_trajectory | motional/nuplan-devkit | 128 | python | def test_get_sampled_trajectory(self) -> None:
'\n \n '
scene_simple_trajectory = self.scene_simple_trajectory
result = scene_simple_trajectory.get_sampled_trajectory()
self.assertEqual(len(result), 2)
self.assertEqual(result[0].x, 1)
self.assertEqual(result[1].x, 3) | def test_get_sampled_trajectory(self) -> None:
'\n \n '
scene_simple_trajectory = self.scene_simple_trajectory
result = scene_simple_trajectory.get_sampled_trajectory()
self.assertEqual(len(result), 2)
self.assertEqual(result[0].x, 1)
self.assertEqual(result[1].x, 3)<|docstring|>Tests the get sampled method<|endoftext|> |
f30fd7b1c44a5880ecc7697be49fffc05d7989fd877b493a079a642c0e76f593 | def load_combined_resources_as_df(fnlabel2fn_obj):
'\n load instances of class EnFrame (see dfn_classes.py)\n into dataframe for computing descriptive statistics\n\n\n :param dict fnlabel2fn_obj: frame label -> instance of class EnFrame (see dfn_classes.py)\n\n :rtype: pandas.core.frame.DataFrame\n :return: df for computing statistics\n '
list_of_lists = []
headers = ['frame_label', '# of unique lexemes', '# of unique lemmas', '# of RBN senses']
lu2number_of_dutch_senses = dict()
for (frame_label, frame_obj) in fnlabel2fn_obj.items():
unique_lexemes = set([lexeme_obj.lexeme for lexeme_obj in frame_obj.lexeme_objs])
unique_lemmas = set([lemma_obj.lemma for lemma_obj in frame_obj.lemma_objs])
number_of_rbn_senses = 0
for (lu_id, lu_obj) in frame_obj.lu_id2lu_obj.items():
num_of_lu_senses = 0
if hasattr(lu_obj, 'rbn_senses'):
num_of_lu_senses += len(lu_obj.rbn_senses)
if lu_obj.rbn_senses:
for sense_id in lu_obj.rbn_senses:
number_of_rbn_senses += 1
lu2number_of_dutch_senses[lu_id] = num_of_lu_senses
one_row = [frame_label, len(unique_lexemes), len(unique_lemmas), number_of_rbn_senses]
list_of_lists.append(one_row)
df = pandas.DataFrame(list_of_lists, columns=headers)
return (df, lu2number_of_dutch_senses) | load instances of class EnFrame (see dfn_classes.py)
into dataframe for computing descriptive statistics
:param dict fnlabel2fn_obj: frame label -> instance of class EnFrame (see dfn_classes.py)
:rtype: pandas.core.frame.DataFrame
:return: df for computing statistics | lib/descriptive_stats.py | load_combined_resources_as_df | cltl/Dutch_FrameNet_Lexicon | 0 | python | def load_combined_resources_as_df(fnlabel2fn_obj):
'\n load instances of class EnFrame (see dfn_classes.py)\n into dataframe for computing descriptive statistics\n\n\n :param dict fnlabel2fn_obj: frame label -> instance of class EnFrame (see dfn_classes.py)\n\n :rtype: pandas.core.frame.DataFrame\n :return: df for computing statistics\n '
list_of_lists = []
headers = ['frame_label', '# of unique lexemes', '# of unique lemmas', '# of RBN senses']
lu2number_of_dutch_senses = dict()
for (frame_label, frame_obj) in fnlabel2fn_obj.items():
unique_lexemes = set([lexeme_obj.lexeme for lexeme_obj in frame_obj.lexeme_objs])
unique_lemmas = set([lemma_obj.lemma for lemma_obj in frame_obj.lemma_objs])
number_of_rbn_senses = 0
for (lu_id, lu_obj) in frame_obj.lu_id2lu_obj.items():
num_of_lu_senses = 0
if hasattr(lu_obj, 'rbn_senses'):
num_of_lu_senses += len(lu_obj.rbn_senses)
if lu_obj.rbn_senses:
for sense_id in lu_obj.rbn_senses:
number_of_rbn_senses += 1
lu2number_of_dutch_senses[lu_id] = num_of_lu_senses
one_row = [frame_label, len(unique_lexemes), len(unique_lemmas), number_of_rbn_senses]
list_of_lists.append(one_row)
df = pandas.DataFrame(list_of_lists, columns=headers)
return (df, lu2number_of_dutch_senses) | def load_combined_resources_as_df(fnlabel2fn_obj):
'\n load instances of class EnFrame (see dfn_classes.py)\n into dataframe for computing descriptive statistics\n\n\n :param dict fnlabel2fn_obj: frame label -> instance of class EnFrame (see dfn_classes.py)\n\n :rtype: pandas.core.frame.DataFrame\n :return: df for computing statistics\n '
list_of_lists = []
headers = ['frame_label', '# of unique lexemes', '# of unique lemmas', '# of RBN senses']
lu2number_of_dutch_senses = dict()
for (frame_label, frame_obj) in fnlabel2fn_obj.items():
unique_lexemes = set([lexeme_obj.lexeme for lexeme_obj in frame_obj.lexeme_objs])
unique_lemmas = set([lemma_obj.lemma for lemma_obj in frame_obj.lemma_objs])
number_of_rbn_senses = 0
for (lu_id, lu_obj) in frame_obj.lu_id2lu_obj.items():
num_of_lu_senses = 0
if hasattr(lu_obj, 'rbn_senses'):
num_of_lu_senses += len(lu_obj.rbn_senses)
if lu_obj.rbn_senses:
for sense_id in lu_obj.rbn_senses:
number_of_rbn_senses += 1
lu2number_of_dutch_senses[lu_id] = num_of_lu_senses
one_row = [frame_label, len(unique_lexemes), len(unique_lemmas), number_of_rbn_senses]
list_of_lists.append(one_row)
df = pandas.DataFrame(list_of_lists, columns=headers)
return (df, lu2number_of_dutch_senses)<|docstring|>load instances of class EnFrame (see dfn_classes.py)
into dataframe for computing descriptive statistics
:param dict fnlabel2fn_obj: frame label -> instance of class EnFrame (see dfn_classes.py)
:rtype: pandas.core.frame.DataFrame
:return: df for computing statistics<|endoftext|> |
a069fcb20f614123a4a21d6eb1f6230d37ab0894878e4a1189900c2d6ab357a6 | def setup(self):
'Setup the pesummary.core.finish.FinishingTouches class\n '
if (not os.path.isdir('.outdir')):
os.mkdir('.outdir')
(opts, inputs) = make_argparse()
self.finish = FinishingTouches(inputs) | Setup the pesummary.core.finish.FinishingTouches class | pesummary/tests/finish_test.py | setup | pesummary/pesummary | 1 | python | def setup(self):
'\n '
if (not os.path.isdir('.outdir')):
os.mkdir('.outdir')
(opts, inputs) = make_argparse()
self.finish = FinishingTouches(inputs) | def setup(self):
'\n '
if (not os.path.isdir('.outdir')):
os.mkdir('.outdir')
(opts, inputs) = make_argparse()
self.finish = FinishingTouches(inputs)<|docstring|>Setup the pesummary.core.finish.FinishingTouches class<|endoftext|> |
de5af7d954059952a81a53a34355f871dff845a66a98accec75d7d04d5699144 | def teardown(self):
'Remove the any files generated\n '
if os.path.isdir('.outdir'):
shutil.rmtree('.outdir') | Remove the any files generated | pesummary/tests/finish_test.py | teardown | pesummary/pesummary | 1 | python | def teardown(self):
'\n '
if os.path.isdir('.outdir'):
shutil.rmtree('.outdir') | def teardown(self):
'\n '
if os.path.isdir('.outdir'):
shutil.rmtree('.outdir')<|docstring|>Remove the any files generated<|endoftext|> |
b1f96d5ba683bf6a37c0d643b9798d4c4381a616f0456a681d7beb8c5ef5804f | def test_default_message(self):
'Test the default email message\n '
message = self.finish._email_message()
assert (message is not None) | Test the default email message | pesummary/tests/finish_test.py | test_default_message | pesummary/pesummary | 1 | python | def test_default_message(self):
'\n '
message = self.finish._email_message()
assert (message is not None) | def test_default_message(self):
'\n '
message = self.finish._email_message()
assert (message is not None)<|docstring|>Test the default email message<|endoftext|> |
c2ce3783349a79c6b790c6fceaab5e158ff08cd74717971bebe476cf8e515b17 | def test_custom_message(self):
'Test a custom email message\n '
custom_message = 'This is a test message'
message = self.finish._email_message(message=custom_message)
assert (message == custom_message) | Test a custom email message | pesummary/tests/finish_test.py | test_custom_message | pesummary/pesummary | 1 | python | def test_custom_message(self):
'\n '
custom_message = 'This is a test message'
message = self.finish._email_message(message=custom_message)
assert (message == custom_message) | def test_custom_message(self):
'\n '
custom_message = 'This is a test message'
message = self.finish._email_message(message=custom_message)
assert (message == custom_message)<|docstring|>Test a custom email message<|endoftext|> |
5cc093c2047fe91e61aa389f170f91a4fa3ab8fe6da144c143acd61c0ccf910c | def do_compile_translations(target=('t', ''), i18n_dir=('i', ''), all=('a', False)):
'\n Compiling all the templates in specified application.\n '
if ((not target) and (not all)):
print_status('Please specify target.')
sys.exit(1)
elif (target == 'kay'):
print_status('Compiling builtin languages')
root = path.join(kay.KAY_DIR, 'i18n')
elif all:
targets = get_user_apps()
for target in targets:
do_compile_translations(target=target, i18n_dir=None, all=False)
do_compile_translations(target=kay.PROJECT_DIR, i18n_dir=None, all=False)
sys.exit(0)
else:
if i18n_dir:
root = i18n_dir
else:
root = path.join(target, 'i18n')
if (not path.isdir(root)):
print('i18n folder missing')
sys.exit(1)
print_status(('Compiling %s' % root))
for domain in domains:
for lang in listdir(root):
folder = path.join(root, lang)
translations = path.join(folder, 'LC_MESSAGES', (domain + '.po'))
if path.isfile(translations):
mo_file = open(translations.replace('.po', '.mo'), 'wb')
print_status(('Compiling %s ' % translations))
f = file(translations)
try:
catalog = read_po(f, locale=lang)
finally:
f.close()
write_mo(mo_file, catalog)
mo_file.close()
print_status('All done.') | Compiling all the templates in specified application. | kay/management/compile_translations.py | do_compile_translations | michilu/kay | 1 | python | def do_compile_translations(target=('t', ), i18n_dir=('i', ), all=('a', False)):
'\n \n '
if ((not target) and (not all)):
print_status('Please specify target.')
sys.exit(1)
elif (target == 'kay'):
print_status('Compiling builtin languages')
root = path.join(kay.KAY_DIR, 'i18n')
elif all:
targets = get_user_apps()
for target in targets:
do_compile_translations(target=target, i18n_dir=None, all=False)
do_compile_translations(target=kay.PROJECT_DIR, i18n_dir=None, all=False)
sys.exit(0)
else:
if i18n_dir:
root = i18n_dir
else:
root = path.join(target, 'i18n')
if (not path.isdir(root)):
print('i18n folder missing')
sys.exit(1)
print_status(('Compiling %s' % root))
for domain in domains:
for lang in listdir(root):
folder = path.join(root, lang)
translations = path.join(folder, 'LC_MESSAGES', (domain + '.po'))
if path.isfile(translations):
mo_file = open(translations.replace('.po', '.mo'), 'wb')
print_status(('Compiling %s ' % translations))
f = file(translations)
try:
catalog = read_po(f, locale=lang)
finally:
f.close()
write_mo(mo_file, catalog)
mo_file.close()
print_status('All done.') | def do_compile_translations(target=('t', ), i18n_dir=('i', ), all=('a', False)):
'\n \n '
if ((not target) and (not all)):
print_status('Please specify target.')
sys.exit(1)
elif (target == 'kay'):
print_status('Compiling builtin languages')
root = path.join(kay.KAY_DIR, 'i18n')
elif all:
targets = get_user_apps()
for target in targets:
do_compile_translations(target=target, i18n_dir=None, all=False)
do_compile_translations(target=kay.PROJECT_DIR, i18n_dir=None, all=False)
sys.exit(0)
else:
if i18n_dir:
root = i18n_dir
else:
root = path.join(target, 'i18n')
if (not path.isdir(root)):
print('i18n folder missing')
sys.exit(1)
print_status(('Compiling %s' % root))
for domain in domains:
for lang in listdir(root):
folder = path.join(root, lang)
translations = path.join(folder, 'LC_MESSAGES', (domain + '.po'))
if path.isfile(translations):
mo_file = open(translations.replace('.po', '.mo'), 'wb')
print_status(('Compiling %s ' % translations))
f = file(translations)
try:
catalog = read_po(f, locale=lang)
finally:
f.close()
write_mo(mo_file, catalog)
mo_file.close()
print_status('All done.')<|docstring|>Compiling all the templates in specified application.<|endoftext|> |
3a7e0d350454aa9a4471247b04f353752a4ffdf78bdf5de466dfe7260e90f974 | def test_create_no_args(self):
' should fail with no args '
r = support.create_project(self, '', '', confirm=False)
self.assertTrue(r.failed, 'should have failed') | should fail with no args | cauldron/test/cli/commands/test_create.py | test_create_no_args | selasley/cauldron | 0 | python | def test_create_no_args(self):
' '
r = support.create_project(self, , , confirm=False)
self.assertTrue(r.failed, 'should have failed') | def test_create_no_args(self):
' '
r = support.create_project(self, , , confirm=False)
self.assertTrue(r.failed, 'should have failed')<|docstring|>should fail with no args<|endoftext|> |
3c828abd899141e5cd58d8846257c54418c8109a831ebb9d99b333d9c0b35350 | def test_autocomplete(self):
'\n\n :return:\n '
alias = 'ex'
path = environ.paths.resources('examples')
support.run_command('alias add "{}" "{}" --temporary'.format(alias, path))
result = support.autocomplete('create my_project @home:')
self.assertIsNotNone(result, support.Message('autocomplete result should not be None', result=result))
items = [(e, os.path.join(path, e)) for e in os.listdir(path)]
items = [e for e in items if os.path.isdir(e[1])]
result = support.autocomplete('create my_project @ex:')
self.assertEqual(len(result), len(items), support.Message('should autocomplete from the examples folder', result=result, items=items))
hellos = [e for e in items if e[0].startswith('hell')]
result = support.autocomplete('create my_project @ex:hell')
self.assertEqual(len(result), len(hellos), support.Message('should autocomplete examples that start with "hell"', result=result, items=items)) | :return: | cauldron/test/cli/commands/test_create.py | test_autocomplete | selasley/cauldron | 0 | python | def test_autocomplete(self):
'\n\n \n '
alias = 'ex'
path = environ.paths.resources('examples')
support.run_command('alias add "{}" "{}" --temporary'.format(alias, path))
result = support.autocomplete('create my_project @home:')
self.assertIsNotNone(result, support.Message('autocomplete result should not be None', result=result))
items = [(e, os.path.join(path, e)) for e in os.listdir(path)]
items = [e for e in items if os.path.isdir(e[1])]
result = support.autocomplete('create my_project @ex:')
self.assertEqual(len(result), len(items), support.Message('should autocomplete from the examples folder', result=result, items=items))
hellos = [e for e in items if e[0].startswith('hell')]
result = support.autocomplete('create my_project @ex:hell')
self.assertEqual(len(result), len(hellos), support.Message('should autocomplete examples that start with "hell"', result=result, items=items)) | def test_autocomplete(self):
'\n\n \n '
alias = 'ex'
path = environ.paths.resources('examples')
support.run_command('alias add "{}" "{}" --temporary'.format(alias, path))
result = support.autocomplete('create my_project @home:')
self.assertIsNotNone(result, support.Message('autocomplete result should not be None', result=result))
items = [(e, os.path.join(path, e)) for e in os.listdir(path)]
items = [e for e in items if os.path.isdir(e[1])]
result = support.autocomplete('create my_project @ex:')
self.assertEqual(len(result), len(items), support.Message('should autocomplete from the examples folder', result=result, items=items))
hellos = [e for e in items if e[0].startswith('hell')]
result = support.autocomplete('create my_project @ex:hell')
self.assertEqual(len(result), len(hellos), support.Message('should autocomplete examples that start with "hell"', result=result, items=items))<|docstring|>:return:<|endoftext|> |
b2c4b8ac5827ffff5186a813754cc595ca1068a65a8c43cd02707deb89296a8e | def test_folders(self):
' should create libs and assets folders in project '
libs_folder = 'libs_folder'
assets_folder = 'assets_folder'
result = support.create_project(self, 'marcus', libs=libs_folder, assets=assets_folder)
self.assertFalse(result.failed)
project = cd.project.get_internal_project()
items = os.listdir(project.source_directory)
self.assertIn(libs_folder, items)
self.assertIn(assets_folder, items)
self.assertIn(libs_folder, project.settings.fetch('library_folders'))
self.assertIn(assets_folder, project.settings.fetch('asset_folders'))
with open(project.source_path, 'r') as f:
data = json.load(f)
self.assertEqual(libs_folder, data['library_folders'][0])
self.assertEqual(assets_folder, data['asset_folders'][0]) | should create libs and assets folders in project | cauldron/test/cli/commands/test_create.py | test_folders | selasley/cauldron | 0 | python | def test_folders(self):
' '
libs_folder = 'libs_folder'
assets_folder = 'assets_folder'
result = support.create_project(self, 'marcus', libs=libs_folder, assets=assets_folder)
self.assertFalse(result.failed)
project = cd.project.get_internal_project()
items = os.listdir(project.source_directory)
self.assertIn(libs_folder, items)
self.assertIn(assets_folder, items)
self.assertIn(libs_folder, project.settings.fetch('library_folders'))
self.assertIn(assets_folder, project.settings.fetch('asset_folders'))
with open(project.source_path, 'r') as f:
data = json.load(f)
self.assertEqual(libs_folder, data['library_folders'][0])
self.assertEqual(assets_folder, data['asset_folders'][0]) | def test_folders(self):
' '
libs_folder = 'libs_folder'
assets_folder = 'assets_folder'
result = support.create_project(self, 'marcus', libs=libs_folder, assets=assets_folder)
self.assertFalse(result.failed)
project = cd.project.get_internal_project()
items = os.listdir(project.source_directory)
self.assertIn(libs_folder, items)
self.assertIn(assets_folder, items)
self.assertIn(libs_folder, project.settings.fetch('library_folders'))
self.assertIn(assets_folder, project.settings.fetch('asset_folders'))
with open(project.source_path, 'r') as f:
data = json.load(f)
self.assertEqual(libs_folder, data['library_folders'][0])
self.assertEqual(assets_folder, data['asset_folders'][0])<|docstring|>should create libs and assets folders in project<|endoftext|> |
304b2a363c4e9c3879ba32a369d6395f942245490b03b55a64937c7af4de8f29 | def test_write_fail(self):
' should fail if directory cannot be written '
target = 'cauldron.cli.commands.create.actions.write_project_data'
with patch(target) as func:
func.return_value = False
result = support.create_project(self, 'aurelius', confirm=False)
self.assertTrue(result.failed) | should fail if directory cannot be written | cauldron/test/cli/commands/test_create.py | test_write_fail | selasley/cauldron | 0 | python | def test_write_fail(self):
' '
target = 'cauldron.cli.commands.create.actions.write_project_data'
with patch(target) as func:
func.return_value = False
result = support.create_project(self, 'aurelius', confirm=False)
self.assertTrue(result.failed) | def test_write_fail(self):
' '
target = 'cauldron.cli.commands.create.actions.write_project_data'
with patch(target) as func:
func.return_value = False
result = support.create_project(self, 'aurelius', confirm=False)
self.assertTrue(result.failed)<|docstring|>should fail if directory cannot be written<|endoftext|> |
4ced09f0e7a7b098867424f9c8629284cdbb39da086e1ae316dab3c6aadee524 | def test_create_fail(self):
' should fail if directory cannot be created '
target = '.'.join(['cauldron.cli.commands.create', 'actions.create_project_directories'])
with patch(target) as func:
func.return_value = False
result = support.create_project(self, 'augustus', confirm=False)
self.assertTrue(result.failed) | should fail if directory cannot be created | cauldron/test/cli/commands/test_create.py | test_create_fail | selasley/cauldron | 0 | python | def test_create_fail(self):
' '
target = '.'.join(['cauldron.cli.commands.create', 'actions.create_project_directories'])
with patch(target) as func:
func.return_value = False
result = support.create_project(self, 'augustus', confirm=False)
self.assertTrue(result.failed) | def test_create_fail(self):
' '
target = '.'.join(['cauldron.cli.commands.create', 'actions.create_project_directories'])
with patch(target) as func:
func.return_value = False
result = support.create_project(self, 'augustus', confirm=False)
self.assertTrue(result.failed)<|docstring|>should fail if directory cannot be created<|endoftext|> |
fb0ac994c7ef9326a335d945ec3812ac2f8417fb8dae6669dd0773155cf10c8d | def test_autocomplete_absolute_path(self):
' should properly autocomplete an alias '
directory = os.path.dirname(os.path.realpath(__file__))
result = support.autocomplete('create fake "{}"'.format(directory))
self.assertIsNotNone(result) | should properly autocomplete an alias | cauldron/test/cli/commands/test_create.py | test_autocomplete_absolute_path | selasley/cauldron | 0 | python | def test_autocomplete_absolute_path(self):
' '
directory = os.path.dirname(os.path.realpath(__file__))
result = support.autocomplete('create fake "{}"'.format(directory))
self.assertIsNotNone(result) | def test_autocomplete_absolute_path(self):
' '
directory = os.path.dirname(os.path.realpath(__file__))
result = support.autocomplete('create fake "{}"'.format(directory))
self.assertIsNotNone(result)<|docstring|>should properly autocomplete an alias<|endoftext|> |
ed6bfa13afdc7fac2d6bc4447ba313f211f09ceeb4fbed8c8baadbab636ec130 | def test_autocomplete_empty(self):
' should properly autocomplete an alias '
result = support.autocomplete('create')
self.assertEqual(len(result), 0) | should properly autocomplete an alias | cauldron/test/cli/commands/test_create.py | test_autocomplete_empty | selasley/cauldron | 0 | python | def test_autocomplete_empty(self):
' '
result = support.autocomplete('create')
self.assertEqual(len(result), 0) | def test_autocomplete_empty(self):
' '
result = support.autocomplete('create')
self.assertEqual(len(result), 0)<|docstring|>should properly autocomplete an alias<|endoftext|> |
9385dd3c76f72cc2116382245de3024651eb37c09725199f1ea27c96c082e992 | def test_incomplete_alias(self):
' should properly autocomplete an incomplete alias '
result = support.autocomplete('create fake @ho')
self.assertEqual(len(result), 1)
self.assertEqual(result[0], 'home:') | should properly autocomplete an incomplete alias | cauldron/test/cli/commands/test_create.py | test_incomplete_alias | selasley/cauldron | 0 | python | def test_incomplete_alias(self):
' '
result = support.autocomplete('create fake @ho')
self.assertEqual(len(result), 1)
self.assertEqual(result[0], 'home:') | def test_incomplete_alias(self):
' '
result = support.autocomplete('create fake @ho')
self.assertEqual(len(result), 1)
self.assertEqual(result[0], 'home:')<|docstring|>should properly autocomplete an incomplete alias<|endoftext|> |
fdd6cd1053b4e42cc14708fe6fe9c032f662fa6fad01cc7ac8486f8ac4eb9928 | def test_create_project_directory(self):
' should abort if directory already exists '
path = self.get_temp_path('test-create', 'project-directory-1')
os.makedirs(path)
response = create_actions.create_project_directories('some-name', path)
self.assertTrue(response.success) | should abort if directory already exists | cauldron/test/cli/commands/test_create.py | test_create_project_directory | selasley/cauldron | 0 | python | def test_create_project_directory(self):
' '
path = self.get_temp_path('test-create', 'project-directory-1')
os.makedirs(path)
response = create_actions.create_project_directories('some-name', path)
self.assertTrue(response.success) | def test_create_project_directory(self):
' '
path = self.get_temp_path('test-create', 'project-directory-1')
os.makedirs(path)
response = create_actions.create_project_directories('some-name', path)
self.assertTrue(response.success)<|docstring|>should abort if directory already exists<|endoftext|> |
1f2ca8c48f6cf7b50239fa2d9efade91a2b55271311cd88de01b893a3ccda995 | def test_create_project_directory_fail(self):
' should fail if directory cannot be created '
path = self.get_temp_path('test-create', 'project-directory-2')
with patch('os.makedirs') as make_dirs:
make_dirs.side_effect = IOError('Fake Error')
response = create_actions.create_project_directories('some-name', path)
self.assertFalse(response.success) | should fail if directory cannot be created | cauldron/test/cli/commands/test_create.py | test_create_project_directory_fail | selasley/cauldron | 0 | python | def test_create_project_directory_fail(self):
' '
path = self.get_temp_path('test-create', 'project-directory-2')
with patch('os.makedirs') as make_dirs:
make_dirs.side_effect = IOError('Fake Error')
response = create_actions.create_project_directories('some-name', path)
self.assertFalse(response.success) | def test_create_project_directory_fail(self):
' '
path = self.get_temp_path('test-create', 'project-directory-2')
with patch('os.makedirs') as make_dirs:
make_dirs.side_effect = IOError('Fake Error')
response = create_actions.create_project_directories('some-name', path)
self.assertFalse(response.success)<|docstring|>should fail if directory cannot be created<|endoftext|> |
8f715d3d94494c1e6a7096b7864acc5600c7731a9359380741831ee76901c720 | @patch('cauldron.cli.commands.open.remote.sync_open')
def test_remote(self, sync_open: MagicMock):
' should successfully open project remotely '
sync_open.return_value = environ.Response()
response = support.create_project(self, 'tester', confirm=False, remote_connection=environ.RemoteConnection(True, 'something.url'))
self.assertTrue(response.success)
self.assertGreater(sync_open.call_count, 0) | should successfully open project remotely | cauldron/test/cli/commands/test_create.py | test_remote | selasley/cauldron | 0 | python | @patch('cauldron.cli.commands.open.remote.sync_open')
def test_remote(self, sync_open: MagicMock):
' '
sync_open.return_value = environ.Response()
response = support.create_project(self, 'tester', confirm=False, remote_connection=environ.RemoteConnection(True, 'something.url'))
self.assertTrue(response.success)
self.assertGreater(sync_open.call_count, 0) | @patch('cauldron.cli.commands.open.remote.sync_open')
def test_remote(self, sync_open: MagicMock):
' '
sync_open.return_value = environ.Response()
response = support.create_project(self, 'tester', confirm=False, remote_connection=environ.RemoteConnection(True, 'something.url'))
self.assertTrue(response.success)
self.assertGreater(sync_open.call_count, 0)<|docstring|>should successfully open project remotely<|endoftext|> |
67e52eead445bc4cd609e3f172dd942a3ea33b56b338f090195dd19b957a2591 | def test_write_project_data_failure(self):
' should fail when unable to write definition file '
with patch('builtins.open') as func:
func.side_effect = IOError('FAKE ERROR')
response = create_actions.write_project_data('abc', {})
self.assert_has_error_code(response, 'PROJECT_CREATE_FAILED') | should fail when unable to write definition file | cauldron/test/cli/commands/test_create.py | test_write_project_data_failure | selasley/cauldron | 0 | python | def test_write_project_data_failure(self):
' '
with patch('builtins.open') as func:
func.side_effect = IOError('FAKE ERROR')
response = create_actions.write_project_data('abc', {})
self.assert_has_error_code(response, 'PROJECT_CREATE_FAILED') | def test_write_project_data_failure(self):
' '
with patch('builtins.open') as func:
func.side_effect = IOError('FAKE ERROR')
response = create_actions.write_project_data('abc', {})
self.assert_has_error_code(response, 'PROJECT_CREATE_FAILED')<|docstring|>should fail when unable to write definition file<|endoftext|> |
645fcf19bb35501b5f5b1d29af7d36c9bfaf383a12d2f8053187e92880ec5ddc | @staticmethod
def get_uuid(obj):
' Get UUID of the kubernetes object.'
if obj:
return obj.get('metadata').get('uid')
return None | Get UUID of the kubernetes object. | src/container/kube-manager/kube_manager/common/kube_config_db.py | get_uuid | vganapath/contrail-controller | 37 | python | @staticmethod
def get_uuid(obj):
' '
if obj:
return obj.get('metadata').get('uid')
return None | @staticmethod
def get_uuid(obj):
' '
if obj:
return obj.get('metadata').get('uid')
return None<|docstring|>Get UUID of the kubernetes object.<|endoftext|> |
22ca107c845f31db898ebce1f5d1157c4d5844866862ce852212bbf441a3f8ec | def get_vn_from_annotation(self, annotations):
' Get vn-fq-name if specified in annotations of a k8s object.\n '
vn_ann = annotations.get('opencontrail.org/network', None)
if vn_ann:
return get_vn_fq_name_from_dict_string(vn_ann)
return None | Get vn-fq-name if specified in annotations of a k8s object. | src/container/kube-manager/kube_manager/common/kube_config_db.py | get_vn_from_annotation | vganapath/contrail-controller | 37 | python | def get_vn_from_annotation(self, annotations):
' \n '
vn_ann = annotations.get('opencontrail.org/network', None)
if vn_ann:
return get_vn_fq_name_from_dict_string(vn_ann)
return None | def get_vn_from_annotation(self, annotations):
' \n '
vn_ann = annotations.get('opencontrail.org/network', None)
if vn_ann:
return get_vn_fq_name_from_dict_string(vn_ann)
return None<|docstring|>Get vn-fq-name if specified in annotations of a k8s object.<|endoftext|> |
8381e16857d8b57913becff1c8a9d6658cc844da33b47858005395dc68428fdb | def get_fip_pool_from_annotation(self, annotations):
' Get fip-pool-fq-name if specified in annotations of a k8s object.\n '
fip_pool_ann = annotations.get('opencontrail.org/fip-pool', None)
if fip_pool_ann:
return get_fip_pool_fq_name_from_dict_string(fip_pool_ann)
return None | Get fip-pool-fq-name if specified in annotations of a k8s object. | src/container/kube-manager/kube_manager/common/kube_config_db.py | get_fip_pool_from_annotation | vganapath/contrail-controller | 37 | python | def get_fip_pool_from_annotation(self, annotations):
' \n '
fip_pool_ann = annotations.get('opencontrail.org/fip-pool', None)
if fip_pool_ann:
return get_fip_pool_fq_name_from_dict_string(fip_pool_ann)
return None | def get_fip_pool_from_annotation(self, annotations):
' \n '
fip_pool_ann = annotations.get('opencontrail.org/fip-pool', None)
if fip_pool_ann:
return get_fip_pool_fq_name_from_dict_string(fip_pool_ann)
return None<|docstring|>Get fip-pool-fq-name if specified in annotations of a k8s object.<|endoftext|> |
5010e2998fa5efb6a2a6a28f744b0ee5cb0a148cc64a16b529ab944d714d0566 | @staticmethod
def sandesh_handle_db_list_request(cls, req):
' Reply to Pod DB lookup/introspect request. '
pod_resp = introspect.PodDatabaseListResp(pods=[])
for pod in list(PodKM.values()):
if (req.pod_uuid and (req.pod_uuid != pod.uuid)):
continue
pod_instance = introspect.PodInstance(uuid=pod.uuid, name=pod.name, labels=pod.labels, nodename=pod.nodename, ip=pod.host_ip, phase=pod.phase)
pod_resp.pods.append(pod_instance)
pod_resp.response(req.context()) | Reply to Pod DB lookup/introspect request. | src/container/kube-manager/kube_manager/common/kube_config_db.py | sandesh_handle_db_list_request | vganapath/contrail-controller | 37 | python | @staticmethod
def sandesh_handle_db_list_request(cls, req):
' '
pod_resp = introspect.PodDatabaseListResp(pods=[])
for pod in list(PodKM.values()):
if (req.pod_uuid and (req.pod_uuid != pod.uuid)):
continue
pod_instance = introspect.PodInstance(uuid=pod.uuid, name=pod.name, labels=pod.labels, nodename=pod.nodename, ip=pod.host_ip, phase=pod.phase)
pod_resp.pods.append(pod_instance)
pod_resp.response(req.context()) | @staticmethod
def sandesh_handle_db_list_request(cls, req):
' '
pod_resp = introspect.PodDatabaseListResp(pods=[])
for pod in list(PodKM.values()):
if (req.pod_uuid and (req.pod_uuid != pod.uuid)):
continue
pod_instance = introspect.PodInstance(uuid=pod.uuid, name=pod.name, labels=pod.labels, nodename=pod.nodename, ip=pod.host_ip, phase=pod.phase)
pod_resp.pods.append(pod_instance)
pod_resp.response(req.context())<|docstring|>Reply to Pod DB lookup/introspect request.<|endoftext|> |
cc1b76355934ea1bf349fc9d424bd3e9c38704f2cb6f4c1fead0e5c3ffa051ef | def get_vn_fq_name(self):
'Return virtual-network fq-name annotated on this pod.'
return self.pod_vn_fq_name | Return virtual-network fq-name annotated on this pod. | src/container/kube-manager/kube_manager/common/kube_config_db.py | get_vn_fq_name | vganapath/contrail-controller | 37 | python | def get_vn_fq_name(self):
return self.pod_vn_fq_name | def get_vn_fq_name(self):
return self.pod_vn_fq_name<|docstring|>Return virtual-network fq-name annotated on this pod.<|endoftext|> |
3da1115ba605851fa88fd4f62f6d23ad6b768d3969949186b742587fa2b66c8d | @classmethod
def get_namespace_pods(cls, namespace):
' Return a list of pods from a namespace. '
pod_uuids = []
for (pod_uuid, pod) in cls._dict.items():
if (pod.namespace == namespace):
pod_uuids.append(pod_uuid)
return pod_uuids | Return a list of pods from a namespace. | src/container/kube-manager/kube_manager/common/kube_config_db.py | get_namespace_pods | vganapath/contrail-controller | 37 | python | @classmethod
def get_namespace_pods(cls, namespace):
' '
pod_uuids = []
for (pod_uuid, pod) in cls._dict.items():
if (pod.namespace == namespace):
pod_uuids.append(pod_uuid)
return pod_uuids | @classmethod
def get_namespace_pods(cls, namespace):
' '
pod_uuids = []
for (pod_uuid, pod) in cls._dict.items():
if (pod.namespace == namespace):
pod_uuids.append(pod_uuid)
return pod_uuids<|docstring|>Return a list of pods from a namespace.<|endoftext|> |
5d81f3315730a9e7ceacfa075f6b43ac87135fb7c8576c17aaf3dfe48bad9486 | @staticmethod
def sandesh_handle_db_list_request(cls, req):
' Reply to Namespace DB lookup/introspect request. '
ns_resp = introspect.NamespaceDatabaseListResp(namespaces=[])
for ns in list(NamespaceKM.values()):
if (req.namespace_uuid and (req.namespace_uuid != ns.uuid)):
continue
ns_instance = introspect.NamespaceInstance(uuid=ns.uuid, labels=ns.labels, name=ns.name, phase=ns.phase, isolated=ns.isolated)
ns_resp.namespaces.append(ns_instance)
ns_resp.response(req.context()) | Reply to Namespace DB lookup/introspect request. | src/container/kube-manager/kube_manager/common/kube_config_db.py | sandesh_handle_db_list_request | vganapath/contrail-controller | 37 | python | @staticmethod
def sandesh_handle_db_list_request(cls, req):
' '
ns_resp = introspect.NamespaceDatabaseListResp(namespaces=[])
for ns in list(NamespaceKM.values()):
if (req.namespace_uuid and (req.namespace_uuid != ns.uuid)):
continue
ns_instance = introspect.NamespaceInstance(uuid=ns.uuid, labels=ns.labels, name=ns.name, phase=ns.phase, isolated=ns.isolated)
ns_resp.namespaces.append(ns_instance)
ns_resp.response(req.context()) | @staticmethod
def sandesh_handle_db_list_request(cls, req):
' '
ns_resp = introspect.NamespaceDatabaseListResp(namespaces=[])
for ns in list(NamespaceKM.values()):
if (req.namespace_uuid and (req.namespace_uuid != ns.uuid)):
continue
ns_instance = introspect.NamespaceInstance(uuid=ns.uuid, labels=ns.labels, name=ns.name, phase=ns.phase, isolated=ns.isolated)
ns_resp.namespaces.append(ns_instance)
ns_resp.response(req.context())<|docstring|>Reply to Namespace DB lookup/introspect request.<|endoftext|> |
4baed68d4c59038d63a4577cd33b9b91aee741bbfb7bad4dedc9aeb869d7d7f4 | def update_labels(self, labels):
'\n Update labels.\n If this update removes or add new labels to a previous list\n cache the diff for futher processing.\n '
self.added_labels = {}
self.removed_labels = {}
new_labels = (labels if labels else {})
for (k, v) in new_labels.items():
if ((k not in self.labels) or (v != self.labels[k])):
self.added_labels[k] = v
for (k, v) in self.labels.items():
if ((k not in new_labels) or (v != new_labels[k])):
self.removed_labels[k] = v
self.labels = new_labels | Update labels.
If this update removes or add new labels to a previous list
cache the diff for futher processing. | src/container/kube-manager/kube_manager/common/kube_config_db.py | update_labels | vganapath/contrail-controller | 37 | python | def update_labels(self, labels):
'\n Update labels.\n If this update removes or add new labels to a previous list\n cache the diff for futher processing.\n '
self.added_labels = {}
self.removed_labels = {}
new_labels = (labels if labels else {})
for (k, v) in new_labels.items():
if ((k not in self.labels) or (v != self.labels[k])):
self.added_labels[k] = v
for (k, v) in self.labels.items():
if ((k not in new_labels) or (v != new_labels[k])):
self.removed_labels[k] = v
self.labels = new_labels | def update_labels(self, labels):
'\n Update labels.\n If this update removes or add new labels to a previous list\n cache the diff for futher processing.\n '
self.added_labels = {}
self.removed_labels = {}
new_labels = (labels if labels else {})
for (k, v) in new_labels.items():
if ((k not in self.labels) or (v != self.labels[k])):
self.added_labels[k] = v
for (k, v) in self.labels.items():
if ((k not in new_labels) or (v != new_labels[k])):
self.removed_labels[k] = v
self.labels = new_labels<|docstring|>Update labels.
If this update removes or add new labels to a previous list
cache the diff for futher processing.<|endoftext|> |
18ef683318668211d9eaca6ba99e90bd4dc80a4d0cb157dfd51cf3977d018d54 | def get_changed_labels(self):
' Return labels changed by the last update. '
return (self.added_labels, self.removed_labels) | Return labels changed by the last update. | src/container/kube-manager/kube_manager/common/kube_config_db.py | get_changed_labels | vganapath/contrail-controller | 37 | python | def get_changed_labels(self):
' '
return (self.added_labels, self.removed_labels) | def get_changed_labels(self):
' '
return (self.added_labels, self.removed_labels)<|docstring|>Return labels changed by the last update.<|endoftext|> |
c9fd07e04bfb6d50dde76b4f23409536f571c0c62fbf28bc1f5a23f90f271dcc | @staticmethod
def sandesh_handle_db_list_request(cls, req):
' Reply to Service DB lookup/introspect request. '
svc_resp = introspect.ServiceDatabaseListResp(services=[])
for svc in list(ServiceKM.values()):
if (req.service_uuid and (req.service_uuid != svc.uuid)):
continue
svc_instance = introspect.ServiceInstance(uuid=svc.uuid, name=svc.name, name_space=svc.namespace, labels=svc.labels, cluster_ip=svc.cluster_ip, service_type=svc.service_type)
svc_resp.services.append(svc_instance)
svc_resp.response(req.context()) | Reply to Service DB lookup/introspect request. | src/container/kube-manager/kube_manager/common/kube_config_db.py | sandesh_handle_db_list_request | vganapath/contrail-controller | 37 | python | @staticmethod
def sandesh_handle_db_list_request(cls, req):
' '
svc_resp = introspect.ServiceDatabaseListResp(services=[])
for svc in list(ServiceKM.values()):
if (req.service_uuid and (req.service_uuid != svc.uuid)):
continue
svc_instance = introspect.ServiceInstance(uuid=svc.uuid, name=svc.name, name_space=svc.namespace, labels=svc.labels, cluster_ip=svc.cluster_ip, service_type=svc.service_type)
svc_resp.services.append(svc_instance)
svc_resp.response(req.context()) | @staticmethod
def sandesh_handle_db_list_request(cls, req):
' '
svc_resp = introspect.ServiceDatabaseListResp(services=[])
for svc in list(ServiceKM.values()):
if (req.service_uuid and (req.service_uuid != svc.uuid)):
continue
svc_instance = introspect.ServiceInstance(uuid=svc.uuid, name=svc.name, name_space=svc.namespace, labels=svc.labels, cluster_ip=svc.cluster_ip, service_type=svc.service_type)
svc_resp.services.append(svc_instance)
svc_resp.response(req.context())<|docstring|>Reply to Service DB lookup/introspect request.<|endoftext|> |
308d8a113017088c4178c7ab992d24b7edd8f152907a88fb773bf470e49f0bce | def remove_entry(self):
'\n Handler for pre-delete processing of network policy delete events.\n '
if (self.uuid in self.create_sequence):
self.create_sequence.remove(self.uuid) | Handler for pre-delete processing of network policy delete events. | src/container/kube-manager/kube_manager/common/kube_config_db.py | remove_entry | vganapath/contrail-controller | 37 | python | def remove_entry(self):
'\n \n '
if (self.uuid in self.create_sequence):
self.create_sequence.remove(self.uuid) | def remove_entry(self):
'\n \n '
if (self.uuid in self.create_sequence):
self.create_sequence.remove(self.uuid)<|docstring|>Handler for pre-delete processing of network policy delete events.<|endoftext|> |
5f84ff8f54695b8337f60ca56ec06bf21d0b5cea2007e8141ac4c8ece749ddc6 | @staticmethod
def sandesh_handle_db_list_request(cls, req):
' Reply to Network Policy DB lookup/introspect request. '
np_resp = introspect.NetworkPolicyDatabaseListResp(network_policies=[])
for np in list(NetworkPolicyKM.values()):
if (req.network_policy_uuid and (req.network_policy_uuid != np.uuid)):
continue
np_ingress_list = []
if np.spec.get('ingress'):
for ingress in np.spec.get('ingress'):
from_list = []
if ingress.get('from'):
for each_from in ingress.get('from'):
np_ns_selector = None
if each_from.get('namespaceSelector'):
np_ns_selector = np.get_namespace_selector(each_from.get('namespaceSelector'))
np_pod_selector = None
if each_from.get('podSelector'):
np_pod_selector = np.get_pod_selector(each_from.get('podSelector'))
np_ip_block = None
if each_from.get('ipBlock'):
ipblock = each_from.get('ipBlock')
cidr = ipblock.get('cidr', None)
except_cidr_list = []
for except_cidr in ipblock.get('except', []):
except_cidr_list.append(except_cidr)
np_ip_block = introspect.NetworkPolicyIpBlock(cidr=cidr, except_cidr=except_cidr_list)
from_list.append(introspect.NetworkPolicyFromRules(podSelector=np_pod_selector, namespaceSelector=np_ns_selector, ipBlock=np_ip_block))
np_port_list = []
if ingress.get('ports'):
for port in ingress.get('ports'):
np_port = introspect.NetworkPolicyPort(port=port.get('port').__str__(), protocol=port.get('protocol'))
np_port_list.append(np_port)
np_ingress_list.append(introspect.NetworkPolicyIngressPolicy(fromPolicy=from_list, ports=np_port_list))
np_egress_list = []
if np.spec.get('egress'):
for egress in np.spec.get('egress'):
to_list = []
if egress.get('to'):
for each_to in egress.get('to'):
np_ip_block = None
if each_to.get('ipBlock'):
ipblock = each_to.get('ipBlock')
cidr = ipblock.get('cidr', None)
except_cidr_list = []
for except_cidr in ipblock.get('except', []):
except_cidr_list.append(except_cidr)
np_ip_block = introspect.NetworkPolicyIpBlock(cidr=cidr, except_cidr=except_cidr_list)
to_list.append(introspect.NetworkPolicyToRules(ipBlock=np_ip_block))
np_port_list = []
if egress.get('ports'):
for port in egress.get('ports'):
np_port = introspect.NetworkPolicyPort(port=port.get('port').__str__(), protocol=port.get('protocol'))
np_port_list.append(np_port)
np_egress_list.append(introspect.NetworkPolicyEgressPolicy(toPolicy=to_list, ports=np_port_list))
np_pod_selector = None
if np.spec.get('podSelector'):
pod_selector = np.spec.get('podSelector')
np_pod_selector = introspect.NetworkPolicyLabelSelectors(matchLabels=pod_selector.get('matchLabels'))
np_spec = introspect.NetworkPolicySpec(ingress=np_ingress_list, egress=np_egress_list, podSelector=np_pod_selector)
np_instance = introspect.NetworkPolicyInstance(uuid=np.uuid, name=np.name, name_space=np.namespace, vnc_firewall_policy_fqname=np.get_vnc_fq_name().__str__(), spec_string=np.spec.__str__(), spec=np_spec)
np_resp.network_policies.append(np_instance)
np_resp.response(req.context()) | Reply to Network Policy DB lookup/introspect request. | src/container/kube-manager/kube_manager/common/kube_config_db.py | sandesh_handle_db_list_request | vganapath/contrail-controller | 37 | python | @staticmethod
def sandesh_handle_db_list_request(cls, req):
' '
np_resp = introspect.NetworkPolicyDatabaseListResp(network_policies=[])
for np in list(NetworkPolicyKM.values()):
if (req.network_policy_uuid and (req.network_policy_uuid != np.uuid)):
continue
np_ingress_list = []
if np.spec.get('ingress'):
for ingress in np.spec.get('ingress'):
from_list = []
if ingress.get('from'):
for each_from in ingress.get('from'):
np_ns_selector = None
if each_from.get('namespaceSelector'):
np_ns_selector = np.get_namespace_selector(each_from.get('namespaceSelector'))
np_pod_selector = None
if each_from.get('podSelector'):
np_pod_selector = np.get_pod_selector(each_from.get('podSelector'))
np_ip_block = None
if each_from.get('ipBlock'):
ipblock = each_from.get('ipBlock')
cidr = ipblock.get('cidr', None)
except_cidr_list = []
for except_cidr in ipblock.get('except', []):
except_cidr_list.append(except_cidr)
np_ip_block = introspect.NetworkPolicyIpBlock(cidr=cidr, except_cidr=except_cidr_list)
from_list.append(introspect.NetworkPolicyFromRules(podSelector=np_pod_selector, namespaceSelector=np_ns_selector, ipBlock=np_ip_block))
np_port_list = []
if ingress.get('ports'):
for port in ingress.get('ports'):
np_port = introspect.NetworkPolicyPort(port=port.get('port').__str__(), protocol=port.get('protocol'))
np_port_list.append(np_port)
np_ingress_list.append(introspect.NetworkPolicyIngressPolicy(fromPolicy=from_list, ports=np_port_list))
np_egress_list = []
if np.spec.get('egress'):
for egress in np.spec.get('egress'):
to_list = []
if egress.get('to'):
for each_to in egress.get('to'):
np_ip_block = None
if each_to.get('ipBlock'):
ipblock = each_to.get('ipBlock')
cidr = ipblock.get('cidr', None)
except_cidr_list = []
for except_cidr in ipblock.get('except', []):
except_cidr_list.append(except_cidr)
np_ip_block = introspect.NetworkPolicyIpBlock(cidr=cidr, except_cidr=except_cidr_list)
to_list.append(introspect.NetworkPolicyToRules(ipBlock=np_ip_block))
np_port_list = []
if egress.get('ports'):
for port in egress.get('ports'):
np_port = introspect.NetworkPolicyPort(port=port.get('port').__str__(), protocol=port.get('protocol'))
np_port_list.append(np_port)
np_egress_list.append(introspect.NetworkPolicyEgressPolicy(toPolicy=to_list, ports=np_port_list))
np_pod_selector = None
if np.spec.get('podSelector'):
pod_selector = np.spec.get('podSelector')
np_pod_selector = introspect.NetworkPolicyLabelSelectors(matchLabels=pod_selector.get('matchLabels'))
np_spec = introspect.NetworkPolicySpec(ingress=np_ingress_list, egress=np_egress_list, podSelector=np_pod_selector)
np_instance = introspect.NetworkPolicyInstance(uuid=np.uuid, name=np.name, name_space=np.namespace, vnc_firewall_policy_fqname=np.get_vnc_fq_name().__str__(), spec_string=np.spec.__str__(), spec=np_spec)
np_resp.network_policies.append(np_instance)
np_resp.response(req.context()) | @staticmethod
def sandesh_handle_db_list_request(cls, req):
' '
np_resp = introspect.NetworkPolicyDatabaseListResp(network_policies=[])
for np in list(NetworkPolicyKM.values()):
if (req.network_policy_uuid and (req.network_policy_uuid != np.uuid)):
continue
np_ingress_list = []
if np.spec.get('ingress'):
for ingress in np.spec.get('ingress'):
from_list = []
if ingress.get('from'):
for each_from in ingress.get('from'):
np_ns_selector = None
if each_from.get('namespaceSelector'):
np_ns_selector = np.get_namespace_selector(each_from.get('namespaceSelector'))
np_pod_selector = None
if each_from.get('podSelector'):
np_pod_selector = np.get_pod_selector(each_from.get('podSelector'))
np_ip_block = None
if each_from.get('ipBlock'):
ipblock = each_from.get('ipBlock')
cidr = ipblock.get('cidr', None)
except_cidr_list = []
for except_cidr in ipblock.get('except', []):
except_cidr_list.append(except_cidr)
np_ip_block = introspect.NetworkPolicyIpBlock(cidr=cidr, except_cidr=except_cidr_list)
from_list.append(introspect.NetworkPolicyFromRules(podSelector=np_pod_selector, namespaceSelector=np_ns_selector, ipBlock=np_ip_block))
np_port_list = []
if ingress.get('ports'):
for port in ingress.get('ports'):
np_port = introspect.NetworkPolicyPort(port=port.get('port').__str__(), protocol=port.get('protocol'))
np_port_list.append(np_port)
np_ingress_list.append(introspect.NetworkPolicyIngressPolicy(fromPolicy=from_list, ports=np_port_list))
np_egress_list = []
if np.spec.get('egress'):
for egress in np.spec.get('egress'):
to_list = []
if egress.get('to'):
for each_to in egress.get('to'):
np_ip_block = None
if each_to.get('ipBlock'):
ipblock = each_to.get('ipBlock')
cidr = ipblock.get('cidr', None)
except_cidr_list = []
for except_cidr in ipblock.get('except', []):
except_cidr_list.append(except_cidr)
np_ip_block = introspect.NetworkPolicyIpBlock(cidr=cidr, except_cidr=except_cidr_list)
to_list.append(introspect.NetworkPolicyToRules(ipBlock=np_ip_block))
np_port_list = []
if egress.get('ports'):
for port in egress.get('ports'):
np_port = introspect.NetworkPolicyPort(port=port.get('port').__str__(), protocol=port.get('protocol'))
np_port_list.append(np_port)
np_egress_list.append(introspect.NetworkPolicyEgressPolicy(toPolicy=to_list, ports=np_port_list))
np_pod_selector = None
if np.spec.get('podSelector'):
pod_selector = np.spec.get('podSelector')
np_pod_selector = introspect.NetworkPolicyLabelSelectors(matchLabels=pod_selector.get('matchLabels'))
np_spec = introspect.NetworkPolicySpec(ingress=np_ingress_list, egress=np_egress_list, podSelector=np_pod_selector)
np_instance = introspect.NetworkPolicyInstance(uuid=np.uuid, name=np.name, name_space=np.namespace, vnc_firewall_policy_fqname=np.get_vnc_fq_name().__str__(), spec_string=np.spec.__str__(), spec=np_spec)
np_resp.network_policies.append(np_instance)
np_resp.response(req.context())<|docstring|>Reply to Network Policy DB lookup/introspect request.<|endoftext|> |
373bbf900852be5ff53bf847b1afe744d3484e1497781974eda5019e95feeb00 | @staticmethod
def sandesh_handle_db_list_request(cls, req):
' Reply to Ingress DB lookup/introspect request. '
ingress_resp = introspect.IngressDatabaseListResp(ingress=[])
for ingress in list(IngressKM.values()):
if (req.ingress_uuid and (req.ingress_uuid != ingress.uuid)):
continue
def_backend = introspect.IngressBackend(name=ingress.default_backend.get('serviceName'), port=str(ingress.default_backend.get('servicePort')))
rules = []
for rule in ingress.rules:
ingress_rule = introspect.IngressRule(spec=[])
for (key, value) in rule.items():
if (key == 'host'):
ingress_rule.host = value
else:
proto_spec = introspect.IngressProtoSpec(paths=[])
proto_spec.proto = key
for path in value.get('paths', []):
backend = path.get('backend')
proto_backend = None
if backend:
proto_backend = introspect.IngressBackend(name=backend.get('serviceName'), port=str(backend.get('servicePort')))
proto_path = introspect.IngressRuleProtoPath(backend=proto_backend, path=path.get('path'))
proto_spec.paths.append(proto_path)
ingress_rule.spec.append(proto_spec)
rules.append(ingress_rule)
ingress_instance = introspect.IngressInstance(uuid=ingress.uuid, name=ingress.name, labels=ingress.labels, name_space=ingress.namespace, rules=rules, default_backend=def_backend)
ingress_resp.ingress.append(ingress_instance)
ingress_resp.response(req.context()) | Reply to Ingress DB lookup/introspect request. | src/container/kube-manager/kube_manager/common/kube_config_db.py | sandesh_handle_db_list_request | vganapath/contrail-controller | 37 | python | @staticmethod
def sandesh_handle_db_list_request(cls, req):
' '
ingress_resp = introspect.IngressDatabaseListResp(ingress=[])
for ingress in list(IngressKM.values()):
if (req.ingress_uuid and (req.ingress_uuid != ingress.uuid)):
continue
def_backend = introspect.IngressBackend(name=ingress.default_backend.get('serviceName'), port=str(ingress.default_backend.get('servicePort')))
rules = []
for rule in ingress.rules:
ingress_rule = introspect.IngressRule(spec=[])
for (key, value) in rule.items():
if (key == 'host'):
ingress_rule.host = value
else:
proto_spec = introspect.IngressProtoSpec(paths=[])
proto_spec.proto = key
for path in value.get('paths', []):
backend = path.get('backend')
proto_backend = None
if backend:
proto_backend = introspect.IngressBackend(name=backend.get('serviceName'), port=str(backend.get('servicePort')))
proto_path = introspect.IngressRuleProtoPath(backend=proto_backend, path=path.get('path'))
proto_spec.paths.append(proto_path)
ingress_rule.spec.append(proto_spec)
rules.append(ingress_rule)
ingress_instance = introspect.IngressInstance(uuid=ingress.uuid, name=ingress.name, labels=ingress.labels, name_space=ingress.namespace, rules=rules, default_backend=def_backend)
ingress_resp.ingress.append(ingress_instance)
ingress_resp.response(req.context()) | @staticmethod
def sandesh_handle_db_list_request(cls, req):
' '
ingress_resp = introspect.IngressDatabaseListResp(ingress=[])
for ingress in list(IngressKM.values()):
if (req.ingress_uuid and (req.ingress_uuid != ingress.uuid)):
continue
def_backend = introspect.IngressBackend(name=ingress.default_backend.get('serviceName'), port=str(ingress.default_backend.get('servicePort')))
rules = []
for rule in ingress.rules:
ingress_rule = introspect.IngressRule(spec=[])
for (key, value) in rule.items():
if (key == 'host'):
ingress_rule.host = value
else:
proto_spec = introspect.IngressProtoSpec(paths=[])
proto_spec.proto = key
for path in value.get('paths', []):
backend = path.get('backend')
proto_backend = None
if backend:
proto_backend = introspect.IngressBackend(name=backend.get('serviceName'), port=str(backend.get('servicePort')))
proto_path = introspect.IngressRuleProtoPath(backend=proto_backend, path=path.get('path'))
proto_spec.paths.append(proto_path)
ingress_rule.spec.append(proto_spec)
rules.append(ingress_rule)
ingress_instance = introspect.IngressInstance(uuid=ingress.uuid, name=ingress.name, labels=ingress.labels, name_space=ingress.namespace, rules=rules, default_backend=def_backend)
ingress_resp.ingress.append(ingress_instance)
ingress_resp.response(req.context())<|docstring|>Reply to Ingress DB lookup/introspect request.<|endoftext|> |
ef8694e49430c433871480b88ea62135b3dc75c8d5ba32bbd116fe806c85dcc2 | def _draw(self):
' Draws the sub-elements. '
self._classPicker = ClassPicker(self)
self._namePicker = NamePicker(self) | Draws the sub-elements. | objects/classSelectionMenu/ClassSelectionMenu.py | _draw | jpyankel/Teapot-Wars-2 | 1 | python | def _draw(self):
' '
self._classPicker = ClassPicker(self)
self._namePicker = NamePicker(self) | def _draw(self):
' '
self._classPicker = ClassPicker(self)
self._namePicker = NamePicker(self)<|docstring|>Draws the sub-elements.<|endoftext|> |
b3536585ce222c1d29f892a347440caf19e732a8dacd0feb84b6bea9fdefb480 | def createCharacter(self):
'\n Creates a new localPlayer and spawns it with the given name and\n chosen class.\n If there is no name or class, this will not spawn a player.\n '
self.syncInfo(cName=self._namePicker.getName(), cClass=self._classPicker.getSelected())
if (self._playerInfo.cClass == None):
return
if ((self._playerInfo.cName == None) or (self._playerInfo.cName == NPKR_ENTRY_INITIAL_TEXT)):
return
self._gameManager.createPlayer(self._playerInfo) | Creates a new localPlayer and spawns it with the given name and
chosen class.
If there is no name or class, this will not spawn a player. | objects/classSelectionMenu/ClassSelectionMenu.py | createCharacter | jpyankel/Teapot-Wars-2 | 1 | python | def createCharacter(self):
'\n Creates a new localPlayer and spawns it with the given name and\n chosen class.\n If there is no name or class, this will not spawn a player.\n '
self.syncInfo(cName=self._namePicker.getName(), cClass=self._classPicker.getSelected())
if (self._playerInfo.cClass == None):
return
if ((self._playerInfo.cName == None) or (self._playerInfo.cName == NPKR_ENTRY_INITIAL_TEXT)):
return
self._gameManager.createPlayer(self._playerInfo) | def createCharacter(self):
'\n Creates a new localPlayer and spawns it with the given name and\n chosen class.\n If there is no name or class, this will not spawn a player.\n '
self.syncInfo(cName=self._namePicker.getName(), cClass=self._classPicker.getSelected())
if (self._playerInfo.cClass == None):
return
if ((self._playerInfo.cName == None) or (self._playerInfo.cName == NPKR_ENTRY_INITIAL_TEXT)):
return
self._gameManager.createPlayer(self._playerInfo)<|docstring|>Creates a new localPlayer and spawns it with the given name and
chosen class.
If there is no name or class, this will not spawn a player.<|endoftext|> |
257988f830c715eb1ad9f269d48066cdbab9e7397c389af64fef281b13ff6b75 | def syncInfo(self, cName=None, cClass=None, cColor=None):
' Syncs info with the server/client manager '
if (cName != None):
self._playerInfo.cName = cName
if (cClass != None):
self._playerInfo.cClass = cClass
if (cColor != None):
self._playerInfo.cColor = cColor
self._gameManager.updateLocalInfoAndSync(self._playerInfo) | Syncs info with the server/client manager | objects/classSelectionMenu/ClassSelectionMenu.py | syncInfo | jpyankel/Teapot-Wars-2 | 1 | python | def syncInfo(self, cName=None, cClass=None, cColor=None):
' '
if (cName != None):
self._playerInfo.cName = cName
if (cClass != None):
self._playerInfo.cClass = cClass
if (cColor != None):
self._playerInfo.cColor = cColor
self._gameManager.updateLocalInfoAndSync(self._playerInfo) | def syncInfo(self, cName=None, cClass=None, cColor=None):
' '
if (cName != None):
self._playerInfo.cName = cName
if (cClass != None):
self._playerInfo.cClass = cClass
if (cColor != None):
self._playerInfo.cColor = cColor
self._gameManager.updateLocalInfoAndSync(self._playerInfo)<|docstring|>Syncs info with the server/client manager<|endoftext|> |
72ac74cc3b35a10d537c1c47061f4807ea2305f12155f30a95823c41de95dcd1 | def close(self):
' Close UI '
self._classPicker.close()
self._namePicker.close()
del self | Close UI | objects/classSelectionMenu/ClassSelectionMenu.py | close | jpyankel/Teapot-Wars-2 | 1 | python | def close(self):
' '
self._classPicker.close()
self._namePicker.close()
del self | def close(self):
' '
self._classPicker.close()
self._namePicker.close()
del self<|docstring|>Close UI<|endoftext|> |
d3095611e73af4e55b941ced4c1d70c7b99d47f2cc0e51c797dbb3bbeca767af | def __init__(self, remotes, projects, builds, selectedBuild):
'\n Constructor\n\n @param remotes See Manifest.remotes\n @param projects See Manifest.projects\n @param builds See Manifest.builds\n @param builds See Manifest.builds\n @param selectedBuild See Manifest.selectedBuild\n '
self._remotes = remotes
self._projects = projects
self._builds = builds
self._selectedBuild = selectedBuild | Constructor
@param remotes See Manifest.remotes
@param projects See Manifest.projects
@param builds See Manifest.builds
@param builds See Manifest.builds
@param selectedBuild See Manifest.selectedBuild | du/drepo/manifest/Manifest.py | __init__ | spiricn/DevUtils | 1 | python | def __init__(self, remotes, projects, builds, selectedBuild):
'\n Constructor\n\n @param remotes See Manifest.remotes\n @param projects See Manifest.projects\n @param builds See Manifest.builds\n @param builds See Manifest.builds\n @param selectedBuild See Manifest.selectedBuild\n '
self._remotes = remotes
self._projects = projects
self._builds = builds
self._selectedBuild = selectedBuild | def __init__(self, remotes, projects, builds, selectedBuild):
'\n Constructor\n\n @param remotes See Manifest.remotes\n @param projects See Manifest.projects\n @param builds See Manifest.builds\n @param builds See Manifest.builds\n @param selectedBuild See Manifest.selectedBuild\n '
self._remotes = remotes
self._projects = projects
self._builds = builds
self._selectedBuild = selectedBuild<|docstring|>Constructor
@param remotes See Manifest.remotes
@param projects See Manifest.projects
@param builds See Manifest.builds
@param builds See Manifest.builds
@param selectedBuild See Manifest.selectedBuild<|endoftext|> |
f37b6b28cd266f955d1016b9e3e100e6783985908afb3bc152218f93f24bc06c | @property
def remotes(self):
'\n Gerrit remote server list\n '
return self._remotes | Gerrit remote server list | du/drepo/manifest/Manifest.py | remotes | spiricn/DevUtils | 1 | python | @property
def remotes(self):
'\n \n '
return self._remotes | @property
def remotes(self):
'\n \n '
return self._remotes<|docstring|>Gerrit remote server list<|endoftext|> |
c7e8ce66c5bc2220ccb795b2e55765c82c5cb42998ce5022282e30b285b79151 | @property
def projects(self):
'\n Projects list\n '
return self._projects | Projects list | du/drepo/manifest/Manifest.py | projects | spiricn/DevUtils | 1 | python | @property
def projects(self):
'\n \n '
return self._projects | @property
def projects(self):
'\n \n '
return self._projects<|docstring|>Projects list<|endoftext|> |
6de8e38051810006dd368e918a08be64fb36c4081640ef33cca19f98e0a0ad9f | @property
def builds(self):
'\n Builds list\n '
return self._builds | Builds list | du/drepo/manifest/Manifest.py | builds | spiricn/DevUtils | 1 | python | @property
def builds(self):
'\n \n '
return self._builds | @property
def builds(self):
'\n \n '
return self._builds<|docstring|>Builds list<|endoftext|> |
6c5d3950c35aa30c1abaed93cbebfd7f5f723a5ae761917fcbe0ab824dc86026 | @property
def selectedBuild(self):
'\n Selected build\n '
return self._selectedBuild | Selected build | du/drepo/manifest/Manifest.py | selectedBuild | spiricn/DevUtils | 1 | python | @property
def selectedBuild(self):
'\n \n '
return self._selectedBuild | @property
def selectedBuild(self):
'\n \n '
return self._selectedBuild<|docstring|>Selected build<|endoftext|> |
78251d375753b33c2e3c0f15f18a4f77d6be5f62565e5d8bc12e205eab7318e0 | def __init__(self, error_callback: Callable[([base.BaseViolation], None)], options: ConfigurationOptions) -> None:
'Creates new instance of a name validator.'
self._error_callback = error_callback
self._options = options | Creates new instance of a name validator. | wemake_python_styleguide/visitors/ast/naming.py | __init__ | makarchuk/wemake-python-styleguide | 0 | python | def __init__(self, error_callback: Callable[([base.BaseViolation], None)], options: ConfigurationOptions) -> None:
self._error_callback = error_callback
self._options = options | def __init__(self, error_callback: Callable[([base.BaseViolation], None)], options: ConfigurationOptions) -> None:
self._error_callback = error_callback
self._options = options<|docstring|>Creates new instance of a name validator.<|endoftext|> |
710f7173f6e5f81b7ff1e503a358c58d9803de973e75a35304b261dff936af72 | def __init__(self, *args, **kwargs) -> None:
'Initializes new naming validator for this visitor.'
super().__init__(*args, **kwargs)
self._validator = _NameValidator(self.add_violation, self.options) | Initializes new naming validator for this visitor. | wemake_python_styleguide/visitors/ast/naming.py | __init__ | makarchuk/wemake-python-styleguide | 0 | python | def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._validator = _NameValidator(self.add_violation, self.options) | def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._validator = _NameValidator(self.add_violation, self.options)<|docstring|>Initializes new naming validator for this visitor.<|endoftext|> |
c4b2f9ec85e5c2e4b9f3f085c4e3c38bab757e6d015f0896f14c195c35b73844 | def visit_ClassDef(self, node: ast.ClassDef) -> None:
'\n Used to find upper attribute declarations.\n\n Raises:\n UpperCaseAttributeViolation\n UnicodeNameViolation\n TrailingUnderscoreViolation\n\n '
self._validator.check_attribute_name(node)
self._validator.check_name(node, node.name)
self.generic_visit(node) | Used to find upper attribute declarations.
Raises:
UpperCaseAttributeViolation
UnicodeNameViolation
TrailingUnderscoreViolation | wemake_python_styleguide/visitors/ast/naming.py | visit_ClassDef | makarchuk/wemake-python-styleguide | 0 | python | def visit_ClassDef(self, node: ast.ClassDef) -> None:
'\n Used to find upper attribute declarations.\n\n Raises:\n UpperCaseAttributeViolation\n UnicodeNameViolation\n TrailingUnderscoreViolation\n\n '
self._validator.check_attribute_name(node)
self._validator.check_name(node, node.name)
self.generic_visit(node) | def visit_ClassDef(self, node: ast.ClassDef) -> None:
'\n Used to find upper attribute declarations.\n\n Raises:\n UpperCaseAttributeViolation\n UnicodeNameViolation\n TrailingUnderscoreViolation\n\n '
self._validator.check_attribute_name(node)
self._validator.check_name(node, node.name)
self.generic_visit(node)<|docstring|>Used to find upper attribute declarations.
Raises:
UpperCaseAttributeViolation
UnicodeNameViolation
TrailingUnderscoreViolation<|endoftext|> |
34c74da06757323e71a8df55c58ad15616f83668deb44f9b66ca5fd1613b877f | def visit_any_function(self, node: AnyFunctionDef) -> None:
'\n Used to find wrong function and method parameters.\n\n Raises:\n WrongVariableNameViolation\n TooShortNameViolation\n PrivateNameViolation\n TooLongNameViolation\n UnicodeNameViolation\n TrailingUnderscoreViolation\n\n '
self._validator.check_name(node, node.name)
self._validator.check_function_signature(node)
self.generic_visit(node) | Used to find wrong function and method parameters.
Raises:
WrongVariableNameViolation
TooShortNameViolation
PrivateNameViolation
TooLongNameViolation
UnicodeNameViolation
TrailingUnderscoreViolation | wemake_python_styleguide/visitors/ast/naming.py | visit_any_function | makarchuk/wemake-python-styleguide | 0 | python | def visit_any_function(self, node: AnyFunctionDef) -> None:
'\n Used to find wrong function and method parameters.\n\n Raises:\n WrongVariableNameViolation\n TooShortNameViolation\n PrivateNameViolation\n TooLongNameViolation\n UnicodeNameViolation\n TrailingUnderscoreViolation\n\n '
self._validator.check_name(node, node.name)
self._validator.check_function_signature(node)
self.generic_visit(node) | def visit_any_function(self, node: AnyFunctionDef) -> None:
'\n Used to find wrong function and method parameters.\n\n Raises:\n WrongVariableNameViolation\n TooShortNameViolation\n PrivateNameViolation\n TooLongNameViolation\n UnicodeNameViolation\n TrailingUnderscoreViolation\n\n '
self._validator.check_name(node, node.name)
self._validator.check_function_signature(node)
self.generic_visit(node)<|docstring|>Used to find wrong function and method parameters.
Raises:
WrongVariableNameViolation
TooShortNameViolation
PrivateNameViolation
TooLongNameViolation
UnicodeNameViolation
TrailingUnderscoreViolation<|endoftext|> |
100e4be07123ca2b5a4081629c2de96ca89c54c42481cd09d619f4957542e15d | def visit_Lambda(self, node: ast.Lambda) -> None:
'\n Used to find wrong parameters.\n\n Raises:\n WrongVariableNameViolation\n TooShortNameViolation\n PrivateNameViolation\n TooLongNameViolation\n TrailingUnderscoreViolation\n\n '
self._validator.check_function_signature(node)
self.generic_visit(node) | Used to find wrong parameters.
Raises:
WrongVariableNameViolation
TooShortNameViolation
PrivateNameViolation
TooLongNameViolation
TrailingUnderscoreViolation | wemake_python_styleguide/visitors/ast/naming.py | visit_Lambda | makarchuk/wemake-python-styleguide | 0 | python | def visit_Lambda(self, node: ast.Lambda) -> None:
'\n Used to find wrong parameters.\n\n Raises:\n WrongVariableNameViolation\n TooShortNameViolation\n PrivateNameViolation\n TooLongNameViolation\n TrailingUnderscoreViolation\n\n '
self._validator.check_function_signature(node)
self.generic_visit(node) | def visit_Lambda(self, node: ast.Lambda) -> None:
'\n Used to find wrong parameters.\n\n Raises:\n WrongVariableNameViolation\n TooShortNameViolation\n PrivateNameViolation\n TooLongNameViolation\n TrailingUnderscoreViolation\n\n '
self._validator.check_function_signature(node)
self.generic_visit(node)<|docstring|>Used to find wrong parameters.
Raises:
WrongVariableNameViolation
TooShortNameViolation
PrivateNameViolation
TooLongNameViolation
TrailingUnderscoreViolation<|endoftext|> |
5f373d200476f863dfc37b5cf9de8bcaec455f4116e1e024eee0d9c40c4173df | def visit_any_import(self, node: AnyImport) -> None:
'\n Used to check wrong import alias names.\n\n Raises:\n WrongVariableNameViolation\n TooShortNameViolation\n PrivateNameViolation\n TooLongNameViolation\n TrailingUnderscoreViolation\n\n '
for alias_node in node.names:
if alias_node.asname:
self._validator.check_name(node, alias_node.asname)
self.generic_visit(node) | Used to check wrong import alias names.
Raises:
WrongVariableNameViolation
TooShortNameViolation
PrivateNameViolation
TooLongNameViolation
TrailingUnderscoreViolation | wemake_python_styleguide/visitors/ast/naming.py | visit_any_import | makarchuk/wemake-python-styleguide | 0 | python | def visit_any_import(self, node: AnyImport) -> None:
'\n Used to check wrong import alias names.\n\n Raises:\n WrongVariableNameViolation\n TooShortNameViolation\n PrivateNameViolation\n TooLongNameViolation\n TrailingUnderscoreViolation\n\n '
for alias_node in node.names:
if alias_node.asname:
self._validator.check_name(node, alias_node.asname)
self.generic_visit(node) | def visit_any_import(self, node: AnyImport) -> None:
'\n Used to check wrong import alias names.\n\n Raises:\n WrongVariableNameViolation\n TooShortNameViolation\n PrivateNameViolation\n TooLongNameViolation\n TrailingUnderscoreViolation\n\n '
for alias_node in node.names:
if alias_node.asname:
self._validator.check_name(node, alias_node.asname)
self.generic_visit(node)<|docstring|>Used to check wrong import alias names.
Raises:
WrongVariableNameViolation
TooShortNameViolation
PrivateNameViolation
TooLongNameViolation
TrailingUnderscoreViolation<|endoftext|> |
a5a8a5ccd85b5852d2aeeb9ed82351bd4efa99deb61ba35293426fc4f9581995 | def visit_variable(self, node: VariableDef) -> None:
'\n Used to check wrong names of assigned.\n\n Raises:\n WrongVariableNameViolation\n TooShortNameViolation\n PrivateNameViolation\n TooLongNameViolation\n UnicodeNameViolation\n TrailingUnderscoreViolation\n\n '
variable_name = name_nodes.get_assigned_name(node)
if (variable_name is not None):
self._validator.check_name(node, variable_name)
self.generic_visit(node) | Used to check wrong names of assigned.
Raises:
WrongVariableNameViolation
TooShortNameViolation
PrivateNameViolation
TooLongNameViolation
UnicodeNameViolation
TrailingUnderscoreViolation | wemake_python_styleguide/visitors/ast/naming.py | visit_variable | makarchuk/wemake-python-styleguide | 0 | python | def visit_variable(self, node: VariableDef) -> None:
'\n Used to check wrong names of assigned.\n\n Raises:\n WrongVariableNameViolation\n TooShortNameViolation\n PrivateNameViolation\n TooLongNameViolation\n UnicodeNameViolation\n TrailingUnderscoreViolation\n\n '
variable_name = name_nodes.get_assigned_name(node)
if (variable_name is not None):
self._validator.check_name(node, variable_name)
self.generic_visit(node) | def visit_variable(self, node: VariableDef) -> None:
'\n Used to check wrong names of assigned.\n\n Raises:\n WrongVariableNameViolation\n TooShortNameViolation\n PrivateNameViolation\n TooLongNameViolation\n UnicodeNameViolation\n TrailingUnderscoreViolation\n\n '
variable_name = name_nodes.get_assigned_name(node)
if (variable_name is not None):
self._validator.check_name(node, variable_name)
self.generic_visit(node)<|docstring|>Used to check wrong names of assigned.
Raises:
WrongVariableNameViolation
TooShortNameViolation
PrivateNameViolation
TooLongNameViolation
UnicodeNameViolation
TrailingUnderscoreViolation<|endoftext|> |
ebf3a9a4ee42870e0782c09ec747f43c57fa356243e05c686fc43a983e4fae25 | def visit_Assign(self, node: ast.Assign) -> None:
'\n Used to find the bad metadata variable names.\n\n Raises:\n WrongModuleMetadataViolation\n\n '
self._check_metadata(node)
self.generic_visit(node) | Used to find the bad metadata variable names.
Raises:
WrongModuleMetadataViolation | wemake_python_styleguide/visitors/ast/naming.py | visit_Assign | makarchuk/wemake-python-styleguide | 0 | python | def visit_Assign(self, node: ast.Assign) -> None:
'\n Used to find the bad metadata variable names.\n\n Raises:\n WrongModuleMetadataViolation\n\n '
self._check_metadata(node)
self.generic_visit(node) | def visit_Assign(self, node: ast.Assign) -> None:
'\n Used to find the bad metadata variable names.\n\n Raises:\n WrongModuleMetadataViolation\n\n '
self._check_metadata(node)
self.generic_visit(node)<|docstring|>Used to find the bad metadata variable names.
Raises:
WrongModuleMetadataViolation<|endoftext|> |
399af0b29cf32a144ccc86d3928e3f9d0b43af96e3e29314862b7f421dcae59c | def _create_target_names(self, target: AssignTargets) -> AssignTargetsNameList:
'Creates list with names of targets of assignment.'
target_names = []
for ast_object in target:
if isinstance(ast_object, ast.Name):
target_names.append(getattr(ast_object, 'id', None))
if isinstance(ast_object, ast.Tuple):
target_names.append(getattr(ast_object, 'elts', None))
for (index, _) in enumerate(target_names):
target_names[index] = tuple((name.id for name in target_names[index] if isinstance(name, ast.Name)))
return target_names | Creates list with names of targets of assignment. | wemake_python_styleguide/visitors/ast/naming.py | _create_target_names | makarchuk/wemake-python-styleguide | 0 | python | def _create_target_names(self, target: AssignTargets) -> AssignTargetsNameList:
target_names = []
for ast_object in target:
if isinstance(ast_object, ast.Name):
target_names.append(getattr(ast_object, 'id', None))
if isinstance(ast_object, ast.Tuple):
target_names.append(getattr(ast_object, 'elts', None))
for (index, _) in enumerate(target_names):
target_names[index] = tuple((name.id for name in target_names[index] if isinstance(name, ast.Name)))
return target_names | def _create_target_names(self, target: AssignTargets) -> AssignTargetsNameList:
target_names = []
for ast_object in target:
if isinstance(ast_object, ast.Name):
target_names.append(getattr(ast_object, 'id', None))
if isinstance(ast_object, ast.Tuple):
target_names.append(getattr(ast_object, 'elts', None))
for (index, _) in enumerate(target_names):
target_names[index] = tuple((name.id for name in target_names[index] if isinstance(name, ast.Name)))
return target_names<|docstring|>Creates list with names of targets of assignment.<|endoftext|> |
017e523450796dd06940475172aa2ef634a866bf2b5f8523a5d129bb3d89ad96 | def visit_Assign(self, node: ast.Assign) -> None:
'\n Used to check assignment variable to itself.\n\n Raises:\n ReassigningVariableToItselfViolation\n\n '
self._check_assignment(node)
self.generic_visit(node) | Used to check assignment variable to itself.
Raises:
ReassigningVariableToItselfViolation | wemake_python_styleguide/visitors/ast/naming.py | visit_Assign | makarchuk/wemake-python-styleguide | 0 | python | def visit_Assign(self, node: ast.Assign) -> None:
'\n Used to check assignment variable to itself.\n\n Raises:\n ReassigningVariableToItselfViolation\n\n '
self._check_assignment(node)
self.generic_visit(node) | def visit_Assign(self, node: ast.Assign) -> None:
'\n Used to check assignment variable to itself.\n\n Raises:\n ReassigningVariableToItselfViolation\n\n '
self._check_assignment(node)
self.generic_visit(node)<|docstring|>Used to check assignment variable to itself.
Raises:
ReassigningVariableToItselfViolation<|endoftext|> |
8d6160d48acdaae2271dca4959b368192df8dc3cad88e8a0857d25bdf58bae02 | def buildProbabilities(self, lastPopulation):
'Take the last population (with scores) and recompute the\n sampling vector for proportional selection.\n \n :param lastPopulation: The last population, of size\n ``config.populationSize``\n :type lastPopulation: A list of (point, score) tuples\n \n '
if (lastPopulation is None):
return
try:
population = [(p[0], self.config.modulator(p[1], i, self.config)) for (i, p) in enumerate(lastPopulation)]
except Exception:
population = [(x, self.config.modulator(s)) for (x, s) in lastPopulation]
self.total = sum([s for (x, s) in population])
self.matchedPopulation = []
amt = 0
for (x, s) in population:
amt += s
self.matchedPopulation.append((x, amt)) | Take the last population (with scores) and recompute the
sampling vector for proportional selection.
:param lastPopulation: The last population, of size
``config.populationSize``
:type lastPopulation: A list of (point, score) tuples | pyec/distribution/ec/selectors.py | buildProbabilities | hypernicon/pyec | 2 | python | def buildProbabilities(self, lastPopulation):
'Take the last population (with scores) and recompute the\n sampling vector for proportional selection.\n \n :param lastPopulation: The last population, of size\n ``config.populationSize``\n :type lastPopulation: A list of (point, score) tuples\n \n '
if (lastPopulation is None):
return
try:
population = [(p[0], self.config.modulator(p[1], i, self.config)) for (i, p) in enumerate(lastPopulation)]
except Exception:
population = [(x, self.config.modulator(s)) for (x, s) in lastPopulation]
self.total = sum([s for (x, s) in population])
self.matchedPopulation = []
amt = 0
for (x, s) in population:
amt += s
self.matchedPopulation.append((x, amt)) | def buildProbabilities(self, lastPopulation):
'Take the last population (with scores) and recompute the\n sampling vector for proportional selection.\n \n :param lastPopulation: The last population, of size\n ``config.populationSize``\n :type lastPopulation: A list of (point, score) tuples\n \n '
if (lastPopulation is None):
return
try:
population = [(p[0], self.config.modulator(p[1], i, self.config)) for (i, p) in enumerate(lastPopulation)]
except Exception:
population = [(x, self.config.modulator(s)) for (x, s) in lastPopulation]
self.total = sum([s for (x, s) in population])
self.matchedPopulation = []
amt = 0
for (x, s) in population:
amt += s
self.matchedPopulation.append((x, amt))<|docstring|>Take the last population (with scores) and recompute the
sampling vector for proportional selection.
:param lastPopulation: The last population, of size
``config.populationSize``
:type lastPopulation: A list of (point, score) tuples<|endoftext|> |
ecba756ebb9192ff188419ca9318c75c4e92f95b618dfdf43cd1e293a232fa1b | def __call__(self, rank, popSize):
'\n Determine the probability of choosing the individual with a given rank inside a population of a given size.\n \n :param rank: The rank of the solution under consideration.\n :type rank: int\n :param popSize: The size of the population.\n :type popSize: int\n '
pass | Determine the probability of choosing the individual with a given rank inside a population of a given size.
:param rank: The rank of the solution under consideration.
:type rank: int
:param popSize: The size of the population.
:type popSize: int | pyec/distribution/ec/selectors.py | __call__ | hypernicon/pyec | 2 | python | def __call__(self, rank, popSize):
'\n Determine the probability of choosing the individual with a given rank inside a population of a given size.\n \n :param rank: The rank of the solution under consideration.\n :type rank: int\n :param popSize: The size of the population.\n :type popSize: int\n '
pass | def __call__(self, rank, popSize):
'\n Determine the probability of choosing the individual with a given rank inside a population of a given size.\n \n :param rank: The rank of the solution under consideration.\n :type rank: int\n :param popSize: The size of the population.\n :type popSize: int\n '
pass<|docstring|>Determine the probability of choosing the individual with a given rank inside a population of a given size.
:param rank: The rank of the solution under consideration.
:type rank: int
:param popSize: The size of the population.
:type popSize: int<|endoftext|> |
611077300d047bdb23e795708a8736d0e1e049dadafdd08ad77db31cce79fd5d | def __init__(self, config):
'pressure between 1.0 and 2.0'
self.pressure = config.pressure
self.popSize = config.populationSize | pressure between 1.0 and 2.0 | pyec/distribution/ec/selectors.py | __init__ | hypernicon/pyec | 2 | python | def __init__(self, config):
self.pressure = config.pressure
self.popSize = config.populationSize | def __init__(self, config):
self.pressure = config.pressure
self.popSize = config.populationSize<|docstring|>pressure between 1.0 and 2.0<|endoftext|> |
918dc10e7213b6d121c19afb44bc9b6d678f8c3be4d139192854bfa1ef944fa9 | def __call__(self, rank):
' root is root of (pressure * sum_k=0^(popSize-1) x^k) - popSize * x ^(popSize - 1)'
return (self.root ** (rank - 1.0)) | root is root of (pressure * sum_k=0^(popSize-1) x^k) - popSize * x ^(popSize - 1) | pyec/distribution/ec/selectors.py | __call__ | hypernicon/pyec | 2 | python | def __call__(self, rank):
' '
return (self.root ** (rank - 1.0)) | def __call__(self, rank):
' '
return (self.root ** (rank - 1.0))<|docstring|>root is root of (pressure * sum_k=0^(popSize-1) x^k) - popSize * x ^(popSize - 1)<|endoftext|> |
8a3c1c721aef13812374fa355dce000e4800af52a64b94cf472bf6d5fa265db0 | def get_dataloader(net, train_dataset, val_dataset, data_shape, batch_size, num_workers):
'Get dataloader.'
(width, height) = (data_shape, data_shape)
with autograd.train_mode():
(_, _, anchors) = net(mx.nd.zeros((1, 3, height, width)))
batchify_fn = Tuple(Stack(), Stack(), Stack())
train_loader = gluon.data.DataLoader(train_dataset.transform(SSDDefaultTrainTransform(width, height, anchors)), batch_size, True, batchify_fn=batchify_fn, last_batch='rollover', num_workers=num_workers)
val_batchify_fn = Tuple(Stack(), Pad(pad_val=(- 1)))
val_loader = gluon.data.DataLoader(val_dataset.transform(SSDDefaultValTransform(width, height)), batch_size, False, batchify_fn=val_batchify_fn, last_batch='keep', num_workers=num_workers)
return (train_loader, val_loader) | Get dataloader. | scripts/detection/ssd/train_ssd.py | get_dataloader | winnerineast/gluon-cv | 62 | python | def get_dataloader(net, train_dataset, val_dataset, data_shape, batch_size, num_workers):
(width, height) = (data_shape, data_shape)
with autograd.train_mode():
(_, _, anchors) = net(mx.nd.zeros((1, 3, height, width)))
batchify_fn = Tuple(Stack(), Stack(), Stack())
train_loader = gluon.data.DataLoader(train_dataset.transform(SSDDefaultTrainTransform(width, height, anchors)), batch_size, True, batchify_fn=batchify_fn, last_batch='rollover', num_workers=num_workers)
val_batchify_fn = Tuple(Stack(), Pad(pad_val=(- 1)))
val_loader = gluon.data.DataLoader(val_dataset.transform(SSDDefaultValTransform(width, height)), batch_size, False, batchify_fn=val_batchify_fn, last_batch='keep', num_workers=num_workers)
return (train_loader, val_loader) | def get_dataloader(net, train_dataset, val_dataset, data_shape, batch_size, num_workers):
(width, height) = (data_shape, data_shape)
with autograd.train_mode():
(_, _, anchors) = net(mx.nd.zeros((1, 3, height, width)))
batchify_fn = Tuple(Stack(), Stack(), Stack())
train_loader = gluon.data.DataLoader(train_dataset.transform(SSDDefaultTrainTransform(width, height, anchors)), batch_size, True, batchify_fn=batchify_fn, last_batch='rollover', num_workers=num_workers)
val_batchify_fn = Tuple(Stack(), Pad(pad_val=(- 1)))
val_loader = gluon.data.DataLoader(val_dataset.transform(SSDDefaultValTransform(width, height)), batch_size, False, batchify_fn=val_batchify_fn, last_batch='keep', num_workers=num_workers)
return (train_loader, val_loader)<|docstring|>Get dataloader.<|endoftext|> |
06044a91210506d48300c70f7acc81e2ca5df4fe2d481247a8a103d10bbde4fd | def validate(net, val_data, ctx, eval_metric):
'Test on validation dataset.'
eval_metric.reset()
net.set_nms(nms_thresh=0.45, nms_topk=400)
net.hybridize(static_alloc=True, static_shape=True)
for batch in val_data:
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
det_bboxes = []
det_ids = []
det_scores = []
gt_bboxes = []
gt_ids = []
gt_difficults = []
for (x, y) in zip(data, label):
(ids, scores, bboxes) = net(x)
det_ids.append(ids)
det_scores.append(scores)
det_bboxes.append(bboxes.clip(0, batch[0].shape[2]))
gt_ids.append(y.slice_axis(axis=(- 1), begin=4, end=5))
gt_bboxes.append(y.slice_axis(axis=(- 1), begin=0, end=4))
gt_difficults.append((y.slice_axis(axis=(- 1), begin=5, end=6) if (y.shape[(- 1)] > 5) else None))
eval_metric.update(det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults)
return eval_metric.get() | Test on validation dataset. | scripts/detection/ssd/train_ssd.py | validate | winnerineast/gluon-cv | 62 | python | def validate(net, val_data, ctx, eval_metric):
eval_metric.reset()
net.set_nms(nms_thresh=0.45, nms_topk=400)
net.hybridize(static_alloc=True, static_shape=True)
for batch in val_data:
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
det_bboxes = []
det_ids = []
det_scores = []
gt_bboxes = []
gt_ids = []
gt_difficults = []
for (x, y) in zip(data, label):
(ids, scores, bboxes) = net(x)
det_ids.append(ids)
det_scores.append(scores)
det_bboxes.append(bboxes.clip(0, batch[0].shape[2]))
gt_ids.append(y.slice_axis(axis=(- 1), begin=4, end=5))
gt_bboxes.append(y.slice_axis(axis=(- 1), begin=0, end=4))
gt_difficults.append((y.slice_axis(axis=(- 1), begin=5, end=6) if (y.shape[(- 1)] > 5) else None))
eval_metric.update(det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults)
return eval_metric.get() | def validate(net, val_data, ctx, eval_metric):
eval_metric.reset()
net.set_nms(nms_thresh=0.45, nms_topk=400)
net.hybridize(static_alloc=True, static_shape=True)
for batch in val_data:
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
det_bboxes = []
det_ids = []
det_scores = []
gt_bboxes = []
gt_ids = []
gt_difficults = []
for (x, y) in zip(data, label):
(ids, scores, bboxes) = net(x)
det_ids.append(ids)
det_scores.append(scores)
det_bboxes.append(bboxes.clip(0, batch[0].shape[2]))
gt_ids.append(y.slice_axis(axis=(- 1), begin=4, end=5))
gt_bboxes.append(y.slice_axis(axis=(- 1), begin=0, end=4))
gt_difficults.append((y.slice_axis(axis=(- 1), begin=5, end=6) if (y.shape[(- 1)] > 5) else None))
eval_metric.update(det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults)
return eval_metric.get()<|docstring|>Test on validation dataset.<|endoftext|> |
c9274363e4bea452f3ec254bceebc9db6956be1d369cd1d1cf1308d1336c0652 | def train(net, train_data, val_data, eval_metric, ctx, args):
'Training pipeline'
net.collect_params().reset_ctx(ctx)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': args.lr, 'wd': args.wd, 'momentum': args.momentum})
lr_decay = float(args.lr_decay)
lr_steps = sorted([float(ls) for ls in args.lr_decay_epoch.split(',') if ls.strip()])
mbox_loss = gcv.loss.SSDMultiBoxLoss()
ce_metric = mx.metric.Loss('CrossEntropy')
smoothl1_metric = mx.metric.Loss('SmoothL1')
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_path = (args.save_prefix + '_train.log')
log_dir = os.path.dirname(log_file_path)
if (log_dir and (not os.path.exists(log_dir))):
os.makedirs(log_dir)
fh = logging.FileHandler(log_file_path)
logger.addHandler(fh)
logger.info(args)
logger.info('Start training from [Epoch {}]'.format(args.start_epoch))
best_map = [0]
for epoch in range(args.start_epoch, args.epochs):
while (lr_steps and (epoch >= lr_steps[0])):
new_lr = (trainer.learning_rate * lr_decay)
lr_steps.pop(0)
trainer.set_learning_rate(new_lr)
logger.info('[Epoch {}] Set learning rate to {}'.format(epoch, new_lr))
ce_metric.reset()
smoothl1_metric.reset()
tic = time.time()
btic = time.time()
net.hybridize(static_alloc=True, static_shape=True)
for (i, batch) in enumerate(train_data):
batch_size = batch[0].shape[0]
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
cls_targets = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
box_targets = gluon.utils.split_and_load(batch[2], ctx_list=ctx, batch_axis=0)
with autograd.record():
cls_preds = []
box_preds = []
for x in data:
(cls_pred, box_pred, _) = net(x)
cls_preds.append(cls_pred)
box_preds.append(box_pred)
(sum_loss, cls_loss, box_loss) = mbox_loss(cls_preds, box_preds, cls_targets, box_targets)
autograd.backward(sum_loss)
trainer.step(1)
ce_metric.update(0, [(l * batch_size) for l in cls_loss])
smoothl1_metric.update(0, [(l * batch_size) for l in box_loss])
if (args.log_interval and (not ((i + 1) % args.log_interval))):
(name1, loss1) = ce_metric.get()
(name2, loss2) = smoothl1_metric.get()
logger.info('[Epoch {}][Batch {}], Speed: {:.3f} samples/sec, {}={:.3f}, {}={:.3f}'.format(epoch, i, (batch_size / (time.time() - btic)), name1, loss1, name2, loss2))
btic = time.time()
(name1, loss1) = ce_metric.get()
(name2, loss2) = smoothl1_metric.get()
logger.info('[Epoch {}] Training cost: {:.3f}, {}={:.3f}, {}={:.3f}'.format(epoch, (time.time() - tic), name1, loss1, name2, loss2))
if (((epoch % args.val_interval) == 0) or (args.save_interval and ((epoch % args.save_interval) == 0))):
(map_name, mean_ap) = validate(net, val_data, ctx, eval_metric)
val_msg = '\n'.join(['{}={}'.format(k, v) for (k, v) in zip(map_name, mean_ap)])
logger.info('[Epoch {}] Validation: \n{}'.format(epoch, val_msg))
current_map = float(mean_ap[(- 1)])
else:
current_map = 0.0
save_params(net, best_map, current_map, epoch, args.save_interval, args.save_prefix) | Training pipeline | scripts/detection/ssd/train_ssd.py | train | winnerineast/gluon-cv | 62 | python | def train(net, train_data, val_data, eval_metric, ctx, args):
net.collect_params().reset_ctx(ctx)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': args.lr, 'wd': args.wd, 'momentum': args.momentum})
lr_decay = float(args.lr_decay)
lr_steps = sorted([float(ls) for ls in args.lr_decay_epoch.split(',') if ls.strip()])
mbox_loss = gcv.loss.SSDMultiBoxLoss()
ce_metric = mx.metric.Loss('CrossEntropy')
smoothl1_metric = mx.metric.Loss('SmoothL1')
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_path = (args.save_prefix + '_train.log')
log_dir = os.path.dirname(log_file_path)
if (log_dir and (not os.path.exists(log_dir))):
os.makedirs(log_dir)
fh = logging.FileHandler(log_file_path)
logger.addHandler(fh)
logger.info(args)
logger.info('Start training from [Epoch {}]'.format(args.start_epoch))
best_map = [0]
for epoch in range(args.start_epoch, args.epochs):
while (lr_steps and (epoch >= lr_steps[0])):
new_lr = (trainer.learning_rate * lr_decay)
lr_steps.pop(0)
trainer.set_learning_rate(new_lr)
logger.info('[Epoch {}] Set learning rate to {}'.format(epoch, new_lr))
ce_metric.reset()
smoothl1_metric.reset()
tic = time.time()
btic = time.time()
net.hybridize(static_alloc=True, static_shape=True)
for (i, batch) in enumerate(train_data):
batch_size = batch[0].shape[0]
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
cls_targets = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
box_targets = gluon.utils.split_and_load(batch[2], ctx_list=ctx, batch_axis=0)
with autograd.record():
cls_preds = []
box_preds = []
for x in data:
(cls_pred, box_pred, _) = net(x)
cls_preds.append(cls_pred)
box_preds.append(box_pred)
(sum_loss, cls_loss, box_loss) = mbox_loss(cls_preds, box_preds, cls_targets, box_targets)
autograd.backward(sum_loss)
trainer.step(1)
ce_metric.update(0, [(l * batch_size) for l in cls_loss])
smoothl1_metric.update(0, [(l * batch_size) for l in box_loss])
if (args.log_interval and (not ((i + 1) % args.log_interval))):
(name1, loss1) = ce_metric.get()
(name2, loss2) = smoothl1_metric.get()
logger.info('[Epoch {}][Batch {}], Speed: {:.3f} samples/sec, {}={:.3f}, {}={:.3f}'.format(epoch, i, (batch_size / (time.time() - btic)), name1, loss1, name2, loss2))
btic = time.time()
(name1, loss1) = ce_metric.get()
(name2, loss2) = smoothl1_metric.get()
logger.info('[Epoch {}] Training cost: {:.3f}, {}={:.3f}, {}={:.3f}'.format(epoch, (time.time() - tic), name1, loss1, name2, loss2))
if (((epoch % args.val_interval) == 0) or (args.save_interval and ((epoch % args.save_interval) == 0))):
(map_name, mean_ap) = validate(net, val_data, ctx, eval_metric)
val_msg = '\n'.join(['{}={}'.format(k, v) for (k, v) in zip(map_name, mean_ap)])
logger.info('[Epoch {}] Validation: \n{}'.format(epoch, val_msg))
current_map = float(mean_ap[(- 1)])
else:
current_map = 0.0
save_params(net, best_map, current_map, epoch, args.save_interval, args.save_prefix) | def train(net, train_data, val_data, eval_metric, ctx, args):
net.collect_params().reset_ctx(ctx)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': args.lr, 'wd': args.wd, 'momentum': args.momentum})
lr_decay = float(args.lr_decay)
lr_steps = sorted([float(ls) for ls in args.lr_decay_epoch.split(',') if ls.strip()])
mbox_loss = gcv.loss.SSDMultiBoxLoss()
ce_metric = mx.metric.Loss('CrossEntropy')
smoothl1_metric = mx.metric.Loss('SmoothL1')
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_path = (args.save_prefix + '_train.log')
log_dir = os.path.dirname(log_file_path)
if (log_dir and (not os.path.exists(log_dir))):
os.makedirs(log_dir)
fh = logging.FileHandler(log_file_path)
logger.addHandler(fh)
logger.info(args)
logger.info('Start training from [Epoch {}]'.format(args.start_epoch))
best_map = [0]
for epoch in range(args.start_epoch, args.epochs):
while (lr_steps and (epoch >= lr_steps[0])):
new_lr = (trainer.learning_rate * lr_decay)
lr_steps.pop(0)
trainer.set_learning_rate(new_lr)
logger.info('[Epoch {}] Set learning rate to {}'.format(epoch, new_lr))
ce_metric.reset()
smoothl1_metric.reset()
tic = time.time()
btic = time.time()
net.hybridize(static_alloc=True, static_shape=True)
for (i, batch) in enumerate(train_data):
batch_size = batch[0].shape[0]
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
cls_targets = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
box_targets = gluon.utils.split_and_load(batch[2], ctx_list=ctx, batch_axis=0)
with autograd.record():
cls_preds = []
box_preds = []
for x in data:
(cls_pred, box_pred, _) = net(x)
cls_preds.append(cls_pred)
box_preds.append(box_pred)
(sum_loss, cls_loss, box_loss) = mbox_loss(cls_preds, box_preds, cls_targets, box_targets)
autograd.backward(sum_loss)
trainer.step(1)
ce_metric.update(0, [(l * batch_size) for l in cls_loss])
smoothl1_metric.update(0, [(l * batch_size) for l in box_loss])
if (args.log_interval and (not ((i + 1) % args.log_interval))):
(name1, loss1) = ce_metric.get()
(name2, loss2) = smoothl1_metric.get()
logger.info('[Epoch {}][Batch {}], Speed: {:.3f} samples/sec, {}={:.3f}, {}={:.3f}'.format(epoch, i, (batch_size / (time.time() - btic)), name1, loss1, name2, loss2))
btic = time.time()
(name1, loss1) = ce_metric.get()
(name2, loss2) = smoothl1_metric.get()
logger.info('[Epoch {}] Training cost: {:.3f}, {}={:.3f}, {}={:.3f}'.format(epoch, (time.time() - tic), name1, loss1, name2, loss2))
if (((epoch % args.val_interval) == 0) or (args.save_interval and ((epoch % args.save_interval) == 0))):
(map_name, mean_ap) = validate(net, val_data, ctx, eval_metric)
val_msg = '\n'.join(['{}={}'.format(k, v) for (k, v) in zip(map_name, mean_ap)])
logger.info('[Epoch {}] Validation: \n{}'.format(epoch, val_msg))
current_map = float(mean_ap[(- 1)])
else:
current_map = 0.0
save_params(net, best_map, current_map, epoch, args.save_interval, args.save_prefix)<|docstring|>Training pipeline<|endoftext|> |
dd7cc5bdfa784c784be37d44e08cdaed171dae7b5be5fecea69f06e8c2bb6fb2 | @autotvm.register_topi_compute(nn.conv3d, ['cuda', 'gpu'], ['direct'])
def conv3d_cuda(cfg, data, kernel, strides, padding, dilation, layout='NCDHW', out_dtype='float32'):
    """Conv3D operator for the cuda backend.

    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template
    data : tvm.Tensor
        5-D with shape [batch, in_channel, in_depth, in_height, in_width]
    kernel : tvm.Tensor
        5-D with shape [num_filter, in_channel, filter_depth, filter_height,
        filter_width]
    strides : int or a list/tuple of three ints
        stride size, or [stride_depth, stride_height, stride_width]
    padding : int or a list/tuple of 3 or 6 ints
        padding size, or
        [pad_depth, pad_height, pad_width] for 3 ints, or
        [pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right]
        for 6 ints
    dilation: int or a list/tuple of three ints
        dilation size, or [dilation_depth, dilation_height, dilation_width]
    layout : str
        layout of data
    out_dtype: str
        The output type. This is used for mixed precision.

    Returns
    -------
    output : tvm.Tensor
        5-D with shape [batch, out_channel, out_depth, out_height, out_width]
    """
    target = tvm.target.current_target()
    if 'cudnn' not in target.libs:
        # No cudnn available: fall back to the generic TOPI implementation.
        if layout == 'NCDHW':
            return nn.conv3d_ncdhw(data, kernel, strides, padding, dilation,
                                   out_dtype)
        raise ValueError('not support this layout {} yet'.format(layout))

    # --- cudnn path ---
    if layout == 'NCDHW':
        tensor_format = 0  # channels-first tensor format
        batch, _, in_d, in_h, in_w = get_const_tuple(data.shape)
    elif layout == 'NDHWC':
        tensor_format = 1  # channels-last tensor format
        batch, in_d, in_h, in_w, _ = get_const_tuple(data.shape)
    else:
        raise ValueError('Unsupported layout %s in cudnn' % layout)

    out_channel, in_channel, k_d, k_h, k_w = get_const_tuple(kernel.shape)

    if isinstance(strides, int):
        strides = (strides, strides, strides)
    s_d, s_h, s_w = strides

    # cudnn only takes one pad value per spatial axis.
    if isinstance(padding, (list, tuple)) and len(padding) > 3:
        raise ValueError("Cudnn doesn't support asymmetric padding.")
    p_front, p_top, p_left, p_back, p_bottom, p_right = get_pad_tuple3d(
        padding, (k_d, k_h, k_w))

    if isinstance(dilation, int):
        dilation = (dilation, dilation, dilation)
    d_d, d_h, d_w = dilation

    # Output spatial extents (standard convolution arithmetic).
    out_d = (in_d + p_front + p_back - k_d) // s_d + 1
    out_h = (in_h + p_top + p_bottom - k_h) // s_h + 1
    out_w = (in_w + p_left + p_right - k_w) // s_w + 1

    # 2 FLOPs (mul + add) per output element per effective kernel tap.
    cfg.add_flop(2 * batch * out_d * out_h * out_w * out_channel * in_channel
                 * ((k_d - 1) * d_d + 1)
                 * ((k_h - 1) * d_h + 1)
                 * ((k_w - 1) * d_w + 1))

    return cudnn.conv_forward(data, kernel,
                              [p_front, p_top, p_left],
                              [s_d, s_h, s_w],
                              [d_d, d_h, d_w],
                              conv_mode=1,
                              tensor_format=tensor_format,
                              algo=-1,  # let cudnn pick the algorithm
                              conv_dtype=data.dtype)
Parameters
----------
cfg: ConfigEntity
The config for this template
data : tvm.Tensor
5-D with shape [batch, in_channel, in_depth, in_height, in_width]
kernel : tvm.Tensor
5-D with shape [num_filter, in_channel, filter_depth, filter_height, filter_width]
strides : int or a list/tuple of three ints
stride size, or [stride_depth, stride_height, stride_width]
padding : int or a list/tuple of 3 or 6 ints
padding size, or
[pad_depth, pad_height, pad_width] for 3 ints, or
[pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right] for 6 ints
dilation: int or a list/tuple of three ints
dilation size, or [dilation_depth, dilation_height, dilation_width]
layout : str
layout of data
out_dtype: str
The output type. This is used for mixed precision.
Returns
-------
output : tvm.Tensor
5-D with shape [batch, out_channel, out_depth, out_height, out_width] | 3rdparty/tvm/topi/python/topi/cuda/conv3d.py | conv3d_cuda | liuxiaotiao/Mxnet_DGT | 3 | python | @autotvm.register_topi_compute(nn.conv3d, ['cuda', 'gpu'], ['direct'])
def conv3d_cuda(cfg, data, kernel, strides, padding, dilation, layout='NCDHW', out_dtype='float32'):
'Conv3D operator for cuda backend.\n\n Parameters\n ----------\n cfg: ConfigEntity\n The config for this template\n\n data : tvm.Tensor\n 5-D with shape [batch, in_channel, in_depth, in_height, in_width]\n\n kernel : tvm.Tensor\n 5-D with shape [num_filter, in_channel, filter_depth, filter_height, filter_width]\n\n strides : int or a list/tuple of three ints\n stride size, or [stride_depth, stride_height, stride_width]\n\n padding : int or a list/tuple of 3 or 6 ints\n padding size, or\n [pad_depth, pad_height, pad_width] for 3 ints, or\n [pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right] for 6 ints\n\n dilation: int or a list/tuple of three ints\n dilation size, or [dilation_depth, dilation_height, dilation_width]\n\n layout : str\n layout of data\n\n out_dtype: str\n The output type. This is used for mixed precision.\n\n Returns\n -------\n output : tvm.Tensor\n 5-D with shape [batch, out_channel, out_depth, out_height, out_width]\n '
target = tvm.target.current_target()
if ('cudnn' in target.libs):
if (layout == 'NCDHW'):
tensor_format = 0
(N, _, D, H, W) = get_const_tuple(data.shape)
elif (layout == 'NDHWC'):
tensor_format = 1
(N, D, H, W, _) = get_const_tuple(data.shape)
else:
raise ValueError(('Unsupported layout %s in cudnn' % layout))
(CO, CI, KD, KH, KW) = get_const_tuple(kernel.shape)
(stride_d, stride_h, stride_w) = ((strides, strides, strides) if isinstance(strides, int) else strides)
if (isinstance(padding, (list, tuple)) and (len(padding) > 3)):
raise ValueError("Cudnn doesn't support asymmetric padding.")
(pf, pt, pl, pk, pb, pr) = get_pad_tuple3d(padding, (KD, KH, KW))
(dilation_d, dilation_h, dilation_w) = ((dilation, dilation, dilation) if isinstance(dilation, int) else dilation)
OD = (((((D + pf) + pk) - KD) // stride_d) + 1)
OH = (((((H + pt) + pb) - KH) // stride_h) + 1)
OW = (((((W + pl) + pr) - KW) // stride_w) + 1)
cfg.add_flop((((((((((2 * N) * OD) * OH) * OW) * CO) * CI) * (((KD - 1) * dilation_d) + 1)) * (((KH - 1) * dilation_h) + 1)) * (((KW - 1) * dilation_w) + 1)))
return cudnn.conv_forward(data, kernel, [pf, pt, pl], [stride_d, stride_h, stride_w], [dilation_d, dilation_h, dilation_w], conv_mode=1, tensor_format=tensor_format, algo=(- 1), conv_dtype=data.dtype)
if (layout == 'NCDHW'):
return nn.conv3d_ncdhw(data, kernel, strides, padding, dilation, out_dtype)
raise ValueError('not support this layout {} yet'.format(layout)) | @autotvm.register_topi_compute(nn.conv3d, ['cuda', 'gpu'], ['direct'])
def conv3d_cuda(cfg, data, kernel, strides, padding, dilation, layout='NCDHW', out_dtype='float32'):
'Conv3D operator for cuda backend.\n\n Parameters\n ----------\n cfg: ConfigEntity\n The config for this template\n\n data : tvm.Tensor\n 5-D with shape [batch, in_channel, in_depth, in_height, in_width]\n\n kernel : tvm.Tensor\n 5-D with shape [num_filter, in_channel, filter_depth, filter_height, filter_width]\n\n strides : int or a list/tuple of three ints\n stride size, or [stride_depth, stride_height, stride_width]\n\n padding : int or a list/tuple of 3 or 6 ints\n padding size, or\n [pad_depth, pad_height, pad_width] for 3 ints, or\n [pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right] for 6 ints\n\n dilation: int or a list/tuple of three ints\n dilation size, or [dilation_depth, dilation_height, dilation_width]\n\n layout : str\n layout of data\n\n out_dtype: str\n The output type. This is used for mixed precision.\n\n Returns\n -------\n output : tvm.Tensor\n 5-D with shape [batch, out_channel, out_depth, out_height, out_width]\n '
target = tvm.target.current_target()
if ('cudnn' in target.libs):
if (layout == 'NCDHW'):
tensor_format = 0
(N, _, D, H, W) = get_const_tuple(data.shape)
elif (layout == 'NDHWC'):
tensor_format = 1
(N, D, H, W, _) = get_const_tuple(data.shape)
else:
raise ValueError(('Unsupported layout %s in cudnn' % layout))
(CO, CI, KD, KH, KW) = get_const_tuple(kernel.shape)
(stride_d, stride_h, stride_w) = ((strides, strides, strides) if isinstance(strides, int) else strides)
if (isinstance(padding, (list, tuple)) and (len(padding) > 3)):
raise ValueError("Cudnn doesn't support asymmetric padding.")
(pf, pt, pl, pk, pb, pr) = get_pad_tuple3d(padding, (KD, KH, KW))
(dilation_d, dilation_h, dilation_w) = ((dilation, dilation, dilation) if isinstance(dilation, int) else dilation)
OD = (((((D + pf) + pk) - KD) // stride_d) + 1)
OH = (((((H + pt) + pb) - KH) // stride_h) + 1)
OW = (((((W + pl) + pr) - KW) // stride_w) + 1)
cfg.add_flop((((((((((2 * N) * OD) * OH) * OW) * CO) * CI) * (((KD - 1) * dilation_d) + 1)) * (((KH - 1) * dilation_h) + 1)) * (((KW - 1) * dilation_w) + 1)))
return cudnn.conv_forward(data, kernel, [pf, pt, pl], [stride_d, stride_h, stride_w], [dilation_d, dilation_h, dilation_w], conv_mode=1, tensor_format=tensor_format, algo=(- 1), conv_dtype=data.dtype)
if (layout == 'NCDHW'):
return nn.conv3d_ncdhw(data, kernel, strides, padding, dilation, out_dtype)
raise ValueError('not support this layout {} yet'.format(layout))<|docstring|>Conv3D operator for cuda backend.
Parameters
----------
cfg: ConfigEntity
The config for this template
data : tvm.Tensor
5-D with shape [batch, in_channel, in_depth, in_height, in_width]
kernel : tvm.Tensor
5-D with shape [num_filter, in_channel, filter_depth, filter_height, filter_width]
strides : int or a list/tuple of three ints
stride size, or [stride_depth, stride_height, stride_width]
padding : int or a list/tuple of 3 or 6 ints
padding size, or
[pad_depth, pad_height, pad_width] for 3 ints, or
[pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right] for 6 ints
dilation: int or a list/tuple of three ints
dilation size, or [dilation_depth, dilation_height, dilation_width]
layout : str
layout of data
out_dtype: str
The output type. This is used for mixed precision.
Returns
-------
output : tvm.Tensor
5-D with shape [batch, out_channel, out_depth, out_height, out_width]<|endoftext|> |
cbe13bfcbad7b94f10a74b20f5e1fdb749a58cb107653cb8f81ae8219c70a885 | @autotvm.register_topi_schedule(generic.schedule_conv3d_ncdhw, ['cuda', 'gpu'], ['direct'])
def schedule_conv3d_ncdhw_cuda(cfg, outs):
    """TOPI schedule callback of conv3d (NCDHW layout) for cuda gpu.

    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template
    outs: Array of Tensor
        The computation graph description of the op
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule.
    """
    target = tvm.target.current_target()
    if 'cudnn' in target.libs:
        # cudnn-backed conv is an external call; schedule it as such.
        return generic.schedule_extern(outs)

    if isinstance(outs, tvm.tensor.Tensor):
        outs = [outs]
    sched = tvm.create_schedule([tensor.op for tensor in outs])

    def _visit(op):
        # Apply the direct CUDA schedule to the conv3d stage only.
        if op.tag == 'conv3d_ncdhw':
            schedule_direct_3d_cuda(cfg, sched, op.output(0))

    traverse_inline(sched, outs[0].op, _visit)
    return sched
Parameters
----------
cfg: ConfigEntity
The config for this template
outs: Array of Tensor
The computation graph description of conv2d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv2d. | 3rdparty/tvm/topi/python/topi/cuda/conv3d.py | schedule_conv3d_ncdhw_cuda | liuxiaotiao/Mxnet_DGT | 3 | python | @autotvm.register_topi_schedule(generic.schedule_conv3d_ncdhw, ['cuda', 'gpu'], ['direct'])
def schedule_conv3d_ncdhw_cuda(cfg, outs):
'TOPI schedule callback of conv3d for cuda gpu\n\n Parameters\n ----------\n cfg: ConfigEntity\n The config for this template\n\n outs: Array of Tensor\n The computation graph description of conv2d\n in the format of an array of tensors.\n\n Returns\n -------\n s: Schedule\n The computation schedule for conv2d.\n '
target = tvm.target.current_target()
if ('cudnn' in target.libs):
return generic.schedule_extern(outs)
outs = ([outs] if isinstance(outs, tvm.tensor.Tensor) else outs)
s = tvm.create_schedule([x.op for x in outs])
def _callback(op):
if (op.tag == 'conv3d_ncdhw'):
schedule_direct_3d_cuda(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s | @autotvm.register_topi_schedule(generic.schedule_conv3d_ncdhw, ['cuda', 'gpu'], ['direct'])
def schedule_conv3d_ncdhw_cuda(cfg, outs):
'TOPI schedule callback of conv3d for cuda gpu\n\n Parameters\n ----------\n cfg: ConfigEntity\n The config for this template\n\n outs: Array of Tensor\n The computation graph description of conv2d\n in the format of an array of tensors.\n\n Returns\n -------\n s: Schedule\n The computation schedule for conv2d.\n '
target = tvm.target.current_target()
if ('cudnn' in target.libs):
return generic.schedule_extern(outs)
outs = ([outs] if isinstance(outs, tvm.tensor.Tensor) else outs)
s = tvm.create_schedule([x.op for x in outs])
def _callback(op):
if (op.tag == 'conv3d_ncdhw'):
schedule_direct_3d_cuda(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s<|docstring|>TOPI schedule callback of conv3d for cuda gpu
Parameters
----------
cfg: ConfigEntity
The config for this template
outs: Array of Tensor
The computation graph description of conv2d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv2d.<|endoftext|> |
90e71584d7535724687f1520c54177a646c654833375515c422e30e7aefbf6c4 | @autotvm.register_topi_schedule(generic.schedule_conv3d_ndhwc, ['cuda', 'gpu'], ['direct'])
def schedule_conv3d_ndhwc_cuda(cfg, outs):
    """TOPI schedule callback of conv3d (NDHWC layout) for cuda gpu.

    Parameters
    ----------
    cfg: ConfigEntity
        The config for this template
    outs: Array of Tensor
        The computation graph description of the op
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule.
    """
    if 'cudnn' in tvm.target.current_target().libs:
        # External cudnn kernel: nothing to schedule ourselves.
        return generic.schedule_extern(outs)

    tensors = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
    sch = tvm.create_schedule([t.op for t in tensors])

    def _apply(op):
        # Only the conv3d compute stage gets the direct schedule.
        if op.tag == 'conv3d_ndhwc':
            schedule_direct_3d_cuda(cfg, sch, op.output(0))

    traverse_inline(sch, tensors[0].op, _apply)
    return sch
Parameters
----------
cfg: ConfigEntity
The config for this template
outs: Array of Tensor
The computation graph description of conv2d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv2d. | 3rdparty/tvm/topi/python/topi/cuda/conv3d.py | schedule_conv3d_ndhwc_cuda | liuxiaotiao/Mxnet_DGT | 3 | python | @autotvm.register_topi_schedule(generic.schedule_conv3d_ndhwc, ['cuda', 'gpu'], ['direct'])
def schedule_conv3d_ndhwc_cuda(cfg, outs):
'TOPI schedule callback of conv3d for cuda gpu\n\n Parameters\n ----------\n cfg: ConfigEntity\n The config for this template\n\n outs: Array of Tensor\n The computation graph description of conv2d\n in the format of an array of tensors.\n\n Returns\n -------\n s: Schedule\n The computation schedule for conv2d.\n '
target = tvm.target.current_target()
if ('cudnn' in target.libs):
return generic.schedule_extern(outs)
outs = ([outs] if isinstance(outs, tvm.tensor.Tensor) else outs)
s = tvm.create_schedule([x.op for x in outs])
def _callback(op):
if (op.tag == 'conv3d_ndhwc'):
schedule_direct_3d_cuda(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s | @autotvm.register_topi_schedule(generic.schedule_conv3d_ndhwc, ['cuda', 'gpu'], ['direct'])
def schedule_conv3d_ndhwc_cuda(cfg, outs):
'TOPI schedule callback of conv3d for cuda gpu\n\n Parameters\n ----------\n cfg: ConfigEntity\n The config for this template\n\n outs: Array of Tensor\n The computation graph description of conv2d\n in the format of an array of tensors.\n\n Returns\n -------\n s: Schedule\n The computation schedule for conv2d.\n '
target = tvm.target.current_target()
if ('cudnn' in target.libs):
return generic.schedule_extern(outs)
outs = ([outs] if isinstance(outs, tvm.tensor.Tensor) else outs)
s = tvm.create_schedule([x.op for x in outs])
def _callback(op):
if (op.tag == 'conv3d_ndhwc'):
schedule_direct_3d_cuda(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s<|docstring|>TOPI schedule callback of conv3d for cuda gpu
Parameters
----------
cfg: ConfigEntity
The config for this template
outs: Array of Tensor
The computation graph description of conv2d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv2d.<|endoftext|> |
def split_path(uri):
    """
    Split the URI into path[?query][#fragment]

    :param uri: the URI to split
    :return: tuple of path, query, fragment
    """
    parts = urlparse(uri)
    return parts.path, parts.query, parts.fragment
:param uri: the URI to split
:return: tuple of path, query, fragment | assertions/protocol_details.py | split_path | DMTF/Redfish-Protocol-Validator | 2 | python | def split_path(uri):
'\n Split the URI into path[?query][#fragment]\n\n :param uri: the URI to split\n :return: tuple of path, query, fragment\n '
parsed = urlparse(uri)
return (parsed.path, parsed.query, parsed.fragment) | def split_path(uri):
'\n Split the URI into path[?query][#fragment]\n\n :param uri: the URI to split\n :return: tuple of path, query, fragment\n '
parsed = urlparse(uri)
return (parsed.path, parsed.query, parsed.fragment)<|docstring|>Split the URI into path[?query][#fragment]
:param uri: the URI to split
:return: tuple of path, query, fragment<|endoftext|> |
def safe_uri(uri):
    """
    Determine if URI is safe (does not use RFC 1738 unsafe characters)

    :param uri: URI to check
    :return: True if URI is safe, False otherwise
    """
    path, query, frag = split_path(uri)
    # Every component must match the safe-character pattern.  Return a
    # real bool: the previous implementation leaked a re.Match object
    # (or None) even though the docstring promised True/False.
    return all(safe_chars_regex.search(part) is not None
               for part in (path, query, frag))
:param uri: URI to check
:return: True if URI is safe, False otherwise | assertions/protocol_details.py | safe_uri | DMTF/Redfish-Protocol-Validator | 2 | python | def safe_uri(uri):
'\n Determine if URI is safe (does not use RFC 1738 unsafe character)\n\n :param uri: URI to check\n :return: True if URI is safe, False otherwise\n '
(path, query, frag) = split_path(uri)
safe = True
for part in (path, query, frag):
safe = (safe and safe_chars_regex.search(part))
return safe | def safe_uri(uri):
'\n Determine if URI is safe (does not use RFC 1738 unsafe character)\n\n :param uri: URI to check\n :return: True if URI is safe, False otherwise\n '
(path, query, frag) = split_path(uri)
safe = True
for part in (path, query, frag):
safe = (safe and safe_chars_regex.search(part))
return safe<|docstring|>Determine if URI is safe (does not use RFC 1738 unsafe character)
:param uri: URI to check
:return: True if URI is safe, False otherwise<|endoftext|> |
def encoded_char_in_uri(uri):
    """
    Determine if path or frag of URI contain any percent-encoded characters

    The query component is deliberately not inspected; only the path and
    fragment are checked, matching the documented contract.

    :param uri: URI to check
    :return: True if encoded chars found in path or frag, False otherwise
    """
    path, _query, frag = split_path(uri)
    # Return a real bool: the previous implementation returned a
    # re.Match object (or False) despite the documented True/False.
    return any(encoded_char_regex.search(part) is not None
               for part in (path, frag))
:param uri: URI to check
:return: True if encoded chars found in path or frag, False otherwise | assertions/protocol_details.py | encoded_char_in_uri | DMTF/Redfish-Protocol-Validator | 2 | python | def encoded_char_in_uri(uri):
'\n Determine if path or frag of URI contain any percent-encoded characters\n\n :param uri: URI to check\n :return: True if encoded chars found in path or frag, False otherwise\n '
(path, query, frag) = split_path(uri)
encoded = False
for part in (path, frag):
encoded = (encoded or encoded_char_regex.search(part))
return encoded | def encoded_char_in_uri(uri):
'\n Determine if path or frag of URI contain any percent-encoded characters\n\n :param uri: URI to check\n :return: True if encoded chars found in path or frag, False otherwise\n '
(path, query, frag) = split_path(uri)
encoded = False
for part in (path, frag):
encoded = (encoded or encoded_char_regex.search(part))
return encoded<|docstring|>Determine if path or frag of URI contain any percent-encoded characters
:param uri: URI to check
:return: True if encoded chars found in path or frag, False otherwise<|endoftext|> |
def test_uri(sut: SystemUnderTest, uri, response):
    """Perform tests on the URI format and encoding."""
    method = response.request.method
    status = response.status_code

    # Assertion: URI uses only RFC 1738 safe characters.
    if safe_uri(uri):
        sut.log(Result.PASS, method, status, uri,
                Assertion.PROTO_URI_SAFE_CHARS, 'Test passed')
    else:
        sut.log(Result.FAIL, method, status, uri,
                Assertion.PROTO_URI_SAFE_CHARS,
                'URI contains one or more unsafe chars')

    # Assertion: path/fragment contain no percent-encoded characters.
    if encoded_char_in_uri(uri):
        sut.log(Result.FAIL, method, status, uri,
                Assertion.PROTO_URI_NO_ENCODED_CHARS,
                'URI contains one or more percent-encoded chars')
    else:
        sut.log(Result.PASS, method, status, uri,
                Assertion.PROTO_URI_NO_ENCODED_CHARS, 'Test passed')

    # Assertion: relative references are formed correctly.
    result, msg = check_relative_ref(uri)
    sut.log(result, method, status, uri,
            Assertion.PROTO_URI_RELATIVE_REFS, msg)
safe = safe_uri(uri)
result = (Result.PASS if safe else Result.FAIL)
msg = ('Test passed' if safe else 'URI contains one or more unsafe chars')
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_URI_SAFE_CHARS, msg)
encoded = encoded_char_in_uri(uri)
result = (Result.PASS if (not encoded) else Result.FAIL)
msg = ('Test passed' if (not encoded) else 'URI contains one or more percent-encoded chars')
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_URI_NO_ENCODED_CHARS, msg)
(result, msg) = check_relative_ref(uri)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_URI_RELATIVE_REFS, msg) | def test_uri(sut: SystemUnderTest, uri, response):
safe = safe_uri(uri)
result = (Result.PASS if safe else Result.FAIL)
msg = ('Test passed' if safe else 'URI contains one or more unsafe chars')
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_URI_SAFE_CHARS, msg)
encoded = encoded_char_in_uri(uri)
result = (Result.PASS if (not encoded) else Result.FAIL)
msg = ('Test passed' if (not encoded) else 'URI contains one or more percent-encoded chars')
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_URI_NO_ENCODED_CHARS, msg)
(result, msg) = check_relative_ref(uri)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_URI_RELATIVE_REFS, msg)<|docstring|>Perform tests on the URI format and encoding.<|endoftext|> |
def test_http_supported_methods(sut: SystemUnderTest):
    """Perform tests on the supported HTTP methods."""
    for method in ('GET', 'POST', 'PATCH', 'DELETE'):
        responses = sut.get_responses_by_method(method)
        if not responses:
            # Nothing recorded for this method; cannot evaluate it.
            sut.log(Result.NOT_TESTED, method, '', '',
                    Assertion.PROTO_HTTP_SUPPORTED_METHODS,
                    '%s not tested' % method)
            continue
        # One successful (2xx) response is enough to prove support.
        success = next((r for r in responses.values()
                        if 200 <= r.status_code < 300), None)
        if success is not None:
            sut.log(Result.PASS, method, success.status_code, '',
                    Assertion.PROTO_HTTP_SUPPORTED_METHODS,
                    '%s supported' % method)
        else:
            sut.log(Result.FAIL, method, '', '',
                    Assertion.PROTO_HTTP_SUPPORTED_METHODS,
                    'No %s requests had a successful response' % method)
for method in ['GET', 'POST', 'PATCH', 'DELETE']:
responses = sut.get_responses_by_method(method)
if (not responses):
sut.log(Result.NOT_TESTED, method, , , Assertion.PROTO_HTTP_SUPPORTED_METHODS, ('%s not tested' % method))
continue
passed = False
for (uri, response) in responses.items():
if (200 <= response.status_code < 300):
sut.log(Result.PASS, method, response.status_code, , Assertion.PROTO_HTTP_SUPPORTED_METHODS, ('%s supported' % method))
passed = True
break
if (not passed):
sut.log(Result.FAIL, method, , , Assertion.PROTO_HTTP_SUPPORTED_METHODS, ('No %s requests had a successful response' % method)) | def test_http_supported_methods(sut: SystemUnderTest):
for method in ['GET', 'POST', 'PATCH', 'DELETE']:
responses = sut.get_responses_by_method(method)
if (not responses):
sut.log(Result.NOT_TESTED, method, , , Assertion.PROTO_HTTP_SUPPORTED_METHODS, ('%s not tested' % method))
continue
passed = False
for (uri, response) in responses.items():
if (200 <= response.status_code < 300):
sut.log(Result.PASS, method, response.status_code, , Assertion.PROTO_HTTP_SUPPORTED_METHODS, ('%s supported' % method))
passed = True
break
if (not passed):
sut.log(Result.FAIL, method, , , Assertion.PROTO_HTTP_SUPPORTED_METHODS, ('No %s requests had a successful response' % method))<|docstring|>Perform tests on the supported HTTP methods.<|endoftext|> |
def test_http_unsupported_methods(sut: SystemUnderTest):
    """Perform tests on unsupported HTTP methods.

    DELETE on the service root is not allowed, so the service is expected
    to reject it with 405 (Method Not Allowed) or 501 (Not Implemented).
    """
    uri = '/redfish/v1/'
    response = sut.get_response('DELETE', uri)
    if response is None:
        # Bug fix: the message previously said "TRACE method request",
        # but the request issued above is DELETE.
        sut.log(Result.NOT_TESTED, 'DELETE', '', uri,
                Assertion.PROTO_HTTP_UNSUPPORTED_METHODS,
                'No response found for DELETE method request')
    elif (response.status_code == requests.codes.METHOD_NOT_ALLOWED
          or response.status_code == requests.codes.NOT_IMPLEMENTED):
        sut.log(Result.PASS, 'DELETE', response.status_code, uri,
                Assertion.PROTO_HTTP_UNSUPPORTED_METHODS, 'Test passed')
    else:
        sut.log(Result.FAIL, 'DELETE', response.status_code, uri,
                Assertion.PROTO_HTTP_UNSUPPORTED_METHODS,
                'DELETE method returned status %s; expected status %s'
                % (response.status_code, requests.codes.METHOD_NOT_ALLOWED))
uri = '/redfish/v1/'
response = sut.get_response('DELETE', uri)
if (response is None):
sut.log(Result.NOT_TESTED, 'DELETE', , uri, Assertion.PROTO_HTTP_UNSUPPORTED_METHODS, 'No response found for TRACE method request')
elif ((response.status_code == requests.codes.METHOD_NOT_ALLOWED) or (response.status_code == requests.codes.NOT_IMPLEMENTED)):
sut.log(Result.PASS, 'DELETE', response.status_code, uri, Assertion.PROTO_HTTP_UNSUPPORTED_METHODS, 'Test passed')
else:
sut.log(Result.FAIL, 'DELETE', response.status_code, uri, Assertion.PROTO_HTTP_UNSUPPORTED_METHODS, ('DELETE method returned status %s; expected status %s' % (response.status_code, requests.codes.METHOD_NOT_ALLOWED))) | def test_http_unsupported_methods(sut: SystemUnderTest):
uri = '/redfish/v1/'
response = sut.get_response('DELETE', uri)
if (response is None):
sut.log(Result.NOT_TESTED, 'DELETE', , uri, Assertion.PROTO_HTTP_UNSUPPORTED_METHODS, 'No response found for TRACE method request')
elif ((response.status_code == requests.codes.METHOD_NOT_ALLOWED) or (response.status_code == requests.codes.NOT_IMPLEMENTED)):
sut.log(Result.PASS, 'DELETE', response.status_code, uri, Assertion.PROTO_HTTP_UNSUPPORTED_METHODS, 'Test passed')
else:
sut.log(Result.FAIL, 'DELETE', response.status_code, uri, Assertion.PROTO_HTTP_UNSUPPORTED_METHODS, ('DELETE method returned status %s; expected status %s' % (response.status_code, requests.codes.METHOD_NOT_ALLOWED)))<|docstring|>Perform tests on unsupported HTTP methods.<|endoftext|> |
def test_media_types(sut: SystemUnderTest, uri, response):
    """Perform tests of the supported media types."""
    method = response.request.method
    status = response.status_code
    # A successful POST may legitimately return 201 with an empty body;
    # in that case there is nothing to inspect.
    empty_created = (status == requests.codes.CREATED
                     and method == 'POST'
                     and len(response.text) == 0)

    # JSON checks on successful responses ($metadata is XML; HEAD has no body).
    if (uri != '/redfish/v1/$metadata' and method != 'HEAD'
            and status in (requests.codes.OK, requests.codes.CREATED)):
        if empty_created:
            sut.log(Result.NOT_TESTED, method, status, uri,
                    Assertion.PROTO_JSON_ALL_RESOURCES, 'No response body')
            sut.log(Result.NOT_TESTED, method, status, uri,
                    Assertion.PROTO_JSON_RFC, 'No response body')
        else:
            result, msg = response_content_type_is_json(uri, response)
            sut.log(result, method, status, uri,
                    Assertion.PROTO_JSON_ALL_RESOURCES, msg)
            result, msg = response_is_json(uri, response)
            sut.log(result, method, status, uri,
                    Assertion.PROTO_JSON_RFC, msg)

    # JSON checks on responses to requests that carried a body.
    if response.request.body and status in (
            requests.codes.OK, requests.codes.CREATED,
            requests.codes.NOT_ACCEPTABLE,
            requests.codes.UNSUPPORTED_MEDIA_TYPE):
        if empty_created:
            sut.log(Result.NOT_TESTED, method, status, uri,
                    Assertion.PROTO_JSON_ACCEPTED, 'No response body')
        else:
            result, msg = response_is_json(uri, response)
            sut.log(result, method, status, uri,
                    Assertion.PROTO_JSON_ACCEPTED, msg)
if ((uri != '/redfish/v1/$metadata') and (response.request.method != 'HEAD') and (response.status_code in [requests.codes.OK, requests.codes.CREATED])):
if ((response.status_code == requests.codes.CREATED) and (response.request.method == 'POST') and (len(response.text) == 0)):
sut.log(Result.NOT_TESTED, response.request.method, response.status_code, uri, Assertion.PROTO_JSON_ALL_RESOURCES, 'No response body')
sut.log(Result.NOT_TESTED, response.request.method, response.status_code, uri, Assertion.PROTO_JSON_RFC, 'No response body')
else:
(result, msg) = response_content_type_is_json(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_JSON_ALL_RESOURCES, msg)
(result, msg) = response_is_json(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_JSON_RFC, msg)
if response.request.body:
if (response.status_code in [requests.codes.OK, requests.codes.CREATED, requests.codes.NOT_ACCEPTABLE, requests.codes.UNSUPPORTED_MEDIA_TYPE]):
if ((response.status_code == requests.codes.CREATED) and (response.request.method == 'POST') and (len(response.text) == 0)):
sut.log(Result.NOT_TESTED, response.request.method, response.status_code, uri, Assertion.PROTO_JSON_ACCEPTED, 'No response body')
else:
(result, msg) = response_is_json(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_JSON_ACCEPTED, msg) | def test_media_types(sut: SystemUnderTest, uri, response):
if ((uri != '/redfish/v1/$metadata') and (response.request.method != 'HEAD') and (response.status_code in [requests.codes.OK, requests.codes.CREATED])):
if ((response.status_code == requests.codes.CREATED) and (response.request.method == 'POST') and (len(response.text) == 0)):
sut.log(Result.NOT_TESTED, response.request.method, response.status_code, uri, Assertion.PROTO_JSON_ALL_RESOURCES, 'No response body')
sut.log(Result.NOT_TESTED, response.request.method, response.status_code, uri, Assertion.PROTO_JSON_RFC, 'No response body')
else:
(result, msg) = response_content_type_is_json(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_JSON_ALL_RESOURCES, msg)
(result, msg) = response_is_json(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_JSON_RFC, msg)
if response.request.body:
if (response.status_code in [requests.codes.OK, requests.codes.CREATED, requests.codes.NOT_ACCEPTABLE, requests.codes.UNSUPPORTED_MEDIA_TYPE]):
if ((response.status_code == requests.codes.CREATED) and (response.request.method == 'POST') and (len(response.text) == 0)):
sut.log(Result.NOT_TESTED, response.request.method, response.status_code, uri, Assertion.PROTO_JSON_ACCEPTED, 'No response body')
else:
(result, msg) = response_is_json(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_JSON_ACCEPTED, msg)<|docstring|>Perform tests of the supported media types.<|endoftext|> |
ea44ffc498cf72fe8e02d1dc8b8ff854439e8f7ffe6e200bd9ffa00f9b098167 | def test_valid_etag(sut: SystemUnderTest, uri, response):
'Perform tests for RFC7232 ETag support.'
if ((response.request.method != 'HEAD') and (response.status_code in [requests.codes.OK, requests.codes.CREATED])):
etag = response.headers.get('ETag')
source = 'header'
if ((etag is None) and (utils.get_response_media_type(response) == 'application/json')):
data = response.json()
if ('@odata.etag' in data):
source = 'property'
etag = data.get('@odata.etag')
if (etag is not None):
if check_etag_valid(etag):
sut.log(Result.PASS, response.request.method, response.status_code, uri, Assertion.PROTO_ETAG_RFC7232, 'Test passed')
else:
msg = ('Response from %s request to URI %s returned invalid ETag %s value %s' % (response.request.method, uri, source, etag))
sut.log(Result.FAIL, response.request.method, response.status_code, uri, Assertion.PROTO_ETAG_RFC7232, msg) | Perform tests for RFC7232 ETag support. | assertions/protocol_details.py | test_valid_etag | DMTF/Redfish-Protocol-Validator | 2 | python | def test_valid_etag(sut: SystemUnderTest, uri, response):
if ((response.request.method != 'HEAD') and (response.status_code in [requests.codes.OK, requests.codes.CREATED])):
etag = response.headers.get('ETag')
source = 'header'
if ((etag is None) and (utils.get_response_media_type(response) == 'application/json')):
data = response.json()
if ('@odata.etag' in data):
source = 'property'
etag = data.get('@odata.etag')
if (etag is not None):
if check_etag_valid(etag):
sut.log(Result.PASS, response.request.method, response.status_code, uri, Assertion.PROTO_ETAG_RFC7232, 'Test passed')
else:
msg = ('Response from %s request to URI %s returned invalid ETag %s value %s' % (response.request.method, uri, source, etag))
sut.log(Result.FAIL, response.request.method, response.status_code, uri, Assertion.PROTO_ETAG_RFC7232, msg) | def test_valid_etag(sut: SystemUnderTest, uri, response):
if ((response.request.method != 'HEAD') and (response.status_code in [requests.codes.OK, requests.codes.CREATED])):
etag = response.headers.get('ETag')
source = 'header'
if ((etag is None) and (utils.get_response_media_type(response) == 'application/json')):
data = response.json()
if ('@odata.etag' in data):
source = 'property'
etag = data.get('@odata.etag')
if (etag is not None):
if check_etag_valid(etag):
sut.log(Result.PASS, response.request.method, response.status_code, uri, Assertion.PROTO_ETAG_RFC7232, 'Test passed')
else:
msg = ('Response from %s request to URI %s returned invalid ETag %s value %s' % (response.request.method, uri, source, etag))
sut.log(Result.FAIL, response.request.method, response.status_code, uri, Assertion.PROTO_ETAG_RFC7232, msg)<|docstring|>Perform tests for RFC7232 ETag support.<|endoftext|> |
dfc7a7c52bcbc5c42996b0947c2657dcde7cfb28c0021c2e393c598b45301f4e | def test_account_etags(sut: SystemUnderTest):
'Perform tests for ETag support on ManagerAccount GET.'
responses = sut.get_responses_by_method('GET', resource_type=ResourceType.MANAGER_ACCOUNT)
for (uri, response) in responses.items():
if (response.status_code == requests.codes.OK):
(result, msg) = check_etag_present(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_ETAG_ON_GET_ACCOUNT, msg) | Perform tests for ETag support on ManagerAccount GET. | assertions/protocol_details.py | test_account_etags | DMTF/Redfish-Protocol-Validator | 2 | python | def test_account_etags(sut: SystemUnderTest):
responses = sut.get_responses_by_method('GET', resource_type=ResourceType.MANAGER_ACCOUNT)
for (uri, response) in responses.items():
if (response.status_code == requests.codes.OK):
(result, msg) = check_etag_present(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_ETAG_ON_GET_ACCOUNT, msg) | def test_account_etags(sut: SystemUnderTest):
responses = sut.get_responses_by_method('GET', resource_type=ResourceType.MANAGER_ACCOUNT)
for (uri, response) in responses.items():
if (response.status_code == requests.codes.OK):
(result, msg) = check_etag_present(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_ETAG_ON_GET_ACCOUNT, msg)<|docstring|>Perform tests for ETag support on ManagerAccount GET.<|endoftext|> |
496d72cf310e946bee5b3d02a22bcc6ee9d07ce65745b4e4ccaa9d1876c32675 | def test_standard_uris(sut: SystemUnderTest, uri, response):
'Perform tests on the standard, spec-defined URIs.'
if (response.request.method == 'GET'):
if (uri == '/redfish/v1/'):
(result, msg) = response_is_json(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_STD_URI_SERVICE_ROOT, msg)
if (uri == '/redfish'):
(result, msg) = check_slash_redfish(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_STD_URI_VERSION, msg)
if (uri in ['/redfish', '/redfish/v1/', '/redfish/v1/odata']):
(result, msg) = response_is_json(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_STD_URIS_SUPPORTED, msg)
if (uri == '/redfish/v1/$metadata'):
(result, msg) = response_is_xml(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_STD_URIS_SUPPORTED, msg)
if (uri == '/redfish/v1'):
(result, msg) = response_is_json(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_STD_URI_SERVICE_ROOT_REDIRECT, msg) | Perform tests on the standard, spec-defined URIs. | assertions/protocol_details.py | test_standard_uris | DMTF/Redfish-Protocol-Validator | 2 | python | def test_standard_uris(sut: SystemUnderTest, uri, response):
if (response.request.method == 'GET'):
if (uri == '/redfish/v1/'):
(result, msg) = response_is_json(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_STD_URI_SERVICE_ROOT, msg)
if (uri == '/redfish'):
(result, msg) = check_slash_redfish(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_STD_URI_VERSION, msg)
if (uri in ['/redfish', '/redfish/v1/', '/redfish/v1/odata']):
(result, msg) = response_is_json(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_STD_URIS_SUPPORTED, msg)
if (uri == '/redfish/v1/$metadata'):
(result, msg) = response_is_xml(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_STD_URIS_SUPPORTED, msg)
if (uri == '/redfish/v1'):
(result, msg) = response_is_json(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_STD_URI_SERVICE_ROOT_REDIRECT, msg) | def test_standard_uris(sut: SystemUnderTest, uri, response):
if (response.request.method == 'GET'):
if (uri == '/redfish/v1/'):
(result, msg) = response_is_json(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_STD_URI_SERVICE_ROOT, msg)
if (uri == '/redfish'):
(result, msg) = check_slash_redfish(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_STD_URI_VERSION, msg)
if (uri in ['/redfish', '/redfish/v1/', '/redfish/v1/odata']):
(result, msg) = response_is_json(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_STD_URIS_SUPPORTED, msg)
if (uri == '/redfish/v1/$metadata'):
(result, msg) = response_is_xml(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_STD_URIS_SUPPORTED, msg)
if (uri == '/redfish/v1'):
(result, msg) = response_is_json(uri, response)
sut.log(result, response.request.method, response.status_code, uri, Assertion.PROTO_STD_URI_SERVICE_ROOT_REDIRECT, msg)<|docstring|>Perform tests on the standard, spec-defined URIs.<|endoftext|> |
c559c8f1faaf5b10adc077e701ca6b05ffc3d7756e003df00d08b669facf811e | def test_protocol_details(sut: SystemUnderTest):
"Perform tests from the 'Protocol details' section of the spec."
for (uri, response) in sut.get_all_responses():
test_uri(sut, uri, response)
test_media_types(sut, uri, response)
test_valid_etag(sut, uri, response)
test_standard_uris(sut, uri, response)
test_http_supported_methods(sut)
test_http_unsupported_methods(sut)
test_account_etags(sut) | Perform tests from the 'Protocol details' section of the spec. | assertions/protocol_details.py | test_protocol_details | DMTF/Redfish-Protocol-Validator | 2 | python | def test_protocol_details(sut: SystemUnderTest):
for (uri, response) in sut.get_all_responses():
test_uri(sut, uri, response)
test_media_types(sut, uri, response)
test_valid_etag(sut, uri, response)
test_standard_uris(sut, uri, response)
test_http_supported_methods(sut)
test_http_unsupported_methods(sut)
test_account_etags(sut) | def test_protocol_details(sut: SystemUnderTest):
for (uri, response) in sut.get_all_responses():
test_uri(sut, uri, response)
test_media_types(sut, uri, response)
test_valid_etag(sut, uri, response)
test_standard_uris(sut, uri, response)
test_http_supported_methods(sut)
test_http_unsupported_methods(sut)
test_account_etags(sut)<|docstring|>Perform tests from the 'Protocol details' section of the spec.<|endoftext|> |
3cfe6f64c90757b969fcc67075f7648367da507e27dcbeb8fccdd1edb721a61e | def MakeBackup(infile, report=False):
'Make backup of files or directories by checking if file (or backup as _*) is there and rename'
if (os.path.isfile(infile) or os.path.isdir(infile)):
i = 1
while (i < 100):
if (os.path.isfile(((infile + '_') + str(i))) or os.path.isdir(((infile + '_') + str(i)))):
i = (i + 1)
else:
os.rename(infile, ((infile + '_') + str(i)))
break
if (report == True):
return infile | Make backup of files or directories by checking if file (or backup as _*) is there and rename | 3d-dart/server/system/Utils.py | MakeBackup | LiYuzhu12138/3D-DART-server | 2 | python | def MakeBackup(infile, report=False):
if (os.path.isfile(infile) or os.path.isdir(infile)):
i = 1
while (i < 100):
if (os.path.isfile(((infile + '_') + str(i))) or os.path.isdir(((infile + '_') + str(i)))):
i = (i + 1)
else:
os.rename(infile, ((infile + '_') + str(i)))
break
if (report == True):
return infile | def MakeBackup(infile, report=False):
if (os.path.isfile(infile) or os.path.isdir(infile)):
i = 1
while (i < 100):
if (os.path.isfile(((infile + '_') + str(i))) or os.path.isdir(((infile + '_') + str(i)))):
i = (i + 1)
else:
os.rename(infile, ((infile + '_') + str(i)))
break
if (report == True):
return infile<|docstring|>Make backup of files or directories by checking if file (or backup as _*) is there and rename<|endoftext|> |
e85c0952cc1f5f0170eb08a355ef7ee65f403eece679b5f8b1ecddf17e0e4bee | def FileRootRename(infile, extension, basename):
'Rename a file but preserve extension'
outfile = (basename + extension)
os.rename(infile, outfile) | Rename a file but preserve extension | 3d-dart/server/system/Utils.py | FileRootRename | LiYuzhu12138/3D-DART-server | 2 | python | def FileRootRename(infile, extension, basename):
outfile = (basename + extension)
os.rename(infile, outfile) | def FileRootRename(infile, extension, basename):
outfile = (basename + extension)
os.rename(infile, outfile)<|docstring|>Rename a file but preserve extension<|endoftext|> |
ac37f15bbe7ef7e4811a999ff1eea57045e626e1615a05d6ac520bfbd1d7efb1 | def TransformDash(n):
'Transform dashes often found in 3DNA tables to floats'
if ((n == '---') or (n == '----')):
return float(0)
else:
return float(n) | Transform dashes often found in 3DNA tables to floats | 3d-dart/server/system/Utils.py | TransformDash | LiYuzhu12138/3D-DART-server | 2 | python | def TransformDash(n):
if ((n == '---') or (n == '----')):
return float(0)
else:
return float(n) | def TransformDash(n):
if ((n == '---') or (n == '----')):
return float(0)
else:
return float(n)<|docstring|>Transform dashes often found in 3DNA tables to floats<|endoftext|> |
179fc01bbaa0d41db7f7714b7fe19c25a05bfb512d3df684d66d6a8b3d60aff4 | def GetFullPath(inputfile):
'Return the full path of the file(s)'
try:
filelist = []
for files in inputfile:
filelist.append(os.path.abspath(files))
return filelist
except:
return os.path.abspath(files) | Return the full path of the file(s) | 3d-dart/server/system/Utils.py | GetFullPath | LiYuzhu12138/3D-DART-server | 2 | python | def GetFullPath(inputfile):
try:
filelist = []
for files in inputfile:
filelist.append(os.path.abspath(files))
return filelist
except:
return os.path.abspath(files) | def GetFullPath(inputfile):
try:
filelist = []
for files in inputfile:
filelist.append(os.path.abspath(files))
return filelist
except:
return os.path.abspath(files)<|docstring|>Return the full path of the file(s)<|endoftext|> |
87e6361bf3e91b3f93bd0cd6a76292cd14d55f2fe6b893aa068db6961d6ebfb4 | def RenameFilepath(inputfile, path=None, basename=None, extension=None):
'Rename a filepath in several ways: path, basename and/or extension'
orpath = os.path.dirname(inputfile)
orbasename = os.path.splitext(os.path.basename(inputfile))[0]
orextension = os.path.splitext(os.path.basename(inputfile))[1]
newfile = ''
if (path == None):
newfile = (orpath + '/')
else:
newfile = (path + '/')
if (basename == None):
newfile = (newfile + orbasename)
else:
newfile = (newfile + basename)
if (extension == None):
newfile = (newfile + orextension)
else:
newfile = (newfile + extension)
return newfile | Rename a filepath in several ways: path, basename and/or extension | 3d-dart/server/system/Utils.py | RenameFilepath | LiYuzhu12138/3D-DART-server | 2 | python | def RenameFilepath(inputfile, path=None, basename=None, extension=None):
orpath = os.path.dirname(inputfile)
orbasename = os.path.splitext(os.path.basename(inputfile))[0]
orextension = os.path.splitext(os.path.basename(inputfile))[1]
newfile =
if (path == None):
newfile = (orpath + '/')
else:
newfile = (path + '/')
if (basename == None):
newfile = (newfile + orbasename)
else:
newfile = (newfile + basename)
if (extension == None):
newfile = (newfile + orextension)
else:
newfile = (newfile + extension)
return newfile | def RenameFilepath(inputfile, path=None, basename=None, extension=None):
orpath = os.path.dirname(inputfile)
orbasename = os.path.splitext(os.path.basename(inputfile))[0]
orextension = os.path.splitext(os.path.basename(inputfile))[1]
newfile =
if (path == None):
newfile = (orpath + '/')
else:
newfile = (path + '/')
if (basename == None):
newfile = (newfile + orbasename)
else:
newfile = (newfile + basename)
if (extension == None):
newfile = (newfile + orextension)
else:
newfile = (newfile + extension)
return newfile<|docstring|>Rename a filepath in several ways: path, basename and/or extension<|endoftext|> |
d81534d4532d671b76a2fee51c32439c29de8600d0ede300610cc9d89abb7c49 | def blockname(ch):
"Return the Unicode block name for ch, or None if ch has no block.\n\n >>> blockname(u'a')\n 'Basic Latin'\n >>> blockname(unichr(0x0b80))\n 'Tamil'\n >>> block(unichr(2048))\n None\n "
assert (isinstance(ch, text_type) and (len(ch) == 1)), repr(ch)
cp = ord(ch)
i = (bisect_right(_starts, cp) - 1)
end = _ends[i]
if (cp > end):
return None
return _names[i] | Return the Unicode block name for ch, or None if ch has no block.
>>> blockname(u'a')
'Basic Latin'
>>> blockname(unichr(0x0b80))
'Tamil'
>>> block(unichr(2048))
None | oscar/lib/python3.6/site-packages/whoosh/support/unicode.py | blockname | Razvancbalaci/djangooscar | 319 | python | def blockname(ch):
"Return the Unicode block name for ch, or None if ch has no block.\n\n >>> blockname(u'a')\n 'Basic Latin'\n >>> blockname(unichr(0x0b80))\n 'Tamil'\n >>> block(unichr(2048))\n None\n "
assert (isinstance(ch, text_type) and (len(ch) == 1)), repr(ch)
cp = ord(ch)
i = (bisect_right(_starts, cp) - 1)
end = _ends[i]
if (cp > end):
return None
return _names[i] | def blockname(ch):
"Return the Unicode block name for ch, or None if ch has no block.\n\n >>> blockname(u'a')\n 'Basic Latin'\n >>> blockname(unichr(0x0b80))\n 'Tamil'\n >>> block(unichr(2048))\n None\n "
assert (isinstance(ch, text_type) and (len(ch) == 1)), repr(ch)
cp = ord(ch)
i = (bisect_right(_starts, cp) - 1)
end = _ends[i]
if (cp > end):
return None
return _names[i]<|docstring|>Return the Unicode block name for ch, or None if ch has no block.
>>> blockname(u'a')
'Basic Latin'
>>> blockname(unichr(0x0b80))
'Tamil'
>>> block(unichr(2048))
None<|endoftext|> |
2b60f68791222b2f1ddfb88e8c05cb10e3da9647d91cb7224c1bfed35105d352 | def blocknum(ch):
"Returns the unicode block number for ch, or None if ch has no block.\n\n >>> blocknum(u'a')\n 0\n >>> blocknum(unichr(0x0b80))\n 22\n >>> blocknum(unichr(2048))\n None\n "
cp = ord(ch)
i = (bisect_right(_starts, cp) - 1)
end = _ends[i]
if (cp > end):
return None
return i | Returns the unicode block number for ch, or None if ch has no block.
>>> blocknum(u'a')
0
>>> blocknum(unichr(0x0b80))
22
>>> blocknum(unichr(2048))
None | oscar/lib/python3.6/site-packages/whoosh/support/unicode.py | blocknum | Razvancbalaci/djangooscar | 319 | python | def blocknum(ch):
"Returns the unicode block number for ch, or None if ch has no block.\n\n >>> blocknum(u'a')\n 0\n >>> blocknum(unichr(0x0b80))\n 22\n >>> blocknum(unichr(2048))\n None\n "
cp = ord(ch)
i = (bisect_right(_starts, cp) - 1)
end = _ends[i]
if (cp > end):
return None
return i | def blocknum(ch):
"Returns the unicode block number for ch, or None if ch has no block.\n\n >>> blocknum(u'a')\n 0\n >>> blocknum(unichr(0x0b80))\n 22\n >>> blocknum(unichr(2048))\n None\n "
cp = ord(ch)
i = (bisect_right(_starts, cp) - 1)
end = _ends[i]
if (cp > end):
return None
return i<|docstring|>Returns the unicode block number for ch, or None if ch has no block.
>>> blocknum(u'a')
0
>>> blocknum(unichr(0x0b80))
22
>>> blocknum(unichr(2048))
None<|endoftext|> |
2fe80b9c0527bd9f888d897482b19dd764441e367a0ab6206f495f1378cdafb9 | def trivial_basis_extension(basis, U, copy_basis=True, copy_U=True):
'Trivially extend basis by simply appending the new vectors.\n\n We check if the new vectors are already contained in the basis, but we do\n not check for linear independence.\n\n Parameters\n ----------\n basis\n |VectorArray| containing the basis to extend.\n U\n |VectorArray| containing the new basis vectors.\n copy_basis\n If `copy_basis` is `False`, the old basis is extended in-place.\n copy_U\n If `copy_U` is `False`, the new basis vectors are removed from `U`.\n\n Returns\n -------\n new_basis\n The extended basis.\n extension_data\n Dict containing the following fields:\n\n :hierarchic: `True` if `new_basis` contains `basis` as its first vectors.\n\n Raises\n ------\n ExtensionError\n Is raised if all vectors in `U` are already contained in the basis.\n '
if (basis is None):
basis = U.empty(reserve=len(U))
old_basis_length = len(basis)
remove = set()
for i in xrange(len(U)):
if np.any(U.almost_equal(basis, ind=i)):
remove.add(i)
new_basis = (basis.copy() if copy_basis else basis)
new_basis.append(U, o_ind=[i for i in range(len(U)) if (i not in remove)], remove_from_other=(not copy_U))
if (len(new_basis) == old_basis_length):
raise ExtensionError
return (new_basis, {'hierarchic': True}) | Trivially extend basis by simply appending the new vectors.
We check if the new vectors are already contained in the basis, but we do
not check for linear independence.
Parameters
----------
basis
|VectorArray| containing the basis to extend.
U
|VectorArray| containing the new basis vectors.
copy_basis
If `copy_basis` is `False`, the old basis is extended in-place.
copy_U
If `copy_U` is `False`, the new basis vectors are removed from `U`.
Returns
-------
new_basis
The extended basis.
extension_data
Dict containing the following fields:
:hierarchic: `True` if `new_basis` contains `basis` as its first vectors.
Raises
------
ExtensionError
Is raised if all vectors in `U` are already contained in the basis. | src/pymor/algorithms/basisextension.py | trivial_basis_extension | andreasbuhr/pymor | 0 | python | def trivial_basis_extension(basis, U, copy_basis=True, copy_U=True):
'Trivially extend basis by simply appending the new vectors.\n\n We check if the new vectors are already contained in the basis, but we do\n not check for linear independence.\n\n Parameters\n ----------\n basis\n |VectorArray| containing the basis to extend.\n U\n |VectorArray| containing the new basis vectors.\n copy_basis\n If `copy_basis` is `False`, the old basis is extended in-place.\n copy_U\n If `copy_U` is `False`, the new basis vectors are removed from `U`.\n\n Returns\n -------\n new_basis\n The extended basis.\n extension_data\n Dict containing the following fields:\n\n :hierarchic: `True` if `new_basis` contains `basis` as its first vectors.\n\n Raises\n ------\n ExtensionError\n Is raised if all vectors in `U` are already contained in the basis.\n '
if (basis is None):
basis = U.empty(reserve=len(U))
old_basis_length = len(basis)
remove = set()
for i in xrange(len(U)):
if np.any(U.almost_equal(basis, ind=i)):
remove.add(i)
new_basis = (basis.copy() if copy_basis else basis)
new_basis.append(U, o_ind=[i for i in range(len(U)) if (i not in remove)], remove_from_other=(not copy_U))
if (len(new_basis) == old_basis_length):
raise ExtensionError
return (new_basis, {'hierarchic': True}) | def trivial_basis_extension(basis, U, copy_basis=True, copy_U=True):
'Trivially extend basis by simply appending the new vectors.\n\n We check if the new vectors are already contained in the basis, but we do\n not check for linear independence.\n\n Parameters\n ----------\n basis\n |VectorArray| containing the basis to extend.\n U\n |VectorArray| containing the new basis vectors.\n copy_basis\n If `copy_basis` is `False`, the old basis is extended in-place.\n copy_U\n If `copy_U` is `False`, the new basis vectors are removed from `U`.\n\n Returns\n -------\n new_basis\n The extended basis.\n extension_data\n Dict containing the following fields:\n\n :hierarchic: `True` if `new_basis` contains `basis` as its first vectors.\n\n Raises\n ------\n ExtensionError\n Is raised if all vectors in `U` are already contained in the basis.\n '
if (basis is None):
basis = U.empty(reserve=len(U))
old_basis_length = len(basis)
remove = set()
for i in xrange(len(U)):
if np.any(U.almost_equal(basis, ind=i)):
remove.add(i)
new_basis = (basis.copy() if copy_basis else basis)
new_basis.append(U, o_ind=[i for i in range(len(U)) if (i not in remove)], remove_from_other=(not copy_U))
if (len(new_basis) == old_basis_length):
raise ExtensionError
return (new_basis, {'hierarchic': True})<|docstring|>Trivially extend basis by simply appending the new vectors.
We check if the new vectors are already contained in the basis, but we do
not check for linear independence.
Parameters
----------
basis
|VectorArray| containing the basis to extend.
U
|VectorArray| containing the new basis vectors.
copy_basis
If `copy_basis` is `False`, the old basis is extended in-place.
copy_U
If `copy_U` is `False`, the new basis vectors are removed from `U`.
Returns
-------
new_basis
The extended basis.
extension_data
Dict containing the following fields:
:hierarchic: `True` if `new_basis` contains `basis` as its first vectors.
Raises
------
ExtensionError
Is raised if all vectors in `U` are already contained in the basis.<|endoftext|> |
f356fc149def33b1784d029e9bddc6da338570ff5e2d9eb41f6e23397cbacc3f | def gram_schmidt_basis_extension(basis, U, product=None, copy_basis=True, copy_U=True):
'Extend basis using Gram-Schmidt orthonormalization.\n\n Parameters\n ----------\n basis\n |VectorArray| containing the basis to extend.\n U\n |VectorArray| containing the new basis vectors.\n product\n The scalar product w.r.t. which to orthonormalize; if `None`, the Euclidean\n product is used.\n copy_basis\n If `copy_basis` is `False`, the old basis is extended in-place.\n copy_U\n If `copy_U` is `False`, the new basis vectors are removed from `U`.\n\n Returns\n -------\n new_basis\n The extended basis.\n extension_data\n Dict containing the following fields:\n\n :hierarchic: `True` if `new_basis` contains `basis` as its first vectors.\n\n Raises\n ------\n ExtensionError\n Gram-Schmidt orthonormalization fails. This is the case when no\n vector in `U` is linearly independent from the basis.\n '
if (basis is None):
basis = U.empty(reserve=len(U))
basis_length = len(basis)
new_basis = (basis.copy() if copy_basis else basis)
new_basis.append(U, remove_from_other=(not copy_U))
gram_schmidt(new_basis, offset=basis_length, product=product, copy=False)
if (len(new_basis) <= basis_length):
raise ExtensionError
return (new_basis, {'hierarchic': True}) | Extend basis using Gram-Schmidt orthonormalization.
Parameters
----------
basis
|VectorArray| containing the basis to extend.
U
|VectorArray| containing the new basis vectors.
product
The scalar product w.r.t. which to orthonormalize; if `None`, the Euclidean
product is used.
copy_basis
If `copy_basis` is `False`, the old basis is extended in-place.
copy_U
If `copy_U` is `False`, the new basis vectors are removed from `U`.
Returns
-------
new_basis
The extended basis.
extension_data
Dict containing the following fields:
:hierarchic: `True` if `new_basis` contains `basis` as its first vectors.
Raises
------
ExtensionError
Gram-Schmidt orthonormalization fails. This is the case when no
vector in `U` is linearly independent from the basis. | src/pymor/algorithms/basisextension.py | gram_schmidt_basis_extension | andreasbuhr/pymor | 0 | python | def gram_schmidt_basis_extension(basis, U, product=None, copy_basis=True, copy_U=True):
'Extend basis using Gram-Schmidt orthonormalization.\n\n Parameters\n ----------\n basis\n |VectorArray| containing the basis to extend.\n U\n |VectorArray| containing the new basis vectors.\n product\n The scalar product w.r.t. which to orthonormalize; if `None`, the Euclidean\n product is used.\n copy_basis\n If `copy_basis` is `False`, the old basis is extended in-place.\n copy_U\n If `copy_U` is `False`, the new basis vectors are removed from `U`.\n\n Returns\n -------\n new_basis\n The extended basis.\n extension_data\n Dict containing the following fields:\n\n :hierarchic: `True` if `new_basis` contains `basis` as its first vectors.\n\n Raises\n ------\n ExtensionError\n Gram-Schmidt orthonormalization fails. This is the case when no\n vector in `U` is linearly independent from the basis.\n '
if (basis is None):
basis = U.empty(reserve=len(U))
basis_length = len(basis)
new_basis = (basis.copy() if copy_basis else basis)
new_basis.append(U, remove_from_other=(not copy_U))
gram_schmidt(new_basis, offset=basis_length, product=product, copy=False)
if (len(new_basis) <= basis_length):
raise ExtensionError
return (new_basis, {'hierarchic': True}) | def gram_schmidt_basis_extension(basis, U, product=None, copy_basis=True, copy_U=True):
'Extend basis using Gram-Schmidt orthonormalization.\n\n Parameters\n ----------\n basis\n |VectorArray| containing the basis to extend.\n U\n |VectorArray| containing the new basis vectors.\n product\n The scalar product w.r.t. which to orthonormalize; if `None`, the Euclidean\n product is used.\n copy_basis\n If `copy_basis` is `False`, the old basis is extended in-place.\n copy_U\n If `copy_U` is `False`, the new basis vectors are removed from `U`.\n\n Returns\n -------\n new_basis\n The extended basis.\n extension_data\n Dict containing the following fields:\n\n :hierarchic: `True` if `new_basis` contains `basis` as its first vectors.\n\n Raises\n ------\n ExtensionError\n Gram-Schmidt orthonormalization fails. This is the case when no\n vector in `U` is linearly independent from the basis.\n '
if (basis is None):
basis = U.empty(reserve=len(U))
basis_length = len(basis)
new_basis = (basis.copy() if copy_basis else basis)
new_basis.append(U, remove_from_other=(not copy_U))
gram_schmidt(new_basis, offset=basis_length, product=product, copy=False)
if (len(new_basis) <= basis_length):
raise ExtensionError
return (new_basis, {'hierarchic': True})<|docstring|>Extend basis using Gram-Schmidt orthonormalization.
Parameters
----------
basis
|VectorArray| containing the basis to extend.
U
|VectorArray| containing the new basis vectors.
product
The scalar product w.r.t. which to orthonormalize; if `None`, the Euclidean
product is used.
copy_basis
If `copy_basis` is `False`, the old basis is extended in-place.
copy_U
If `copy_U` is `False`, the new basis vectors are removed from `U`.
Returns
-------
new_basis
The extended basis.
extension_data
Dict containing the following fields:
:hierarchic: `True` if `new_basis` contains `basis` as its first vectors.
Raises
------
ExtensionError
Gram-Schmidt orthonormalization fails. This is the case when no
vector in `U` is linearly independent from the basis.<|endoftext|> |
c226dbdaad82305fb712987f0ada2897bdf3f4c2f36c03fa8b213d6a46262713 | def pod_basis_extension(basis, U, count=1, copy_basis=True, product=None, orthonormalize=True):
'Extend basis with the first `count` POD modes of the projection of `U` onto the\n orthogonal complement of the basis.\n\n Note that the provided basis is assumed to be orthonormal w.r.t. the provided\n scalar product!\n\n Parameters\n ----------\n basis\n |VectorArray| containing the basis to extend. The basis is expected to be\n orthonormal w.r.t. `product`.\n U\n |VectorArray| containing the vectors to which the POD is applied.\n count\n Number of POD modes that are to be appended to the basis.\n product\n The scalar product w.r.t. which to orthonormalize; if `None`, the Euclidean\n product is used.\n copy_basis\n If `copy_basis` is `False`, the old basis is extended in-place.\n orthonormalize\n If `True`, re-orthonormalize the new basis vectors obtained by the POD\n in order to improve numerical accuracy.\n\n Returns\n -------\n new_basis\n The extended basis.\n extension_data\n Dict containing the following fields:\n\n :hierarchic: `True` if `new_basis` contains `basis` as its first vectors.\n\n Raises\n ------\n ExtensionError\n POD produces no new vectors. This is the case when no vector in `U`\n is linearly independent from the basis.\n '
if (basis is None):
return (pod(U, modes=count, product=product)[0], {'hierarchic': True})
basis_length = len(basis)
new_basis = (basis.copy() if copy_basis else basis)
if (product is None):
U_proj_err = (U - basis.lincomb(U.dot(basis)))
else:
U_proj_err = (U - basis.lincomb(product.apply2(U, basis)))
new_basis.append(pod(U_proj_err, modes=count, product=product, orthonormalize=False)[0])
if orthonormalize:
gram_schmidt(new_basis, offset=len(basis), product=product, copy=False)
if (len(new_basis) <= basis_length):
raise ExtensionError
return (new_basis, {'hierarchic': True}) | Extend basis with the first `count` POD modes of the projection of `U` onto the
orthogonal complement of the basis.
Note that the provided basis is assumed to be orthonormal w.r.t. the provided
scalar product!
Parameters
----------
basis
|VectorArray| containing the basis to extend. The basis is expected to be
orthonormal w.r.t. `product`.
U
|VectorArray| containing the vectors to which the POD is applied.
count
Number of POD modes that are to be appended to the basis.
product
The scalar product w.r.t. which to orthonormalize; if `None`, the Euclidean
product is used.
copy_basis
If `copy_basis` is `False`, the old basis is extended in-place.
orthonormalize
If `True`, re-orthonormalize the new basis vectors obtained by the POD
in order to improve numerical accuracy.
Returns
-------
new_basis
The extended basis.
extension_data
Dict containing the following fields:
:hierarchic: `True` if `new_basis` contains `basis` as its first vectors.
Raises
------
ExtensionError
POD produces no new vectors. This is the case when no vector in `U`
is linearly independent from the basis. | src/pymor/algorithms/basisextension.py | pod_basis_extension | andreasbuhr/pymor | 0 | python | def pod_basis_extension(basis, U, count=1, copy_basis=True, product=None, orthonormalize=True):
'Extend basis with the first `count` POD modes of the projection of `U` onto the\n orthogonal complement of the basis.\n\n Note that the provided basis is assumed to be orthonormal w.r.t. the provided\n scalar product!\n\n Parameters\n ----------\n basis\n |VectorArray| containing the basis to extend. The basis is expected to be\n orthonormal w.r.t. `product`.\n U\n |VectorArray| containing the vectors to which the POD is applied.\n count\n Number of POD modes that are to be appended to the basis.\n product\n The scalar product w.r.t. which to orthonormalize; if `None`, the Euclidean\n product is used.\n copy_basis\n If `copy_basis` is `False`, the old basis is extended in-place.\n orthonormalize\n If `True`, re-orthonormalize the new basis vectors obtained by the POD\n in order to improve numerical accuracy.\n\n Returns\n -------\n new_basis\n The extended basis.\n extension_data\n Dict containing the following fields:\n\n :hierarchic: `True` if `new_basis` contains `basis` as its first vectors.\n\n Raises\n ------\n ExtensionError\n POD produces no new vectors. This is the case when no vector in `U`\n is linearly independent from the basis.\n '
if (basis is None):
return (pod(U, modes=count, product=product)[0], {'hierarchic': True})
basis_length = len(basis)
new_basis = (basis.copy() if copy_basis else basis)
if (product is None):
U_proj_err = (U - basis.lincomb(U.dot(basis)))
else:
U_proj_err = (U - basis.lincomb(product.apply2(U, basis)))
new_basis.append(pod(U_proj_err, modes=count, product=product, orthonormalize=False)[0])
if orthonormalize:
gram_schmidt(new_basis, offset=len(basis), product=product, copy=False)
if (len(new_basis) <= basis_length):
raise ExtensionError
return (new_basis, {'hierarchic': True}) | def pod_basis_extension(basis, U, count=1, copy_basis=True, product=None, orthonormalize=True):
'Extend basis with the first `count` POD modes of the projection of `U` onto the\n orthogonal complement of the basis.\n\n Note that the provided basis is assumed to be orthonormal w.r.t. the provided\n scalar product!\n\n Parameters\n ----------\n basis\n |VectorArray| containing the basis to extend. The basis is expected to be\n orthonormal w.r.t. `product`.\n U\n |VectorArray| containing the vectors to which the POD is applied.\n count\n Number of POD modes that are to be appended to the basis.\n product\n The scalar product w.r.t. which to orthonormalize; if `None`, the Euclidean\n product is used.\n copy_basis\n If `copy_basis` is `False`, the old basis is extended in-place.\n orthonormalize\n If `True`, re-orthonormalize the new basis vectors obtained by the POD\n in order to improve numerical accuracy.\n\n Returns\n -------\n new_basis\n The extended basis.\n extension_data\n Dict containing the following fields:\n\n :hierarchic: `True` if `new_basis` contains `basis` as its first vectors.\n\n Raises\n ------\n ExtensionError\n POD produces no new vectors. This is the case when no vector in `U`\n is linearly independent from the basis.\n '
if (basis is None):
return (pod(U, modes=count, product=product)[0], {'hierarchic': True})
basis_length = len(basis)
new_basis = (basis.copy() if copy_basis else basis)
if (product is None):
U_proj_err = (U - basis.lincomb(U.dot(basis)))
else:
U_proj_err = (U - basis.lincomb(product.apply2(U, basis)))
new_basis.append(pod(U_proj_err, modes=count, product=product, orthonormalize=False)[0])
if orthonormalize:
gram_schmidt(new_basis, offset=len(basis), product=product, copy=False)
if (len(new_basis) <= basis_length):
raise ExtensionError
return (new_basis, {'hierarchic': True})<|docstring|>Extend basis with the first `count` POD modes of the projection of `U` onto the
orthogonal complement of the basis.
Note that the provided basis is assumed to be orthonormal w.r.t. the provided
scalar product!
Parameters
----------
basis
|VectorArray| containing the basis to extend. The basis is expected to be
orthonormal w.r.t. `product`.
U
|VectorArray| containing the vectors to which the POD is applied.
count
Number of POD modes that are to be appended to the basis.
product
The scalar product w.r.t. which to orthonormalize; if `None`, the Euclidean
product is used.
copy_basis
If `copy_basis` is `False`, the old basis is extended in-place.
orthonormalize
If `True`, re-orthonormalize the new basis vectors obtained by the POD
in order to improve numerical accuracy.
Returns
-------
new_basis
The extended basis.
extension_data
Dict containing the following fields:
:hierarchic: `True` if `new_basis` contains `basis` as its first vectors.
Raises
------
ExtensionError
POD produces no new vectors. This is the case when no vector in `U`
is linearly independent from the basis.<|endoftext|> |
3c8e423d3568f295e57f5dc588c4329933ad8db0f5c3f950671a73d63758d181 | def _get_data(self, url, config, send_sc=True):
' Hit a given URL and return the parsed json\n '
try:
resp = requests.get(url, timeout=config.timeout, headers=headers(self.agentConfig))
resp.raise_for_status()
except Exception as e:
if send_sc:
self.service_check(self.SERVICE_CHECK_CONNECT_NAME, AgentCheck.CRITICAL, message='Error {0} when hitting {1}'.format(e, url), tags=config.service_check_tags)
raise
return resp.json() | Hit a given URL and return the parsed json | checks.d/phabricator.py | _get_data | cclauss/datadog-checks | 100 | python | def _get_data(self, url, config, send_sc=True):
' \n '
try:
resp = requests.get(url, timeout=config.timeout, headers=headers(self.agentConfig))
resp.raise_for_status()
except Exception as e:
if send_sc:
self.service_check(self.SERVICE_CHECK_CONNECT_NAME, AgentCheck.CRITICAL, message='Error {0} when hitting {1}'.format(e, url), tags=config.service_check_tags)
raise
return resp.json() | def _get_data(self, url, config, send_sc=True):
' \n '
try:
resp = requests.get(url, timeout=config.timeout, headers=headers(self.agentConfig))
resp.raise_for_status()
except Exception as e:
if send_sc:
self.service_check(self.SERVICE_CHECK_CONNECT_NAME, AgentCheck.CRITICAL, message='Error {0} when hitting {1}'.format(e, url), tags=config.service_check_tags)
raise
return resp.json()<|docstring|>Hit a given URL and return the parsed json<|endoftext|> |
ba79c7fc62dbec13c2205106b25169656d4e904ae09aa7e86f04e65adb422d23 | def has_object_permission(self, request, view, obj):
'Check user is trying to edit their own profile'
if (request.method in permissions.SAFE_METHODS):
return True
return (obj.id == request.user.id) | Check user is trying to edit their own profile | profiles_api/permissions.py | has_object_permission | EmanAly16/profiles-rest-api | 0 | python | def has_object_permission(self, request, view, obj):
if (request.method in permissions.SAFE_METHODS):
return True
return (obj.id == request.user.id) | def has_object_permission(self, request, view, obj):
if (request.method in permissions.SAFE_METHODS):
return True
return (obj.id == request.user.id)<|docstring|>Check user is trying to edit their own profile<|endoftext|> |
0bc2cc5603180a52df9d82e07d6ab69916821222d0d03e53dc7421b2db627cc8 | def __init__(self, symbol, api_key, time_range='10y', interval='1d', region='US'):
'\n Initialize attributes.\n '
self.symbol = symbol.upper()
self._json_resp_chart = yhf.get_chart(interval=interval, symbol=self.symbol, time_range=time_range, region=region, include_pre_post='false', use_yahoo_id='true', include_adj_close='true', events='div,split', api_key=api_key)
if (('events' in self._json_resp_chart['chart']['result'][0]) and ('dividends' in self._json_resp_chart['chart']['result'][0]['events'])):
self._div_data = list(self._json_resp_chart['chart']['result'][0]['events']['dividends'].values())
self.div_df = pd.DataFrame.from_records(self._div_data).rename(columns={'amount': 'div_amount'})
self.div_df['date'] = pd.to_datetime(self.div_df['date'], unit='s').dt.date
self.div_df['div_growth'] = self.div_df['div_amount'].diff()
self.div_df = self.div_df[['date', 'div_amount', 'div_growth']]
self.div_df = self.div_df.fillna(0)
self._hist_data = self._json_resp_chart['chart']['result'][0]['indicators']['quote'][0]
self.hist_df = pd.DataFrame.from_dict(self._hist_data)
self.hist_df['date'] = self._json_resp_chart['chart']['result'][0]['timestamp']
self.hist_df['date'] = pd.to_datetime(self.hist_df['date'], unit='s').dt.date
self.hist_df = self.hist_df[['date', 'volume', 'open', 'low', 'high', 'close']]
self.candlestick = go.Figure(data=[go.Candlestick(x=self.hist_df['date'], open=self.hist_df['open'], low=self.hist_df['low'], high=self.hist_df['high'], close=self.hist_df['close'])])
self.candlestick.update_layout(title=self.symbol, yaxis_title='Stock Price') | Initialize attributes. | src/wxve/stock.py | __init__ | AG3NTZ3R0/wxve | 0 | python | def __init__(self, symbol, api_key, time_range='10y', interval='1d', region='US'):
'\n \n '
self.symbol = symbol.upper()
self._json_resp_chart = yhf.get_chart(interval=interval, symbol=self.symbol, time_range=time_range, region=region, include_pre_post='false', use_yahoo_id='true', include_adj_close='true', events='div,split', api_key=api_key)
if (('events' in self._json_resp_chart['chart']['result'][0]) and ('dividends' in self._json_resp_chart['chart']['result'][0]['events'])):
self._div_data = list(self._json_resp_chart['chart']['result'][0]['events']['dividends'].values())
self.div_df = pd.DataFrame.from_records(self._div_data).rename(columns={'amount': 'div_amount'})
self.div_df['date'] = pd.to_datetime(self.div_df['date'], unit='s').dt.date
self.div_df['div_growth'] = self.div_df['div_amount'].diff()
self.div_df = self.div_df[['date', 'div_amount', 'div_growth']]
self.div_df = self.div_df.fillna(0)
self._hist_data = self._json_resp_chart['chart']['result'][0]['indicators']['quote'][0]
self.hist_df = pd.DataFrame.from_dict(self._hist_data)
self.hist_df['date'] = self._json_resp_chart['chart']['result'][0]['timestamp']
self.hist_df['date'] = pd.to_datetime(self.hist_df['date'], unit='s').dt.date
self.hist_df = self.hist_df[['date', 'volume', 'open', 'low', 'high', 'close']]
self.candlestick = go.Figure(data=[go.Candlestick(x=self.hist_df['date'], open=self.hist_df['open'], low=self.hist_df['low'], high=self.hist_df['high'], close=self.hist_df['close'])])
self.candlestick.update_layout(title=self.symbol, yaxis_title='Stock Price') | def __init__(self, symbol, api_key, time_range='10y', interval='1d', region='US'):
'\n \n '
self.symbol = symbol.upper()
self._json_resp_chart = yhf.get_chart(interval=interval, symbol=self.symbol, time_range=time_range, region=region, include_pre_post='false', use_yahoo_id='true', include_adj_close='true', events='div,split', api_key=api_key)
if (('events' in self._json_resp_chart['chart']['result'][0]) and ('dividends' in self._json_resp_chart['chart']['result'][0]['events'])):
self._div_data = list(self._json_resp_chart['chart']['result'][0]['events']['dividends'].values())
self.div_df = pd.DataFrame.from_records(self._div_data).rename(columns={'amount': 'div_amount'})
self.div_df['date'] = pd.to_datetime(self.div_df['date'], unit='s').dt.date
self.div_df['div_growth'] = self.div_df['div_amount'].diff()
self.div_df = self.div_df[['date', 'div_amount', 'div_growth']]
self.div_df = self.div_df.fillna(0)
self._hist_data = self._json_resp_chart['chart']['result'][0]['indicators']['quote'][0]
self.hist_df = pd.DataFrame.from_dict(self._hist_data)
self.hist_df['date'] = self._json_resp_chart['chart']['result'][0]['timestamp']
self.hist_df['date'] = pd.to_datetime(self.hist_df['date'], unit='s').dt.date
self.hist_df = self.hist_df[['date', 'volume', 'open', 'low', 'high', 'close']]
self.candlestick = go.Figure(data=[go.Candlestick(x=self.hist_df['date'], open=self.hist_df['open'], low=self.hist_df['low'], high=self.hist_df['high'], close=self.hist_df['close'])])
self.candlestick.update_layout(title=self.symbol, yaxis_title='Stock Price')<|docstring|>Initialize attributes.<|endoftext|> |
3f5f7d915e877179a1a281175d43713a0fe1ef7e3158c14dc9294b2d87f8be65 | def __init__(self, dist_frame: pd.DataFrame):
'\n This class solves the traveling salesman problem with the 2-opt algorithm. As input, the distance matrix must\n be given in a pandas dataframe.\n\n :param dist_frame: dataframe\n Dataframe containing the distance matrix for all locations\n '
self.dist_frame = dist_frame
self.num = (len(dist_frame) + 1)
self.init = None
self.set_init()
self.sequence = []
self.sequence_dists = []
self.dist = 0
self.iterated_dists = []
self.iterated_sequences = []
self.best_iterated_sequences = []
self.best_iterated_dist = [] | This class solves the traveling salesman problem with the 2-opt algorithm. As input, the distance matrix must
be given in a pandas dataframe.
:param dist_frame: dataframe
Dataframe containing the distance matrix for all locations | solve_coding_challenge/utils/tsp.py | __init__ | hedrich9000/Coding_Challenge_msg | 1 | python | def __init__(self, dist_frame: pd.DataFrame):
'\n This class solves the traveling salesman problem with the 2-opt algorithm. As input, the distance matrix must\n be given in a pandas dataframe.\n\n :param dist_frame: dataframe\n Dataframe containing the distance matrix for all locations\n '
self.dist_frame = dist_frame
self.num = (len(dist_frame) + 1)
self.init = None
self.set_init()
self.sequence = []
self.sequence_dists = []
self.dist = 0
self.iterated_dists = []
self.iterated_sequences = []
self.best_iterated_sequences = []
self.best_iterated_dist = [] | def __init__(self, dist_frame: pd.DataFrame):
'\n This class solves the traveling salesman problem with the 2-opt algorithm. As input, the distance matrix must\n be given in a pandas dataframe.\n\n :param dist_frame: dataframe\n Dataframe containing the distance matrix for all locations\n '
self.dist_frame = dist_frame
self.num = (len(dist_frame) + 1)
self.init = None
self.set_init()
self.sequence = []
self.sequence_dists = []
self.dist = 0
self.iterated_dists = []
self.iterated_sequences = []
self.best_iterated_sequences = []
self.best_iterated_dist = []<|docstring|>This class solves the traveling salesman problem with the 2-opt algorithm. As input, the distance matrix must
be given in a pandas dataframe.
:param dist_frame: dataframe
Dataframe containing the distance matrix for all locations<|endoftext|> |
6af9c3d9ac9014cecf0e77b89ff5e7ef5390dd591a97ec3025b7f26e9dd8e52e | def solve_opt2(self, scorethresh: int=0.001, iterations: int=20):
'\n This function executes the 2-opt algorithm for optimizing the route with the given distance matrix.\n Here, the iterations, which always start with a new random route, can be set. the scorethresh defines the\n threshold, where the algorithm stops the optimizing process for each iteration. A common default value here is\n 0.0001. A score of 0 describes no opimization between two steps in the algorithm.\n\n :param scorethresh: float\n Lower threshold for the score of each iteration\n :param iterations: int\n Number of iteration with random initial route\n :return:\n '
self.sequence = self.init
(self.dist, sequence_dist) = self._get_fulldist(self.sequence)
logging.debug('Initial distance set: {d}'.format(d=self.dist))
logging.debug('Initial sequence set: {s}'.format(s=self.sequence))
all_sequences = []
all_dists = []
for it in range(iterations):
score = 1
iteration_sequences = []
iteration_dists = []
while (score > scorethresh):
dist_prev = self.dist
for start in range(1, (self.num - 2)):
for stop in range((start + 1), (self.num - 1)):
sequence_new = np.concatenate((self.sequence[0:start], self.sequence[stop:(((- len(self.sequence)) + start) - 1):(- 1)], self.sequence[(stop + 1):len(self.sequence)])).tolist()
(dist_new, sequence_new_dist) = self._get_fulldist(sequence_new)
self.sequence_dists.append(dist_new)
iteration_sequences.append(sequence_new)
iteration_dists.append(dist_new)
if (dist_new < self.dist):
self.sequence = sequence_new
self.dist = dist_new
logging.debug('New best distance set: {d}'.format(d=dist_new))
score = (1 - (self.dist / dist_prev))
all_sequences.append(iteration_sequences)
all_dists.append(iteration_dists)
self.iterated_dists.append(self.dist)
self.iterated_sequences.append(self.sequence)
logging.info('Score of Iteration {i}: {s}, Distance: {d}'.format(i=it, s=score, d=self.dist))
self.set_init(rand=True)
self.sequence = self.init
(self.dist, sequence_dist) = self._get_fulldist(self.sequence)
self.dist = np.min(self.iterated_dists)
try:
ind = np.where((self.iterated_dists == self.dist))[0][0]
except ValueError:
ind = np.where((self.iterated_dists == self.dist))[0]
self.sequence = self.iterated_sequences[ind]
self.best_iterated_sequences = all_sequences[ind]
self.best_iterated_dist = all_dists[ind]
logging.info('Best result: Distance: {d} from Iteration {i}'.format(i=ind, d=self.dist)) | This function executes the 2-opt algorithm for optimizing the route with the given distance matrix.
Here, the iterations, which always start with a new random route, can be set. the scorethresh defines the
threshold, where the algorithm stops the optimizing process for each iteration. A common default value here is
0.0001. A score of 0 describes no opimization between two steps in the algorithm.
:param scorethresh: float
Lower threshold for the score of each iteration
:param iterations: int
Number of iteration with random initial route
:return: | solve_coding_challenge/utils/tsp.py | solve_opt2 | hedrich9000/Coding_Challenge_msg | 1 | python | def solve_opt2(self, scorethresh: int=0.001, iterations: int=20):
'\n This function executes the 2-opt algorithm for optimizing the route with the given distance matrix.\n Here, the iterations, which always start with a new random route, can be set. the scorethresh defines the\n threshold, where the algorithm stops the optimizing process for each iteration. A common default value here is\n 0.0001. A score of 0 describes no opimization between two steps in the algorithm.\n\n :param scorethresh: float\n Lower threshold for the score of each iteration\n :param iterations: int\n Number of iteration with random initial route\n :return:\n '
self.sequence = self.init
(self.dist, sequence_dist) = self._get_fulldist(self.sequence)
logging.debug('Initial distance set: {d}'.format(d=self.dist))
logging.debug('Initial sequence set: {s}'.format(s=self.sequence))
all_sequences = []
all_dists = []
for it in range(iterations):
score = 1
iteration_sequences = []
iteration_dists = []
while (score > scorethresh):
dist_prev = self.dist
for start in range(1, (self.num - 2)):
for stop in range((start + 1), (self.num - 1)):
sequence_new = np.concatenate((self.sequence[0:start], self.sequence[stop:(((- len(self.sequence)) + start) - 1):(- 1)], self.sequence[(stop + 1):len(self.sequence)])).tolist()
(dist_new, sequence_new_dist) = self._get_fulldist(sequence_new)
self.sequence_dists.append(dist_new)
iteration_sequences.append(sequence_new)
iteration_dists.append(dist_new)
if (dist_new < self.dist):
self.sequence = sequence_new
self.dist = dist_new
logging.debug('New best distance set: {d}'.format(d=dist_new))
score = (1 - (self.dist / dist_prev))
all_sequences.append(iteration_sequences)
all_dists.append(iteration_dists)
self.iterated_dists.append(self.dist)
self.iterated_sequences.append(self.sequence)
logging.info('Score of Iteration {i}: {s}, Distance: {d}'.format(i=it, s=score, d=self.dist))
self.set_init(rand=True)
self.sequence = self.init
(self.dist, sequence_dist) = self._get_fulldist(self.sequence)
self.dist = np.min(self.iterated_dists)
try:
ind = np.where((self.iterated_dists == self.dist))[0][0]
except ValueError:
ind = np.where((self.iterated_dists == self.dist))[0]
self.sequence = self.iterated_sequences[ind]
self.best_iterated_sequences = all_sequences[ind]
self.best_iterated_dist = all_dists[ind]
logging.info('Best result: Distance: {d} from Iteration {i}'.format(i=ind, d=self.dist)) | def solve_opt2(self, scorethresh: int=0.001, iterations: int=20):
'\n This function executes the 2-opt algorithm for optimizing the route with the given distance matrix.\n Here, the iterations, which always start with a new random route, can be set. the scorethresh defines the\n threshold, where the algorithm stops the optimizing process for each iteration. A common default value here is\n 0.0001. A score of 0 describes no opimization between two steps in the algorithm.\n\n :param scorethresh: float\n Lower threshold for the score of each iteration\n :param iterations: int\n Number of iteration with random initial route\n :return:\n '
self.sequence = self.init
(self.dist, sequence_dist) = self._get_fulldist(self.sequence)
logging.debug('Initial distance set: {d}'.format(d=self.dist))
logging.debug('Initial sequence set: {s}'.format(s=self.sequence))
all_sequences = []
all_dists = []
for it in range(iterations):
score = 1
iteration_sequences = []
iteration_dists = []
while (score > scorethresh):
dist_prev = self.dist
for start in range(1, (self.num - 2)):
for stop in range((start + 1), (self.num - 1)):
sequence_new = np.concatenate((self.sequence[0:start], self.sequence[stop:(((- len(self.sequence)) + start) - 1):(- 1)], self.sequence[(stop + 1):len(self.sequence)])).tolist()
(dist_new, sequence_new_dist) = self._get_fulldist(sequence_new)
self.sequence_dists.append(dist_new)
iteration_sequences.append(sequence_new)
iteration_dists.append(dist_new)
if (dist_new < self.dist):
self.sequence = sequence_new
self.dist = dist_new
logging.debug('New best distance set: {d}'.format(d=dist_new))
score = (1 - (self.dist / dist_prev))
all_sequences.append(iteration_sequences)
all_dists.append(iteration_dists)
self.iterated_dists.append(self.dist)
self.iterated_sequences.append(self.sequence)
logging.info('Score of Iteration {i}: {s}, Distance: {d}'.format(i=it, s=score, d=self.dist))
self.set_init(rand=True)
self.sequence = self.init
(self.dist, sequence_dist) = self._get_fulldist(self.sequence)
self.dist = np.min(self.iterated_dists)
try:
ind = np.where((self.iterated_dists == self.dist))[0][0]
except ValueError:
ind = np.where((self.iterated_dists == self.dist))[0]
self.sequence = self.iterated_sequences[ind]
self.best_iterated_sequences = all_sequences[ind]
self.best_iterated_dist = all_dists[ind]
logging.info('Best result: Distance: {d} from Iteration {i}'.format(i=ind, d=self.dist))<|docstring|>This function executes the 2-opt algorithm for optimizing the route with the given distance matrix.
Here, the iterations, which always start with a new random route, can be set. the scorethresh defines the
threshold, where the algorithm stops the optimizing process for each iteration. A common default value here is
0.0001. A score of 0 describes no opimization between two steps in the algorithm.
:param scorethresh: float
Lower threshold for the score of each iteration
:param iterations: int
Number of iteration with random initial route
:return:<|endoftext|> |
e877847b07a20b8ff35089b75049464886aea3a63e63f9e30492d0cc3789bdba | def set_init(self, rand: Optional[bool]=True, init_list: Optional[list]=None):
'\n This function sets the initial route to a given order (init_list) or randomly. If nothing is set, the order will\n be set to random.\n :param rand: bool\n :param init_list: list [int]\n :return:\n '
if (rand or (init_list is None)):
init_list = list(range(1, (self.num - 1)))
random.shuffle(init_list)
elif ((init_list is not None) and (len(init_list) == self.num)):
pass
else:
raise ValueError('init_list not set or does not have a length according to the given dist_frame')
init_list = np.concatenate(([0], init_list, [0]))
self.init = init_list | This function sets the initial route to a given order (init_list) or randomly. If nothing is set, the order will
be set to random.
:param rand: bool
:param init_list: list [int]
:return: | solve_coding_challenge/utils/tsp.py | set_init | hedrich9000/Coding_Challenge_msg | 1 | python | def set_init(self, rand: Optional[bool]=True, init_list: Optional[list]=None):
'\n This function sets the initial route to a given order (init_list) or randomly. If nothing is set, the order will\n be set to random.\n :param rand: bool\n :param init_list: list [int]\n :return:\n '
if (rand or (init_list is None)):
init_list = list(range(1, (self.num - 1)))
random.shuffle(init_list)
elif ((init_list is not None) and (len(init_list) == self.num)):
pass
else:
raise ValueError('init_list not set or does not have a length according to the given dist_frame')
init_list = np.concatenate(([0], init_list, [0]))
self.init = init_list | def set_init(self, rand: Optional[bool]=True, init_list: Optional[list]=None):
'\n This function sets the initial route to a given order (init_list) or randomly. If nothing is set, the order will\n be set to random.\n :param rand: bool\n :param init_list: list [int]\n :return:\n '
if (rand or (init_list is None)):
init_list = list(range(1, (self.num - 1)))
random.shuffle(init_list)
elif ((init_list is not None) and (len(init_list) == self.num)):
pass
else:
raise ValueError('init_list not set or does not have a length according to the given dist_frame')
init_list = np.concatenate(([0], init_list, [0]))
self.init = init_list<|docstring|>This function sets the initial route to a given order (init_list) or randomly. If nothing is set, the order will
be set to random.
:param rand: bool
:param init_list: list [int]
:return:<|endoftext|> |
75e10ac83566bbbb2952d176078c53638de434a63603fba29224d2d7c82ea04e | def _get_fulldist(self, sequence: list) -> Tuple[(float, list)]:
'\n Internal function to calculate the distances over the given sequence. Returns single distance as well as total\n distance.\n :param sequence: list [int]\n List of the locations in calculated order\n :return:\n fulldist: float\n Total distance for the given sequence\n sequence_dist: list [float]\n List of all single distances for the given sequence\n '
sequence_dist = []
for i in range((len(sequence) - 1)):
sequence_dist.append(self.dist_frame[sequence[i]][sequence[(i + 1)]])
fulldist = sum(sequence_dist)
return (fulldist, sequence_dist) | Internal function to calculate the distances over the given sequence. Returns single distance as well as total
distance.
:param sequence: list [int]
List of the locations in calculated order
:return:
fulldist: float
Total distance for the given sequence
sequence_dist: list [float]
List of all single distances for the given sequence | solve_coding_challenge/utils/tsp.py | _get_fulldist | hedrich9000/Coding_Challenge_msg | 1 | python | def _get_fulldist(self, sequence: list) -> Tuple[(float, list)]:
'\n Internal function to calculate the distances over the given sequence. Returns single distance as well as total\n distance.\n :param sequence: list [int]\n List of the locations in calculated order\n :return:\n fulldist: float\n Total distance for the given sequence\n sequence_dist: list [float]\n List of all single distances for the given sequence\n '
sequence_dist = []
for i in range((len(sequence) - 1)):
sequence_dist.append(self.dist_frame[sequence[i]][sequence[(i + 1)]])
fulldist = sum(sequence_dist)
return (fulldist, sequence_dist) | def _get_fulldist(self, sequence: list) -> Tuple[(float, list)]:
'\n Internal function to calculate the distances over the given sequence. Returns single distance as well as total\n distance.\n :param sequence: list [int]\n List of the locations in calculated order\n :return:\n fulldist: float\n Total distance for the given sequence\n sequence_dist: list [float]\n List of all single distances for the given sequence\n '
sequence_dist = []
for i in range((len(sequence) - 1)):
sequence_dist.append(self.dist_frame[sequence[i]][sequence[(i + 1)]])
fulldist = sum(sequence_dist)
return (fulldist, sequence_dist)<|docstring|>Internal function to calculate the distances over the given sequence. Returns single distance as well as total
distance.
:param sequence: list [int]
List of the locations in calculated order
:return:
fulldist: float
Total distance for the given sequence
sequence_dist: list [float]
List of all single distances for the given sequence<|endoftext|> |
1fc530814b7675822fe3538ad4d887a2147075fd18354e661c90a9699fa309be | def get_result(self) -> Tuple[(list, int)]:
'\n This function returns the internal objects containing the resulting sequence (in numbers from 0-20) and total\n distance for the respective sequence.\n :return:\n sequence: list [int]\n List of the locations in calculated order\n dist: float\n Total distance for the given sequence\n '
return (self.sequence, self.dist) | This function returns the internal objects containing the resulting sequence (in numbers from 0-20) and total
distance for the respective sequence.
:return:
sequence: list [int]
List of the locations in calculated order
dist: float
Total distance for the given sequence | solve_coding_challenge/utils/tsp.py | get_result | hedrich9000/Coding_Challenge_msg | 1 | python | def get_result(self) -> Tuple[(list, int)]:
'\n This function returns the internal objects containing the resulting sequence (in numbers from 0-20) and total\n distance for the respective sequence.\n :return:\n sequence: list [int]\n List of the locations in calculated order\n dist: float\n Total distance for the given sequence\n '
return (self.sequence, self.dist) | def get_result(self) -> Tuple[(list, int)]:
'\n This function returns the internal objects containing the resulting sequence (in numbers from 0-20) and total\n distance for the respective sequence.\n :return:\n sequence: list [int]\n List of the locations in calculated order\n dist: float\n Total distance for the given sequence\n '
return (self.sequence, self.dist)<|docstring|>This function returns the internal objects containing the resulting sequence (in numbers from 0-20) and total
distance for the respective sequence.
:return:
sequence: list [int]
List of the locations in calculated order
dist: float
Total distance for the given sequence<|endoftext|> |
e88a5bde25dba85a9acd09521828eeff1d27980206747fb258514b10bb83ef31 | async def setup(self):
'Called when services have all been started.'
pass | Called when services have all been started. | src/process/game.py | setup | vincent-lg/talismud | 4 | python | async def setup(self):
pass | async def setup(self):
pass<|docstring|>Called when services have all been started.<|endoftext|> |
9eadab278ed47bc5fa9e1c430469d3bac1c781f17a18afaefee754a5715199f1 | async def cleanup(self):
'Called when the process is about to be stopped.'
pass | Called when the process is about to be stopped. | src/process/game.py | cleanup | vincent-lg/talismud | 4 | python | async def cleanup(self):
pass | async def cleanup(self):
pass<|docstring|>Called when the process is about to be stopped.<|endoftext|> |
a5482baa2877fc743ed4e23b426ffc4fcda0d5d6e5b81ed48cdec81feea9542e | def generate_file(self, path: Path) -> None:
'Generates code for use with testing framework under given path'
code = self.generate_code()
with open((path / self.filename), 'w') as file:
written = file.write(code)
if (written < len(code)):
raise FileWriteError(f'{self.filename}: write did not store all specified data') | Generates code for use with testing framework under given path | src/program/program_file.py | generate_file | Szpila123/Remote_device_testing-parser | 0 | python | def generate_file(self, path: Path) -> None:
code = self.generate_code()
with open((path / self.filename), 'w') as file:
written = file.write(code)
if (written < len(code)):
raise FileWriteError(f'{self.filename}: write did not store all specified data') | def generate_file(self, path: Path) -> None:
code = self.generate_code()
with open((path / self.filename), 'w') as file:
written = file.write(code)
if (written < len(code)):
raise FileWriteError(f'{self.filename}: write did not store all specified data')<|docstring|>Generates code for use with testing framework under given path<|endoftext|> |
8bef2a1f8ee654bf107b1328f9337f3db1c5bc6d7029b980c3c3e648cf3a3d87 | def generate_code(self) -> str:
'Returns code inserted to generated file'
code = GENERATED_FILE_IMPORTS
code += self._get_code_types()
code += 'class Code(object):\n'
code += '\tdef __init__(self):\n'
code += ('\t\t' + '\t\t'.join(self._get_code_variables().splitlines(keepends=True)))
code += '\n'
code += ('\t\t' + '\t\t'.join(self._get_code_functions().splitlines(keepends=True)))
return code | Returns code inserted to generated file | src/program/program_file.py | generate_code | Szpila123/Remote_device_testing-parser | 0 | python | def generate_code(self) -> str:
code = GENERATED_FILE_IMPORTS
code += self._get_code_types()
code += 'class Code(object):\n'
code += '\tdef __init__(self):\n'
code += ('\t\t' + '\t\t'.join(self._get_code_variables().splitlines(keepends=True)))
code += '\n'
code += ('\t\t' + '\t\t'.join(self._get_code_functions().splitlines(keepends=True)))
return code | def generate_code(self) -> str:
code = GENERATED_FILE_IMPORTS
code += self._get_code_types()
code += 'class Code(object):\n'
code += '\tdef __init__(self):\n'
code += ('\t\t' + '\t\t'.join(self._get_code_variables().splitlines(keepends=True)))
code += '\n'
code += ('\t\t' + '\t\t'.join(self._get_code_functions().splitlines(keepends=True)))
return code<|docstring|>Returns code inserted to generated file<|endoftext|> |
99d7060588865a0df19014c37ea79f8f7c5a047d5cab6c05e73239a30b7fbaa7 | def _get_code_types(self) -> str:
'Generate code for program types with proper declaration order'
code = ''
done = set((type for type in self.types if (type.get_class() is ProgramTypeBase)))
for type in self.types:
if (type.get_class() is ProgramTypeEnum):
code += (type.generate_code() + '\n')
done.add(type)
code += '\n'
while (len(self.types) != len(done)):
for type in self.types:
if ((type not in done) and all(map((lambda x: (x in done)), type.dependencies))):
generated = type.generate_code()
if (len(generated) > 0):
code += (generated + '\n')
done.add(type)
return code | Generate code for program types with proper declaration order | src/program/program_file.py | _get_code_types | Szpila123/Remote_device_testing-parser | 0 | python | def _get_code_types(self) -> str:
code =
done = set((type for type in self.types if (type.get_class() is ProgramTypeBase)))
for type in self.types:
if (type.get_class() is ProgramTypeEnum):
code += (type.generate_code() + '\n')
done.add(type)
code += '\n'
while (len(self.types) != len(done)):
for type in self.types:
if ((type not in done) and all(map((lambda x: (x in done)), type.dependencies))):
generated = type.generate_code()
if (len(generated) > 0):
code += (generated + '\n')
done.add(type)
return code | def _get_code_types(self) -> str:
code =
done = set((type for type in self.types if (type.get_class() is ProgramTypeBase)))
for type in self.types:
if (type.get_class() is ProgramTypeEnum):
code += (type.generate_code() + '\n')
done.add(type)
code += '\n'
while (len(self.types) != len(done)):
for type in self.types:
if ((type not in done) and all(map((lambda x: (x in done)), type.dependencies))):
generated = type.generate_code()
if (len(generated) > 0):
code += (generated + '\n')
done.add(type)
return code<|docstring|>Generate code for program types with proper declaration order<|endoftext|> |
c21dc09303fa93e4e1304bdb358997f9eb5d798a15c731dc6c4be90868e3405c | def _get_code_variables(self) -> str:
'Generate code for program variables'
return ''.join((f'self.{var.generate_code()}' for var in self.variables)) | Generate code for program variables | src/program/program_file.py | _get_code_variables | Szpila123/Remote_device_testing-parser | 0 | python | def _get_code_variables(self) -> str:
return .join((f'self.{var.generate_code()}' for var in self.variables)) | def _get_code_variables(self) -> str:
return .join((f'self.{var.generate_code()}' for var in self.variables))<|docstring|>Generate code for program variables<|endoftext|> |
9702fa2245528ea661eb6cf324dd3603a4f6dd0c4b234ace73be6b9d3e025e30 | def _get_code_functions(self) -> str:
'Generate code for program functions'
return ''.join((f'self.{func.generate_code()}' for func in self.functions)) | Generate code for program functions | src/program/program_file.py | _get_code_functions | Szpila123/Remote_device_testing-parser | 0 | python | def _get_code_functions(self) -> str:
return .join((f'self.{func.generate_code()}' for func in self.functions)) | def _get_code_functions(self) -> str:
return .join((f'self.{func.generate_code()}' for func in self.functions))<|docstring|>Generate code for program functions<|endoftext|> |
af8e72c1a50722dbf1737b983fcf4e56ecad61aa60a2d2650d82b94315b4d1d4 | def _resolve_refs(self) -> None:
'Resolve referencens with proper size propagation'
for obj in chain(self.variables, self.functions, self.types):
if (obj.get_class() is not ProgramTypePointer):
obj.resolve_refs(self.objects_ref)
for obj in self.types:
if (obj.get_class() is ProgramTypePointer):
obj.resolve_refs(self.objects_ref) | Resolve referencens with proper size propagation | src/program/program_file.py | _resolve_refs | Szpila123/Remote_device_testing-parser | 0 | python | def _resolve_refs(self) -> None:
for obj in chain(self.variables, self.functions, self.types):
if (obj.get_class() is not ProgramTypePointer):
obj.resolve_refs(self.objects_ref)
for obj in self.types:
if (obj.get_class() is ProgramTypePointer):
obj.resolve_refs(self.objects_ref) | def _resolve_refs(self) -> None:
for obj in chain(self.variables, self.functions, self.types):
if (obj.get_class() is not ProgramTypePointer):
obj.resolve_refs(self.objects_ref)
for obj in self.types:
if (obj.get_class() is ProgramTypePointer):
obj.resolve_refs(self.objects_ref)<|docstring|>Resolve referencens with proper size propagation<|endoftext|> |
2f1c6956d839152a976f19feb434c1a3120c5525274f2c82a2c855fae2adb17f | def _get_inspect_result(self) -> ServiceInspectResult:
'Only there to allow tools to know the return type'
return super()._get_inspect_result() | Only there to allow tools to know the return type | python_on_whales/components/service.py | _get_inspect_result | N0K0/python-on-whales | 0 | python | def _get_inspect_result(self) -> ServiceInspectResult:
return super()._get_inspect_result() | def _get_inspect_result(self) -> ServiceInspectResult:
return super()._get_inspect_result()<|docstring|>Only there to allow tools to know the return type<|endoftext|> |
2ec413f50bd9931fd9757e30e7f06f2c946323ea26e4b6bf7a6391c9919fcbc4 | def ps(self) -> List[python_on_whales.components.task.Task]:
'Returns the list of tasks of this service.'
return ServiceCLI(self.client_config).ps(self) | Returns the list of tasks of this service. | python_on_whales/components/service.py | ps | N0K0/python-on-whales | 0 | python | def ps(self) -> List[python_on_whales.components.task.Task]:
return ServiceCLI(self.client_config).ps(self) | def ps(self) -> List[python_on_whales.components.task.Task]:
return ServiceCLI(self.client_config).ps(self)<|docstring|>Returns the list of tasks of this service.<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.