body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
07a84d3db2e882f40b8123306e34cf271127aba1658b3313cea468232a42c6bd
def writing_threads(): '\n list of threads that can write to the log\n ' return [t for t in threads if (phase[t] in writing)]
list of threads that can write to the log
samples/tracemultiplexer/tracemultiplexer.py
writing_threads
jon-jacky/PyModel
61
python
def writing_threads(): '\n \n ' return [t for t in threads if (phase[t] in writing)]
def writing_threads(): '\n \n ' return [t for t in threads if (phase[t] in writing)]<|docstring|>list of threads that can write to the log<|endoftext|>
e157397c2cdfbc236e131f42379e3f20d5be717c2d8c2b5a88fdba1750507377
def state_invariant(): '\n At most one thread can write to the log\n ' return (len(writing_threads()) <= 1)
At most one thread can write to the log
samples/tracemultiplexer/tracemultiplexer.py
state_invariant
jon-jacky/PyModel
61
python
def state_invariant(): '\n \n ' return (len(writing_threads()) <= 1)
def state_invariant(): '\n \n ' return (len(writing_threads()) <= 1)<|docstring|>At most one thread can write to the log<|endoftext|>
91ef4c0750b5a6f163077ec02227d9ca647c98be9ba42eee07b4e3cceda74a48
def CreateTestExpectationMap(expectation_file, tests): 'Creates an expectation map based off a file or list of tests.\n\n Args:\n expectation_file: A filepath to an expectation file to read from, or None.\n If a filepath is specified, |tests| must be None.\n tests: An iterable of strings containing test names to check. If specified,\n |expectation_file| must be None.\n\n Returns:\n A dict in the following format:\n {\n test_name1 (str): {\n expectation1 (data_types.Expectation): {\n builder_name1 (str): {\n step_name1 (str): stats1 (data_types.BuildStats),\n step_name2 (str): stats2 (data_types.BuildStats),\n ...\n },\n builder_name2 (str): { ... },\n },\n expectation2 (data_types.Expectation): { ... },\n ...\n },\n test_name2 (str): { ... },\n ...\n }\n although anything beyond the the data_types.Expectation keys will be left\n empty to be filled at a later time.\n ' logging.info('Creating test expectation map') assert (expectation_file or tests) assert (not (expectation_file and tests)) if expectation_file: with open(expectation_file) as f: content = f.read() else: content = '# results: [ RetryOnFailure ]\n' for t in tests: content += ('%s [ RetryOnFailure ]\n' % t) list_parser = expectations_parser.TaggedTestListParser(content) expectation_map = {} logging.debug('Parsed %d expectations', len(list_parser.expectations)) for e in list_parser.expectations: if ('Skip' in e.raw_results): continue expectation = data_types.Expectation(e.test, e.tags, e.raw_results) expectations_for_test = expectation_map.setdefault(e.test, {}) assert (expectation not in expectations_for_test) expectations_for_test[expectation] = {} return expectation_map
Creates an expectation map based off a file or list of tests. Args: expectation_file: A filepath to an expectation file to read from, or None. If a filepath is specified, |tests| must be None. tests: An iterable of strings containing test names to check. If specified, |expectation_file| must be None. Returns: A dict in the following format: { test_name1 (str): { expectation1 (data_types.Expectation): { builder_name1 (str): { step_name1 (str): stats1 (data_types.BuildStats), step_name2 (str): stats2 (data_types.BuildStats), ... }, builder_name2 (str): { ... }, }, expectation2 (data_types.Expectation): { ... }, ... }, test_name2 (str): { ... }, ... } although anything beyond the the data_types.Expectation keys will be left empty to be filled at a later time.
content/test/gpu/unexpected_passes/expectations.py
CreateTestExpectationMap
Ron423c/chromium
0
python
def CreateTestExpectationMap(expectation_file, tests): 'Creates an expectation map based off a file or list of tests.\n\n Args:\n expectation_file: A filepath to an expectation file to read from, or None.\n If a filepath is specified, |tests| must be None.\n tests: An iterable of strings containing test names to check. If specified,\n |expectation_file| must be None.\n\n Returns:\n A dict in the following format:\n {\n test_name1 (str): {\n expectation1 (data_types.Expectation): {\n builder_name1 (str): {\n step_name1 (str): stats1 (data_types.BuildStats),\n step_name2 (str): stats2 (data_types.BuildStats),\n ...\n },\n builder_name2 (str): { ... },\n },\n expectation2 (data_types.Expectation): { ... },\n ...\n },\n test_name2 (str): { ... },\n ...\n }\n although anything beyond the the data_types.Expectation keys will be left\n empty to be filled at a later time.\n ' logging.info('Creating test expectation map') assert (expectation_file or tests) assert (not (expectation_file and tests)) if expectation_file: with open(expectation_file) as f: content = f.read() else: content = '# results: [ RetryOnFailure ]\n' for t in tests: content += ('%s [ RetryOnFailure ]\n' % t) list_parser = expectations_parser.TaggedTestListParser(content) expectation_map = {} logging.debug('Parsed %d expectations', len(list_parser.expectations)) for e in list_parser.expectations: if ('Skip' in e.raw_results): continue expectation = data_types.Expectation(e.test, e.tags, e.raw_results) expectations_for_test = expectation_map.setdefault(e.test, {}) assert (expectation not in expectations_for_test) expectations_for_test[expectation] = {} return expectation_map
def CreateTestExpectationMap(expectation_file, tests): 'Creates an expectation map based off a file or list of tests.\n\n Args:\n expectation_file: A filepath to an expectation file to read from, or None.\n If a filepath is specified, |tests| must be None.\n tests: An iterable of strings containing test names to check. If specified,\n |expectation_file| must be None.\n\n Returns:\n A dict in the following format:\n {\n test_name1 (str): {\n expectation1 (data_types.Expectation): {\n builder_name1 (str): {\n step_name1 (str): stats1 (data_types.BuildStats),\n step_name2 (str): stats2 (data_types.BuildStats),\n ...\n },\n builder_name2 (str): { ... },\n },\n expectation2 (data_types.Expectation): { ... },\n ...\n },\n test_name2 (str): { ... },\n ...\n }\n although anything beyond the the data_types.Expectation keys will be left\n empty to be filled at a later time.\n ' logging.info('Creating test expectation map') assert (expectation_file or tests) assert (not (expectation_file and tests)) if expectation_file: with open(expectation_file) as f: content = f.read() else: content = '# results: [ RetryOnFailure ]\n' for t in tests: content += ('%s [ RetryOnFailure ]\n' % t) list_parser = expectations_parser.TaggedTestListParser(content) expectation_map = {} logging.debug('Parsed %d expectations', len(list_parser.expectations)) for e in list_parser.expectations: if ('Skip' in e.raw_results): continue expectation = data_types.Expectation(e.test, e.tags, e.raw_results) expectations_for_test = expectation_map.setdefault(e.test, {}) assert (expectation not in expectations_for_test) expectations_for_test[expectation] = {} return expectation_map<|docstring|>Creates an expectation map based off a file or list of tests. Args: expectation_file: A filepath to an expectation file to read from, or None. If a filepath is specified, |tests| must be None. tests: An iterable of strings containing test names to check. If specified, |expectation_file| must be None. 
Returns: A dict in the following format: { test_name1 (str): { expectation1 (data_types.Expectation): { builder_name1 (str): { step_name1 (str): stats1 (data_types.BuildStats), step_name2 (str): stats2 (data_types.BuildStats), ... }, builder_name2 (str): { ... }, }, expectation2 (data_types.Expectation): { ... }, ... }, test_name2 (str): { ... }, ... } although anything beyond the the data_types.Expectation keys will be left empty to be filled at a later time.<|endoftext|>
751f291091e769cffac8314597dd4608263723e4792ea2048368c69ae8b837ff
def FilterOutUnusedExpectations(test_expectation_map): 'Filters out any unused Expectations from |test_expectation_map|.\n\n An Expectation is considered unused if its corresponding dictionary is empty.\n If removing Expectations results in a top-level test key having an empty\n dictionary, that test entry will also be removed.\n\n Args:\n test_expectation_map: A dict in the format returned by\n CreateTestExpectationMap(). Will be modified in place.\n\n Returns:\n A list containing any Expectations that were removed.\n ' logging.info('Filtering out unused expectations') unused_expectations = [] for (_, expectation_map) in test_expectation_map.iteritems(): for (expectation, builder_map) in expectation_map.iteritems(): if (not builder_map): unused_expectations.append(expectation) for unused in unused_expectations: for (_, expectation_map) in test_expectation_map.iteritems(): if (unused in expectation_map): del expectation_map[unused] logging.debug('Found %d unused expectations', len(unused_expectations)) empty_tests = [] for (test_name, expectation_map) in test_expectation_map.iteritems(): if (not expectation_map): empty_tests.append(test_name) for empty in empty_tests: del test_expectation_map[empty] logging.debug('Found %d empty tests: %s', len(empty_tests), empty_tests) return unused_expectations
Filters out any unused Expectations from |test_expectation_map|. An Expectation is considered unused if its corresponding dictionary is empty. If removing Expectations results in a top-level test key having an empty dictionary, that test entry will also be removed. Args: test_expectation_map: A dict in the format returned by CreateTestExpectationMap(). Will be modified in place. Returns: A list containing any Expectations that were removed.
content/test/gpu/unexpected_passes/expectations.py
FilterOutUnusedExpectations
Ron423c/chromium
0
python
def FilterOutUnusedExpectations(test_expectation_map): 'Filters out any unused Expectations from |test_expectation_map|.\n\n An Expectation is considered unused if its corresponding dictionary is empty.\n If removing Expectations results in a top-level test key having an empty\n dictionary, that test entry will also be removed.\n\n Args:\n test_expectation_map: A dict in the format returned by\n CreateTestExpectationMap(). Will be modified in place.\n\n Returns:\n A list containing any Expectations that were removed.\n ' logging.info('Filtering out unused expectations') unused_expectations = [] for (_, expectation_map) in test_expectation_map.iteritems(): for (expectation, builder_map) in expectation_map.iteritems(): if (not builder_map): unused_expectations.append(expectation) for unused in unused_expectations: for (_, expectation_map) in test_expectation_map.iteritems(): if (unused in expectation_map): del expectation_map[unused] logging.debug('Found %d unused expectations', len(unused_expectations)) empty_tests = [] for (test_name, expectation_map) in test_expectation_map.iteritems(): if (not expectation_map): empty_tests.append(test_name) for empty in empty_tests: del test_expectation_map[empty] logging.debug('Found %d empty tests: %s', len(empty_tests), empty_tests) return unused_expectations
def FilterOutUnusedExpectations(test_expectation_map): 'Filters out any unused Expectations from |test_expectation_map|.\n\n An Expectation is considered unused if its corresponding dictionary is empty.\n If removing Expectations results in a top-level test key having an empty\n dictionary, that test entry will also be removed.\n\n Args:\n test_expectation_map: A dict in the format returned by\n CreateTestExpectationMap(). Will be modified in place.\n\n Returns:\n A list containing any Expectations that were removed.\n ' logging.info('Filtering out unused expectations') unused_expectations = [] for (_, expectation_map) in test_expectation_map.iteritems(): for (expectation, builder_map) in expectation_map.iteritems(): if (not builder_map): unused_expectations.append(expectation) for unused in unused_expectations: for (_, expectation_map) in test_expectation_map.iteritems(): if (unused in expectation_map): del expectation_map[unused] logging.debug('Found %d unused expectations', len(unused_expectations)) empty_tests = [] for (test_name, expectation_map) in test_expectation_map.iteritems(): if (not expectation_map): empty_tests.append(test_name) for empty in empty_tests: del test_expectation_map[empty] logging.debug('Found %d empty tests: %s', len(empty_tests), empty_tests) return unused_expectations<|docstring|>Filters out any unused Expectations from |test_expectation_map|. An Expectation is considered unused if its corresponding dictionary is empty. If removing Expectations results in a top-level test key having an empty dictionary, that test entry will also be removed. Args: test_expectation_map: A dict in the format returned by CreateTestExpectationMap(). Will be modified in place. Returns: A list containing any Expectations that were removed.<|endoftext|>
52dfae245e94b214d3e28c2256ecc917b08efd2523819b3e40f524c3a6904a89
def SplitExpectationsByStaleness(test_expectation_map): "Separates |test_expectation_map| based on expectation staleness.\n\n Args:\n test_expectation_map: A dict in the format returned by\n CreateTestExpectationMap() with any unused expectations already filtered\n out.\n\n Returns:\n Three dicts (stale_dict, semi_stale_dict, active_dict). All three combined\n contain the information of |test_expectation_map| in the same format.\n |stale_dict| contains entries for expectations that are no longer being\n helpful, |semi_stale_dict| contains entries for expectations that might be\n removable or modifiable, but have at least one failed test run.\n |active_dict| contains entries for expectations that are preventing failures\n on all builders they're active on, and thus shouldn't be removed.\n " FULL_PASS = 1 NEVER_PASS = 2 PARTIAL_PASS = 3 stale_dict = {} semi_stale_dict = {} active_dict = {} for (test_name, expectation_map) in test_expectation_map.iteritems(): for (expectation, builder_map) in expectation_map.iteritems(): tmp_map = {FULL_PASS: {}, NEVER_PASS: {}, PARTIAL_PASS: {}} for (builder_name, step_map) in builder_map.iteritems(): fully_passed = {} partially_passed = {} never_passed = {} for (step_name, stats) in step_map.iteritems(): if (stats.passed_builds == stats.total_builds): assert (step_name not in fully_passed) fully_passed[step_name] = stats elif (stats.failed_builds == stats.total_builds): assert (step_name not in never_passed) never_passed[step_name] = stats else: assert (step_name not in partially_passed) partially_passed[step_name] = stats if fully_passed: tmp_map[FULL_PASS][builder_name] = fully_passed if never_passed: tmp_map[NEVER_PASS][builder_name] = never_passed if partially_passed: tmp_map[PARTIAL_PASS][builder_name] = partially_passed def _CopyPassesIntoBuilderMap(builder_map, pass_types): for pt in pass_types: for (builder, steps) in tmp_map[pt].iteritems(): builder_map.setdefault(builder, {}).update(steps) if (not (tmp_map[NEVER_PASS] or 
tmp_map[PARTIAL_PASS])): builder_map = stale_dict.setdefault(test_name, {}).setdefault(expectation, {}) _CopyPassesIntoBuilderMap(builder_map, [FULL_PASS]) elif (not tmp_map[FULL_PASS]): builder_map = active_dict.setdefault(test_name, {}).setdefault(expectation, {}) _CopyPassesIntoBuilderMap(builder_map, [NEVER_PASS, PARTIAL_PASS]) else: builder_map = semi_stale_dict.setdefault(test_name, {}).setdefault(expectation, {}) _CopyPassesIntoBuilderMap(builder_map, [FULL_PASS, PARTIAL_PASS, NEVER_PASS]) return (stale_dict, semi_stale_dict, active_dict)
Separates |test_expectation_map| based on expectation staleness. Args: test_expectation_map: A dict in the format returned by CreateTestExpectationMap() with any unused expectations already filtered out. Returns: Three dicts (stale_dict, semi_stale_dict, active_dict). All three combined contain the information of |test_expectation_map| in the same format. |stale_dict| contains entries for expectations that are no longer being helpful, |semi_stale_dict| contains entries for expectations that might be removable or modifiable, but have at least one failed test run. |active_dict| contains entries for expectations that are preventing failures on all builders they're active on, and thus shouldn't be removed.
content/test/gpu/unexpected_passes/expectations.py
SplitExpectationsByStaleness
Ron423c/chromium
0
python
def SplitExpectationsByStaleness(test_expectation_map): "Separates |test_expectation_map| based on expectation staleness.\n\n Args:\n test_expectation_map: A dict in the format returned by\n CreateTestExpectationMap() with any unused expectations already filtered\n out.\n\n Returns:\n Three dicts (stale_dict, semi_stale_dict, active_dict). All three combined\n contain the information of |test_expectation_map| in the same format.\n |stale_dict| contains entries for expectations that are no longer being\n helpful, |semi_stale_dict| contains entries for expectations that might be\n removable or modifiable, but have at least one failed test run.\n |active_dict| contains entries for expectations that are preventing failures\n on all builders they're active on, and thus shouldn't be removed.\n " FULL_PASS = 1 NEVER_PASS = 2 PARTIAL_PASS = 3 stale_dict = {} semi_stale_dict = {} active_dict = {} for (test_name, expectation_map) in test_expectation_map.iteritems(): for (expectation, builder_map) in expectation_map.iteritems(): tmp_map = {FULL_PASS: {}, NEVER_PASS: {}, PARTIAL_PASS: {}} for (builder_name, step_map) in builder_map.iteritems(): fully_passed = {} partially_passed = {} never_passed = {} for (step_name, stats) in step_map.iteritems(): if (stats.passed_builds == stats.total_builds): assert (step_name not in fully_passed) fully_passed[step_name] = stats elif (stats.failed_builds == stats.total_builds): assert (step_name not in never_passed) never_passed[step_name] = stats else: assert (step_name not in partially_passed) partially_passed[step_name] = stats if fully_passed: tmp_map[FULL_PASS][builder_name] = fully_passed if never_passed: tmp_map[NEVER_PASS][builder_name] = never_passed if partially_passed: tmp_map[PARTIAL_PASS][builder_name] = partially_passed def _CopyPassesIntoBuilderMap(builder_map, pass_types): for pt in pass_types: for (builder, steps) in tmp_map[pt].iteritems(): builder_map.setdefault(builder, {}).update(steps) if (not (tmp_map[NEVER_PASS] or 
tmp_map[PARTIAL_PASS])): builder_map = stale_dict.setdefault(test_name, {}).setdefault(expectation, {}) _CopyPassesIntoBuilderMap(builder_map, [FULL_PASS]) elif (not tmp_map[FULL_PASS]): builder_map = active_dict.setdefault(test_name, {}).setdefault(expectation, {}) _CopyPassesIntoBuilderMap(builder_map, [NEVER_PASS, PARTIAL_PASS]) else: builder_map = semi_stale_dict.setdefault(test_name, {}).setdefault(expectation, {}) _CopyPassesIntoBuilderMap(builder_map, [FULL_PASS, PARTIAL_PASS, NEVER_PASS]) return (stale_dict, semi_stale_dict, active_dict)
def SplitExpectationsByStaleness(test_expectation_map): "Separates |test_expectation_map| based on expectation staleness.\n\n Args:\n test_expectation_map: A dict in the format returned by\n CreateTestExpectationMap() with any unused expectations already filtered\n out.\n\n Returns:\n Three dicts (stale_dict, semi_stale_dict, active_dict). All three combined\n contain the information of |test_expectation_map| in the same format.\n |stale_dict| contains entries for expectations that are no longer being\n helpful, |semi_stale_dict| contains entries for expectations that might be\n removable or modifiable, but have at least one failed test run.\n |active_dict| contains entries for expectations that are preventing failures\n on all builders they're active on, and thus shouldn't be removed.\n " FULL_PASS = 1 NEVER_PASS = 2 PARTIAL_PASS = 3 stale_dict = {} semi_stale_dict = {} active_dict = {} for (test_name, expectation_map) in test_expectation_map.iteritems(): for (expectation, builder_map) in expectation_map.iteritems(): tmp_map = {FULL_PASS: {}, NEVER_PASS: {}, PARTIAL_PASS: {}} for (builder_name, step_map) in builder_map.iteritems(): fully_passed = {} partially_passed = {} never_passed = {} for (step_name, stats) in step_map.iteritems(): if (stats.passed_builds == stats.total_builds): assert (step_name not in fully_passed) fully_passed[step_name] = stats elif (stats.failed_builds == stats.total_builds): assert (step_name not in never_passed) never_passed[step_name] = stats else: assert (step_name not in partially_passed) partially_passed[step_name] = stats if fully_passed: tmp_map[FULL_PASS][builder_name] = fully_passed if never_passed: tmp_map[NEVER_PASS][builder_name] = never_passed if partially_passed: tmp_map[PARTIAL_PASS][builder_name] = partially_passed def _CopyPassesIntoBuilderMap(builder_map, pass_types): for pt in pass_types: for (builder, steps) in tmp_map[pt].iteritems(): builder_map.setdefault(builder, {}).update(steps) if (not (tmp_map[NEVER_PASS] or 
tmp_map[PARTIAL_PASS])): builder_map = stale_dict.setdefault(test_name, {}).setdefault(expectation, {}) _CopyPassesIntoBuilderMap(builder_map, [FULL_PASS]) elif (not tmp_map[FULL_PASS]): builder_map = active_dict.setdefault(test_name, {}).setdefault(expectation, {}) _CopyPassesIntoBuilderMap(builder_map, [NEVER_PASS, PARTIAL_PASS]) else: builder_map = semi_stale_dict.setdefault(test_name, {}).setdefault(expectation, {}) _CopyPassesIntoBuilderMap(builder_map, [FULL_PASS, PARTIAL_PASS, NEVER_PASS]) return (stale_dict, semi_stale_dict, active_dict)<|docstring|>Separates |test_expectation_map| based on expectation staleness. Args: test_expectation_map: A dict in the format returned by CreateTestExpectationMap() with any unused expectations already filtered out. Returns: Three dicts (stale_dict, semi_stale_dict, active_dict). All three combined contain the information of |test_expectation_map| in the same format. |stale_dict| contains entries for expectations that are no longer being helpful, |semi_stale_dict| contains entries for expectations that might be removable or modifiable, but have at least one failed test run. |active_dict| contains entries for expectations that are preventing failures on all builders they're active on, and thus shouldn't be removed.<|endoftext|>
2ba030dae7c794f1fa6bd8ed01da9724a3dce7682751466f27d3c51339861887
def RemoveExpectationsFromFile(expectations, expectation_file): 'Removes lines corresponding to |expectations| from |expectation_file|.\n\n Ignores any lines that match but are within a disable block or have an inline\n disable comment.\n\n Args:\n expectations: A list of data_types.Expectations to remove.\n expectation_file: A filepath pointing to an expectation file to remove lines\n from.\n\n Returns:\n A set of strings containing URLs of bugs associated with the removed\n expectations.\n ' header = validate_tag_consistency.TAG_HEADER with open(expectation_file) as f: input_contents = f.read() output_contents = '' in_disable_block = False disable_block_reason = '' removed_urls = set() for line in input_contents.splitlines(True): stripped_line = line.strip() if ((not stripped_line) or stripped_line.startswith('#')): output_contents += line assert (not ((FINDER_DISABLE_COMMENT in line) and (FINDER_ENABLE_COMMENT in line))) if (FINDER_DISABLE_COMMENT in line): if in_disable_block: raise RuntimeError(('Invalid expectation file %s - contains a disable comment "%s" that is in another disable block.' % (expectation_file, stripped_line))) in_disable_block = True disable_block_reason = _GetDisableReasonFromComment(line) if (FINDER_ENABLE_COMMENT in line): if (not in_disable_block): raise RuntimeError(('Invalid expectation file %s - contains an enable comment "%s" that is outside of a disable block.' 
% (expectation_file, stripped_line))) in_disable_block = False continue single_line_content = (header + line) list_parser = expectations_parser.TaggedTestListParser(single_line_content) assert (len(list_parser.expectations) == 1) typ_expectation = list_parser.expectations[0] current_expectation = data_types.Expectation(typ_expectation.test, typ_expectation.tags, typ_expectation.raw_results) if any([e for e in expectations if (e == current_expectation)]): if in_disable_block: output_contents += line logging.info('Would have removed expectation %s, but inside a disable block with reason %s', stripped_line, disable_block_reason) elif (FINDER_DISABLE_COMMENT in line): output_contents += line logging.info('Would have removed expectation %s, but it has an inline disable comment with reason %s', stripped_line.split('#')[0], _GetDisableReasonFromComment(line)) else: reason = list_parser.expectations[0].reason if reason: removed_urls.add(reason) else: output_contents += line with open(expectation_file, 'w') as f: f.write(output_contents) return removed_urls
Removes lines corresponding to |expectations| from |expectation_file|. Ignores any lines that match but are within a disable block or have an inline disable comment. Args: expectations: A list of data_types.Expectations to remove. expectation_file: A filepath pointing to an expectation file to remove lines from. Returns: A set of strings containing URLs of bugs associated with the removed expectations.
content/test/gpu/unexpected_passes/expectations.py
RemoveExpectationsFromFile
Ron423c/chromium
0
python
def RemoveExpectationsFromFile(expectations, expectation_file): 'Removes lines corresponding to |expectations| from |expectation_file|.\n\n Ignores any lines that match but are within a disable block or have an inline\n disable comment.\n\n Args:\n expectations: A list of data_types.Expectations to remove.\n expectation_file: A filepath pointing to an expectation file to remove lines\n from.\n\n Returns:\n A set of strings containing URLs of bugs associated with the removed\n expectations.\n ' header = validate_tag_consistency.TAG_HEADER with open(expectation_file) as f: input_contents = f.read() output_contents = in_disable_block = False disable_block_reason = removed_urls = set() for line in input_contents.splitlines(True): stripped_line = line.strip() if ((not stripped_line) or stripped_line.startswith('#')): output_contents += line assert (not ((FINDER_DISABLE_COMMENT in line) and (FINDER_ENABLE_COMMENT in line))) if (FINDER_DISABLE_COMMENT in line): if in_disable_block: raise RuntimeError(('Invalid expectation file %s - contains a disable comment "%s" that is in another disable block.' % (expectation_file, stripped_line))) in_disable_block = True disable_block_reason = _GetDisableReasonFromComment(line) if (FINDER_ENABLE_COMMENT in line): if (not in_disable_block): raise RuntimeError(('Invalid expectation file %s - contains an enable comment "%s" that is outside of a disable block.' 
% (expectation_file, stripped_line))) in_disable_block = False continue single_line_content = (header + line) list_parser = expectations_parser.TaggedTestListParser(single_line_content) assert (len(list_parser.expectations) == 1) typ_expectation = list_parser.expectations[0] current_expectation = data_types.Expectation(typ_expectation.test, typ_expectation.tags, typ_expectation.raw_results) if any([e for e in expectations if (e == current_expectation)]): if in_disable_block: output_contents += line logging.info('Would have removed expectation %s, but inside a disable block with reason %s', stripped_line, disable_block_reason) elif (FINDER_DISABLE_COMMENT in line): output_contents += line logging.info('Would have removed expectation %s, but it has an inline disable comment with reason %s', stripped_line.split('#')[0], _GetDisableReasonFromComment(line)) else: reason = list_parser.expectations[0].reason if reason: removed_urls.add(reason) else: output_contents += line with open(expectation_file, 'w') as f: f.write(output_contents) return removed_urls
def RemoveExpectationsFromFile(expectations, expectation_file): 'Removes lines corresponding to |expectations| from |expectation_file|.\n\n Ignores any lines that match but are within a disable block or have an inline\n disable comment.\n\n Args:\n expectations: A list of data_types.Expectations to remove.\n expectation_file: A filepath pointing to an expectation file to remove lines\n from.\n\n Returns:\n A set of strings containing URLs of bugs associated with the removed\n expectations.\n ' header = validate_tag_consistency.TAG_HEADER with open(expectation_file) as f: input_contents = f.read() output_contents = in_disable_block = False disable_block_reason = removed_urls = set() for line in input_contents.splitlines(True): stripped_line = line.strip() if ((not stripped_line) or stripped_line.startswith('#')): output_contents += line assert (not ((FINDER_DISABLE_COMMENT in line) and (FINDER_ENABLE_COMMENT in line))) if (FINDER_DISABLE_COMMENT in line): if in_disable_block: raise RuntimeError(('Invalid expectation file %s - contains a disable comment "%s" that is in another disable block.' % (expectation_file, stripped_line))) in_disable_block = True disable_block_reason = _GetDisableReasonFromComment(line) if (FINDER_ENABLE_COMMENT in line): if (not in_disable_block): raise RuntimeError(('Invalid expectation file %s - contains an enable comment "%s" that is outside of a disable block.' 
% (expectation_file, stripped_line))) in_disable_block = False continue single_line_content = (header + line) list_parser = expectations_parser.TaggedTestListParser(single_line_content) assert (len(list_parser.expectations) == 1) typ_expectation = list_parser.expectations[0] current_expectation = data_types.Expectation(typ_expectation.test, typ_expectation.tags, typ_expectation.raw_results) if any([e for e in expectations if (e == current_expectation)]): if in_disable_block: output_contents += line logging.info('Would have removed expectation %s, but inside a disable block with reason %s', stripped_line, disable_block_reason) elif (FINDER_DISABLE_COMMENT in line): output_contents += line logging.info('Would have removed expectation %s, but it has an inline disable comment with reason %s', stripped_line.split('#')[0], _GetDisableReasonFromComment(line)) else: reason = list_parser.expectations[0].reason if reason: removed_urls.add(reason) else: output_contents += line with open(expectation_file, 'w') as f: f.write(output_contents) return removed_urls<|docstring|>Removes lines corresponding to |expectations| from |expectation_file|. Ignores any lines that match but are within a disable block or have an inline disable comment. Args: expectations: A list of data_types.Expectations to remove. expectation_file: A filepath pointing to an expectation file to remove lines from. Returns: A set of strings containing URLs of bugs associated with the removed expectations.<|endoftext|>
39d9688c52f3b052b9e15300eaf4f1a081a3228b514c50130416b1a203f05005
def MergeExpectationMaps(base_map, merge_map, reference_map=None): 'Merges |merge_map| into |base_map|.\n\n Args:\n base_map: A dict to be updated with the contents of |merge_map|. Will be\n modified in place.\n merge_map: A dict in the format returned by\n expectations.CreateTestExpectationMap() whose contents will be merged\n into |base_map|.\n reference_map: A dict containing the information that was originally in\n |base_map|. Used for ensuring that a single expectation/builder/step\n combination is only ever updated once. If None, a copy of |base_map|\n will be used.\n ' reference_map = (reference_map or copy.deepcopy(base_map)) for (key, value) in merge_map.iteritems(): if (key not in base_map): base_map[key] = value elif isinstance(value, dict): MergeExpectationMaps(base_map[key], value, reference_map.get(key, {})) else: assert isinstance(value, data_types.BuildStats) reference_stats = reference_map.get(key, None) assert (reference_stats is not None) assert (reference_stats == base_map[key]) base_map[key] = value
Merges |merge_map| into |base_map|. Args: base_map: A dict to be updated with the contents of |merge_map|. Will be modified in place. merge_map: A dict in the format returned by expectations.CreateTestExpectationMap() whose contents will be merged into |base_map|. reference_map: A dict containing the information that was originally in |base_map|. Used for ensuring that a single expectation/builder/step combination is only ever updated once. If None, a copy of |base_map| will be used.
content/test/gpu/unexpected_passes/expectations.py
MergeExpectationMaps
Ron423c/chromium
0
python
def MergeExpectationMaps(base_map, merge_map, reference_map=None): 'Merges |merge_map| into |base_map|.\n\n Args:\n base_map: A dict to be updated with the contents of |merge_map|. Will be\n modified in place.\n merge_map: A dict in the format returned by\n expectations.CreateTestExpectationMap() whose contents will be merged\n into |base_map|.\n reference_map: A dict containing the information that was originally in\n |base_map|. Used for ensuring that a single expectation/builder/step\n combination is only ever updated once. If None, a copy of |base_map|\n will be used.\n ' reference_map = (reference_map or copy.deepcopy(base_map)) for (key, value) in merge_map.iteritems(): if (key not in base_map): base_map[key] = value elif isinstance(value, dict): MergeExpectationMaps(base_map[key], value, reference_map.get(key, {})) else: assert isinstance(value, data_types.BuildStats) reference_stats = reference_map.get(key, None) assert (reference_stats is not None) assert (reference_stats == base_map[key]) base_map[key] = value
def MergeExpectationMaps(base_map, merge_map, reference_map=None): 'Merges |merge_map| into |base_map|.\n\n Args:\n base_map: A dict to be updated with the contents of |merge_map|. Will be\n modified in place.\n merge_map: A dict in the format returned by\n expectations.CreateTestExpectationMap() whose contents will be merged\n into |base_map|.\n reference_map: A dict containing the information that was originally in\n |base_map|. Used for ensuring that a single expectation/builder/step\n combination is only ever updated once. If None, a copy of |base_map|\n will be used.\n ' reference_map = (reference_map or copy.deepcopy(base_map)) for (key, value) in merge_map.iteritems(): if (key not in base_map): base_map[key] = value elif isinstance(value, dict): MergeExpectationMaps(base_map[key], value, reference_map.get(key, {})) else: assert isinstance(value, data_types.BuildStats) reference_stats = reference_map.get(key, None) assert (reference_stats is not None) assert (reference_stats == base_map[key]) base_map[key] = value<|docstring|>Merges |merge_map| into |base_map|. Args: base_map: A dict to be updated with the contents of |merge_map|. Will be modified in place. merge_map: A dict in the format returned by expectations.CreateTestExpectationMap() whose contents will be merged into |base_map|. reference_map: A dict containing the information that was originally in |base_map|. Used for ensuring that a single expectation/builder/step combination is only ever updated once. If None, a copy of |base_map| will be used.<|endoftext|>
12fb6e24a55e8bfa7c8e466600244452ea151b6afb1aea3981fe75230620c042
def AddResultListToMap(expectation_map, builder, results): 'Adds |results| to |expectation_map|.\n\n Args:\n expectation_map: A dict in the format returned by\n expectations.CreateTestExpectationMap(). Will be modified in-place.\n builder: A string containing the builder |results| came from. Should be\n prefixed with something to distinguish between identically named CI and\n try builders.\n results: A list of data_types.Result objects corresponding to the ResultDB\n data queried for |builder|.\n\n Returns:\n A list of data_types.Result objects who did not have a matching expectation\n in |expectation_map|.\n ' failure_results = set() pass_results = set() unmatched_results = [] for r in results: if (r.actual_result == 'Pass'): pass_results.add(r) else: failure_results.add(r) modified_failing_retry_results = set() for r in failure_results: modified_failing_retry_results.add(data_types.Result(r.test, r.tags, 'Pass', r.step, r.build_id)) pass_results -= modified_failing_retry_results for r in (pass_results | failure_results): found_matching = _AddResultToMap(r, builder, expectation_map) if (not found_matching): unmatched_results.append(r) return unmatched_results
Adds |results| to |expectation_map|. Args: expectation_map: A dict in the format returned by expectations.CreateTestExpectationMap(). Will be modified in-place. builder: A string containing the builder |results| came from. Should be prefixed with something to distinguish between identically named CI and try builders. results: A list of data_types.Result objects corresponding to the ResultDB data queried for |builder|. Returns: A list of data_types.Result objects who did not have a matching expectation in |expectation_map|.
content/test/gpu/unexpected_passes/expectations.py
AddResultListToMap
Ron423c/chromium
0
python
def AddResultListToMap(expectation_map, builder, results): 'Adds |results| to |expectation_map|.\n\n Args:\n expectation_map: A dict in the format returned by\n expectations.CreateTestExpectationMap(). Will be modified in-place.\n builder: A string containing the builder |results| came from. Should be\n prefixed with something to distinguish between identically named CI and\n try builders.\n results: A list of data_types.Result objects corresponding to the ResultDB\n data queried for |builder|.\n\n Returns:\n A list of data_types.Result objects who did not have a matching expectation\n in |expectation_map|.\n ' failure_results = set() pass_results = set() unmatched_results = [] for r in results: if (r.actual_result == 'Pass'): pass_results.add(r) else: failure_results.add(r) modified_failing_retry_results = set() for r in failure_results: modified_failing_retry_results.add(data_types.Result(r.test, r.tags, 'Pass', r.step, r.build_id)) pass_results -= modified_failing_retry_results for r in (pass_results | failure_results): found_matching = _AddResultToMap(r, builder, expectation_map) if (not found_matching): unmatched_results.append(r) return unmatched_results
def AddResultListToMap(expectation_map, builder, results): 'Adds |results| to |expectation_map|.\n\n Args:\n expectation_map: A dict in the format returned by\n expectations.CreateTestExpectationMap(). Will be modified in-place.\n builder: A string containing the builder |results| came from. Should be\n prefixed with something to distinguish between identically named CI and\n try builders.\n results: A list of data_types.Result objects corresponding to the ResultDB\n data queried for |builder|.\n\n Returns:\n A list of data_types.Result objects who did not have a matching expectation\n in |expectation_map|.\n ' failure_results = set() pass_results = set() unmatched_results = [] for r in results: if (r.actual_result == 'Pass'): pass_results.add(r) else: failure_results.add(r) modified_failing_retry_results = set() for r in failure_results: modified_failing_retry_results.add(data_types.Result(r.test, r.tags, 'Pass', r.step, r.build_id)) pass_results -= modified_failing_retry_results for r in (pass_results | failure_results): found_matching = _AddResultToMap(r, builder, expectation_map) if (not found_matching): unmatched_results.append(r) return unmatched_results<|docstring|>Adds |results| to |expectation_map|. Args: expectation_map: A dict in the format returned by expectations.CreateTestExpectationMap(). Will be modified in-place. builder: A string containing the builder |results| came from. Should be prefixed with something to distinguish between identically named CI and try builders. results: A list of data_types.Result objects corresponding to the ResultDB data queried for |builder|. Returns: A list of data_types.Result objects who did not have a matching expectation in |expectation_map|.<|endoftext|>
1794601d37e23bd4f0388d64874039ec386dc6d32c4eeab8f971abac7441ad2d
def _AddResultToMap(result, builder, expectation_map): 'Adds a single |result| to |expectation_map|.\n\n Args:\n result: A data_types.Result object to add.\n builder: A string containing the name of the builder |result| came from.\n expectation_map: A dict in the format returned by\n expectations.CreateTestExpectationMap(). Will be modified in-place.\n\n Returns:\n True if an expectation in |expectation_map| applied to |result|, otherwise\n False.\n ' found_matching_expectation = False for expectations in expectation_map.itervalues(): for (e, builder_map) in expectations.iteritems(): if e.AppliesToResult(result): found_matching_expectation = True step_map = builder_map.setdefault(builder, {}) stats = step_map.setdefault(result.step, data_types.BuildStats()) if (result.actual_result == 'Pass'): stats.AddPassedBuild() else: stats.AddFailedBuild(result.build_id) return found_matching_expectation
Adds a single |result| to |expectation_map|. Args: result: A data_types.Result object to add. builder: A string containing the name of the builder |result| came from. expectation_map: A dict in the format returned by expectations.CreateTestExpectationMap(). Will be modified in-place. Returns: True if an expectation in |expectation_map| applied to |result|, otherwise False.
content/test/gpu/unexpected_passes/expectations.py
_AddResultToMap
Ron423c/chromium
0
python
def _AddResultToMap(result, builder, expectation_map): 'Adds a single |result| to |expectation_map|.\n\n Args:\n result: A data_types.Result object to add.\n builder: A string containing the name of the builder |result| came from.\n expectation_map: A dict in the format returned by\n expectations.CreateTestExpectationMap(). Will be modified in-place.\n\n Returns:\n True if an expectation in |expectation_map| applied to |result|, otherwise\n False.\n ' found_matching_expectation = False for expectations in expectation_map.itervalues(): for (e, builder_map) in expectations.iteritems(): if e.AppliesToResult(result): found_matching_expectation = True step_map = builder_map.setdefault(builder, {}) stats = step_map.setdefault(result.step, data_types.BuildStats()) if (result.actual_result == 'Pass'): stats.AddPassedBuild() else: stats.AddFailedBuild(result.build_id) return found_matching_expectation
def _AddResultToMap(result, builder, expectation_map): 'Adds a single |result| to |expectation_map|.\n\n Args:\n result: A data_types.Result object to add.\n builder: A string containing the name of the builder |result| came from.\n expectation_map: A dict in the format returned by\n expectations.CreateTestExpectationMap(). Will be modified in-place.\n\n Returns:\n True if an expectation in |expectation_map| applied to |result|, otherwise\n False.\n ' found_matching_expectation = False for expectations in expectation_map.itervalues(): for (e, builder_map) in expectations.iteritems(): if e.AppliesToResult(result): found_matching_expectation = True step_map = builder_map.setdefault(builder, {}) stats = step_map.setdefault(result.step, data_types.BuildStats()) if (result.actual_result == 'Pass'): stats.AddPassedBuild() else: stats.AddFailedBuild(result.build_id) return found_matching_expectation<|docstring|>Adds a single |result| to |expectation_map|. Args: result: A data_types.Result object to add. builder: A string containing the name of the builder |result| came from. expectation_map: A dict in the format returned by expectations.CreateTestExpectationMap(). Will be modified in-place. Returns: True if an expectation in |expectation_map| applied to |result|, otherwise False.<|endoftext|>
8024c8cdd9584529919c4e061060b4e8e88507667eb4a5053e747d02a5583a44
def initialize(self, server): 'Initialize request\n\n `server`\n SockJSRouter instance.\n ' self.server = server self.logged = False
Initialize request `server` SockJSRouter instance.
sockjs/tornado/basehandler.py
initialize
jwilk-forks/sockjs-tornado
431
python
def initialize(self, server): 'Initialize request\n\n `server`\n SockJSRouter instance.\n ' self.server = server self.logged = False
def initialize(self, server): 'Initialize request\n\n `server`\n SockJSRouter instance.\n ' self.server = server self.logged = False<|docstring|>Initialize request `server` SockJSRouter instance.<|endoftext|>
9a99660112fc3bd8714da7c451d93f61597893ff6ef70623faf55a6aacc33721
def prepare(self): 'Increment connection count' self.logged = True self.server.stats.on_conn_opened()
Increment connection count
sockjs/tornado/basehandler.py
prepare
jwilk-forks/sockjs-tornado
431
python
def prepare(self): self.logged = True self.server.stats.on_conn_opened()
def prepare(self): self.logged = True self.server.stats.on_conn_opened()<|docstring|>Increment connection count<|endoftext|>
180a4349eeb4bc6040980583d63b00e171cdb2858a67d4d18ef05290e252b92d
def _log_disconnect(self): 'Decrement connection count' if self.logged: self.server.stats.on_conn_closed() self.logged = False
Decrement connection count
sockjs/tornado/basehandler.py
_log_disconnect
jwilk-forks/sockjs-tornado
431
python
def _log_disconnect(self): if self.logged: self.server.stats.on_conn_closed() self.logged = False
def _log_disconnect(self): if self.logged: self.server.stats.on_conn_closed() self.logged = False<|docstring|>Decrement connection count<|endoftext|>
d06def1b150ed7c6cdcc2654adc5239f3e4c8a7d5e946e9347de9c97a4ef3141
def finish(self, chunk=None): 'Tornado `finish` handler' self._log_disconnect() super(BaseHandler, self).finish(chunk)
Tornado `finish` handler
sockjs/tornado/basehandler.py
finish
jwilk-forks/sockjs-tornado
431
python
def finish(self, chunk=None): self._log_disconnect() super(BaseHandler, self).finish(chunk)
def finish(self, chunk=None): self._log_disconnect() super(BaseHandler, self).finish(chunk)<|docstring|>Tornado `finish` handler<|endoftext|>
d70b163ad573b18db27b63348513702db63fd88aed2223b2fdf790041bbe9795
def on_connection_close(self): 'Tornado `on_connection_close` handler' self._log_disconnect()
Tornado `on_connection_close` handler
sockjs/tornado/basehandler.py
on_connection_close
jwilk-forks/sockjs-tornado
431
python
def on_connection_close(self): self._log_disconnect()
def on_connection_close(self): self._log_disconnect()<|docstring|>Tornado `on_connection_close` handler<|endoftext|>
a75084c6722169cdf4531528e9b23bd03ff0f25cce00e14341d90d64312f8aeb
def enable_cache(self): 'Enable client-side caching for the current request' self.set_header('Cache-Control', ('max-age=%d, public' % CACHE_TIME)) d = (datetime.datetime.now() + datetime.timedelta(seconds=CACHE_TIME)) self.set_header('Expires', d.strftime('%a, %d %b %Y %H:%M:%S')) self.set_header('access-control-max-age', CACHE_TIME)
Enable client-side caching for the current request
sockjs/tornado/basehandler.py
enable_cache
jwilk-forks/sockjs-tornado
431
python
def enable_cache(self): self.set_header('Cache-Control', ('max-age=%d, public' % CACHE_TIME)) d = (datetime.datetime.now() + datetime.timedelta(seconds=CACHE_TIME)) self.set_header('Expires', d.strftime('%a, %d %b %Y %H:%M:%S')) self.set_header('access-control-max-age', CACHE_TIME)
def enable_cache(self): self.set_header('Cache-Control', ('max-age=%d, public' % CACHE_TIME)) d = (datetime.datetime.now() + datetime.timedelta(seconds=CACHE_TIME)) self.set_header('Expires', d.strftime('%a, %d %b %Y %H:%M:%S')) self.set_header('access-control-max-age', CACHE_TIME)<|docstring|>Enable client-side caching for the current request<|endoftext|>
89b824f6194b9d1cd90fefff1fbfdae8bba84165ef59b33e0a1a1c1535ff95f5
def disable_cache(self): 'Disable client-side cache for the current request' self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')
Disable client-side cache for the current request
sockjs/tornado/basehandler.py
disable_cache
jwilk-forks/sockjs-tornado
431
python
def disable_cache(self): self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')
def disable_cache(self): self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')<|docstring|>Disable client-side cache for the current request<|endoftext|>
5cffd4ca14333a759a0310eea378caf904d9f2fd2d395278a336b95e5d5e2d3e
def handle_session_cookie(self): 'Handle JSESSIONID cookie logic' if (not self.server.settings['jsessionid']): return cookie = self.cookies.get('JSESSIONID') if (not cookie): cv = 'dummy' else: cv = cookie.value self.set_cookie('JSESSIONID', cv)
Handle JSESSIONID cookie logic
sockjs/tornado/basehandler.py
handle_session_cookie
jwilk-forks/sockjs-tornado
431
python
def handle_session_cookie(self): if (not self.server.settings['jsessionid']): return cookie = self.cookies.get('JSESSIONID') if (not cookie): cv = 'dummy' else: cv = cookie.value self.set_cookie('JSESSIONID', cv)
def handle_session_cookie(self): if (not self.server.settings['jsessionid']): return cookie = self.cookies.get('JSESSIONID') if (not cookie): cv = 'dummy' else: cv = cookie.value self.set_cookie('JSESSIONID', cv)<|docstring|>Handle JSESSIONID cookie logic<|endoftext|>
459d4a73e26355d3b104ace09fdd54b711e9013269845e0a937472b9a7357058
def safe_finish(self): 'Finish session. If it will blow up - connection was set to Keep-Alive and\n client dropped connection, ignore any IOError or socket error.' try: self.finish() except (socket.error, IOError): LOG.debug('Ignoring IOError in safe_finish()') pass
Finish session. If it will blow up - connection was set to Keep-Alive and client dropped connection, ignore any IOError or socket error.
sockjs/tornado/basehandler.py
safe_finish
jwilk-forks/sockjs-tornado
431
python
def safe_finish(self): 'Finish session. If it will blow up - connection was set to Keep-Alive and\n client dropped connection, ignore any IOError or socket error.' try: self.finish() except (socket.error, IOError): LOG.debug('Ignoring IOError in safe_finish()') pass
def safe_finish(self): 'Finish session. If it will blow up - connection was set to Keep-Alive and\n client dropped connection, ignore any IOError or socket error.' try: self.finish() except (socket.error, IOError): LOG.debug('Ignoring IOError in safe_finish()') pass<|docstring|>Finish session. If it will blow up - connection was set to Keep-Alive and client dropped connection, ignore any IOError or socket error.<|endoftext|>
d1487395cf2abe9f266af310ef2eab4d8b80cb317ddfff952610b4bb60cfb233
@asynchronous def options(self, *args, **kwargs): 'XHR cross-domain OPTIONS handler' self.enable_cache() self.handle_session_cookie() self.preflight() if self.verify_origin(): allowed_methods = getattr(self, 'access_methods', 'OPTIONS, POST') self.set_header('Access-Control-Allow-Methods', allowed_methods) self.set_header('Allow', allowed_methods) self.set_status(204) else: self.set_status(403) self.finish()
XHR cross-domain OPTIONS handler
sockjs/tornado/basehandler.py
options
jwilk-forks/sockjs-tornado
431
python
@asynchronous def options(self, *args, **kwargs): self.enable_cache() self.handle_session_cookie() self.preflight() if self.verify_origin(): allowed_methods = getattr(self, 'access_methods', 'OPTIONS, POST') self.set_header('Access-Control-Allow-Methods', allowed_methods) self.set_header('Allow', allowed_methods) self.set_status(204) else: self.set_status(403) self.finish()
@asynchronous def options(self, *args, **kwargs): self.enable_cache() self.handle_session_cookie() self.preflight() if self.verify_origin(): allowed_methods = getattr(self, 'access_methods', 'OPTIONS, POST') self.set_header('Access-Control-Allow-Methods', allowed_methods) self.set_header('Allow', allowed_methods) self.set_status(204) else: self.set_status(403) self.finish()<|docstring|>XHR cross-domain OPTIONS handler<|endoftext|>
4ac6e4b5232231ff5310917a2d874dae455c5381ef57af63b7f9129657317e80
def preflight(self): 'Handles request authentication' origin = self.request.headers.get('Origin', '*') self.set_header('Access-Control-Allow-Origin', origin) headers = self.request.headers.get('Access-Control-Request-Headers') if headers: self.set_header('Access-Control-Allow-Headers', headers) self.set_header('Access-Control-Allow-Credentials', 'true')
Handles request authentication
sockjs/tornado/basehandler.py
preflight
jwilk-forks/sockjs-tornado
431
python
def preflight(self): origin = self.request.headers.get('Origin', '*') self.set_header('Access-Control-Allow-Origin', origin) headers = self.request.headers.get('Access-Control-Request-Headers') if headers: self.set_header('Access-Control-Allow-Headers', headers) self.set_header('Access-Control-Allow-Credentials', 'true')
def preflight(self): origin = self.request.headers.get('Origin', '*') self.set_header('Access-Control-Allow-Origin', origin) headers = self.request.headers.get('Access-Control-Request-Headers') if headers: self.set_header('Access-Control-Allow-Headers', headers) self.set_header('Access-Control-Allow-Credentials', 'true')<|docstring|>Handles request authentication<|endoftext|>
eaf44caeebf42ed46cfa282f1db5632bc79b5ea94b5e8a966cb4fcbf21be5c27
def verify_origin(self): 'Verify if request can be served' return True
Verify if request can be served
sockjs/tornado/basehandler.py
verify_origin
jwilk-forks/sockjs-tornado
431
python
def verify_origin(self): return True
def verify_origin(self): return True<|docstring|>Verify if request can be served<|endoftext|>
1c301809dc9f5376664e24d7c02006e75fba840ee3d278dab98780f89fdee63f
def test_assert_truth(self): '\n We shall contemplate truth by testing reality, via asserts.\n ' self.assertTrue(True)
We shall contemplate truth by testing reality, via asserts.
koans/about_asserts.py
test_assert_truth
c2tm/python_koans
0
python
def test_assert_truth(self): '\n \n ' self.assertTrue(True)
def test_assert_truth(self): '\n \n ' self.assertTrue(True)<|docstring|>We shall contemplate truth by testing reality, via asserts.<|endoftext|>
d571d8dcef60de679498cfd07a1da2904dec00156dd88a6a23bb9456e7487ed0
def test_assert_with_message(self): '\n Enlightenment may be more easily achieved with appropriate messages.\n ' self.assertTrue(True, 'This should be True -- Please fix this')
Enlightenment may be more easily achieved with appropriate messages.
koans/about_asserts.py
test_assert_with_message
c2tm/python_koans
0
python
def test_assert_with_message(self): '\n \n ' self.assertTrue(True, 'This should be True -- Please fix this')
def test_assert_with_message(self): '\n \n ' self.assertTrue(True, 'This should be True -- Please fix this')<|docstring|>Enlightenment may be more easily achieved with appropriate messages.<|endoftext|>
0f66e7baa800bc6e4a7b5c1a29f7083d295bd3d3354786c4b2e61697f06c9854
def test_fill_in_values(self): '\n Sometimes we will ask you to fill in the values\n ' self.assertEqual(2, (1 + 1))
Sometimes we will ask you to fill in the values
koans/about_asserts.py
test_fill_in_values
c2tm/python_koans
0
python
def test_fill_in_values(self): '\n \n ' self.assertEqual(2, (1 + 1))
def test_fill_in_values(self): '\n \n ' self.assertEqual(2, (1 + 1))<|docstring|>Sometimes we will ask you to fill in the values<|endoftext|>
0b1cf7aaf9a581ea7b2636cd1e4f8d0b02ed2e3a60a6a62e9bbd5bc7c0918b99
def test_assert_equality(self): '\n To understand reality, we must compare our expectations against reality.\n ' expected_value = 2 actual_value = (1 + 1) self.assertTrue((expected_value == actual_value))
To understand reality, we must compare our expectations against reality.
koans/about_asserts.py
test_assert_equality
c2tm/python_koans
0
python
def test_assert_equality(self): '\n \n ' expected_value = 2 actual_value = (1 + 1) self.assertTrue((expected_value == actual_value))
def test_assert_equality(self): '\n \n ' expected_value = 2 actual_value = (1 + 1) self.assertTrue((expected_value == actual_value))<|docstring|>To understand reality, we must compare our expectations against reality.<|endoftext|>
4e34a9cbba9ea48ff1f987ef385812d0b1238941b13a8ea9ec59333b83a1aa57
def test_a_better_way_of_asserting_equality(self): '\n Some ways of asserting equality are better than others.\n ' expected_value = 2 actual_value = (1 + 1) self.assertEqual(expected_value, actual_value)
Some ways of asserting equality are better than others.
koans/about_asserts.py
test_a_better_way_of_asserting_equality
c2tm/python_koans
0
python
def test_a_better_way_of_asserting_equality(self): '\n \n ' expected_value = 2 actual_value = (1 + 1) self.assertEqual(expected_value, actual_value)
def test_a_better_way_of_asserting_equality(self): '\n \n ' expected_value = 2 actual_value = (1 + 1) self.assertEqual(expected_value, actual_value)<|docstring|>Some ways of asserting equality are better than others.<|endoftext|>
94ec2ffdca2a1d08eb1d8d91981a8492c8f7fbadde23d40eba1e045d63bd2d99
def test_that_unittest_asserts_work_the_same_way_as_python_asserts(self): '\n Understand what lies within.\n ' assert True
Understand what lies within.
koans/about_asserts.py
test_that_unittest_asserts_work_the_same_way_as_python_asserts
c2tm/python_koans
0
python
def test_that_unittest_asserts_work_the_same_way_as_python_asserts(self): '\n \n ' assert True
def test_that_unittest_asserts_work_the_same_way_as_python_asserts(self): '\n \n ' assert True<|docstring|>Understand what lies within.<|endoftext|>
cc168e4b6dbb199d715bdcf88b493747afd9b17289aaf41964eb405b5e67c3fe
def test_that_sometimes_we_need_to_know_the_class_type(self): '\n What is in a class name?\n ' self.assertEqual(str, 'navel'.__class__)
What is in a class name?
koans/about_asserts.py
test_that_sometimes_we_need_to_know_the_class_type
c2tm/python_koans
0
python
def test_that_sometimes_we_need_to_know_the_class_type(self): '\n \n ' self.assertEqual(str, 'navel'.__class__)
def test_that_sometimes_we_need_to_know_the_class_type(self): '\n \n ' self.assertEqual(str, 'navel'.__class__)<|docstring|>What is in a class name?<|endoftext|>
4c21f8d45719a4eda4ab601523ecd49ff427cde2a9b68b336eec7a07cece0462
def test_get_move_type_general(min_lw, min_lw2): 'It should identify general moves.' from_loc = min_lw.wells()[0].top() to_loc = min_lw2.wells()[0].top() result = get_move_type(from_loc, to_loc) assert (result == MoveType.GENERAL_ARC)
It should identify general moves.
api/tests/opentrons/protocols/geometry/test_planning.py
test_get_move_type_general
y3rsh/opentrons
235
python
def test_get_move_type_general(min_lw, min_lw2): from_loc = min_lw.wells()[0].top() to_loc = min_lw2.wells()[0].top() result = get_move_type(from_loc, to_loc) assert (result == MoveType.GENERAL_ARC)
def test_get_move_type_general(min_lw, min_lw2): from_loc = min_lw.wells()[0].top() to_loc = min_lw2.wells()[0].top() result = get_move_type(from_loc, to_loc) assert (result == MoveType.GENERAL_ARC)<|docstring|>It should identify general moves.<|endoftext|>
ad570896c4932926ace1df7dd228ecf7fbec12344c948bd23d31e409f23de47d
def test_get_move_type_in_labware(min_lw): 'It should identify general moves.' from_loc = min_lw.wells()[0].top() to_loc = min_lw.wells()[1].top() result = get_move_type(from_loc, to_loc) assert (result == MoveType.IN_LABWARE_ARC)
It should identify general moves.
api/tests/opentrons/protocols/geometry/test_planning.py
test_get_move_type_in_labware
y3rsh/opentrons
235
python
def test_get_move_type_in_labware(min_lw): from_loc = min_lw.wells()[0].top() to_loc = min_lw.wells()[1].top() result = get_move_type(from_loc, to_loc) assert (result == MoveType.IN_LABWARE_ARC)
def test_get_move_type_in_labware(min_lw): from_loc = min_lw.wells()[0].top() to_loc = min_lw.wells()[1].top() result = get_move_type(from_loc, to_loc) assert (result == MoveType.IN_LABWARE_ARC)<|docstring|>It should identify general moves.<|endoftext|>
137adaea1ab98f97ec140c948169739e8c7cbcbe7c584c63d168bc89d9df8068
def test_get_move_type_in_well(min_lw): 'It should identify general moves.' from_loc = min_lw.wells()[0].top() to_loc = min_lw.wells()[0].bottom() result = get_move_type(from_loc, to_loc) assert (result == MoveType.DIRECT)
It should identify general moves.
api/tests/opentrons/protocols/geometry/test_planning.py
test_get_move_type_in_well
y3rsh/opentrons
235
python
def test_get_move_type_in_well(min_lw): from_loc = min_lw.wells()[0].top() to_loc = min_lw.wells()[0].bottom() result = get_move_type(from_loc, to_loc) assert (result == MoveType.DIRECT)
def test_get_move_type_in_well(min_lw): from_loc = min_lw.wells()[0].top() to_loc = min_lw.wells()[0].bottom() result = get_move_type(from_loc, to_loc) assert (result == MoveType.DIRECT)<|docstring|>It should identify general moves.<|endoftext|>
74aa2b26b639d0d5890625fd0d85074d9550b833f357c1b0b469255307d31401
def test_get_move_type_general_with_force_direct(min_lw, min_lw2): 'It should identify general moves.' from_loc = min_lw.wells()[0].top() to_loc = min_lw2.wells()[0].top() result = get_move_type(from_loc, to_loc, force_direct=True) assert (result == MoveType.DIRECT)
It should identify general moves.
api/tests/opentrons/protocols/geometry/test_planning.py
test_get_move_type_general_with_force_direct
y3rsh/opentrons
235
python
def test_get_move_type_general_with_force_direct(min_lw, min_lw2): from_loc = min_lw.wells()[0].top() to_loc = min_lw2.wells()[0].top() result = get_move_type(from_loc, to_loc, force_direct=True) assert (result == MoveType.DIRECT)
def test_get_move_type_general_with_force_direct(min_lw, min_lw2): from_loc = min_lw.wells()[0].top() to_loc = min_lw2.wells()[0].top() result = get_move_type(from_loc, to_loc, force_direct=True) assert (result == MoveType.DIRECT)<|docstring|>It should identify general moves.<|endoftext|>
c1c18f9ab5b52c21de15e6891d8a439c6bbb9fc700cc7ae2fd55485e7a1e0732
def load_dia_binary(self, path: Text): 'Load existing examples as constraints for diarization\n\n This will set (or overwrite) the following attributes and return them\n * cannot_link_time\n * must_link_time\n * dont_know_time\n\n Parameters\n ----------\n path : Text\n Only load examples for this file.\n ' db = connect() examples = [eg for eg in db.get_dataset(self.dataset) if ((eg['recipe'] == 'pyannote.dia.binary') and (eg['path'] == path))] cannot_link: CONSTRAINTS = [(eg['t1'], eg['t2']) for eg in examples if (eg['answer'] == 'reject')] must_link: CONSTRAINTS = [(eg['t1'], eg['t2']) for eg in examples if (eg['answer'] == 'accept')] dont_know: CONSTRAINTS = [(eg['t1'], eg['t2']) for eg in examples if (eg['answer'] not in ['accept', 'reject'])] if (len(cannot_link) > 0): prodigy.log(f'RECIPE: {path}: init: {len(cannot_link)} cannot link constraints') if (len(must_link) > 0): prodigy.log(f'RECIPE: {path}: init: {len(must_link)} must link constraints') cannot_link = propagate_constraints(cannot_link, must_link) self.cannot_link_time = cannot_link self.must_link_time = must_link self.dont_know_time = dont_know
Load existing examples as constraints for diarization This will set (or overwrite) the following attributes and return them * cannot_link_time * must_link_time * dont_know_time Parameters ---------- path : Text Only load examples for this file.
pyannote/audio/interactive/recipes/dia.py
load_dia_binary
stonelazy/pyannote-audio
1,543
python
def load_dia_binary(self, path: Text): 'Load existing examples as constraints for diarization\n\n This will set (or overwrite) the following attributes and return them\n * cannot_link_time\n * must_link_time\n * dont_know_time\n\n Parameters\n ----------\n path : Text\n Only load examples for this file.\n ' db = connect() examples = [eg for eg in db.get_dataset(self.dataset) if ((eg['recipe'] == 'pyannote.dia.binary') and (eg['path'] == path))] cannot_link: CONSTRAINTS = [(eg['t1'], eg['t2']) for eg in examples if (eg['answer'] == 'reject')] must_link: CONSTRAINTS = [(eg['t1'], eg['t2']) for eg in examples if (eg['answer'] == 'accept')] dont_know: CONSTRAINTS = [(eg['t1'], eg['t2']) for eg in examples if (eg['answer'] not in ['accept', 'reject'])] if (len(cannot_link) > 0): prodigy.log(f'RECIPE: {path}: init: {len(cannot_link)} cannot link constraints') if (len(must_link) > 0): prodigy.log(f'RECIPE: {path}: init: {len(must_link)} must link constraints') cannot_link = propagate_constraints(cannot_link, must_link) self.cannot_link_time = cannot_link self.must_link_time = must_link self.dont_know_time = dont_know
def load_dia_binary(self, path: Text): 'Load existing examples as constraints for diarization\n\n This will set (or overwrite) the following attributes and return them\n * cannot_link_time\n * must_link_time\n * dont_know_time\n\n Parameters\n ----------\n path : Text\n Only load examples for this file.\n ' db = connect() examples = [eg for eg in db.get_dataset(self.dataset) if ((eg['recipe'] == 'pyannote.dia.binary') and (eg['path'] == path))] cannot_link: CONSTRAINTS = [(eg['t1'], eg['t2']) for eg in examples if (eg['answer'] == 'reject')] must_link: CONSTRAINTS = [(eg['t1'], eg['t2']) for eg in examples if (eg['answer'] == 'accept')] dont_know: CONSTRAINTS = [(eg['t1'], eg['t2']) for eg in examples if (eg['answer'] not in ['accept', 'reject'])] if (len(cannot_link) > 0): prodigy.log(f'RECIPE: {path}: init: {len(cannot_link)} cannot link constraints') if (len(must_link) > 0): prodigy.log(f'RECIPE: {path}: init: {len(must_link)} must link constraints') cannot_link = propagate_constraints(cannot_link, must_link) self.cannot_link_time = cannot_link self.must_link_time = must_link self.dont_know_time = dont_know<|docstring|>Load existing examples as constraints for diarization This will set (or overwrite) the following attributes and return them * cannot_link_time * must_link_time * dont_know_time Parameters ---------- path : Text Only load examples for this file.<|endoftext|>
3221f532e9d556f10aaf9726e8f89d3d9bfd12850a71c55a7509b62960093dad
def complete_build_flow_test(self): 'In local user folder' client = TestClient() command = os.sep.join(['.', 'bin', 'say_hello']) for pure_c in (False, True): for (install, lang, static) in [('install', 0, True), ('install -o language=1', 1, True), ('install -o language=1 -o static=False', 1, False), ('install -o static=False', 0, False)]: dll_export = (client.default_compiler_visual_studio and (not static)) files = cpp_hello_conan_files('Hello0', '0.1', dll_export=dll_export, pure_c=pure_c) client.save(files, clean_first=True) client.run(install) time.sleep(1) client.run('build') client.runner(command, cwd=client.current_folder) msg = ('Hello' if (lang == 0) else 'Hola') self.assertIn(('%s Hello0' % msg), client.user_io.out) conan_info_path = os.path.join(client.current_folder, CONANINFO) conan_info = ConanInfo.loads(load(conan_info_path)) self.assertTrue((conan_info.full_options.language == lang)) if static: self.assertTrue(conan_info.full_options.static) else: self.assertFalse(conan_info.full_options.static)
In local user folder
conans/test/integration/basic_build_test.py
complete_build_flow_test
tru/conan
1
python
def complete_build_flow_test(self): client = TestClient() command = os.sep.join(['.', 'bin', 'say_hello']) for pure_c in (False, True): for (install, lang, static) in [('install', 0, True), ('install -o language=1', 1, True), ('install -o language=1 -o static=False', 1, False), ('install -o static=False', 0, False)]: dll_export = (client.default_compiler_visual_studio and (not static)) files = cpp_hello_conan_files('Hello0', '0.1', dll_export=dll_export, pure_c=pure_c) client.save(files, clean_first=True) client.run(install) time.sleep(1) client.run('build') client.runner(command, cwd=client.current_folder) msg = ('Hello' if (lang == 0) else 'Hola') self.assertIn(('%s Hello0' % msg), client.user_io.out) conan_info_path = os.path.join(client.current_folder, CONANINFO) conan_info = ConanInfo.loads(load(conan_info_path)) self.assertTrue((conan_info.full_options.language == lang)) if static: self.assertTrue(conan_info.full_options.static) else: self.assertFalse(conan_info.full_options.static)
def complete_build_flow_test(self): client = TestClient() command = os.sep.join(['.', 'bin', 'say_hello']) for pure_c in (False, True): for (install, lang, static) in [('install', 0, True), ('install -o language=1', 1, True), ('install -o language=1 -o static=False', 1, False), ('install -o static=False', 0, False)]: dll_export = (client.default_compiler_visual_studio and (not static)) files = cpp_hello_conan_files('Hello0', '0.1', dll_export=dll_export, pure_c=pure_c) client.save(files, clean_first=True) client.run(install) time.sleep(1) client.run('build') client.runner(command, cwd=client.current_folder) msg = ('Hello' if (lang == 0) else 'Hola') self.assertIn(('%s Hello0' % msg), client.user_io.out) conan_info_path = os.path.join(client.current_folder, CONANINFO) conan_info = ConanInfo.loads(load(conan_info_path)) self.assertTrue((conan_info.full_options.language == lang)) if static: self.assertTrue(conan_info.full_options.static) else: self.assertFalse(conan_info.full_options.static)<|docstring|>In local user folder<|endoftext|>
e83166a321a410325c217a86d708bd15fdb5cd9b9f08c3fd493ad652b051de2c
def load_scramble_data(parameters, logger): '\n Load and randomly scramble data for train and test split\n \n @params:\n parameters - Required : Parameter object to read config settings (Parameter)\n logger - Required : Logger object for logging to console and file (Logger)\n ' data_warnings(parameters, logger) logger.info('Preparing training/validation set data') logger.info('') train_data = load_clean_data(parameters, logger, 'TRAINING_DATA') logger.info('Preparing test set data') logger.info('') test_data = load_clean_data(parameters, logger, 'TEST_DATA') train_cols_to_use = parameters.config['TRAINING_DATA']['Cols_To_Use'].split(',') test_cols_to_use = parameters.config['TEST_DATA']['Cols_To_Use'].split(',') train_label_col = parameters.config['TRAINING_DATA']['Label_Col'] test_label_col = parameters.config['TEST_DATA']['Label_Col'] (X_train, X_test_discard, y_train, y_test_discard) = train_test_split(train_data[train_cols_to_use], train_data[train_label_col], stratify=train_data[train_label_col], test_size=(1 - float(parameters.config['TRAINING_DATA']['Split_Fraction'])), random_state=int(parameters.config['TRAINING_DATA']['Split_Seed'])) (X_train_discard, X_test, y_train_discard, y_test) = train_test_split(test_data[test_cols_to_use], test_data[test_label_col], stratify=test_data[test_label_col], test_size=float(parameters.config['TEST_DATA']['Split_Fraction']), random_state=int(parameters.config['TEST_DATA']['Split_Seed'])) return (X_train, X_test, y_train, y_test)
Load and randomly scramble data for train and test split @params: parameters - Required : Parameter object to read config settings (Parameter) logger - Required : Logger object for logging to console and file (Logger)
preprocessing.py
load_scramble_data
nonproliferation/mimosas
1
python
def load_scramble_data(parameters, logger): '\n Load and randomly scramble data for train and test split\n \n @params:\n parameters - Required : Parameter object to read config settings (Parameter)\n logger - Required : Logger object for logging to console and file (Logger)\n ' data_warnings(parameters, logger) logger.info('Preparing training/validation set data') logger.info() train_data = load_clean_data(parameters, logger, 'TRAINING_DATA') logger.info('Preparing test set data') logger.info() test_data = load_clean_data(parameters, logger, 'TEST_DATA') train_cols_to_use = parameters.config['TRAINING_DATA']['Cols_To_Use'].split(',') test_cols_to_use = parameters.config['TEST_DATA']['Cols_To_Use'].split(',') train_label_col = parameters.config['TRAINING_DATA']['Label_Col'] test_label_col = parameters.config['TEST_DATA']['Label_Col'] (X_train, X_test_discard, y_train, y_test_discard) = train_test_split(train_data[train_cols_to_use], train_data[train_label_col], stratify=train_data[train_label_col], test_size=(1 - float(parameters.config['TRAINING_DATA']['Split_Fraction'])), random_state=int(parameters.config['TRAINING_DATA']['Split_Seed'])) (X_train_discard, X_test, y_train_discard, y_test) = train_test_split(test_data[test_cols_to_use], test_data[test_label_col], stratify=test_data[test_label_col], test_size=float(parameters.config['TEST_DATA']['Split_Fraction']), random_state=int(parameters.config['TEST_DATA']['Split_Seed'])) return (X_train, X_test, y_train, y_test)
def load_scramble_data(parameters, logger): '\n Load and randomly scramble data for train and test split\n \n @params:\n parameters - Required : Parameter object to read config settings (Parameter)\n logger - Required : Logger object for logging to console and file (Logger)\n ' data_warnings(parameters, logger) logger.info('Preparing training/validation set data') logger.info() train_data = load_clean_data(parameters, logger, 'TRAINING_DATA') logger.info('Preparing test set data') logger.info() test_data = load_clean_data(parameters, logger, 'TEST_DATA') train_cols_to_use = parameters.config['TRAINING_DATA']['Cols_To_Use'].split(',') test_cols_to_use = parameters.config['TEST_DATA']['Cols_To_Use'].split(',') train_label_col = parameters.config['TRAINING_DATA']['Label_Col'] test_label_col = parameters.config['TEST_DATA']['Label_Col'] (X_train, X_test_discard, y_train, y_test_discard) = train_test_split(train_data[train_cols_to_use], train_data[train_label_col], stratify=train_data[train_label_col], test_size=(1 - float(parameters.config['TRAINING_DATA']['Split_Fraction'])), random_state=int(parameters.config['TRAINING_DATA']['Split_Seed'])) (X_train_discard, X_test, y_train_discard, y_test) = train_test_split(test_data[test_cols_to_use], test_data[test_label_col], stratify=test_data[test_label_col], test_size=float(parameters.config['TEST_DATA']['Split_Fraction']), random_state=int(parameters.config['TEST_DATA']['Split_Seed'])) return (X_train, X_test, y_train, y_test)<|docstring|>Load and randomly scramble data for train and test split @params: parameters - Required : Parameter object to read config settings (Parameter) logger - Required : Logger object for logging to console and file (Logger)<|endoftext|>
a132bd6ae860b0c4f9f5546f261e1a788d57c37c7f09179c02317e8b72d7c590
def load_clean_data(parameters, logger, mode='TRAINING_DATA'): '\n Load and clean data. Used as a helper for load_scramble_data\n \n @params:\n parameters - Required : Parameter object to read config settings (Parameter)\n logger - Required : Logger object for logging to console and file (Logger)\n mode - Optional : Pull data from different locations in config file (Str)\n ' if ('background_correction' in parameters.config[mode]['Data_Options'].split(',')): if (not os.path.exists(os.path.join(parameters.main_path, parameters.config[mode]['Background_Data']))): data = pd.read_csv(os.path.join(parameters.main_path, parameters.config[mode]['Data'])) logger.warning('Background Correction has been specified in CONFIG, but the background data specified at') logger.warning(parameters.config[mode]['Background_Data']) logger.warning('does not exist.') logger.warning('Loading input data WITHOUT background correction.') logger.warning('') else: data = background_correction(parameters, logger, mode) else: data = pd.read_csv(os.path.join(parameters.main_path, parameters.config[mode]['Data'])) data.loc[(:, 'utc-time')] = pd.to_datetime(data['time'], unit='s', utc=True) data_options = parameters.config[mode]['Data_Options'].split(',') cols_to_standardize = parameters.config[mode]['Cols_To_Standardize'].split(',') cols_to_minmaxscale = parameters.config[mode]['Cols_To_MinMaxScale'].split(',') if (not parameters.config[mode]['Device_ID_Col']): parameters.config[mode]['Device_ID_Col'] = 'sensor_id' data.loc[(:, 'sensor_id')] = 1 if ('remove_outliers' in data_options): data = remove_outliers(data, parameters, logger, mode) for idx in data.groupby([parameters.config[mode]['Device_ID_Col']]).groups.values(): grouped_data = data.loc[idx] if ('standardize' in data_options): data.loc[(idx, :)] = standardize_features(grouped_data, cols_to_standardize) if ('minmaxscale' in data_options): data.loc[(idx, :)] = minmaxscale_features(grouped_data, cols_to_minmaxscale) return data
Load and clean data. Used as a helper for load_scramble_data @params: parameters - Required : Parameter object to read config settings (Parameter) logger - Required : Logger object for logging to console and file (Logger) mode - Optional : Pull data from different locations in config file (Str)
preprocessing.py
load_clean_data
nonproliferation/mimosas
1
python
def load_clean_data(parameters, logger, mode='TRAINING_DATA'): '\n Load and clean data. Used as a helper for load_scramble_data\n \n @params:\n parameters - Required : Parameter object to read config settings (Parameter)\n logger - Required : Logger object for logging to console and file (Logger)\n mode - Optional : Pull data from different locations in config file (Str)\n ' if ('background_correction' in parameters.config[mode]['Data_Options'].split(',')): if (not os.path.exists(os.path.join(parameters.main_path, parameters.config[mode]['Background_Data']))): data = pd.read_csv(os.path.join(parameters.main_path, parameters.config[mode]['Data'])) logger.warning('Background Correction has been specified in CONFIG, but the background data specified at') logger.warning(parameters.config[mode]['Background_Data']) logger.warning('does not exist.') logger.warning('Loading input data WITHOUT background correction.') logger.warning() else: data = background_correction(parameters, logger, mode) else: data = pd.read_csv(os.path.join(parameters.main_path, parameters.config[mode]['Data'])) data.loc[(:, 'utc-time')] = pd.to_datetime(data['time'], unit='s', utc=True) data_options = parameters.config[mode]['Data_Options'].split(',') cols_to_standardize = parameters.config[mode]['Cols_To_Standardize'].split(',') cols_to_minmaxscale = parameters.config[mode]['Cols_To_MinMaxScale'].split(',') if (not parameters.config[mode]['Device_ID_Col']): parameters.config[mode]['Device_ID_Col'] = 'sensor_id' data.loc[(:, 'sensor_id')] = 1 if ('remove_outliers' in data_options): data = remove_outliers(data, parameters, logger, mode) for idx in data.groupby([parameters.config[mode]['Device_ID_Col']]).groups.values(): grouped_data = data.loc[idx] if ('standardize' in data_options): data.loc[(idx, :)] = standardize_features(grouped_data, cols_to_standardize) if ('minmaxscale' in data_options): data.loc[(idx, :)] = minmaxscale_features(grouped_data, cols_to_minmaxscale) return data
def load_clean_data(parameters, logger, mode='TRAINING_DATA'): '\n Load and clean data. Used as a helper for load_scramble_data\n \n @params:\n parameters - Required : Parameter object to read config settings (Parameter)\n logger - Required : Logger object for logging to console and file (Logger)\n mode - Optional : Pull data from different locations in config file (Str)\n ' if ('background_correction' in parameters.config[mode]['Data_Options'].split(',')): if (not os.path.exists(os.path.join(parameters.main_path, parameters.config[mode]['Background_Data']))): data = pd.read_csv(os.path.join(parameters.main_path, parameters.config[mode]['Data'])) logger.warning('Background Correction has been specified in CONFIG, but the background data specified at') logger.warning(parameters.config[mode]['Background_Data']) logger.warning('does not exist.') logger.warning('Loading input data WITHOUT background correction.') logger.warning() else: data = background_correction(parameters, logger, mode) else: data = pd.read_csv(os.path.join(parameters.main_path, parameters.config[mode]['Data'])) data.loc[(:, 'utc-time')] = pd.to_datetime(data['time'], unit='s', utc=True) data_options = parameters.config[mode]['Data_Options'].split(',') cols_to_standardize = parameters.config[mode]['Cols_To_Standardize'].split(',') cols_to_minmaxscale = parameters.config[mode]['Cols_To_MinMaxScale'].split(',') if (not parameters.config[mode]['Device_ID_Col']): parameters.config[mode]['Device_ID_Col'] = 'sensor_id' data.loc[(:, 'sensor_id')] = 1 if ('remove_outliers' in data_options): data = remove_outliers(data, parameters, logger, mode) for idx in data.groupby([parameters.config[mode]['Device_ID_Col']]).groups.values(): grouped_data = data.loc[idx] if ('standardize' in data_options): data.loc[(idx, :)] = standardize_features(grouped_data, cols_to_standardize) if ('minmaxscale' in data_options): data.loc[(idx, :)] = minmaxscale_features(grouped_data, cols_to_minmaxscale) return data<|docstring|>Load 
and clean data. Used as a helper for load_scramble_data @params: parameters - Required : Parameter object to read config settings (Parameter) logger - Required : Logger object for logging to console and file (Logger) mode - Optional : Pull data from different locations in config file (Str)<|endoftext|>
9719ccd46160adcf5182df7ff46a0117bff56c8ddddd445cba4c829c5fb733bd
def data_warnings(parameters, logger): '\n Check for configuration options which may produce erroneous results without\n impeding execution of the code. Log any such cases.\n \n @params:\n parameters - Required : Parameter object to read config settings (Parameter)\n logger - Required : Logger object for logging to console and file (Logger)\n ' if (parameters.config['TRAINING_DATA']['Data'] == parameters.config['TEST_DATA']['Data']): if (parameters.config['TRAINING_DATA']['Split_Seed'] != parameters.config['TEST_DATA']['Split_Seed']): logger.warning('The seed for the pseudoRNG is different for the training and test data configuration options.') logger.warning('This is likely to result in shared samples, which bias prediction performance upward.') logger.warning('') else: split_frac_sum = (float(parameters.config['TRAINING_DATA']['Split_Fraction']) + float(parameters.config['TEST_DATA']['Split_Fraction'])) if (not np.isclose(split_frac_sum, 1.0)): if (split_frac_sum > 1.0): logger.warning('The split fractions for the training and test data configurations sum to greater than 1.') logger.warning('This guarantees that the trainig and test datasets will share samples, which bias prediction performance upward.') logger.warning('') del split_frac_sum for mode in ['TRAINING_DATA', 'TEST_DATA']: if (('standardize' in parameters.config[mode]['Data_Options'].split(',')) and ('minmaxscale' in parameters.config[mode]['Data_Options'].split(','))): if set(parameters.config[mode]['Cols_To_Standardize'].split(',')).intersection(set(parameters.config[mode]['Cols_To_MinMaxScale'].split(','))): logger.warning('Input feature(s) in the {} data are being subjected to multiple scaling/normalization functions:'.format(mode)) for feat in set(parameters.config[mode]['Cols_To_Standardize'].split(',')).intersection(set(parameters.config[mode]['Cols_To_MinMaxScale'].split(','))): logger.warning(feat) logger.warning('') return None
Check for configuration options which may produce erroneous results without impeding execution of the code. Log any such cases. @params: parameters - Required : Parameter object to read config settings (Parameter) logger - Required : Logger object for logging to console and file (Logger)
preprocessing.py
data_warnings
nonproliferation/mimosas
1
python
def data_warnings(parameters, logger): '\n Check for configuration options which may produce erroneous results without\n impeding execution of the code. Log any such cases.\n \n @params:\n parameters - Required : Parameter object to read config settings (Parameter)\n logger - Required : Logger object for logging to console and file (Logger)\n ' if (parameters.config['TRAINING_DATA']['Data'] == parameters.config['TEST_DATA']['Data']): if (parameters.config['TRAINING_DATA']['Split_Seed'] != parameters.config['TEST_DATA']['Split_Seed']): logger.warning('The seed for the pseudoRNG is different for the training and test data configuration options.') logger.warning('This is likely to result in shared samples, which bias prediction performance upward.') logger.warning() else: split_frac_sum = (float(parameters.config['TRAINING_DATA']['Split_Fraction']) + float(parameters.config['TEST_DATA']['Split_Fraction'])) if (not np.isclose(split_frac_sum, 1.0)): if (split_frac_sum > 1.0): logger.warning('The split fractions for the training and test data configurations sum to greater than 1.') logger.warning('This guarantees that the trainig and test datasets will share samples, which bias prediction performance upward.') logger.warning() del split_frac_sum for mode in ['TRAINING_DATA', 'TEST_DATA']: if (('standardize' in parameters.config[mode]['Data_Options'].split(',')) and ('minmaxscale' in parameters.config[mode]['Data_Options'].split(','))): if set(parameters.config[mode]['Cols_To_Standardize'].split(',')).intersection(set(parameters.config[mode]['Cols_To_MinMaxScale'].split(','))): logger.warning('Input feature(s) in the {} data are being subjected to multiple scaling/normalization functions:'.format(mode)) for feat in set(parameters.config[mode]['Cols_To_Standardize'].split(',')).intersection(set(parameters.config[mode]['Cols_To_MinMaxScale'].split(','))): logger.warning(feat) logger.warning() return None
def data_warnings(parameters, logger): '\n Check for configuration options which may produce erroneous results without\n impeding execution of the code. Log any such cases.\n \n @params:\n parameters - Required : Parameter object to read config settings (Parameter)\n logger - Required : Logger object for logging to console and file (Logger)\n ' if (parameters.config['TRAINING_DATA']['Data'] == parameters.config['TEST_DATA']['Data']): if (parameters.config['TRAINING_DATA']['Split_Seed'] != parameters.config['TEST_DATA']['Split_Seed']): logger.warning('The seed for the pseudoRNG is different for the training and test data configuration options.') logger.warning('This is likely to result in shared samples, which bias prediction performance upward.') logger.warning() else: split_frac_sum = (float(parameters.config['TRAINING_DATA']['Split_Fraction']) + float(parameters.config['TEST_DATA']['Split_Fraction'])) if (not np.isclose(split_frac_sum, 1.0)): if (split_frac_sum > 1.0): logger.warning('The split fractions for the training and test data configurations sum to greater than 1.') logger.warning('This guarantees that the trainig and test datasets will share samples, which bias prediction performance upward.') logger.warning() del split_frac_sum for mode in ['TRAINING_DATA', 'TEST_DATA']: if (('standardize' in parameters.config[mode]['Data_Options'].split(',')) and ('minmaxscale' in parameters.config[mode]['Data_Options'].split(','))): if set(parameters.config[mode]['Cols_To_Standardize'].split(',')).intersection(set(parameters.config[mode]['Cols_To_MinMaxScale'].split(','))): logger.warning('Input feature(s) in the {} data are being subjected to multiple scaling/normalization functions:'.format(mode)) for feat in set(parameters.config[mode]['Cols_To_Standardize'].split(',')).intersection(set(parameters.config[mode]['Cols_To_MinMaxScale'].split(','))): logger.warning(feat) logger.warning() return None<|docstring|>Check for configuration options which may produce 
erroneous results without impeding execution of the code. Log any such cases. @params: parameters - Required : Parameter object to read config settings (Parameter) logger - Required : Logger object for logging to console and file (Logger)<|endoftext|>
bdc1bfcf0eca27f5c9bc8a06d9be9746f5b5374364b31fe42c34f92c38719c17
def standardize_features(data, columns): '\n Standardize data in the specified columns to have zero mean and unit variance.\n \n @params:\n data - Required : the data from a single device which may contain remove_outliers (list)\n columns - Required : Params object representing model parameters (Params)\n ' scaler = StandardScaler() standardized_data = scaler.fit_transform(data[columns]) for (idx, col) in enumerate(columns): data.loc[(:, col)] = standardized_data[(:, idx)] return data
Standardize data in the specified columns to have zero mean and unit variance. @params: data - Required : the data from a single device which may contain remove_outliers (list) columns - Required : Params object representing model parameters (Params)
preprocessing.py
standardize_features
nonproliferation/mimosas
1
python
def standardize_features(data, columns): '\n Standardize data in the specified columns to have zero mean and unit variance.\n \n @params:\n data - Required : the data from a single device which may contain remove_outliers (list)\n columns - Required : Params object representing model parameters (Params)\n ' scaler = StandardScaler() standardized_data = scaler.fit_transform(data[columns]) for (idx, col) in enumerate(columns): data.loc[(:, col)] = standardized_data[(:, idx)] return data
def standardize_features(data, columns): '\n Standardize data in the specified columns to have zero mean and unit variance.\n \n @params:\n data - Required : the data from a single device which may contain remove_outliers (list)\n columns - Required : Params object representing model parameters (Params)\n ' scaler = StandardScaler() standardized_data = scaler.fit_transform(data[columns]) for (idx, col) in enumerate(columns): data.loc[(:, col)] = standardized_data[(:, idx)] return data<|docstring|>Standardize data in the specified columns to have zero mean and unit variance. @params: data - Required : the data from a single device which may contain remove_outliers (list) columns - Required : Params object representing model parameters (Params)<|endoftext|>
76694c4d528ffb719c5f021c42f82250643a884136f5f049f396a6d75ec38275
def minmaxscale_features(data, columns): '\n Perform Min/Max Scaling on the given columns to map them linearly onto [0, 1].\n \n @params:\n data - Required : the data from a single device which may contain remove_outliers (list)\n columns - Required : Params object representing model parameters (Params)\n ' scaler = MinMaxScaler() scaled_data = scaler.fit_transform(data[columns]) for (idx, col) in enumerate(columns): data.loc[(:, col)] = scaled_data[(:, idx)] return data
Perform Min/Max Scaling on the given columns to map them linearly onto [0, 1]. @params: data - Required : the data from a single device which may contain remove_outliers (list) columns - Required : Params object representing model parameters (Params)
preprocessing.py
minmaxscale_features
nonproliferation/mimosas
1
python
def minmaxscale_features(data, columns): '\n Perform Min/Max Scaling on the given columns to map them linearly onto [0, 1].\n \n @params:\n data - Required : the data from a single device which may contain remove_outliers (list)\n columns - Required : Params object representing model parameters (Params)\n ' scaler = MinMaxScaler() scaled_data = scaler.fit_transform(data[columns]) for (idx, col) in enumerate(columns): data.loc[(:, col)] = scaled_data[(:, idx)] return data
def minmaxscale_features(data, columns): '\n Perform Min/Max Scaling on the given columns to map them linearly onto [0, 1].\n \n @params:\n data - Required : the data from a single device which may contain remove_outliers (list)\n columns - Required : Params object representing model parameters (Params)\n ' scaler = MinMaxScaler() scaled_data = scaler.fit_transform(data[columns]) for (idx, col) in enumerate(columns): data.loc[(:, col)] = scaled_data[(:, idx)] return data<|docstring|>Perform Min/Max Scaling on the given columns to map them linearly onto [0, 1]. @params: data - Required : the data from a single device which may contain remove_outliers (list) columns - Required : Params object representing model parameters (Params)<|endoftext|>
a9c152230a92c59fef62def24ff2d049df8f2711ce6b03a4eeaec86b6ec0442e
def remove_outliers(data, parameters, logger, mode): '\n Remove outlier data likely to correspond to experimenter perturbation.\n \n @params:\n data - Required : the data from a single device which may contain remove_outliers (list)\n columns - Required : Params object representing model parameters (Params)\n ' cols_to_cull_on = parameters.config[mode]['Remove_Outlier_Cols'].split(',') ids = list(data[parameters.config[mode]['Device_ID_Col']].unique()) threshold = float(parameters.config[mode]['Outlier_MADs_Threshold']) data_by_id = {} full = [] cut = [] for device in ids: data_by_id[device] = data[(data[parameters.config[mode]['Device_ID_Col']] == device)] full.append(float(len(data_by_id[device].index))) lower_cut = (data_by_id[device].mean() - (threshold * data_by_id[device].mad())) upper_cut = (data_by_id[device].mean() + (threshold * data_by_id[device].mad())) for prod in cols_to_cull_on: data_by_id[device] = data_by_id[device][((data_by_id[device][prod] >= lower_cut[prod]) & (data_by_id[device][prod] <= upper_cut[prod]))] cut.append(float(len(data_by_id[device].index))) data_cleaned = pd.concat([data_by_id[device] for device in data_by_id.keys()]) data = data_cleaned.sort_values(by=['time']).reset_index() if (parameters.config['MAIN']['Verbose'] == 'True'): logger.info('Cleaning statistics:') logger.info('{} total raw data points'.format(int(sum(full)))) logger.info('{} data points after cleaning'.format(int(sum(cut)))) logger.info("The fraction of each device's events removed by cleaning is:") for i in range(len(full)): logger.info('ID {}: {:0.4f}'.format(str(ids[i]).rjust(max(list(map(len, map(str, ids))))), (1.0 - (cut[i] / full[i])))) logger.info('') return data
Remove outlier data likely to correspond to experimenter perturbation. @params: data - Required : the data from a single device which may contain remove_outliers (list) columns - Required : Params object representing model parameters (Params)
preprocessing.py
remove_outliers
nonproliferation/mimosas
1
python
def remove_outliers(data, parameters, logger, mode): '\n Remove outlier data likely to correspond to experimenter perturbation.\n \n @params:\n data - Required : the data from a single device which may contain remove_outliers (list)\n columns - Required : Params object representing model parameters (Params)\n ' cols_to_cull_on = parameters.config[mode]['Remove_Outlier_Cols'].split(',') ids = list(data[parameters.config[mode]['Device_ID_Col']].unique()) threshold = float(parameters.config[mode]['Outlier_MADs_Threshold']) data_by_id = {} full = [] cut = [] for device in ids: data_by_id[device] = data[(data[parameters.config[mode]['Device_ID_Col']] == device)] full.append(float(len(data_by_id[device].index))) lower_cut = (data_by_id[device].mean() - (threshold * data_by_id[device].mad())) upper_cut = (data_by_id[device].mean() + (threshold * data_by_id[device].mad())) for prod in cols_to_cull_on: data_by_id[device] = data_by_id[device][((data_by_id[device][prod] >= lower_cut[prod]) & (data_by_id[device][prod] <= upper_cut[prod]))] cut.append(float(len(data_by_id[device].index))) data_cleaned = pd.concat([data_by_id[device] for device in data_by_id.keys()]) data = data_cleaned.sort_values(by=['time']).reset_index() if (parameters.config['MAIN']['Verbose'] == 'True'): logger.info('Cleaning statistics:') logger.info('{} total raw data points'.format(int(sum(full)))) logger.info('{} data points after cleaning'.format(int(sum(cut)))) logger.info("The fraction of each device's events removed by cleaning is:") for i in range(len(full)): logger.info('ID {}: {:0.4f}'.format(str(ids[i]).rjust(max(list(map(len, map(str, ids))))), (1.0 - (cut[i] / full[i])))) logger.info() return data
def remove_outliers(data, parameters, logger, mode): '\n Remove outlier data likely to correspond to experimenter perturbation.\n \n @params:\n data - Required : the data from a single device which may contain remove_outliers (list)\n columns - Required : Params object representing model parameters (Params)\n ' cols_to_cull_on = parameters.config[mode]['Remove_Outlier_Cols'].split(',') ids = list(data[parameters.config[mode]['Device_ID_Col']].unique()) threshold = float(parameters.config[mode]['Outlier_MADs_Threshold']) data_by_id = {} full = [] cut = [] for device in ids: data_by_id[device] = data[(data[parameters.config[mode]['Device_ID_Col']] == device)] full.append(float(len(data_by_id[device].index))) lower_cut = (data_by_id[device].mean() - (threshold * data_by_id[device].mad())) upper_cut = (data_by_id[device].mean() + (threshold * data_by_id[device].mad())) for prod in cols_to_cull_on: data_by_id[device] = data_by_id[device][((data_by_id[device][prod] >= lower_cut[prod]) & (data_by_id[device][prod] <= upper_cut[prod]))] cut.append(float(len(data_by_id[device].index))) data_cleaned = pd.concat([data_by_id[device] for device in data_by_id.keys()]) data = data_cleaned.sort_values(by=['time']).reset_index() if (parameters.config['MAIN']['Verbose'] == 'True'): logger.info('Cleaning statistics:') logger.info('{} total raw data points'.format(int(sum(full)))) logger.info('{} data points after cleaning'.format(int(sum(cut)))) logger.info("The fraction of each device's events removed by cleaning is:") for i in range(len(full)): logger.info('ID {}: {:0.4f}'.format(str(ids[i]).rjust(max(list(map(len, map(str, ids))))), (1.0 - (cut[i] / full[i])))) logger.info() return data<|docstring|>Remove outlier data likely to correspond to experimenter perturbation. @params: data - Required : the data from a single device which may contain remove_outliers (list) columns - Required : Params object representing model parameters (Params)<|endoftext|>
63f3d175789ddd3fd98470120f61334be556ac68a8854b3731a554114e9edbb2
def background_correction(parameters, logger, mode):
    """
    Background correction function which linearly interpolates background data
    and subtracts the interpolated values from the input data.

    @params:
        parameters - Required : Parameter object to read config settings (Parameter)
        logger     - Required : Logger object for logging to console and file (Logger)
        mode       - Required : Pull data from different locations in config file (Str)
    """
    input_data = pd.read_csv(parameters.config[mode]['Data'])
    background = pd.read_csv(parameters.config[mode]['Background_Data'])
    # Restrict the background to the time window covered by the input data.
    # Bug fix: the extracted source contained invalid ``.loc[(:, 'time')]``
    # syntax; the valid ``.loc[:, 'time']`` form is restored here.
    background = background[
        (background.loc[:, 'time'] > input_data.loc[:, 'time'].min())
        & (background.loc[:, 'time'] < input_data.loc[:, 'time'].max())]
    # Background_Correction_Cols is a dict literal in the config mapping
    # input columns to the background columns to subtract from them.
    col_map = ast.literal_eval(parameters.config[mode]['Background_Correction_Cols'])
    for input_col, bkgd_col in col_map.items():
        interpolated_bkgd = np.interp(input_data['time'], background['time'],
                                      background[bkgd_col])
        input_data.loc[:, input_col] = input_data.loc[:, input_col] - interpolated_bkgd
    return input_data
Background correction function which linearly interpolates background data and subtracts the interpolated values from the input data. @params: parameters - Required : Parameter object to read config settings (Parameter) logger - Required : Logger object for logging to console and file (Logger) mode - Required : Pull data from different locations in config file (Str)
preprocessing.py
background_correction
nonproliferation/mimosas
1
python
def background_correction(parameters, logger, mode): '\n Background correction function which linearly interpolates background data\n and subtracts the interpolated values from the input data.\n \n @params:\n parameters - Required : Parameter object to read config settings (Parameter)\n logger - Required : Logger object for logging to console and file (Logger)\n mode - Required : Pull data from different locations in config file (Str)\n ' input_data = pd.read_csv(parameters.config[mode]['Data']) background = pd.read_csv(parameters.config[mode]['Background_Data']) background = background[((background.loc[(:, 'time')] > input_data.loc[(:, 'time')].min()) & (background.loc[(:, 'time')] < input_data.loc[(:, 'time')].max()))] for (input_col, bkgd_col) in ast.literal_eval(parameters.config[mode]['Background_Correction_Cols']).items(): interpolated_bkgd = np.interp(input_data['time'], background['time'], background[bkgd_col]) input_data.loc[(:, input_col)] = (input_data.loc[(:, input_col)] - interpolated_bkgd) return input_data
def background_correction(parameters, logger, mode): '\n Background correction function which linearly interpolates background data\n and subtracts the interpolated values from the input data.\n \n @params:\n parameters - Required : Parameter object to read config settings (Parameter)\n logger - Required : Logger object for logging to console and file (Logger)\n mode - Required : Pull data from different locations in config file (Str)\n ' input_data = pd.read_csv(parameters.config[mode]['Data']) background = pd.read_csv(parameters.config[mode]['Background_Data']) background = background[((background.loc[(:, 'time')] > input_data.loc[(:, 'time')].min()) & (background.loc[(:, 'time')] < input_data.loc[(:, 'time')].max()))] for (input_col, bkgd_col) in ast.literal_eval(parameters.config[mode]['Background_Correction_Cols']).items(): interpolated_bkgd = np.interp(input_data['time'], background['time'], background[bkgd_col]) input_data.loc[(:, input_col)] = (input_data.loc[(:, input_col)] - interpolated_bkgd) return input_data<|docstring|>Background correction function which linearly interpolates background data and subtracts the interpolated values from the input data. @params: parameters - Required : Parameter object to read config settings (Parameter) logger - Required : Logger object for logging to console and file (Logger) mode - Required : Pull data from different locations in config file (Str)<|endoftext|>
6a9b58adcd06241ae0bbdfbf68d83ea4f639fa345936baa1d36c55a4f03dfcad
def test_escape_parameter() -> None:
    """
    Test ``escape_parameter``.
    """
    # Table-driven form: (input, expected SQL-escaped output).
    cases = [
        ('*', '*'),
        ('foo', "'foo'"),
        ("O'Malley's", "'O''Malley''s'"),
        (True, 'TRUE'),
        (False, 'FALSE'),
        (1, '1'),
        (1.0, '1.0'),
    ]
    for value, expected in cases:
        assert escape_parameter(value) == expected
Test ``escape_parameter``.
tests/sql/dbapi/utils_test.py
test_escape_parameter
DataJunction/datajunction
0
python
def test_escape_parameter() -> None: '\n \n ' assert (escape_parameter('*') == '*') assert (escape_parameter('foo') == "'foo'") assert (escape_parameter("O'Malley's") == "'OMalleys'") assert (escape_parameter(True) == 'TRUE') assert (escape_parameter(False) == 'FALSE') assert (escape_parameter(1) == '1') assert (escape_parameter(1.0) == '1.0')
def test_escape_parameter() -> None: '\n \n ' assert (escape_parameter('*') == '*') assert (escape_parameter('foo') == "'foo'") assert (escape_parameter("O'Malley's") == "'OMalleys'") assert (escape_parameter(True) == 'TRUE') assert (escape_parameter(False) == 'FALSE') assert (escape_parameter(1) == '1') assert (escape_parameter(1.0) == '1.0')<|docstring|>Test ``escape_parameter``.<|endoftext|>
3971d1947285a341be45a25f7336d4c04fc65f253beae6f1bb55c4db5a927ca8
def _GetProjectPath(path):
    """Find the absolute path of the git checkout that contains |path|."""
    if not git.FindRepoCheckoutRoot(path):
        # Not inside a repo checkout; fall back to the containing directory.
        return os.path.dirname(path)
    manifest = git.ManifestCheckout.Cached(path)
    checkout = manifest.FindCheckoutFromPath(path)
    return checkout.GetPath(absolute=True)
Find the absolute path of the git checkout that contains |path|.
cli/cros/cros_lint.py
_GetProjectPath
sglass68/chromite
0
python
def _GetProjectPath(path): if git.FindRepoCheckoutRoot(path): manifest = git.ManifestCheckout.Cached(path) return manifest.FindCheckoutFromPath(path).GetPath(absolute=True) else: return os.path.dirname(path)
def _GetProjectPath(path): if git.FindRepoCheckoutRoot(path): manifest = git.ManifestCheckout.Cached(path) return manifest.FindCheckoutFromPath(path).GetPath(absolute=True) else: return os.path.dirname(path)<|docstring|>Find the absolute path of the git checkout that contains |path|.<|endoftext|>
a7581d9617caefc8384d2e6376376e22e32ebdc806ed44dc0796b60cefd7bd5a
def _GetPylintrc(path):
    """Locate the pylintrc file that applies to |path|.

    Walks up from |path| toward the checkout root looking for a pylintrc
    file; falls back to chromite's default pylintrc.  Returns None for
    non-Python files.
    """
    if not path.endswith('.py'):
        return
    path = os.path.realpath(path)
    project_path = _GetProjectPath(path)
    parent = os.path.dirname(path)
    # Bug fix: initialize pylintrc so the fallback check below cannot hit
    # an unbound local (NameError) when the loop body never executes.
    pylintrc = None
    while project_path and parent.startswith(project_path):
        pylintrc = os.path.join(parent, 'pylintrc')
        if os.path.isfile(pylintrc):
            break
        parent = os.path.dirname(parent)
    if pylintrc is None or not os.path.isfile(pylintrc):
        # No project-specific pylintrc found; use chromite's default.
        pylintrc = os.path.join(constants.SOURCE_ROOT, 'chromite', 'pylintrc')
    return pylintrc
Locate the pylintrc file that applies to |path|.
cli/cros/cros_lint.py
_GetPylintrc
sglass68/chromite
0
python
def _GetPylintrc(path): if (not path.endswith('.py')): return path = os.path.realpath(path) project_path = _GetProjectPath(path) parent = os.path.dirname(path) while (project_path and parent.startswith(project_path)): pylintrc = os.path.join(parent, 'pylintrc') if os.path.isfile(pylintrc): break parent = os.path.dirname(parent) if ((project_path is None) or (not os.path.isfile(pylintrc))): pylintrc = os.path.join(constants.SOURCE_ROOT, 'chromite', 'pylintrc') return pylintrc
def _GetPylintrc(path): if (not path.endswith('.py')): return path = os.path.realpath(path) project_path = _GetProjectPath(path) parent = os.path.dirname(path) while (project_path and parent.startswith(project_path)): pylintrc = os.path.join(parent, 'pylintrc') if os.path.isfile(pylintrc): break parent = os.path.dirname(parent) if ((project_path is None) or (not os.path.isfile(pylintrc))): pylintrc = os.path.join(constants.SOURCE_ROOT, 'chromite', 'pylintrc') return pylintrc<|docstring|>Locate the pylintrc file that applies to |path|.<|endoftext|>
851917df1512b3d66a2ff9f346ca7d82d1c4895a6619f355836df8ae553cf88c
def _GetPylintGroups(paths):
    """Return a dictionary mapping pylintrc files to lists of paths."""
    groups = {}
    for candidate in paths:
        rcfile = _GetPylintrc(candidate)
        if not rcfile:
            # Non-Python files have no applicable pylintrc; skip them.
            continue
        groups.setdefault(rcfile, []).append(candidate)
    return groups
Return a dictionary mapping pylintrc files to lists of paths.
cli/cros/cros_lint.py
_GetPylintGroups
sglass68/chromite
0
python
def _GetPylintGroups(paths): groups = {} for path in paths: pylintrc = _GetPylintrc(path) if pylintrc: groups.setdefault(pylintrc, []).append(path) return groups
def _GetPylintGroups(paths): groups = {} for path in paths: pylintrc = _GetPylintrc(path) if pylintrc: groups.setdefault(pylintrc, []).append(path) return groups<|docstring|>Return a dictionary mapping pylintrc files to lists of paths.<|endoftext|>
19654653f329a4a3d2d1f29eab39492cf837c6fc6a92b2136e491b9997c64c5c
def _GetPythonPath(paths):
    """Return the set of Python library paths to use."""
    # Well-known library roots inside the checkout.
    extra_paths = [
        os.path.join(constants.SOURCE_ROOT, 'chroot', 'usr', 'lib',
                     'portage', 'pym'),
        os.path.join(constants.CROSUTILS_DIR, 'lib'),
        os.path.join(constants.SOURCE_ROOT, 'src', 'platform'),
        constants.SOURCE_ROOT,
    ]
    # Deduplicated parent directories of the files being linted.
    parent_dirs = set(os.path.dirname(x) for x in paths)
    return sys.path + extra_paths + list(parent_dirs)
Return the set of Python library paths to use.
cli/cros/cros_lint.py
_GetPythonPath
sglass68/chromite
0
python
def _GetPythonPath(paths): return ((sys.path + [os.path.join(constants.SOURCE_ROOT, 'chroot', 'usr', 'lib', 'portage', 'pym'), os.path.join(constants.CROSUTILS_DIR, 'lib'), os.path.join(constants.SOURCE_ROOT, 'src', 'platform'), constants.SOURCE_ROOT]) + list(set((os.path.dirname(x) for x in paths))))
def _GetPythonPath(paths): return ((sys.path + [os.path.join(constants.SOURCE_ROOT, 'chroot', 'usr', 'lib', 'portage', 'pym'), os.path.join(constants.CROSUTILS_DIR, 'lib'), os.path.join(constants.SOURCE_ROOT, 'src', 'platform'), constants.SOURCE_ROOT]) + list(set((os.path.dirname(x) for x in paths))))<|docstring|>Return the set of Python library paths to use.<|endoftext|>
3a01d38861496981ebf8ad2ccf1f980fec0a1e0964fa6159651ed81d82e66a41
def _LinterRunCommand(cmd, debug, **kwargs):
    """Run the linter with common RunCommand args set as higher levels expect.

    Args:
        cmd: Argument list for the linter invocation.
        debug: If True, echo the command being run.
        **kwargs: Extra keyword arguments forwarded to
            cros_build_lib.RunCommand.

    Returns:
        The result object from cros_build_lib.RunCommand; nonzero linter
        exit codes are tolerated (error_code_ok=True) so callers can
        aggregate them.
    """
    return cros_build_lib.RunCommand(cmd, error_code_ok=True, print_cmd=debug,
                                     debug_level=logging.NOTICE, **kwargs)
Run the linter with common RunCommand args set as higher levels expect.
cli/cros/cros_lint.py
_LinterRunCommand
sglass68/chromite
0
python
def _LinterRunCommand(cmd, debug, **kwargs): return cros_build_lib.RunCommand(cmd, error_code_ok=True, print_cmd=debug, debug_level=logging.NOTICE, **kwargs)
def _LinterRunCommand(cmd, debug, **kwargs): return cros_build_lib.RunCommand(cmd, error_code_ok=True, print_cmd=debug, debug_level=logging.NOTICE, **kwargs)<|docstring|>Run the linter with common RunCommand args set as higher levels expect.<|endoftext|>
edcf759d132bd2b45ac43f0fcd00243947baedc8d49e5b74392f5cdfb668e63f
def _CpplintFile(path, output_format, debug):
    """Returns result of running cpplint on |path|."""
    cpplint = os.path.join(constants.DEPOT_TOOLS_DIR, 'cpplint.py')
    cmd = [cpplint]
    if output_format != 'default':
        # Translate our generic format name into cpplint's flag value.
        cmd.append('--output=%s' % CPPLINT_OUTPUT_FORMAT_MAP[output_format])
    cmd.append(path)
    return _LinterRunCommand(cmd, debug)
Returns result of running cpplint on |path|.
cli/cros/cros_lint.py
_CpplintFile
sglass68/chromite
0
python
def _CpplintFile(path, output_format, debug): cmd = [os.path.join(constants.DEPOT_TOOLS_DIR, 'cpplint.py')] if (output_format != 'default'): cmd.append(('--output=%s' % CPPLINT_OUTPUT_FORMAT_MAP[output_format])) cmd.append(path) return _LinterRunCommand(cmd, debug)
def _CpplintFile(path, output_format, debug): cmd = [os.path.join(constants.DEPOT_TOOLS_DIR, 'cpplint.py')] if (output_format != 'default'): cmd.append(('--output=%s' % CPPLINT_OUTPUT_FORMAT_MAP[output_format])) cmd.append(path) return _LinterRunCommand(cmd, debug)<|docstring|>Returns result of running cpplint on |path|.<|endoftext|>
e61a64302bb9d36bf8e7f2bfdabf9ffaa5cb8eeb5ac09cbe7bef9e4e30bf66d6
def _PylintFile(path, output_format, debug):
    """Returns result of running pylint on |path|."""
    pylint = os.path.join(constants.DEPOT_TOOLS_DIR, 'pylint')
    cmd = [pylint, '--rcfile=%s' % _GetPylintrc(path)]
    if output_format != 'default':
        cmd.append('--output-format=%s' % output_format)
    cmd.append(path)
    # Make the checkout's library roots importable for pylint's analysis.
    env = {'PYTHONPATH': ':'.join(_GetPythonPath([path]))}
    return _LinterRunCommand(cmd, debug, extra_env=env)
Returns result of running pylint on |path|.
cli/cros/cros_lint.py
_PylintFile
sglass68/chromite
0
python
def _PylintFile(path, output_format, debug): pylint = os.path.join(constants.DEPOT_TOOLS_DIR, 'pylint') pylintrc = _GetPylintrc(path) cmd = [pylint, ('--rcfile=%s' % pylintrc)] if (output_format != 'default'): cmd.append(('--output-format=%s' % output_format)) cmd.append(path) extra_env = {'PYTHONPATH': ':'.join(_GetPythonPath([path]))} return _LinterRunCommand(cmd, debug, extra_env=extra_env)
def _PylintFile(path, output_format, debug): pylint = os.path.join(constants.DEPOT_TOOLS_DIR, 'pylint') pylintrc = _GetPylintrc(path) cmd = [pylint, ('--rcfile=%s' % pylintrc)] if (output_format != 'default'): cmd.append(('--output-format=%s' % output_format)) cmd.append(path) extra_env = {'PYTHONPATH': ':'.join(_GetPythonPath([path]))} return _LinterRunCommand(cmd, debug, extra_env=extra_env)<|docstring|>Returns result of running pylint on |path|.<|endoftext|>
1c6aeaeaa5ae82a0be41f9bcf4a83eb67ca6e7904adcdf6fbb7033badf9f123b
def _BreakoutFilesByLinter(files):
    """Maps a linter method to the list of files to lint."""
    map_to_return = {}
    for f in files:
        ext = os.path.splitext(f)[1]
        if ext in PYTHON_EXTENSIONS:
            linter = _PylintFile
        elif ext in CPP_EXTENSIONS:
            linter = _CpplintFile
        else:
            # Files with no known linter are silently skipped.
            continue
        map_to_return.setdefault(linter, []).append(f)
    return map_to_return
Maps a linter method to the list of files to lint.
cli/cros/cros_lint.py
_BreakoutFilesByLinter
sglass68/chromite
0
python
def _BreakoutFilesByLinter(files): map_to_return = {} for f in files: extension = os.path.splitext(f)[1] if (extension in PYTHON_EXTENSIONS): pylint_list = map_to_return.setdefault(_PylintFile, []) pylint_list.append(f) elif (extension in CPP_EXTENSIONS): cpplint_list = map_to_return.setdefault(_CpplintFile, []) cpplint_list.append(f) return map_to_return
def _BreakoutFilesByLinter(files): map_to_return = {} for f in files: extension = os.path.splitext(f)[1] if (extension in PYTHON_EXTENSIONS): pylint_list = map_to_return.setdefault(_PylintFile, []) pylint_list.append(f) elif (extension in CPP_EXTENSIONS): cpplint_list = map_to_return.setdefault(_CpplintFile, []) cpplint_list.append(f) return map_to_return<|docstring|>Maps a linter method to the list of files to lint.<|endoftext|>
98eb7c0cdcc7413743434b3842b24c78561cb07941640872b5bed61c806fe7d8
def _Dispatcher(errors, output_format, debug, linter, path): 'Call |linter| on |path| and take care of coalescing exit codes/output.' result = linter(path, output_format, debug) if result.returncode: with errors.get_lock(): errors.value += 1
Call |linter| on |path| and take care of coalescing exit codes/output.
cli/cros/cros_lint.py
_Dispatcher
sglass68/chromite
0
python
def _Dispatcher(errors, output_format, debug, linter, path): result = linter(path, output_format, debug) if result.returncode: with errors.get_lock(): errors.value += 1
def _Dispatcher(errors, output_format, debug, linter, path): result = linter(path, output_format, debug) if result.returncode: with errors.get_lock(): errors.value += 1<|docstring|>Call |linter| on |path| and take care of coalescing exit codes/output.<|endoftext|>
c7f27d24ea4959e4c997eaff22d7d1bf4a38bb10ba31458b90069eef5ea6eed0
def __eq__(self, *args):
    """x.__eq__(y) <==> x == y

    Auto-generated IronPython stub; the body is intentionally empty.
    NOTE(review): presumably the actual comparison is supplied by the
    underlying .NET type at runtime — this file only exists for tooling.
    """
    pass
x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y
release/stubs.min/System/Security/AccessControl_parts/CryptoKeyRights.py
__eq__
daddycocoaman/ironpython-stubs
182
python
def __eq__(self, *args): ' ' pass
def __eq__(self, *args): ' ' pass<|docstring|>x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y<|endoftext|>
ada11d26366342d19ddf94f50604d65258a36c58cfc3ac043990b0b26b3c935d
def __format__(self, *args):
    """__format__(formattable: IFormattable, format: str) -> str

    Auto-generated IronPython stub; the body is intentionally empty.
    NOTE(review): presumably formatting is supplied by the underlying
    .NET type at runtime — this file only exists for tooling.
    """
    pass
__format__(formattable: IFormattable,format: str) -> str
release/stubs.min/System/Security/AccessControl_parts/CryptoKeyRights.py
__format__
daddycocoaman/ironpython-stubs
182
python
def __format__(self, *args): ' ' pass
def __format__(self, *args): ' ' pass<|docstring|>__format__(formattable: IFormattable,format: str) -> str<|endoftext|>
32b5271afcd5ecc37febb67dd854fa2d1b2c4c68b2c41d2ec119d33157e9bbaa
def __init__(self, *args):
    """x.__init__(...) initializes x; see x.__class__.__doc__ for signature

    Auto-generated IronPython stub; the body is intentionally empty.
    """
    pass
x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature
release/stubs.min/System/Security/AccessControl_parts/CryptoKeyRights.py
__init__
daddycocoaman/ironpython-stubs
182
python
def __init__(self, *args): ' ' pass
def __init__(self, *args): ' ' pass<|docstring|>x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature<|endoftext|>
f2af1def40c7af61730fee0f0a15045b11c520b705fcfb73a53b1b2cd19db219
@property
def debug(self):
    """
    the debug parameter; read-only view of the private ``_debug`` flag
    :return: current debug state
    """
    return self._debug
the debug parameter :return:
instapy/plugins/telegram_util.py
debug
mehrdad-shokri/InstaPy
0
python
@property def debug(self): '\n the debug parameter\n :return:\n ' return self._debug
@property def debug(self): '\n the debug parameter\n :return:\n ' return self._debug<|docstring|>the debug parameter :return:<|endoftext|>
beb664261e0f666109ebf9f4a8611a2a3ec7ae6c4b985c493435a3e288144db5
@debug.setter
def debug(self, value):
    """
    sets the debug flag if needed; when enabled, lazily grabs the root
    logger and sets its level to DEBUG
    :param value: new debug state
    :return:
    """
    self._debug = value
    # NOTE(review): only the literal True triggers setup (``is True``, not
    # truthiness), and turning debug off never restores the logger level.
    if (self._debug is True):
        if (self.__logger is None):
            self.__logger = logging.getLogger()
            self.__logger.setLevel(logging.DEBUG)
sets the debug if needed :param debug: :return:
instapy/plugins/telegram_util.py
debug
mehrdad-shokri/InstaPy
0
python
@debug.setter def debug(self, value): '\n sets the debug if needed\n :param debug:\n :return:\n ' self._debug = value if (self._debug is True): if (self.__logger is None): self.__logger = logging.getLogger() self.__logger.setLevel(logging.DEBUG)
@debug.setter def debug(self, value): '\n sets the debug if needed\n :param debug:\n :return:\n ' self._debug = value if (self._debug is True): if (self.__logger is None): self.__logger = logging.getLogger() self.__logger.setLevel(logging.DEBUG)<|docstring|>sets the debug if needed :param debug: :return:<|endoftext|>
918bcbe43c071417e349788368a51ee7de40a8a0ad3e424596f67e4e06517e86
def telegram_bot(self):
    """
    Function to initialize a telegram bot that you can talk to and control
    your InstaPy bot.

    Requires ``token``, ``telegram_username`` and ``instapy_session`` to be
    set beforehand; otherwise a warning is logged and nothing is started.
    :return:
    """
    # Refuse to start until all mandatory configuration is present.
    if (self.token == ''):
        self.__logger.warning('You need to set token for InstaPyTelegramBot to work')
        return
    if (self.telegram_username == ''):
        self.__logger.warning('You need to set telegram_username for InstaPyTelegramBot to work')
        return
    if (self.instapy_session is None):
        self.__logger.warning('You need to set instapy_session for InstaPyTelegramBot to work')
        return
    # NOTE(review): presumably clears any configured webhook before
    # switching to polling — confirm against _clean_web_hooks.
    self._clean_web_hooks()
    if (self.proxy is not None):
        updater = Updater(token=self.token, use_context=True, user_sig_handler=self.end, request_kwargs=self.proxy)
    else:
        updater = Updater(token=self.token, use_context=True, user_sig_handler=self.end)
    self.__updater = updater
    dispatcher = updater.dispatcher
    self.__context = dispatcher
    dispatcher.add_error_handler(self._error_callback)
    # Register the supported commands: /start, /report and /stop.
    start_handler = CommandHandler('start', self._start)
    dispatcher.add_handler(start_handler)
    report_handler = CommandHandler('report', self._report)
    dispatcher.add_handler(report_handler)
    report_handler = CommandHandler('stop', self._stop)
    dispatcher.add_handler(report_handler)
    # Any other command-like message gets the "unknown command" reply.
    unknown_handler = MessageHandler(Filters.command, self._unknown)
    dispatcher.add_handler(unknown_handler)
    updater.start_polling()
    # A chat id restored from a previous run lets us greet the user.
    if (self.__chat_id is not None):
        self.__context.bot.send_message(self.__chat_id, text='Telegram session restored, InstaPy starting\n')
Funtion to initialize a telegram bot that you can talk to and control your InstaPy Bot :return:
instapy/plugins/telegram_util.py
telegram_bot
mehrdad-shokri/InstaPy
0
python
def telegram_bot(self): '\n Funtion to initialize a telegram bot that you can talk to and control your InstaPy Bot\n :return:\n ' if (self.token == ): self.__logger.warning('You need to set token for InstaPyTelegramBot to work') return if (self.telegram_username == ): self.__logger.warning('You need to set telegram_username for InstaPyTelegramBot to work') return if (self.instapy_session is None): self.__logger.warning('You need to set instapy_session for InstaPyTelegramBot to work') return self._clean_web_hooks() if (self.proxy is not None): updater = Updater(token=self.token, use_context=True, user_sig_handler=self.end, request_kwargs=self.proxy) else: updater = Updater(token=self.token, use_context=True, user_sig_handler=self.end) self.__updater = updater dispatcher = updater.dispatcher self.__context = dispatcher dispatcher.add_error_handler(self._error_callback) start_handler = CommandHandler('start', self._start) dispatcher.add_handler(start_handler) report_handler = CommandHandler('report', self._report) dispatcher.add_handler(report_handler) report_handler = CommandHandler('stop', self._stop) dispatcher.add_handler(report_handler) unknown_handler = MessageHandler(Filters.command, self._unknown) dispatcher.add_handler(unknown_handler) updater.start_polling() if (self.__chat_id is not None): self.__context.bot.send_message(self.__chat_id, text='Telegram session restored, InstaPy starting\n')
def telegram_bot(self): '\n Funtion to initialize a telegram bot that you can talk to and control your InstaPy Bot\n :return:\n ' if (self.token == ): self.__logger.warning('You need to set token for InstaPyTelegramBot to work') return if (self.telegram_username == ): self.__logger.warning('You need to set telegram_username for InstaPyTelegramBot to work') return if (self.instapy_session is None): self.__logger.warning('You need to set instapy_session for InstaPyTelegramBot to work') return self._clean_web_hooks() if (self.proxy is not None): updater = Updater(token=self.token, use_context=True, user_sig_handler=self.end, request_kwargs=self.proxy) else: updater = Updater(token=self.token, use_context=True, user_sig_handler=self.end) self.__updater = updater dispatcher = updater.dispatcher self.__context = dispatcher dispatcher.add_error_handler(self._error_callback) start_handler = CommandHandler('start', self._start) dispatcher.add_handler(start_handler) report_handler = CommandHandler('report', self._report) dispatcher.add_handler(report_handler) report_handler = CommandHandler('stop', self._stop) dispatcher.add_handler(report_handler) unknown_handler = MessageHandler(Filters.command, self._unknown) dispatcher.add_handler(unknown_handler) updater.start_polling() if (self.__chat_id is not None): self.__context.bot.send_message(self.__chat_id, text='Telegram session restored, InstaPy starting\n')<|docstring|>Funtion to initialize a telegram bot that you can talk to and control your InstaPy Bot :return:<|endoftext|>
a7b3695f55629a31f173eb04072bbe0581e05e9adaeeb2c1ef89aef5fea567dd
def send_message(self, text=''):
    """
    function to be able to send messages from anywhere else in the instapy code
    :param text: the text of the message you want to send
    remember we cannot send_messages if we don't have the chat_id;
    to get the chat_id, user has to send at least one /start to the bot
    :return:
    :raises TelegramError: if the chat_id or bot context is not yet known
    """
    # Bug fix: sending requires BOTH the chat_id and the bot context, so
    # refuse if EITHER is missing.  The original ``and`` let a half-
    # initialized state through and crashed with AttributeError on None.
    # Also pass the message argument TelegramError's constructor requires.
    if self.__chat_id is None or self.__context is None:
        raise TelegramError('chat_id/context not initialized; send /start to the bot first')
    self.__context.bot.send_message(chat_id=self.__chat_id, text=text)
function to be able to send messages from anywhere else in the instapy code :param text: the text of the message you want to send remember we cannot send_messages if we don't have the chat_id to get the chat_id, user has to send at little one /start to the bot :return:
instapy/plugins/telegram_util.py
send_message
mehrdad-shokri/InstaPy
0
python
def send_message(self, text=): "\n function to be able to send messages from anywhere else in the instapy code\n :param text: the text of the message you want to send\n remember we cannot send_messages if we don't have the chat_id\n to get the chat_id, user has to send at little one /start to the bot\n :return:\n " if ((self.__chat_id is None) and (self.__context is None)): raise TelegramError else: self.__context.bot.send_message(chat_id=self.__chat_id, text=text)
def send_message(self, text=): "\n function to be able to send messages from anywhere else in the instapy code\n :param text: the text of the message you want to send\n remember we cannot send_messages if we don't have the chat_id\n to get the chat_id, user has to send at little one /start to the bot\n :return:\n " if ((self.__chat_id is None) and (self.__context is None)): raise TelegramError else: self.__context.bot.send_message(chat_id=self.__chat_id, text=text)<|docstring|>function to be able to send messages from anywhere else in the instapy code :param text: the text of the message you want to send remember we cannot send_messages if we don't have the chat_id to get the chat_id, user has to send at little one /start to the bot :return:<|endoftext|>
8c38767be5c2748072b097381365571745f9579162b881a2f52c6b9e3150bfeb
@staticmethod
def telegram_delete_session(session):
    """
    function to force delete the telegram_chat_id.txt file that is in the logs folder
    :param session: the instapy session
    :return:
    """
    chat_id_file = '{}telegram_chat_id.txt'.format(session.logfolder)
    os.remove(chat_id_file)
function to force delete the telegram_chat_id.txt file that is in the logs folder :param session: the instapy session :return:
instapy/plugins/telegram_util.py
telegram_delete_session
mehrdad-shokri/InstaPy
0
python
@staticmethod def telegram_delete_session(session): '\n function to force delete the telegram_chat_id.txt file that is in the logs folder\n :param session: the instapy session\n :return:\n ' os.remove('{}telegram_chat_id.txt'.format(session.logfolder))
@staticmethod def telegram_delete_session(session): '\n function to force delete the telegram_chat_id.txt file that is in the logs folder\n :param session: the instapy session\n :return:\n ' os.remove('{}telegram_chat_id.txt'.format(session.logfolder))<|docstring|>function to force delete the telegram_chat_id.txt file that is in the logs folder :param session: the instapy session :return:<|endoftext|>
d84da55cf6b54d9bd32bf770a57d7094f4aa529c4f46b9c2547635a5017ab566
def _start(self, update, context): '\n basic /start function\n :param update:\n :param context:\n :return:\n ' self.__chat_id = update.message.chat_id if self._check_authorized(update, context): with open('{}telegram_chat_id.txt'.format(self.instapy_session.logfolder), 'w') as telegramfile: telegramfile.write(str(self.__chat_id)) context.bot.send_message(chat_id=update.message.chat_id, text='Bot initialized sucessfully!\n')
basic /start function :param update: :param context: :return:
instapy/plugins/telegram_util.py
_start
mehrdad-shokri/InstaPy
0
python
def _start(self, update, context): '\n basic /start function\n :param update:\n :param context:\n :return:\n ' self.__chat_id = update.message.chat_id if self._check_authorized(update, context): with open('{}telegram_chat_id.txt'.format(self.instapy_session.logfolder), 'w') as telegramfile: telegramfile.write(str(self.__chat_id)) context.bot.send_message(chat_id=update.message.chat_id, text='Bot initialized sucessfully!\n')
def _start(self, update, context): '\n basic /start function\n :param update:\n :param context:\n :return:\n ' self.__chat_id = update.message.chat_id if self._check_authorized(update, context): with open('{}telegram_chat_id.txt'.format(self.instapy_session.logfolder), 'w') as telegramfile: telegramfile.write(str(self.__chat_id)) context.bot.send_message(chat_id=update.message.chat_id, text='Bot initialized sucessfully!\n')<|docstring|>basic /start function :param update: :param context: :return:<|endoftext|>
846bd62dc216b3a423d1c2a2615b9ef50b7a0e523743aae7809059c29f2c4e18
def _report(self, update, context): '\n report live statistics\n :param update:\n :param context:\n :return:\n ' self.__chat_id = update.message.chat_id if self._check_authorized(update, context): context.bot.send_message(chat_id=update.message.chat_id, text=self._live_report())
report live statistics :param update: :param context: :return:
instapy/plugins/telegram_util.py
_report
mehrdad-shokri/InstaPy
0
python
def _report(self, update, context): '\n report live statistics\n :param update:\n :param context:\n :return:\n ' self.__chat_id = update.message.chat_id if self._check_authorized(update, context): context.bot.send_message(chat_id=update.message.chat_id, text=self._live_report())
def _report(self, update, context): '\n report live statistics\n :param update:\n :param context:\n :return:\n ' self.__chat_id = update.message.chat_id if self._check_authorized(update, context): context.bot.send_message(chat_id=update.message.chat_id, text=self._live_report())<|docstring|>report live statistics :param update: :param context: :return:<|endoftext|>
d0e72638a24538dd2934bcd96c5616364f8b0cbcbb98fedfbd3de9e3a7c2282c
def _stop(self, update, context): '\n should stop the bot\n :param update:\n :param context:\n :return:\n ' self.__chat_id = update.message.chat_id if self._check_authorized(update, context): self.instapy_session.aborting = True context.bot.send_message(chat_id=update.message.chat_id, text='InstaPy session abort set\n')
should stop the bot :param update: :param context: :return:
instapy/plugins/telegram_util.py
_stop
mehrdad-shokri/InstaPy
0
python
def _stop(self, update, context): '\n should stop the bot\n :param update:\n :param context:\n :return:\n ' self.__chat_id = update.message.chat_id if self._check_authorized(update, context): self.instapy_session.aborting = True context.bot.send_message(chat_id=update.message.chat_id, text='InstaPy session abort set\n')
def _stop(self, update, context): '\n should stop the bot\n :param update:\n :param context:\n :return:\n ' self.__chat_id = update.message.chat_id if self._check_authorized(update, context): self.instapy_session.aborting = True context.bot.send_message(chat_id=update.message.chat_id, text='InstaPy session abort set\n')<|docstring|>should stop the bot :param update: :param context: :return:<|endoftext|>
f9a9a3f57b573ee84dac2d8750d025cd6351aea6711591d8e0419abaae380eb5
def _unknown(self, update, context): '\n trap all others commands as unknown\n :return:\n ' if self._check_authorized(update, context): context.bot.send_message(chat_id=update.message.chat_id, text="Sorry I don't understand that command") context.bot.send_message(chat_id=update.message.chat_id, text=(((' Recognized actions are:\n' + ' - /start (initialize bot) \n') + ' - /report (a live report from the bot)\n') + ' - /stop (force stop the bot)\n'))
trap all others commands as unknown :return:
instapy/plugins/telegram_util.py
_unknown
mehrdad-shokri/InstaPy
0
python
def _unknown(self, update, context): '\n trap all others commands as unknown\n :return:\n ' if self._check_authorized(update, context): context.bot.send_message(chat_id=update.message.chat_id, text="Sorry I don't understand that command") context.bot.send_message(chat_id=update.message.chat_id, text=(((' Recognized actions are:\n' + ' - /start (initialize bot) \n') + ' - /report (a live report from the bot)\n') + ' - /stop (force stop the bot)\n'))
def _unknown(self, update, context): '\n trap all others commands as unknown\n :return:\n ' if self._check_authorized(update, context): context.bot.send_message(chat_id=update.message.chat_id, text="Sorry I don't understand that command") context.bot.send_message(chat_id=update.message.chat_id, text=(((' Recognized actions are:\n' + ' - /start (initialize bot) \n') + ' - /report (a live report from the bot)\n') + ' - /stop (force stop the bot)\n'))<|docstring|>trap all others commands as unknown :return:<|endoftext|>
97869203058224e5ec1a320532132cc5feec61f7504ebc6bf8b7fb0d61392d23
def _check_authorized(self, update, context): '\n check if a user is authorized to use this bot\n :param update:\n :param context:\n :return:\n ' if (update.message.from_user.username != self.telegram_username): self.__logger.warning('unauthorized access from {}'.format(update.message.from_user)) context.bot.send_message(chat_id=update.message.chat_id, text='You are not authorized to use this service \n') return False else: return True
check if a user is authorized to use this bot :param update: :param context: :return:
instapy/plugins/telegram_util.py
_check_authorized
mehrdad-shokri/InstaPy
0
python
def _check_authorized(self, update, context): '\n check if a user is authorized to use this bot\n :param update:\n :param context:\n :return:\n ' if (update.message.from_user.username != self.telegram_username): self.__logger.warning('unauthorized access from {}'.format(update.message.from_user)) context.bot.send_message(chat_id=update.message.chat_id, text='You are not authorized to use this service \n') return False else: return True
def _check_authorized(self, update, context): '\n check if a user is authorized to use this bot\n :param update:\n :param context:\n :return:\n ' if (update.message.from_user.username != self.telegram_username): self.__logger.warning('unauthorized access from {}'.format(update.message.from_user)) context.bot.send_message(chat_id=update.message.chat_id, text='You are not authorized to use this service \n') return False else: return True<|docstring|>check if a user is authorized to use this bot :param update: :param context: :return:<|endoftext|>
e8c6ed6ea16b8e38916889c63da14b04eb92d8b67f93ec3d41354469e422272a
def _clean_web_hooks(self): '\n make sure no web_hooks are configured already otherwise telegram\n will respond 409\n :return:\n ' r = requests.get('https://api.telegram.org/bot{}/deleteWebhook'.format(self.token)) if (r.json()['ok'] is not True): self.__logger.warning('unable to remove webhook! Wrong token?')
make sure no web_hooks are configured already otherwise telegram will respond 409 :return:
instapy/plugins/telegram_util.py
_clean_web_hooks
mehrdad-shokri/InstaPy
0
python
def _clean_web_hooks(self): '\n make sure no web_hooks are configured already otherwise telegram\n will respond 409\n :return:\n ' r = requests.get('https://api.telegram.org/bot{}/deleteWebhook'.format(self.token)) if (r.json()['ok'] is not True): self.__logger.warning('unable to remove webhook! Wrong token?')
def _clean_web_hooks(self): '\n make sure no web_hooks are configured already otherwise telegram\n will respond 409\n :return:\n ' r = requests.get('https://api.telegram.org/bot{}/deleteWebhook'.format(self.token)) if (r.json()['ok'] is not True): self.__logger.warning('unable to remove webhook! Wrong token?')<|docstring|>make sure no web_hooks are configured already otherwise telegram will respond 409 :return:<|endoftext|>
f83299d59cb24a5696bc4da24fc8ade65ed4235a885655a79490c43bf292dc15
def _live_report(self): '\n adapted version of instapy live report function for showing up on a telegram message\n :return:\n ' stats = [self.instapy_session.liked_img, self.instapy_session.already_liked, self.instapy_session.commented, self.instapy_session.followed, self.instapy_session.already_followed, self.instapy_session.unfollowed, self.instapy_session.stories_watched, self.instapy_session.reels_watched, self.instapy_session.inap_img, self.instapy_session.not_valid_users] sessional_run_time = self.instapy_session.run_time() run_time_info = ('{} seconds'.format(sessional_run_time) if (sessional_run_time < 60) else ('{} minutes'.format(truncate_float((sessional_run_time / 60), 2)) if (sessional_run_time < 3600) else '{} hours'.format(truncate_float(((sessional_run_time / 60) / 60), 2)))) run_time_msg = '[Session lasted {}]'.format(run_time_info) if any((stat for stat in stats)): return 'Sessional Live Report:\n|> LIKED {} images\n|> ALREADY LIKED: {}\n|> COMMENTED on {} images\n|> FOLLOWED {} users\n|> ALREADY FOLLOWED: {}\n|> UNFOLLOWED {} users\n|> LIKED {} comments\n|> REPLIED to {} comments\n|> INAPPROPRIATE images: {}\n|> NOT VALID users: {}\n|> WATCHED {} story(ies)\n|> WATCHED {} reel(s)\n\n{}'.format(self.instapy_session.liked_img, self.instapy_session.already_liked, self.instapy_session.commented, self.instapy_session.followed, self.instapy_session.already_followed, self.instapy_session.unfollowed, self.instapy_session.liked_comments, self.instapy_session.replied_to_comments, self.instapy_session.inap_img, self.instapy_session.not_valid_users, self.instapy_session.stories_watched, self.instapy_session.reels_watched, run_time_msg) else: return 'Sessional Live Report:\n|> No any statistics to show\n\n{}'.format(run_time_msg)
adapted version of instapy live report function for showing up on a telegram message :return:
instapy/plugins/telegram_util.py
_live_report
mehrdad-shokri/InstaPy
0
python
def _live_report(self): '\n adapted version of instapy live report function for showing up on a telegram message\n :return:\n ' stats = [self.instapy_session.liked_img, self.instapy_session.already_liked, self.instapy_session.commented, self.instapy_session.followed, self.instapy_session.already_followed, self.instapy_session.unfollowed, self.instapy_session.stories_watched, self.instapy_session.reels_watched, self.instapy_session.inap_img, self.instapy_session.not_valid_users] sessional_run_time = self.instapy_session.run_time() run_time_info = ('{} seconds'.format(sessional_run_time) if (sessional_run_time < 60) else ('{} minutes'.format(truncate_float((sessional_run_time / 60), 2)) if (sessional_run_time < 3600) else '{} hours'.format(truncate_float(((sessional_run_time / 60) / 60), 2)))) run_time_msg = '[Session lasted {}]'.format(run_time_info) if any((stat for stat in stats)): return 'Sessional Live Report:\n|> LIKED {} images\n|> ALREADY LIKED: {}\n|> COMMENTED on {} images\n|> FOLLOWED {} users\n|> ALREADY FOLLOWED: {}\n|> UNFOLLOWED {} users\n|> LIKED {} comments\n|> REPLIED to {} comments\n|> INAPPROPRIATE images: {}\n|> NOT VALID users: {}\n|> WATCHED {} story(ies)\n|> WATCHED {} reel(s)\n\n{}'.format(self.instapy_session.liked_img, self.instapy_session.already_liked, self.instapy_session.commented, self.instapy_session.followed, self.instapy_session.already_followed, self.instapy_session.unfollowed, self.instapy_session.liked_comments, self.instapy_session.replied_to_comments, self.instapy_session.inap_img, self.instapy_session.not_valid_users, self.instapy_session.stories_watched, self.instapy_session.reels_watched, run_time_msg) else: return 'Sessional Live Report:\n|> No any statistics to show\n\n{}'.format(run_time_msg)
def _live_report(self): '\n adapted version of instapy live report function for showing up on a telegram message\n :return:\n ' stats = [self.instapy_session.liked_img, self.instapy_session.already_liked, self.instapy_session.commented, self.instapy_session.followed, self.instapy_session.already_followed, self.instapy_session.unfollowed, self.instapy_session.stories_watched, self.instapy_session.reels_watched, self.instapy_session.inap_img, self.instapy_session.not_valid_users] sessional_run_time = self.instapy_session.run_time() run_time_info = ('{} seconds'.format(sessional_run_time) if (sessional_run_time < 60) else ('{} minutes'.format(truncate_float((sessional_run_time / 60), 2)) if (sessional_run_time < 3600) else '{} hours'.format(truncate_float(((sessional_run_time / 60) / 60), 2)))) run_time_msg = '[Session lasted {}]'.format(run_time_info) if any((stat for stat in stats)): return 'Sessional Live Report:\n|> LIKED {} images\n|> ALREADY LIKED: {}\n|> COMMENTED on {} images\n|> FOLLOWED {} users\n|> ALREADY FOLLOWED: {}\n|> UNFOLLOWED {} users\n|> LIKED {} comments\n|> REPLIED to {} comments\n|> INAPPROPRIATE images: {}\n|> NOT VALID users: {}\n|> WATCHED {} story(ies)\n|> WATCHED {} reel(s)\n\n{}'.format(self.instapy_session.liked_img, self.instapy_session.already_liked, self.instapy_session.commented, self.instapy_session.followed, self.instapy_session.already_followed, self.instapy_session.unfollowed, self.instapy_session.liked_comments, self.instapy_session.replied_to_comments, self.instapy_session.inap_img, self.instapy_session.not_valid_users, self.instapy_session.stories_watched, self.instapy_session.reels_watched, run_time_msg) else: return 'Sessional Live Report:\n|> No any statistics to show\n\n{}'.format(run_time_msg)<|docstring|>adapted version of instapy live report function for showing up on a telegram message :return:<|endoftext|>
e96737745df4cf0627212243b8ca6431af4ac39973c75cd395a9b0b8f07631d3
def end(self): '\n tidy up things\n :return:\n ' if ((self.__chat_id is not None) and (self.__context is not None)): self.__context.bot.send_message(chat_id=self.__chat_id, text=self._live_report()) self.__updater.stop() self.token = '' self.telegram_username = '' self.instapy_session = None self.__chat_id = None self.__context = None
tidy up things :return:
instapy/plugins/telegram_util.py
end
mehrdad-shokri/InstaPy
0
python
def end(self): '\n tidy up things\n :return:\n ' if ((self.__chat_id is not None) and (self.__context is not None)): self.__context.bot.send_message(chat_id=self.__chat_id, text=self._live_report()) self.__updater.stop() self.token = self.telegram_username = self.instapy_session = None self.__chat_id = None self.__context = None
def end(self): '\n tidy up things\n :return:\n ' if ((self.__chat_id is not None) and (self.__context is not None)): self.__context.bot.send_message(chat_id=self.__chat_id, text=self._live_report()) self.__updater.stop() self.token = self.telegram_username = self.instapy_session = None self.__chat_id = None self.__context = None<|docstring|>tidy up things :return:<|endoftext|>
0fb5d9f900614dfb65e646f767be98f0eedc030a4253c4c36dbcb02743416da6
@pytest.fixture def dummy_container(create_container): 'Returns a container that is created but not started' return create_container('alpine', command=['sh', '-c', 'while true; do sleep 1; done'])
Returns a container that is created but not started
tests/integration/docker_utils/test_docker.py
dummy_container
pinzon/localstack
31,928
python
@pytest.fixture def dummy_container(create_container): return create_container('alpine', command=['sh', '-c', 'while true; do sleep 1; done'])
@pytest.fixture def dummy_container(create_container): return create_container('alpine', command=['sh', '-c', 'while true; do sleep 1; done'])<|docstring|>Returns a container that is created but not started<|endoftext|>
43dab1903f75d2822a8e7a4f595aca46ada9c854a5393a896cfc7079ed548311
@pytest.fixture def create_container(docker_client: ContainerClient, create_network): '\n Uses the factory as fixture pattern to wrap ContainerClient.create_container as a factory that\n removes the containers after the fixture is cleaned up.\n\n Depends on create network for correct cleanup order\n ' containers = [] def _create_container(image_name: str, **kwargs): kwargs['name'] = kwargs.get('name', _random_container_name()) cid = docker_client.create_container(image_name, **kwargs) cid = cid.strip() containers.append(cid) return ContainerInfo(cid, kwargs['name']) (yield _create_container) for c in containers: try: docker_client.remove_container(c) except Exception: LOG.warning('failed to remove test container %s', c)
Uses the factory as fixture pattern to wrap ContainerClient.create_container as a factory that removes the containers after the fixture is cleaned up. Depends on create network for correct cleanup order
tests/integration/docker_utils/test_docker.py
create_container
pinzon/localstack
31,928
python
@pytest.fixture def create_container(docker_client: ContainerClient, create_network): '\n Uses the factory as fixture pattern to wrap ContainerClient.create_container as a factory that\n removes the containers after the fixture is cleaned up.\n\n Depends on create network for correct cleanup order\n ' containers = [] def _create_container(image_name: str, **kwargs): kwargs['name'] = kwargs.get('name', _random_container_name()) cid = docker_client.create_container(image_name, **kwargs) cid = cid.strip() containers.append(cid) return ContainerInfo(cid, kwargs['name']) (yield _create_container) for c in containers: try: docker_client.remove_container(c) except Exception: LOG.warning('failed to remove test container %s', c)
@pytest.fixture def create_container(docker_client: ContainerClient, create_network): '\n Uses the factory as fixture pattern to wrap ContainerClient.create_container as a factory that\n removes the containers after the fixture is cleaned up.\n\n Depends on create network for correct cleanup order\n ' containers = [] def _create_container(image_name: str, **kwargs): kwargs['name'] = kwargs.get('name', _random_container_name()) cid = docker_client.create_container(image_name, **kwargs) cid = cid.strip() containers.append(cid) return ContainerInfo(cid, kwargs['name']) (yield _create_container) for c in containers: try: docker_client.remove_container(c) except Exception: LOG.warning('failed to remove test container %s', c)<|docstring|>Uses the factory as fixture pattern to wrap ContainerClient.create_container as a factory that removes the containers after the fixture is cleaned up. Depends on create network for correct cleanup order<|endoftext|>
a9162a6c37bb384d4451520a1ed5863949c2a1898e46de709d07c51d0d08bfdc
@pytest.fixture def create_network(): '\n Uses the factory as fixture pattern to wrap the creation of networks as a factory that\n removes the networks after the fixture is cleaned up.\n ' networks = [] def _create_network(network_name: str): network_id = safe_run([config.DOCKER_CMD, 'network', 'create', network_name]).strip() networks.append(network_id) return network_id (yield _create_network) for network in networks: try: LOG.debug('Removing network %s', network) safe_run([config.DOCKER_CMD, 'network', 'remove', network]) except CalledProcessError: pass
Uses the factory as fixture pattern to wrap the creation of networks as a factory that removes the networks after the fixture is cleaned up.
tests/integration/docker_utils/test_docker.py
create_network
pinzon/localstack
31,928
python
@pytest.fixture def create_network(): '\n Uses the factory as fixture pattern to wrap the creation of networks as a factory that\n removes the networks after the fixture is cleaned up.\n ' networks = [] def _create_network(network_name: str): network_id = safe_run([config.DOCKER_CMD, 'network', 'create', network_name]).strip() networks.append(network_id) return network_id (yield _create_network) for network in networks: try: LOG.debug('Removing network %s', network) safe_run([config.DOCKER_CMD, 'network', 'remove', network]) except CalledProcessError: pass
@pytest.fixture def create_network(): '\n Uses the factory as fixture pattern to wrap the creation of networks as a factory that\n removes the networks after the fixture is cleaned up.\n ' networks = [] def _create_network(network_name: str): network_id = safe_run([config.DOCKER_CMD, 'network', 'create', network_name]).strip() networks.append(network_id) return network_id (yield _create_network) for network in networks: try: LOG.debug('Removing network %s', network) safe_run([config.DOCKER_CMD, 'network', 'remove', network]) except CalledProcessError: pass<|docstring|>Uses the factory as fixture pattern to wrap the creation of networks as a factory that removes the networks after the fixture is cleaned up.<|endoftext|>
f8508b1185082d09719b6ed6fd80155fa8f2dccc846624764de18b3d3b14bf10
def test_create_r2plus1d(self): '\n Test simple r2plus1d with different inputs.\n ' for (input_channel, input_clip_length, input_crop_size) in itertools.product((3, 2), (4, 8), (56, 64)): stage_spatial_stride = (2, 2, 2, 2) stage_temporal_stride = (1, 1, 2, 2) total_spatial_stride = (2 * np.prod(stage_spatial_stride)) total_temporal_stride = np.prod(stage_temporal_stride) head_pool_kernel_size = ((input_clip_length // total_temporal_stride), (input_crop_size // total_spatial_stride), (input_crop_size // total_spatial_stride)) model = create_r2plus1d(input_channel=input_channel, model_depth=50, model_num_class=400, dropout_rate=0.0, norm=nn.BatchNorm3d, activation=nn.ReLU, stem_dim_out=8, stem_conv_kernel_size=(1, 7, 7), stem_conv_stride=(1, 2, 2), stage_conv_b_kernel_size=(((3, 3, 3),) * 4), stage_spatial_stride=stage_spatial_stride, stage_temporal_stride=stage_temporal_stride, stage_bottleneck=(create_bottleneck_block, create_2plus1d_bottleneck_block, create_2plus1d_bottleneck_block, create_2plus1d_bottleneck_block), head_pool=nn.AvgPool3d, head_pool_kernel_size=head_pool_kernel_size, head_output_size=(1, 1, 1), head_activation=nn.Softmax) for tensor in TestR2plus1d._get_inputs(input_channel, input_clip_length, input_crop_size): if (tensor.shape[1] != input_channel): with self.assertRaises(RuntimeError): out = model(tensor) continue out = model(tensor) output_shape = out.shape output_shape_gt = (tensor.shape[0], 400) self.assertEqual(output_shape, output_shape_gt, 'Output shape {} is different from expected shape {}'.format(output_shape, output_shape_gt))
Test simple r2plus1d with different inputs.
tests/test_models_r2plus1d.py
test_create_r2plus1d
nateraw/pytorchvideo
2,391
python
def test_create_r2plus1d(self): '\n \n ' for (input_channel, input_clip_length, input_crop_size) in itertools.product((3, 2), (4, 8), (56, 64)): stage_spatial_stride = (2, 2, 2, 2) stage_temporal_stride = (1, 1, 2, 2) total_spatial_stride = (2 * np.prod(stage_spatial_stride)) total_temporal_stride = np.prod(stage_temporal_stride) head_pool_kernel_size = ((input_clip_length // total_temporal_stride), (input_crop_size // total_spatial_stride), (input_crop_size // total_spatial_stride)) model = create_r2plus1d(input_channel=input_channel, model_depth=50, model_num_class=400, dropout_rate=0.0, norm=nn.BatchNorm3d, activation=nn.ReLU, stem_dim_out=8, stem_conv_kernel_size=(1, 7, 7), stem_conv_stride=(1, 2, 2), stage_conv_b_kernel_size=(((3, 3, 3),) * 4), stage_spatial_stride=stage_spatial_stride, stage_temporal_stride=stage_temporal_stride, stage_bottleneck=(create_bottleneck_block, create_2plus1d_bottleneck_block, create_2plus1d_bottleneck_block, create_2plus1d_bottleneck_block), head_pool=nn.AvgPool3d, head_pool_kernel_size=head_pool_kernel_size, head_output_size=(1, 1, 1), head_activation=nn.Softmax) for tensor in TestR2plus1d._get_inputs(input_channel, input_clip_length, input_crop_size): if (tensor.shape[1] != input_channel): with self.assertRaises(RuntimeError): out = model(tensor) continue out = model(tensor) output_shape = out.shape output_shape_gt = (tensor.shape[0], 400) self.assertEqual(output_shape, output_shape_gt, 'Output shape {} is different from expected shape {}'.format(output_shape, output_shape_gt))
def test_create_r2plus1d(self): '\n \n ' for (input_channel, input_clip_length, input_crop_size) in itertools.product((3, 2), (4, 8), (56, 64)): stage_spatial_stride = (2, 2, 2, 2) stage_temporal_stride = (1, 1, 2, 2) total_spatial_stride = (2 * np.prod(stage_spatial_stride)) total_temporal_stride = np.prod(stage_temporal_stride) head_pool_kernel_size = ((input_clip_length // total_temporal_stride), (input_crop_size // total_spatial_stride), (input_crop_size // total_spatial_stride)) model = create_r2plus1d(input_channel=input_channel, model_depth=50, model_num_class=400, dropout_rate=0.0, norm=nn.BatchNorm3d, activation=nn.ReLU, stem_dim_out=8, stem_conv_kernel_size=(1, 7, 7), stem_conv_stride=(1, 2, 2), stage_conv_b_kernel_size=(((3, 3, 3),) * 4), stage_spatial_stride=stage_spatial_stride, stage_temporal_stride=stage_temporal_stride, stage_bottleneck=(create_bottleneck_block, create_2plus1d_bottleneck_block, create_2plus1d_bottleneck_block, create_2plus1d_bottleneck_block), head_pool=nn.AvgPool3d, head_pool_kernel_size=head_pool_kernel_size, head_output_size=(1, 1, 1), head_activation=nn.Softmax) for tensor in TestR2plus1d._get_inputs(input_channel, input_clip_length, input_crop_size): if (tensor.shape[1] != input_channel): with self.assertRaises(RuntimeError): out = model(tensor) continue out = model(tensor) output_shape = out.shape output_shape_gt = (tensor.shape[0], 400) self.assertEqual(output_shape, output_shape_gt, 'Output shape {} is different from expected shape {}'.format(output_shape, output_shape_gt))<|docstring|>Test simple r2plus1d with different inputs.<|endoftext|>
c4d143b562eeba9a6aef7df554a4dab23be3388a1192f4d8239c6f749a71fca6
@staticmethod def _get_inputs(channel: int=3, clip_length: int=16, crop_size: int=224) -> torch.tensor: '\n Provide different tensors as test cases.\n\n Yield:\n (torch.tensor): tensor as test case input.\n ' shapes = ((1, channel, clip_length, crop_size, crop_size), (2, channel, clip_length, crop_size, crop_size)) for shape in shapes: (yield torch.rand(shape))
Provide different tensors as test cases. Yield: (torch.tensor): tensor as test case input.
tests/test_models_r2plus1d.py
_get_inputs
nateraw/pytorchvideo
2,391
python
@staticmethod def _get_inputs(channel: int=3, clip_length: int=16, crop_size: int=224) -> torch.tensor: '\n Provide different tensors as test cases.\n\n Yield:\n (torch.tensor): tensor as test case input.\n ' shapes = ((1, channel, clip_length, crop_size, crop_size), (2, channel, clip_length, crop_size, crop_size)) for shape in shapes: (yield torch.rand(shape))
@staticmethod def _get_inputs(channel: int=3, clip_length: int=16, crop_size: int=224) -> torch.tensor: '\n Provide different tensors as test cases.\n\n Yield:\n (torch.tensor): tensor as test case input.\n ' shapes = ((1, channel, clip_length, crop_size, crop_size), (2, channel, clip_length, crop_size, crop_size)) for shape in shapes: (yield torch.rand(shape))<|docstring|>Provide different tensors as test cases. Yield: (torch.tensor): tensor as test case input.<|endoftext|>
4ee2cbbe23f606399e2a564c3d1da8106e94c2a4476df897e6cd6337ef3f553d
def run(self, checksOnly=False, doAsserts=False): '\n Entry point for validation\n ' try: self._checksOnly = checksOnly self._process(self._conf, confscheme, '', allowUnknownKeys=True) except ZenMakeConfError as ex: origMsg = ex.msg ex.msg = ('Error in the file %r:' % self._conf['__file__']) for line in origMsg.splitlines(): ex.msg += ('\n %s' % line) raise ex if doAsserts: assert (_oldconfscheme == confscheme)
Entry point for validation
src/zenmake/zm/buildconf/validator.py
run
pustotnik/raven
0
python
def run(self, checksOnly=False, doAsserts=False): '\n \n ' try: self._checksOnly = checksOnly self._process(self._conf, confscheme, , allowUnknownKeys=True) except ZenMakeConfError as ex: origMsg = ex.msg ex.msg = ('Error in the file %r:' % self._conf['__file__']) for line in origMsg.splitlines(): ex.msg += ('\n %s' % line) raise ex if doAsserts: assert (_oldconfscheme == confscheme)
def run(self, checksOnly=False, doAsserts=False): '\n \n ' try: self._checksOnly = checksOnly self._process(self._conf, confscheme, , allowUnknownKeys=True) except ZenMakeConfError as ex: origMsg = ex.msg ex.msg = ('Error in the file %r:' % self._conf['__file__']) for line in origMsg.splitlines(): ex.msg += ('\n %s' % line) raise ex if doAsserts: assert (_oldconfscheme == confscheme)<|docstring|>Entry point for validation<|endoftext|>
4bc57219c7912341e3f98b15ffdd2e7f57e24b0ae3ef89cf152d25ff4ae67080
def __init__(self, sg: 'ShapesGraph', node: Union[(URIRef, BNode)], p=False, path: Optional[Union[(URIRef, BNode)]]=None, logger=None): '\n Shape\n :type sg: ShapesGraph\n :type node: URIRef | BNode\n :type p: bool\n :type path: URIRef | BNode | None\n :type logger: logging.Logger\n ' self.logger = (logger or logging.getLogger(__name__)) self.sg = sg self.node = node self._p = p self._path = path self._advanced = False deactivated_vals = set(self.objects(SH_deactivated)) if (len(deactivated_vals) > 1): raise ShapeLoadError('A SHACL Shape cannot have more than one sh:deactivated predicate.', 'https://www.w3.org/TR/shacl/#deactivated') elif (len(deactivated_vals) < 1): self._deactivated = False else: d = next(iter(deactivated_vals)) if (not isinstance(d, Literal)): raise ShapeLoadError('The value of sh:deactivated predicate on a SHACL Shape must be a Literal.', 'https://www.w3.org/TR/shacl/#deactivated') self._deactivated = bool(d.value) severity = set(self.objects(SH_severity)) if len(severity): self._severity = next(iter(severity)) else: self._severity = SH_Violation messages = set(self.objects(SH_message)) if len(messages): self._messages = messages else: self._messages = set() names = set(self.objects(SH_name)) if len(names): self._names = names else: self._names = set() descriptions = set(self.objects(SH_description)) if len(descriptions): self._descriptions = descriptions else: self._descriptions = set()
Shape :type sg: ShapesGraph :type node: URIRef | BNode :type p: bool :type path: URIRef | BNode | None :type logger: logging.Logger
pyshacl/shape.py
__init__
westurner/pySHACL
0
python
def __init__(self, sg: 'ShapesGraph', node: Union[(URIRef, BNode)], p=False, path: Optional[Union[(URIRef, BNode)]]=None, logger=None): '\n Shape\n :type sg: ShapesGraph\n :type node: URIRef | BNode\n :type p: bool\n :type path: URIRef | BNode | None\n :type logger: logging.Logger\n ' self.logger = (logger or logging.getLogger(__name__)) self.sg = sg self.node = node self._p = p self._path = path self._advanced = False deactivated_vals = set(self.objects(SH_deactivated)) if (len(deactivated_vals) > 1): raise ShapeLoadError('A SHACL Shape cannot have more than one sh:deactivated predicate.', 'https://www.w3.org/TR/shacl/#deactivated') elif (len(deactivated_vals) < 1): self._deactivated = False else: d = next(iter(deactivated_vals)) if (not isinstance(d, Literal)): raise ShapeLoadError('The value of sh:deactivated predicate on a SHACL Shape must be a Literal.', 'https://www.w3.org/TR/shacl/#deactivated') self._deactivated = bool(d.value) severity = set(self.objects(SH_severity)) if len(severity): self._severity = next(iter(severity)) else: self._severity = SH_Violation messages = set(self.objects(SH_message)) if len(messages): self._messages = messages else: self._messages = set() names = set(self.objects(SH_name)) if len(names): self._names = names else: self._names = set() descriptions = set(self.objects(SH_description)) if len(descriptions): self._descriptions = descriptions else: self._descriptions = set()
def __init__(self, sg: 'ShapesGraph', node: Union[(URIRef, BNode)], p=False, path: Optional[Union[(URIRef, BNode)]]=None, logger=None): '\n Shape\n :type sg: ShapesGraph\n :type node: URIRef | BNode\n :type p: bool\n :type path: URIRef | BNode | None\n :type logger: logging.Logger\n ' self.logger = (logger or logging.getLogger(__name__)) self.sg = sg self.node = node self._p = p self._path = path self._advanced = False deactivated_vals = set(self.objects(SH_deactivated)) if (len(deactivated_vals) > 1): raise ShapeLoadError('A SHACL Shape cannot have more than one sh:deactivated predicate.', 'https://www.w3.org/TR/shacl/#deactivated') elif (len(deactivated_vals) < 1): self._deactivated = False else: d = next(iter(deactivated_vals)) if (not isinstance(d, Literal)): raise ShapeLoadError('The value of sh:deactivated predicate on a SHACL Shape must be a Literal.', 'https://www.w3.org/TR/shacl/#deactivated') self._deactivated = bool(d.value) severity = set(self.objects(SH_severity)) if len(severity): self._severity = next(iter(severity)) else: self._severity = SH_Violation messages = set(self.objects(SH_message)) if len(messages): self._messages = messages else: self._messages = set() names = set(self.objects(SH_name)) if len(names): self._names = names else: self._names = set() descriptions = set(self.objects(SH_description)) if len(descriptions): self._descriptions = descriptions else: self._descriptions = set()<|docstring|>Shape :type sg: ShapesGraph :type node: URIRef | BNode :type p: bool :type path: URIRef | BNode | None :type logger: logging.Logger<|endoftext|>
a932f2259728aff9aa543cae72f499d2a4b53f0fa2fca65aaf3cdc0e21738109
def focus_nodes(self, data_graph): '\n The set of focus nodes for a shape may be identified as follows:\n\n specified in a shape using target declarations\n specified in any constraint that references a shape in parameters of shape-expecting constraint parameters (e.g. sh:node)\n specified as explicit input to the SHACL processor for validating a specific RDF term against a shape\n :return:\n ' (target_nodes, target_classes, implicit_classes, target_objects_of, target_subjects_of) = self.target() if self._advanced: advanced_targets = self.advanced_target() else: advanced_targets = False found_node_targets = set() found_node_targets.update(iter(target_nodes)) target_classes = set(target_classes) target_classes.update(set(implicit_classes)) found_target_instances = set() for tc in target_classes: s = data_graph.subjects(RDF_type, tc) found_target_instances.update(s) subc = data_graph.subjects(RDFS_subClassOf, tc) for subclass in iter(subc): if (subclass == tc): continue s1 = data_graph.subjects(RDF_type, subclass) found_target_instances.update(s1) found_node_targets.update(found_target_instances) found_target_subject_of = set() for s_of in target_subjects_of: subs = {s for (s, o) in data_graph.subject_objects(s_of)} found_target_subject_of.update(subs) found_node_targets.update(found_target_subject_of) found_target_object_of = set() for o_of in target_objects_of: objs = {o for (s, o) in data_graph.subject_objects(o_of)} found_target_object_of.update(objs) found_node_targets.update(found_target_object_of) if advanced_targets: for (at_node, at) in advanced_targets.items(): if (at['type'] == SH_SPARQLTarget): qh = at['qh'] select = qh.apply_prefixes(qh.select_text) results = data_graph.query(select, initBindings=None) if ((not results) or (len(results.bindings) < 1)): continue for r in results: t = r['this'] found_node_targets.add(t) elif (at['type'] in (SH_JSTarget, SH_JSTargetType)): results = at['targeter'].find_targets(data_graph) for r in results: 
found_node_targets.add(r) else: results = at['qt'].find_targets(data_graph) if ((not results) or (len(results.bindings) < 1)): continue for r in results: t = r['this'] found_node_targets.add(t) return found_node_targets
The set of focus nodes for a shape may be identified as follows: specified in a shape using target declarations specified in any constraint that references a shape in parameters of shape-expecting constraint parameters (e.g. sh:node) specified as explicit input to the SHACL processor for validating a specific RDF term against a shape :return:
pyshacl/shape.py
focus_nodes
westurner/pySHACL
0
python
def focus_nodes(self, data_graph): '\n The set of focus nodes for a shape may be identified as follows:\n\n specified in a shape using target declarations\n specified in any constraint that references a shape in parameters of shape-expecting constraint parameters (e.g. sh:node)\n specified as explicit input to the SHACL processor for validating a specific RDF term against a shape\n :return:\n ' (target_nodes, target_classes, implicit_classes, target_objects_of, target_subjects_of) = self.target() if self._advanced: advanced_targets = self.advanced_target() else: advanced_targets = False found_node_targets = set() found_node_targets.update(iter(target_nodes)) target_classes = set(target_classes) target_classes.update(set(implicit_classes)) found_target_instances = set() for tc in target_classes: s = data_graph.subjects(RDF_type, tc) found_target_instances.update(s) subc = data_graph.subjects(RDFS_subClassOf, tc) for subclass in iter(subc): if (subclass == tc): continue s1 = data_graph.subjects(RDF_type, subclass) found_target_instances.update(s1) found_node_targets.update(found_target_instances) found_target_subject_of = set() for s_of in target_subjects_of: subs = {s for (s, o) in data_graph.subject_objects(s_of)} found_target_subject_of.update(subs) found_node_targets.update(found_target_subject_of) found_target_object_of = set() for o_of in target_objects_of: objs = {o for (s, o) in data_graph.subject_objects(o_of)} found_target_object_of.update(objs) found_node_targets.update(found_target_object_of) if advanced_targets: for (at_node, at) in advanced_targets.items(): if (at['type'] == SH_SPARQLTarget): qh = at['qh'] select = qh.apply_prefixes(qh.select_text) results = data_graph.query(select, initBindings=None) if ((not results) or (len(results.bindings) < 1)): continue for r in results: t = r['this'] found_node_targets.add(t) elif (at['type'] in (SH_JSTarget, SH_JSTargetType)): results = at['targeter'].find_targets(data_graph) for r in results: 
found_node_targets.add(r) else: results = at['qt'].find_targets(data_graph) if ((not results) or (len(results.bindings) < 1)): continue for r in results: t = r['this'] found_node_targets.add(t) return found_node_targets
def focus_nodes(self, data_graph): '\n The set of focus nodes for a shape may be identified as follows:\n\n specified in a shape using target declarations\n specified in any constraint that references a shape in parameters of shape-expecting constraint parameters (e.g. sh:node)\n specified as explicit input to the SHACL processor for validating a specific RDF term against a shape\n :return:\n ' (target_nodes, target_classes, implicit_classes, target_objects_of, target_subjects_of) = self.target() if self._advanced: advanced_targets = self.advanced_target() else: advanced_targets = False found_node_targets = set() found_node_targets.update(iter(target_nodes)) target_classes = set(target_classes) target_classes.update(set(implicit_classes)) found_target_instances = set() for tc in target_classes: s = data_graph.subjects(RDF_type, tc) found_target_instances.update(s) subc = data_graph.subjects(RDFS_subClassOf, tc) for subclass in iter(subc): if (subclass == tc): continue s1 = data_graph.subjects(RDF_type, subclass) found_target_instances.update(s1) found_node_targets.update(found_target_instances) found_target_subject_of = set() for s_of in target_subjects_of: subs = {s for (s, o) in data_graph.subject_objects(s_of)} found_target_subject_of.update(subs) found_node_targets.update(found_target_subject_of) found_target_object_of = set() for o_of in target_objects_of: objs = {o for (s, o) in data_graph.subject_objects(o_of)} found_target_object_of.update(objs) found_node_targets.update(found_target_object_of) if advanced_targets: for (at_node, at) in advanced_targets.items(): if (at['type'] == SH_SPARQLTarget): qh = at['qh'] select = qh.apply_prefixes(qh.select_text) results = data_graph.query(select, initBindings=None) if ((not results) or (len(results.bindings) < 1)): continue for r in results: t = r['this'] found_node_targets.add(t) elif (at['type'] in (SH_JSTarget, SH_JSTargetType)): results = at['targeter'].find_targets(data_graph) for r in results: 
found_node_targets.add(r) else: results = at['qt'].find_targets(data_graph) if ((not results) or (len(results.bindings) < 1)): continue for r in results: t = r['this'] found_node_targets.add(t) return found_node_targets<|docstring|>The set of focus nodes for a shape may be identified as follows: specified in a shape using target declarations specified in any constraint that references a shape in parameters of shape-expecting constraint parameters (e.g. sh:node) specified as explicit input to the SHACL processor for validating a specific RDF term against a shape :return:<|endoftext|>
ae497eaebd7d9cc982ab96751431e2d2c3161ea522b94ab041c1f232dffa73ea
def value_nodes(self, target_graph, focus): '\n For each focus node, you can get a set of value nodes.\n For a Node Shape, each focus node has just one value node,\n which is just the focus_node\n :param target_graph:\n :param focus:\n :return:\n ' if (not isinstance(focus, (tuple, list, set))): focus = [focus] if (not self.is_property_shape): return {f: set((f,)) for f in focus} path_val = self.path() focus_dict = {} for f in focus: focus_dict[f] = self.value_nodes_from_path(self.sg, f, path_val, target_graph) return focus_dict
For each focus node, you can get a set of value nodes. For a Node Shape, each focus node has just one value node, which is just the focus_node :param target_graph: :param focus: :return:
pyshacl/shape.py
value_nodes
westurner/pySHACL
0
python
def value_nodes(self, target_graph, focus): '\n For each focus node, you can get a set of value nodes.\n For a Node Shape, each focus node has just one value node,\n which is just the focus_node\n :param target_graph:\n :param focus:\n :return:\n ' if (not isinstance(focus, (tuple, list, set))): focus = [focus] if (not self.is_property_shape): return {f: set((f,)) for f in focus} path_val = self.path() focus_dict = {} for f in focus: focus_dict[f] = self.value_nodes_from_path(self.sg, f, path_val, target_graph) return focus_dict
def value_nodes(self, target_graph, focus): '\n For each focus node, you can get a set of value nodes.\n For a Node Shape, each focus node has just one value node,\n which is just the focus_node\n :param target_graph:\n :param focus:\n :return:\n ' if (not isinstance(focus, (tuple, list, set))): focus = [focus] if (not self.is_property_shape): return {f: set((f,)) for f in focus} path_val = self.path() focus_dict = {} for f in focus: focus_dict[f] = self.value_nodes_from_path(self.sg, f, path_val, target_graph) return focus_dict<|docstring|>For each focus node, you can get a set of value nodes. For a Node Shape, each focus node has just one value node, which is just the focus_node :param target_graph: :param focus: :return:<|endoftext|>
8f98f3768ad01406b8bac20e6b005528fa82cc56ebe252d631989cb5cb37b406
@property def max_iter(self): 'Returns the maximum number of descent iterations' return self._max_iter
Returns the maximum number of descent iterations
SecML/src/secml/optim/optimizers/c_optimizer_pgd_ls.py
max_iter
dsolanno/Poisoning-Attacks-on-Algorithmic-Fairness
5
python
@property def max_iter(self): return self._max_iter
@property def max_iter(self): return self._max_iter<|docstring|>Returns the maximum number of descent iterations<|endoftext|>
b128fa02f197a5445e3b4ece2f7e791e5c6c35dc559272a1d986fa90e0aa5959
@max_iter.setter def max_iter(self, value): 'Set the maximum number of descent iterations' self._max_iter = int(value)
Set the maximum number of descent iterations
SecML/src/secml/optim/optimizers/c_optimizer_pgd_ls.py
max_iter
dsolanno/Poisoning-Attacks-on-Algorithmic-Fairness
5
python
@max_iter.setter def max_iter(self, value): self._max_iter = int(value)
@max_iter.setter def max_iter(self, value): self._max_iter = int(value)<|docstring|>Set the maximum number of descent iterations<|endoftext|>
ffe1970959fd48941d0d94e550c32ec79b3911ceb092cf74a90dee49df84b972
@property def eps(self): 'Return tolerance value for stop criterion' return self._eps
Return tolerance value for stop criterion
SecML/src/secml/optim/optimizers/c_optimizer_pgd_ls.py
eps
dsolanno/Poisoning-Attacks-on-Algorithmic-Fairness
5
python
@property def eps(self): return self._eps
@property def eps(self): return self._eps<|docstring|>Return tolerance value for stop criterion<|endoftext|>
310e014ac8e3dddade52d845021fef028a9ac44f5e355853fb1af268240bc667
@eps.setter def eps(self, value): 'Set tolerance value for stop criterion' self._eps = float(value)
Set tolerance value for stop criterion
SecML/src/secml/optim/optimizers/c_optimizer_pgd_ls.py
eps
dsolanno/Poisoning-Attacks-on-Algorithmic-Fairness
5
python
@eps.setter def eps(self, value): self._eps = float(value)
@eps.setter def eps(self, value): self._eps = float(value)<|docstring|>Set tolerance value for stop criterion<|endoftext|>
182a2921da08a34d196a7d30979fc8caabf31b56b2a35c9c2e130bb3c6faa1c0
@property def discrete(self): 'True if feature space is discrete, False if continuous.' return self._discrete
True if feature space is discrete, False if continuous.
SecML/src/secml/optim/optimizers/c_optimizer_pgd_ls.py
discrete
dsolanno/Poisoning-Attacks-on-Algorithmic-Fairness
5
python
@property def discrete(self): return self._discrete
@property def discrete(self): return self._discrete<|docstring|>True if feature space is discrete, False if continuous.<|endoftext|>
905c9853dfb46c1af463a535cfee32bb71910681309b080b0a72fa719c747776
@discrete.setter def discrete(self, value): 'True if feature space is discrete, False if continuous.' self._discrete = bool(value)
True if feature space is discrete, False if continuous.
SecML/src/secml/optim/optimizers/c_optimizer_pgd_ls.py
discrete
dsolanno/Poisoning-Attacks-on-Algorithmic-Fairness
5
python
@discrete.setter def discrete(self, value): self._discrete = bool(value)
@discrete.setter def discrete(self, value): self._discrete = bool(value)<|docstring|>True if feature space is discrete, False if continuous.<|endoftext|>
5e9cdf3dabd5e68a0eb0a2582407276057da01b74041825dc901935c90ba1e2a
def _init_line_search(self, eta, eta_min, eta_max, discrete): 'Initialize line-search optimizer' if ((discrete is True) and (self.constr is not None) and (self.constr.class_type == 'l2')): raise NotImplementedError('L2 constraint is not supported for discrete optimization') self._line_search = CLineSearchBisect(fun=self._fun, constr=self._constr, bounds=self._bounds, max_iter=50, eta=eta, eta_min=eta_min, eta_max=eta_max)
Initialize line-search optimizer
SecML/src/secml/optim/optimizers/c_optimizer_pgd_ls.py
_init_line_search
dsolanno/Poisoning-Attacks-on-Algorithmic-Fairness
5
python
def _init_line_search(self, eta, eta_min, eta_max, discrete): if ((discrete is True) and (self.constr is not None) and (self.constr.class_type == 'l2')): raise NotImplementedError('L2 constraint is not supported for discrete optimization') self._line_search = CLineSearchBisect(fun=self._fun, constr=self._constr, bounds=self._bounds, max_iter=50, eta=eta, eta_min=eta_min, eta_max=eta_max)
def _init_line_search(self, eta, eta_min, eta_max, discrete): if ((discrete is True) and (self.constr is not None) and (self.constr.class_type == 'l2')): raise NotImplementedError('L2 constraint is not supported for discrete optimization') self._line_search = CLineSearchBisect(fun=self._fun, constr=self._constr, bounds=self._bounds, max_iter=50, eta=eta, eta_min=eta_min, eta_max=eta_max)<|docstring|>Initialize line-search optimizer<|endoftext|>
92e8fd396b6dffbf2172d8f7ef8de336d41d35011132f9249ed4c0e689267934
@staticmethod def _l1_projected_gradient(grad): "\n Find v that maximizes v'grad onto the unary-norm l1 ball.\n This is the maximization of an inner product over the l1 ball,\n and the optimal (sparse) direction v is found by setting\n v = sign(grad) when abs(grad) is maximum and 0 elsewhere.\n " abs_grad = abs(grad) grad_max = abs_grad.max() argmax_pos = (abs_grad == grad_max) proj_grad = CArray.zeros(shape=grad.shape, sparse=grad.issparse) proj_grad[argmax_pos] = grad[argmax_pos].sign() return proj_grad
Find v that maximizes v'grad onto the unary-norm l1 ball. This is the maximization of an inner product over the l1 ball, and the optimal (sparse) direction v is found by setting v = sign(grad) when abs(grad) is maximum and 0 elsewhere.
SecML/src/secml/optim/optimizers/c_optimizer_pgd_ls.py
_l1_projected_gradient
dsolanno/Poisoning-Attacks-on-Algorithmic-Fairness
5
python
@staticmethod def _l1_projected_gradient(grad): "\n Find v that maximizes v'grad onto the unary-norm l1 ball.\n This is the maximization of an inner product over the l1 ball,\n and the optimal (sparse) direction v is found by setting\n v = sign(grad) when abs(grad) is maximum and 0 elsewhere.\n " abs_grad = abs(grad) grad_max = abs_grad.max() argmax_pos = (abs_grad == grad_max) proj_grad = CArray.zeros(shape=grad.shape, sparse=grad.issparse) proj_grad[argmax_pos] = grad[argmax_pos].sign() return proj_grad
@staticmethod def _l1_projected_gradient(grad): "\n Find v that maximizes v'grad onto the unary-norm l1 ball.\n This is the maximization of an inner product over the l1 ball,\n and the optimal (sparse) direction v is found by setting\n v = sign(grad) when abs(grad) is maximum and 0 elsewhere.\n " abs_grad = abs(grad) grad_max = abs_grad.max() argmax_pos = (abs_grad == grad_max) proj_grad = CArray.zeros(shape=grad.shape, sparse=grad.issparse) proj_grad[argmax_pos] = grad[argmax_pos].sign() return proj_grad<|docstring|>Find v that maximizes v'grad onto the unary-norm l1 ball. This is the maximization of an inner product over the l1 ball, and the optimal (sparse) direction v is found by setting v = sign(grad) when abs(grad) is maximum and 0 elsewhere.<|endoftext|>
53407594f1b8b117cf9a31759523d45c9999439245da47f814cf9390c8273087
def _box_projected_gradient(self, x, grad): '\n Exclude from descent direction those features which,\n if modified according to the given descent direction,\n would violate the box constraint.\n\n ' if (self.bounds is None): return grad x_lb = (x.round(6) == CArray(self.bounds.lb).round(6)).logical_and((grad.todense() > 0)).astype(bool) x_ub = (x.round(6) == CArray(self.bounds.ub).round(6)).logical_and((grad.todense() < 0)).astype(bool) grad[(x_lb + x_ub)] = 0 return grad
Exclude from descent direction those features which, if modified according to the given descent direction, would violate the box constraint.
SecML/src/secml/optim/optimizers/c_optimizer_pgd_ls.py
_box_projected_gradient
dsolanno/Poisoning-Attacks-on-Algorithmic-Fairness
5
python
def _box_projected_gradient(self, x, grad): '\n Exclude from descent direction those features which,\n if modified according to the given descent direction,\n would violate the box constraint.\n\n ' if (self.bounds is None): return grad x_lb = (x.round(6) == CArray(self.bounds.lb).round(6)).logical_and((grad.todense() > 0)).astype(bool) x_ub = (x.round(6) == CArray(self.bounds.ub).round(6)).logical_and((grad.todense() < 0)).astype(bool) grad[(x_lb + x_ub)] = 0 return grad
def _box_projected_gradient(self, x, grad): '\n Exclude from descent direction those features which,\n if modified according to the given descent direction,\n would violate the box constraint.\n\n ' if (self.bounds is None): return grad x_lb = (x.round(6) == CArray(self.bounds.lb).round(6)).logical_and((grad.todense() > 0)).astype(bool) x_ub = (x.round(6) == CArray(self.bounds.ub).round(6)).logical_and((grad.todense() < 0)).astype(bool) grad[(x_lb + x_ub)] = 0 return grad<|docstring|>Exclude from descent direction those features which, if modified according to the given descent direction, would violate the box constraint.<|endoftext|>
3fc6e39f28b5d207e29dc34ddd2f01179865673537863271f3fe58c0624de9a1
def _xk(self, x, fx, *args): 'Returns a new point after gradient descent.' grad = self._fun.gradient(x, *args) self._grad = grad norm = grad.norm() if (norm < 1e-20): return (x, fx) grad = (grad / norm) grad = self._box_projected_gradient(x, grad) if (self.discrete or ((self.constr is not None) and (self.constr.class_type == 'l1'))): grad = self._l1_projected_gradient(grad) next_point = (x - (grad * self._line_search.eta)) if ((self.constr is not None) and self.constr.is_violated(next_point)): self.logger.debug('Line-search on distance constraint.') grad = CArray((x - self.constr.projection(next_point))) if (self.constr.class_type == 'l1'): grad = grad.sign() (z, fz) = self._line_search.minimize(x, (- grad), fx) return (z, fz) if ((self.bounds is not None) and self.bounds.is_violated(next_point)): self.logger.debug('Line-search on box constraint.') grad = CArray((x - self.bounds.projection(next_point))) (z, fz) = self._line_search.minimize(x, (- grad), fx) return (z, fz) (z, fz) = self._line_search.minimize(x, (- grad), fx) return (z, fz)
Returns a new point after gradient descent.
SecML/src/secml/optim/optimizers/c_optimizer_pgd_ls.py
_xk
dsolanno/Poisoning-Attacks-on-Algorithmic-Fairness
5
python
def _xk(self, x, fx, *args): grad = self._fun.gradient(x, *args) self._grad = grad norm = grad.norm() if (norm < 1e-20): return (x, fx) grad = (grad / norm) grad = self._box_projected_gradient(x, grad) if (self.discrete or ((self.constr is not None) and (self.constr.class_type == 'l1'))): grad = self._l1_projected_gradient(grad) next_point = (x - (grad * self._line_search.eta)) if ((self.constr is not None) and self.constr.is_violated(next_point)): self.logger.debug('Line-search on distance constraint.') grad = CArray((x - self.constr.projection(next_point))) if (self.constr.class_type == 'l1'): grad = grad.sign() (z, fz) = self._line_search.minimize(x, (- grad), fx) return (z, fz) if ((self.bounds is not None) and self.bounds.is_violated(next_point)): self.logger.debug('Line-search on box constraint.') grad = CArray((x - self.bounds.projection(next_point))) (z, fz) = self._line_search.minimize(x, (- grad), fx) return (z, fz) (z, fz) = self._line_search.minimize(x, (- grad), fx) return (z, fz)
def _xk(self, x, fx, *args): grad = self._fun.gradient(x, *args) self._grad = grad norm = grad.norm() if (norm < 1e-20): return (x, fx) grad = (grad / norm) grad = self._box_projected_gradient(x, grad) if (self.discrete or ((self.constr is not None) and (self.constr.class_type == 'l1'))): grad = self._l1_projected_gradient(grad) next_point = (x - (grad * self._line_search.eta)) if ((self.constr is not None) and self.constr.is_violated(next_point)): self.logger.debug('Line-search on distance constraint.') grad = CArray((x - self.constr.projection(next_point))) if (self.constr.class_type == 'l1'): grad = grad.sign() (z, fz) = self._line_search.minimize(x, (- grad), fx) return (z, fz) if ((self.bounds is not None) and self.bounds.is_violated(next_point)): self.logger.debug('Line-search on box constraint.') grad = CArray((x - self.bounds.projection(next_point))) (z, fz) = self._line_search.minimize(x, (- grad), fx) return (z, fz) (z, fz) = self._line_search.minimize(x, (- grad), fx) return (z, fz)<|docstring|>Returns a new point after gradient descent.<|endoftext|>
3290c3c8f35a418776e46d4d06f62212150741a9aeb064dd5ef3088d2cff7b1e
def minimize(self, x_init, args=(), **kwargs): '\n Interface to minimizers implementing\n min fun(x)\n s.t. constraint\n\n Parameters\n ----------\n x_init : CArray\n The initial input point.\n args : tuple, optional\n Extra arguments passed to the objective function and its gradient.\n\n Returns\n -------\n f_seq : CArray\n Array containing values of f during optimization.\n x_seq : CArray\n Array containing values of x during optimization.\n\n ' if (len(kwargs) != 0): raise ValueError('{:} does not accept additional parameters.'.format(self.__class__.__name__)) self._f.reset_eval() self._fun.reset_eval() self._init_line_search(eta=self.eta, eta_min=self.eta_min, eta_max=self.eta_max, discrete=self.discrete) if ((self.constr is not None) and (self.constr.radius == 0)): x0 = self.constr.center self._x_seq = CArray.zeros((1, x0.size), sparse=x0.issparse, dtype=x0.dtype) self._f_seq = CArray.zeros(1) self._x_seq[(0, :)] = x0 self._f_seq[0] = self._fun.fun(x0, *args) self._x_opt = x0 return if ((self.bounds is not None) and self.bounds.is_violated(x_init)): x_init = self.bounds.projection(x_init) if ((self.constr is not None) and self.constr.is_violated(x_init)): x_init = self.constr.projection(x_init) if (((self.bounds is not None) and self.bounds.is_violated(x_init)) or ((self.constr is not None) and self.constr.is_violated(x_init))): raise ValueError((('x_init ' + str(x_init)) + ' is outside of feasible domain.')) self._x_seq = CArray.zeros((self.max_iter, x_init.size), sparse=x_init.issparse) if (self.discrete is True): self._x_seq.astype(x_init.dtype) self._f_seq = CArray.zeros(self.max_iter) x = x_init fx = self._fun.fun(x, *args) self._x_seq[(0, :)] = x self._f_seq[0] = fx self.logger.debug(((('Iter.: ' + str(0)) + ', f(x): ') + str(fx))) for i in range(1, self.max_iter): (x, fx) = self._xk(x, fx, *args) self._x_seq[(i, :)] = x self._f_seq[i] = fx self._x_opt = x self.logger.debug(((((('Iter.: ' + str(i)) + ', f(x): ') + str(fx)) + ', norm(gr(x)): ') + 
str(CArray(self._grad).norm()))) diff = abs((self.f_seq[i].item() - self.f_seq[(i - 1)].item())) if (diff < self.eps): self.logger.debug('Flat region, exiting... ({:.4f} / {:.4f})'.format(self._f_seq[i].item(), self._f_seq[(i - 1)].item())) self._x_seq = self.x_seq[(:(i + 1), :)] self._f_seq = self.f_seq[:(i + 1)] return x self.logger.warning('Maximum iterations reached. Exiting.') return x
Interface to minimizers implementing min fun(x) s.t. constraint Parameters ---------- x_init : CArray The initial input point. args : tuple, optional Extra arguments passed to the objective function and its gradient. Returns ------- f_seq : CArray Array containing values of f during optimization. x_seq : CArray Array containing values of x during optimization.
SecML/src/secml/optim/optimizers/c_optimizer_pgd_ls.py
minimize
dsolanno/Poisoning-Attacks-on-Algorithmic-Fairness
5
python
def minimize(self, x_init, args=(), **kwargs): '\n Interface to minimizers implementing\n min fun(x)\n s.t. constraint\n\n Parameters\n ----------\n x_init : CArray\n The initial input point.\n args : tuple, optional\n Extra arguments passed to the objective function and its gradient.\n\n Returns\n -------\n f_seq : CArray\n Array containing values of f during optimization.\n x_seq : CArray\n Array containing values of x during optimization.\n\n ' if (len(kwargs) != 0): raise ValueError('{:} does not accept additional parameters.'.format(self.__class__.__name__)) self._f.reset_eval() self._fun.reset_eval() self._init_line_search(eta=self.eta, eta_min=self.eta_min, eta_max=self.eta_max, discrete=self.discrete) if ((self.constr is not None) and (self.constr.radius == 0)): x0 = self.constr.center self._x_seq = CArray.zeros((1, x0.size), sparse=x0.issparse, dtype=x0.dtype) self._f_seq = CArray.zeros(1) self._x_seq[(0, :)] = x0 self._f_seq[0] = self._fun.fun(x0, *args) self._x_opt = x0 return if ((self.bounds is not None) and self.bounds.is_violated(x_init)): x_init = self.bounds.projection(x_init) if ((self.constr is not None) and self.constr.is_violated(x_init)): x_init = self.constr.projection(x_init) if (((self.bounds is not None) and self.bounds.is_violated(x_init)) or ((self.constr is not None) and self.constr.is_violated(x_init))): raise ValueError((('x_init ' + str(x_init)) + ' is outside of feasible domain.')) self._x_seq = CArray.zeros((self.max_iter, x_init.size), sparse=x_init.issparse) if (self.discrete is True): self._x_seq.astype(x_init.dtype) self._f_seq = CArray.zeros(self.max_iter) x = x_init fx = self._fun.fun(x, *args) self._x_seq[(0, :)] = x self._f_seq[0] = fx self.logger.debug(((('Iter.: ' + str(0)) + ', f(x): ') + str(fx))) for i in range(1, self.max_iter): (x, fx) = self._xk(x, fx, *args) self._x_seq[(i, :)] = x self._f_seq[i] = fx self._x_opt = x self.logger.debug(((((('Iter.: ' + str(i)) + ', f(x): ') + str(fx)) + ', norm(gr(x)): ') + 
str(CArray(self._grad).norm()))) diff = abs((self.f_seq[i].item() - self.f_seq[(i - 1)].item())) if (diff < self.eps): self.logger.debug('Flat region, exiting... ({:.4f} / {:.4f})'.format(self._f_seq[i].item(), self._f_seq[(i - 1)].item())) self._x_seq = self.x_seq[(:(i + 1), :)] self._f_seq = self.f_seq[:(i + 1)] return x self.logger.warning('Maximum iterations reached. Exiting.') return x
def minimize(self, x_init, args=(), **kwargs): '\n Interface to minimizers implementing\n min fun(x)\n s.t. constraint\n\n Parameters\n ----------\n x_init : CArray\n The initial input point.\n args : tuple, optional\n Extra arguments passed to the objective function and its gradient.\n\n Returns\n -------\n f_seq : CArray\n Array containing values of f during optimization.\n x_seq : CArray\n Array containing values of x during optimization.\n\n ' if (len(kwargs) != 0): raise ValueError('{:} does not accept additional parameters.'.format(self.__class__.__name__)) self._f.reset_eval() self._fun.reset_eval() self._init_line_search(eta=self.eta, eta_min=self.eta_min, eta_max=self.eta_max, discrete=self.discrete) if ((self.constr is not None) and (self.constr.radius == 0)): x0 = self.constr.center self._x_seq = CArray.zeros((1, x0.size), sparse=x0.issparse, dtype=x0.dtype) self._f_seq = CArray.zeros(1) self._x_seq[(0, :)] = x0 self._f_seq[0] = self._fun.fun(x0, *args) self._x_opt = x0 return if ((self.bounds is not None) and self.bounds.is_violated(x_init)): x_init = self.bounds.projection(x_init) if ((self.constr is not None) and self.constr.is_violated(x_init)): x_init = self.constr.projection(x_init) if (((self.bounds is not None) and self.bounds.is_violated(x_init)) or ((self.constr is not None) and self.constr.is_violated(x_init))): raise ValueError((('x_init ' + str(x_init)) + ' is outside of feasible domain.')) self._x_seq = CArray.zeros((self.max_iter, x_init.size), sparse=x_init.issparse) if (self.discrete is True): self._x_seq.astype(x_init.dtype) self._f_seq = CArray.zeros(self.max_iter) x = x_init fx = self._fun.fun(x, *args) self._x_seq[(0, :)] = x self._f_seq[0] = fx self.logger.debug(((('Iter.: ' + str(0)) + ', f(x): ') + str(fx))) for i in range(1, self.max_iter): (x, fx) = self._xk(x, fx, *args) self._x_seq[(i, :)] = x self._f_seq[i] = fx self._x_opt = x self.logger.debug(((((('Iter.: ' + str(i)) + ', f(x): ') + str(fx)) + ', norm(gr(x)): ') + 
str(CArray(self._grad).norm()))) diff = abs((self.f_seq[i].item() - self.f_seq[(i - 1)].item())) if (diff < self.eps): self.logger.debug('Flat region, exiting... ({:.4f} / {:.4f})'.format(self._f_seq[i].item(), self._f_seq[(i - 1)].item())) self._x_seq = self.x_seq[(:(i + 1), :)] self._f_seq = self.f_seq[:(i + 1)] return x self.logger.warning('Maximum iterations reached. Exiting.') return x<|docstring|>Interface to minimizers implementing min fun(x) s.t. constraint Parameters ---------- x_init : CArray The initial input point. args : tuple, optional Extra arguments passed to the objective function and its gradient. Returns ------- f_seq : CArray Array containing values of f during optimization. x_seq : CArray Array containing values of x during optimization.<|endoftext|>
1e9378440af977e4dc572756a79950c2b2a33141ed1fec47fb9c2044763832b7
def get_status(self): '\n Return a string status for the given status event,\n used to display in an app row.\n ' raise NotImplementedError
Return a string status for the given status event, used to display in an app row.
apps/status/status_types.py
get_status
cr0mbly/TTGO-esp32-micropython-watch
6
python
def get_status(self): '\n Return a string status for the given status event,\n used to display in an app row.\n ' raise NotImplementedError
def get_status(self): '\n Return a string status for the given status event,\n used to display in an app row.\n ' raise NotImplementedError<|docstring|>Return a string status for the given status event, used to display in an app row.<|endoftext|>
6e5355d34c9e590d90324dc0ea7a92f8c04c8d28e8e5ff61b2745895e61a5a30
def __init__(__self__, resource_name, opts=None, allocated_storage=None, apply_immediately=None, auto_minor_version_upgrade=None, availability_zone=None, engine_version=None, kms_key_arn=None, multi_az=None, preferred_maintenance_window=None, publicly_accessible=None, replication_instance_class=None, replication_instance_id=None, replication_subnet_group_id=None, tags=None, vpc_security_group_ids=None, __name__=None, __opts__=None): '\n Provides a DMS (Data Migration Service) replication instance resource. DMS replication instances can be created, updated, deleted, and imported.\n \n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[float] allocated_storage: The amount of storage (in gigabytes) to be initially allocated for the replication instance.\n :param pulumi.Input[bool] apply_immediately: Indicates whether the changes should be applied immediately or during the next maintenance window. Only used when updating an existing resource.\n :param pulumi.Input[bool] auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the replication instance during the maintenance window.\n :param pulumi.Input[str] availability_zone: The EC2 Availability Zone that the replication instance will be created in.\n :param pulumi.Input[str] engine_version: The engine version number of the replication instance.\n :param pulumi.Input[str] kms_key_arn: The Amazon Resource Name (ARN) for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kms_key_arn`, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.\n :param pulumi.Input[bool] multi_az: Specifies if the replication instance is a multi-az deployment. 
You cannot set the `availability_zone` parameter if the `multi_az` parameter is set to `true`.\n :param pulumi.Input[str] preferred_maintenance_window: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n :param pulumi.Input[bool] publicly_accessible: Specifies the accessibility options for the replication instance. A value of true represents an instance with a public IP address. A value of false represents an instance with a private IP address.\n :param pulumi.Input[str] replication_instance_class: The compute and memory capacity of the replication instance as specified by the replication instance class. Can be one of `dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge`\n :param pulumi.Input[str] replication_instance_id: The replication instance identifier. This parameter is stored as a lowercase string.\n :param pulumi.Input[str] replication_subnet_group_id: A subnet group to associate with the replication instance.\n :param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.\n :param pulumi.Input[list] vpc_security_group_ids: A list of VPC security group IDs to be used with the replication instance. 
The VPC security groups must work with the VPC containing the replication instance.\n ' if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (not resource_name): raise TypeError('Missing resource name argument (for URN creation)') if (not isinstance(resource_name, str)): raise TypeError('Expected resource name to be a string') if (opts and (not isinstance(opts, pulumi.ResourceOptions))): raise TypeError('Expected resource options to be a ResourceOptions instance') __props__ = dict() __props__['allocated_storage'] = allocated_storage __props__['apply_immediately'] = apply_immediately __props__['auto_minor_version_upgrade'] = auto_minor_version_upgrade __props__['availability_zone'] = availability_zone __props__['engine_version'] = engine_version __props__['kms_key_arn'] = kms_key_arn __props__['multi_az'] = multi_az __props__['preferred_maintenance_window'] = preferred_maintenance_window __props__['publicly_accessible'] = publicly_accessible if (replication_instance_class is None): raise TypeError("Missing required property 'replication_instance_class'") __props__['replication_instance_class'] = replication_instance_class if (replication_instance_id is None): raise TypeError("Missing required property 'replication_instance_id'") __props__['replication_instance_id'] = replication_instance_id __props__['replication_subnet_group_id'] = replication_subnet_group_id __props__['tags'] = tags __props__['vpc_security_group_ids'] = vpc_security_group_ids __props__['replication_instance_arn'] = None __props__['replication_instance_private_ips'] = None __props__['replication_instance_public_ips'] = None super(ReplicationInstance, __self__).__init__('aws:dms/replicationInstance:ReplicationInstance', resource_name, __props__, opts)
Provides a DMS (Data Migration Service) replication instance resource. DMS replication instances can be created, updated, deleted, and imported. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[float] allocated_storage: The amount of storage (in gigabytes) to be initially allocated for the replication instance. :param pulumi.Input[bool] apply_immediately: Indicates whether the changes should be applied immediately or during the next maintenance window. Only used when updating an existing resource. :param pulumi.Input[bool] auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the replication instance during the maintenance window. :param pulumi.Input[str] availability_zone: The EC2 Availability Zone that the replication instance will be created in. :param pulumi.Input[str] engine_version: The engine version number of the replication instance. :param pulumi.Input[str] kms_key_arn: The Amazon Resource Name (ARN) for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kms_key_arn`, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. :param pulumi.Input[bool] multi_az: Specifies if the replication instance is a multi-az deployment. You cannot set the `availability_zone` parameter if the `multi_az` parameter is set to `true`. :param pulumi.Input[str] preferred_maintenance_window: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). :param pulumi.Input[bool] publicly_accessible: Specifies the accessibility options for the replication instance. A value of true represents an instance with a public IP address. A value of false represents an instance with a private IP address. 
:param pulumi.Input[str] replication_instance_class: The compute and memory capacity of the replication instance as specified by the replication instance class. Can be one of `dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge` :param pulumi.Input[str] replication_instance_id: The replication instance identifier. This parameter is stored as a lowercase string. :param pulumi.Input[str] replication_subnet_group_id: A subnet group to associate with the replication instance. :param pulumi.Input[dict] tags: A mapping of tags to assign to the resource. :param pulumi.Input[list] vpc_security_group_ids: A list of VPC security group IDs to be used with the replication instance. The VPC security groups must work with the VPC containing the replication instance.
sdk/python/pulumi_aws/dms/replication_instance.py
__init__
Charliekenney23/pulumi-aws
0
python
def __init__(__self__, resource_name, opts=None, allocated_storage=None, apply_immediately=None, auto_minor_version_upgrade=None, availability_zone=None, engine_version=None, kms_key_arn=None, multi_az=None, preferred_maintenance_window=None, publicly_accessible=None, replication_instance_class=None, replication_instance_id=None, replication_subnet_group_id=None, tags=None, vpc_security_group_ids=None, __name__=None, __opts__=None): '\n Provides a DMS (Data Migration Service) replication instance resource. DMS replication instances can be created, updated, deleted, and imported.\n \n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[float] allocated_storage: The amount of storage (in gigabytes) to be initially allocated for the replication instance.\n :param pulumi.Input[bool] apply_immediately: Indicates whether the changes should be applied immediately or during the next maintenance window. Only used when updating an existing resource.\n :param pulumi.Input[bool] auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the replication instance during the maintenance window.\n :param pulumi.Input[str] availability_zone: The EC2 Availability Zone that the replication instance will be created in.\n :param pulumi.Input[str] engine_version: The engine version number of the replication instance.\n :param pulumi.Input[str] kms_key_arn: The Amazon Resource Name (ARN) for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kms_key_arn`, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.\n :param pulumi.Input[bool] multi_az: Specifies if the replication instance is a multi-az deployment. 
You cannot set the `availability_zone` parameter if the `multi_az` parameter is set to `true`.\n :param pulumi.Input[str] preferred_maintenance_window: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n :param pulumi.Input[bool] publicly_accessible: Specifies the accessibility options for the replication instance. A value of true represents an instance with a public IP address. A value of false represents an instance with a private IP address.\n :param pulumi.Input[str] replication_instance_class: The compute and memory capacity of the replication instance as specified by the replication instance class. Can be one of `dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge`\n :param pulumi.Input[str] replication_instance_id: The replication instance identifier. This parameter is stored as a lowercase string.\n :param pulumi.Input[str] replication_subnet_group_id: A subnet group to associate with the replication instance.\n :param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.\n :param pulumi.Input[list] vpc_security_group_ids: A list of VPC security group IDs to be used with the replication instance. 
The VPC security groups must work with the VPC containing the replication instance.\n ' if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (not resource_name): raise TypeError('Missing resource name argument (for URN creation)') if (not isinstance(resource_name, str)): raise TypeError('Expected resource name to be a string') if (opts and (not isinstance(opts, pulumi.ResourceOptions))): raise TypeError('Expected resource options to be a ResourceOptions instance') __props__ = dict() __props__['allocated_storage'] = allocated_storage __props__['apply_immediately'] = apply_immediately __props__['auto_minor_version_upgrade'] = auto_minor_version_upgrade __props__['availability_zone'] = availability_zone __props__['engine_version'] = engine_version __props__['kms_key_arn'] = kms_key_arn __props__['multi_az'] = multi_az __props__['preferred_maintenance_window'] = preferred_maintenance_window __props__['publicly_accessible'] = publicly_accessible if (replication_instance_class is None): raise TypeError("Missing required property 'replication_instance_class'") __props__['replication_instance_class'] = replication_instance_class if (replication_instance_id is None): raise TypeError("Missing required property 'replication_instance_id'") __props__['replication_instance_id'] = replication_instance_id __props__['replication_subnet_group_id'] = replication_subnet_group_id __props__['tags'] = tags __props__['vpc_security_group_ids'] = vpc_security_group_ids __props__['replication_instance_arn'] = None __props__['replication_instance_private_ips'] = None __props__['replication_instance_public_ips'] = None super(ReplicationInstance, __self__).__init__('aws:dms/replicationInstance:ReplicationInstance', resource_name, __props__, opts)
def __init__(__self__, resource_name, opts=None, allocated_storage=None, apply_immediately=None, auto_minor_version_upgrade=None, availability_zone=None, engine_version=None, kms_key_arn=None, multi_az=None, preferred_maintenance_window=None, publicly_accessible=None, replication_instance_class=None, replication_instance_id=None, replication_subnet_group_id=None, tags=None, vpc_security_group_ids=None, __name__=None, __opts__=None): '\n Provides a DMS (Data Migration Service) replication instance resource. DMS replication instances can be created, updated, deleted, and imported.\n \n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[float] allocated_storage: The amount of storage (in gigabytes) to be initially allocated for the replication instance.\n :param pulumi.Input[bool] apply_immediately: Indicates whether the changes should be applied immediately or during the next maintenance window. Only used when updating an existing resource.\n :param pulumi.Input[bool] auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the replication instance during the maintenance window.\n :param pulumi.Input[str] availability_zone: The EC2 Availability Zone that the replication instance will be created in.\n :param pulumi.Input[str] engine_version: The engine version number of the replication instance.\n :param pulumi.Input[str] kms_key_arn: The Amazon Resource Name (ARN) for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kms_key_arn`, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.\n :param pulumi.Input[bool] multi_az: Specifies if the replication instance is a multi-az deployment. 
You cannot set the `availability_zone` parameter if the `multi_az` parameter is set to `true`.\n :param pulumi.Input[str] preferred_maintenance_window: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n :param pulumi.Input[bool] publicly_accessible: Specifies the accessibility options for the replication instance. A value of true represents an instance with a public IP address. A value of false represents an instance with a private IP address.\n :param pulumi.Input[str] replication_instance_class: The compute and memory capacity of the replication instance as specified by the replication instance class. Can be one of `dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge`\n :param pulumi.Input[str] replication_instance_id: The replication instance identifier. This parameter is stored as a lowercase string.\n :param pulumi.Input[str] replication_subnet_group_id: A subnet group to associate with the replication instance.\n :param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.\n :param pulumi.Input[list] vpc_security_group_ids: A list of VPC security group IDs to be used with the replication instance. 
The VPC security groups must work with the VPC containing the replication instance.\n ' if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (not resource_name): raise TypeError('Missing resource name argument (for URN creation)') if (not isinstance(resource_name, str)): raise TypeError('Expected resource name to be a string') if (opts and (not isinstance(opts, pulumi.ResourceOptions))): raise TypeError('Expected resource options to be a ResourceOptions instance') __props__ = dict() __props__['allocated_storage'] = allocated_storage __props__['apply_immediately'] = apply_immediately __props__['auto_minor_version_upgrade'] = auto_minor_version_upgrade __props__['availability_zone'] = availability_zone __props__['engine_version'] = engine_version __props__['kms_key_arn'] = kms_key_arn __props__['multi_az'] = multi_az __props__['preferred_maintenance_window'] = preferred_maintenance_window __props__['publicly_accessible'] = publicly_accessible if (replication_instance_class is None): raise TypeError("Missing required property 'replication_instance_class'") __props__['replication_instance_class'] = replication_instance_class if (replication_instance_id is None): raise TypeError("Missing required property 'replication_instance_id'") __props__['replication_instance_id'] = replication_instance_id __props__['replication_subnet_group_id'] = replication_subnet_group_id __props__['tags'] = tags __props__['vpc_security_group_ids'] = vpc_security_group_ids __props__['replication_instance_arn'] = None __props__['replication_instance_private_ips'] = None __props__['replication_instance_public_ips'] = None super(ReplicationInstance, __self__).__init__('aws:dms/replicationInstance:ReplicationInstance', resource_name, __props__, opts)<|docstring|>Provides a DMS (Data 
Migration Service) replication instance resource. DMS replication instances can be created, updated, deleted, and imported. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[float] allocated_storage: The amount of storage (in gigabytes) to be initially allocated for the replication instance. :param pulumi.Input[bool] apply_immediately: Indicates whether the changes should be applied immediately or during the next maintenance window. Only used when updating an existing resource. :param pulumi.Input[bool] auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the replication instance during the maintenance window. :param pulumi.Input[str] availability_zone: The EC2 Availability Zone that the replication instance will be created in. :param pulumi.Input[str] engine_version: The engine version number of the replication instance. :param pulumi.Input[str] kms_key_arn: The Amazon Resource Name (ARN) for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kms_key_arn`, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. :param pulumi.Input[bool] multi_az: Specifies if the replication instance is a multi-az deployment. You cannot set the `availability_zone` parameter if the `multi_az` parameter is set to `true`. :param pulumi.Input[str] preferred_maintenance_window: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). :param pulumi.Input[bool] publicly_accessible: Specifies the accessibility options for the replication instance. A value of true represents an instance with a public IP address. A value of false represents an instance with a private IP address. 
:param pulumi.Input[str] replication_instance_class: The compute and memory capacity of the replication instance as specified by the replication instance class. Can be one of `dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge` :param pulumi.Input[str] replication_instance_id: The replication instance identifier. This parameter is stored as a lowercase string. :param pulumi.Input[str] replication_subnet_group_id: A subnet group to associate with the replication instance. :param pulumi.Input[dict] tags: A mapping of tags to assign to the resource. :param pulumi.Input[list] vpc_security_group_ids: A list of VPC security group IDs to be used with the replication instance. The VPC security groups must work with the VPC containing the replication instance.<|endoftext|>
672b848a439c34d108e57fb442143e8562d7ed1ca6d6eea6542ebc020288c1e4
def delete_statement_with_paper(instance, **kwargs): '\n Deletes statement file when paper object is deleted\n :param instance:\n :param kwargs:\n :return:\n ' instance.statement.delete()
Deletes statement file when paper object is deleted :param instance: :param kwargs: :return:
papers/models.py
delete_statement_with_paper
JakubPrzystasz/StronaProjektyKol
0
python
def delete_statement_with_paper(instance, **kwargs): '\n Deletes statement file when paper object is deleted\n :param instance:\n :param kwargs:\n :return:\n ' instance.statement.delete()
def delete_statement_with_paper(instance, **kwargs): '\n Deletes statement file when paper object is deleted\n :param instance:\n :param kwargs:\n :return:\n ' instance.statement.delete()<|docstring|>Deletes statement file when paper object is deleted :param instance: :param kwargs: :return:<|endoftext|>
685a78b252047a2f23a95212ebb465d4b5f7a7ae91000965fe6caec7e7c2938c
def delete_file_with_object(instance, **kwargs): '\n Deletes files from system when UploadedFile object is deleted from database\n :param instance: UploadedFile object (file that is being deleted)\n :param kwargs:\n :return:\n ' instance.file.delete()
Deletes files from system when UploadedFile object is deleted from database :param instance: UploadedFile object (file that is being deleted) :param kwargs: :return:
papers/models.py
delete_file_with_object
JakubPrzystasz/StronaProjektyKol
0
python
def delete_file_with_object(instance, **kwargs): '\n Deletes files from system when UploadedFile object is deleted from database\n :param instance: UploadedFile object (file that is being deleted)\n :param kwargs:\n :return:\n ' instance.file.delete()
def delete_file_with_object(instance, **kwargs): '\n Deletes files from system when UploadedFile object is deleted from database\n :param instance: UploadedFile object (file that is being deleted)\n :param kwargs:\n :return:\n ' instance.file.delete()<|docstring|>Deletes files from system when UploadedFile object is deleted from database :param instance: UploadedFile object (file that is being deleted) :param kwargs: :return:<|endoftext|>
a17acb3d72d7fda7995855a48c5bd4371b32fca3fe0cf109b1f709e872746c16
def parse(self): '\n Method to start the argument parsing.\n ' return self._arg_parser.parse_args()
Method to start the argument parsing.
prestans/devel/__init__.py
parse
anomaly/prestans
12
python
def parse(self): '\n \n ' return self._arg_parser.parse_args()
def parse(self): '\n \n ' return self._arg_parser.parse_args()<|docstring|>Method to start the argument parsing.<|endoftext|>
381ebf1ec64d735317885e1640f3a2bd0a4fe159b804d6cccc1cbcea29e5e924
def _add_generate_sub_commands(self): '\n Sub commands for generating models for usage by clients.\n Currently supports Google Closure.\n ' gen_parser = self._subparsers_handle.add_parser(name='gen', help='generate client side model stubs, filters') gen_parser.add_argument('-t', '--template', choices=['closure.model', 'closure.filter'], default='closure.model', required=True, dest='template', help='template to use for client side code generation') gen_parser.add_argument('-m', '--model', required=True, dest='models_definition', help='path to models definition file or package') gen_parser.add_argument('-o', '--output', default='.', dest='output', help='output path for generated code') gen_parser.add_argument('-n', '--namespace', required=True, dest='namespace', help='namespace to use with template e.g prestans.data.model') gen_parser.add_argument('-fn', '--filter-namespace', required=False, default=None, dest='filter_namespace', help='filter namespace to use with template e.g prestans.data.filter')
Sub commands for generating models for usage by clients. Currently supports Google Closure.
prestans/devel/__init__.py
_add_generate_sub_commands
anomaly/prestans
12
python
def _add_generate_sub_commands(self): '\n Sub commands for generating models for usage by clients.\n Currently supports Google Closure.\n ' gen_parser = self._subparsers_handle.add_parser(name='gen', help='generate client side model stubs, filters') gen_parser.add_argument('-t', '--template', choices=['closure.model', 'closure.filter'], default='closure.model', required=True, dest='template', help='template to use for client side code generation') gen_parser.add_argument('-m', '--model', required=True, dest='models_definition', help='path to models definition file or package') gen_parser.add_argument('-o', '--output', default='.', dest='output', help='output path for generated code') gen_parser.add_argument('-n', '--namespace', required=True, dest='namespace', help='namespace to use with template e.g prestans.data.model') gen_parser.add_argument('-fn', '--filter-namespace', required=False, default=None, dest='filter_namespace', help='filter namespace to use with template e.g prestans.data.filter')
def _add_generate_sub_commands(self): '\n Sub commands for generating models for usage by clients.\n Currently supports Google Closure.\n ' gen_parser = self._subparsers_handle.add_parser(name='gen', help='generate client side model stubs, filters') gen_parser.add_argument('-t', '--template', choices=['closure.model', 'closure.filter'], default='closure.model', required=True, dest='template', help='template to use for client side code generation') gen_parser.add_argument('-m', '--model', required=True, dest='models_definition', help='path to models definition file or package') gen_parser.add_argument('-o', '--output', default='.', dest='output', help='output path for generated code') gen_parser.add_argument('-n', '--namespace', required=True, dest='namespace', help='namespace to use with template e.g prestans.data.model') gen_parser.add_argument('-fn', '--filter-namespace', required=False, default=None, dest='filter_namespace', help='filter namespace to use with template e.g prestans.data.filter')<|docstring|>Sub commands for generating models for usage by clients. Currently supports Google Closure.<|endoftext|>
30d45bafbe2583d03387491bfddbbd88e7f999294d6e184d453f914277d99a15
def dispatch(self): "\n Start processing the user's commands.\n " if (self._args.sub_command == 'gen'): self._dispatch_gen()
Start processing the user's commands.
prestans/devel/__init__.py
dispatch
anomaly/prestans
12
python
def dispatch(self): "\n \n " if (self._args.sub_command == 'gen'): self._dispatch_gen()
def dispatch(self): "\n \n " if (self._args.sub_command == 'gen'): self._dispatch_gen()<|docstring|>Start processing the user's commands.<|endoftext|>
a60056401e026e83bbcde1fba0ad746b9d769322b171e97308991b7239d783f2
def _dispatch_gen(self): '\n Process the generate subset of commands.\n ' if (not os.path.isdir(self._args.output)): raise exception.Base(('%s is not a writeable directory' % self._args.output)) if (not os.path.isfile(self._args.models_definition)): if (not self.check_package_exists(self._args.models_definition)): raise exception.Base(('failed to locate package or models definitions file at: %s' % self._args.models_definition)) from prestans.devel.gen import Preplate preplate = Preplate(template_type=self._args.template, models_definition=self._args.models_definition, namespace=self._args.namespace, filter_namespace=self._args.filter_namespace, output_directory=self._args.output) preplate.run()
Process the generate subset of commands.
prestans/devel/__init__.py
_dispatch_gen
anomaly/prestans
12
python
def _dispatch_gen(self): '\n \n ' if (not os.path.isdir(self._args.output)): raise exception.Base(('%s is not a writeable directory' % self._args.output)) if (not os.path.isfile(self._args.models_definition)): if (not self.check_package_exists(self._args.models_definition)): raise exception.Base(('failed to locate package or models definitions file at: %s' % self._args.models_definition)) from prestans.devel.gen import Preplate preplate = Preplate(template_type=self._args.template, models_definition=self._args.models_definition, namespace=self._args.namespace, filter_namespace=self._args.filter_namespace, output_directory=self._args.output) preplate.run()
def _dispatch_gen(self): '\n \n ' if (not os.path.isdir(self._args.output)): raise exception.Base(('%s is not a writeable directory' % self._args.output)) if (not os.path.isfile(self._args.models_definition)): if (not self.check_package_exists(self._args.models_definition)): raise exception.Base(('failed to locate package or models definitions file at: %s' % self._args.models_definition)) from prestans.devel.gen import Preplate preplate = Preplate(template_type=self._args.template, models_definition=self._args.models_definition, namespace=self._args.namespace, filter_namespace=self._args.filter_namespace, output_directory=self._args.output) preplate.run()<|docstring|>Process the generate subset of commands.<|endoftext|>
d1080c8bc987c3e4c41a0ccb9300b187f5195390c773c81c6b49182fe9c70980
def parse_args(argv: Optional[Sequence[str]]) -> argparse.Namespace: 'Parse and return the parsed command line arguments.' parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--bind', dest='bind_address', default='127.0.0.1', help='Bind address') parser.add_argument('--ssid', dest='ssid', default='vtrust-flash', help='WiFi SSID') parser.add_argument('--password', dest='password', default='', help='Password for the network') parser.add_argument('--region', dest='region', default='US', help='WiFi Region') parser.add_argument('--token', dest='token', default='00000000', help='Token') parser.add_argument('--secret', dest='secret', default='0101', help='Secret') return parser.parse_args(argv)
Parse and return the parsed command line arguments.
smarthack/smartconfig/smartconfig.py
parse_args
paravoid/tuya-convert
0
python
def parse_args(argv: Optional[Sequence[str]]) -> argparse.Namespace: parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--bind', dest='bind_address', default='127.0.0.1', help='Bind address') parser.add_argument('--ssid', dest='ssid', default='vtrust-flash', help='WiFi SSID') parser.add_argument('--password', dest='password', default=, help='Password for the network') parser.add_argument('--region', dest='region', default='US', help='WiFi Region') parser.add_argument('--token', dest='token', default='00000000', help='Token') parser.add_argument('--secret', dest='secret', default='0101', help='Secret') return parser.parse_args(argv)
def parse_args(argv: Optional[Sequence[str]]) -> argparse.Namespace: parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--bind', dest='bind_address', default='127.0.0.1', help='Bind address') parser.add_argument('--ssid', dest='ssid', default='vtrust-flash', help='WiFi SSID') parser.add_argument('--password', dest='password', default=, help='Password for the network') parser.add_argument('--region', dest='region', default='US', help='WiFi Region') parser.add_argument('--token', dest='token', default='00000000', help='Token') parser.add_argument('--secret', dest='secret', default='0101', help='Secret') return parser.parse_args(argv)<|docstring|>Parse and return the parsed command line arguments.<|endoftext|>
ec7fcbe21fb94e548126a149b74365e00fad7415a91d929975613cdd8454e425
def smartconfig(bind_address: str, options: Tuple[(str, ...)]) -> None: 'Attempt to SmartConfig.\n\n Sends both broadcast and multicast packets, multiple times.\n ' (password, ssid, region, token, secret) = options sock = SmartConfigSocket(bind_address) token_group = ((region + token) + secret) broadcast_body = broadcast.encode_network(password, ssid, token_group) multicast_body = multicast.encode_network(password, ssid, token_group) for _ in range(40): sock.send_multicast(multicast.HEAD) sock.send_broadcast(broadcast.HEAD) for _ in range(10): sock.send_multicast(multicast.HEAD) sock.send_multicast(multicast_body) sock.send_broadcast(broadcast_body)
Attempt to SmartConfig. Sends both broadcast and multicast packets, multiple times.
smarthack/smartconfig/smartconfig.py
smartconfig
paravoid/tuya-convert
0
python
def smartconfig(bind_address: str, options: Tuple[(str, ...)]) -> None: 'Attempt to SmartConfig.\n\n Sends both broadcast and multicast packets, multiple times.\n ' (password, ssid, region, token, secret) = options sock = SmartConfigSocket(bind_address) token_group = ((region + token) + secret) broadcast_body = broadcast.encode_network(password, ssid, token_group) multicast_body = multicast.encode_network(password, ssid, token_group) for _ in range(40): sock.send_multicast(multicast.HEAD) sock.send_broadcast(broadcast.HEAD) for _ in range(10): sock.send_multicast(multicast.HEAD) sock.send_multicast(multicast_body) sock.send_broadcast(broadcast_body)
def smartconfig(bind_address: str, options: Tuple[(str, ...)]) -> None: 'Attempt to SmartConfig.\n\n Sends both broadcast and multicast packets, multiple times.\n ' (password, ssid, region, token, secret) = options sock = SmartConfigSocket(bind_address) token_group = ((region + token) + secret) broadcast_body = broadcast.encode_network(password, ssid, token_group) multicast_body = multicast.encode_network(password, ssid, token_group) for _ in range(40): sock.send_multicast(multicast.HEAD) sock.send_broadcast(broadcast.HEAD) for _ in range(10): sock.send_multicast(multicast.HEAD) sock.send_multicast(multicast_body) sock.send_broadcast(broadcast_body)<|docstring|>Attempt to SmartConfig. Sends both broadcast and multicast packets, multiple times.<|endoftext|>
764e2b8d6b8fa838934908340e4640bcfd46c40c77527e4fa055fe149cb67922
def main(argv: Optional[Sequence[str]]=None) -> None: 'Entry point for CLI users.' logging.basicConfig(format='%(asctime)-15s %(name)s %(levelname)-8s %(message)s', level=logging.INFO) options = parse_args(argv) logger.info('Put the device in EZ config mode (LED should blink fast)') logger.info('Sending SSID %s', options.ssid) logger.info('Sending Password %s', options.password) logger.info('Sending Region %s', options.region) logger.info('Sending Token %s', options.token) logger.info('Sending Secret %s', options.secret) for attempt in range(1, ATTEMPTS): logger.info('Attempting SmartConfig, attempt %d/%d', attempt, ATTEMPTS) smartconfig(options.bind_address, (options.password, options.ssid, options.region, options.token, options.secret)) logger.info('SmartConfig completed.') logger.info('Auto retry in 3s...') time.sleep(3)
Entry point for CLI users.
smarthack/smartconfig/smartconfig.py
main
paravoid/tuya-convert
0
python
def main(argv: Optional[Sequence[str]]=None) -> None: logging.basicConfig(format='%(asctime)-15s %(name)s %(levelname)-8s %(message)s', level=logging.INFO) options = parse_args(argv) logger.info('Put the device in EZ config mode (LED should blink fast)') logger.info('Sending SSID %s', options.ssid) logger.info('Sending Password %s', options.password) logger.info('Sending Region %s', options.region) logger.info('Sending Token %s', options.token) logger.info('Sending Secret %s', options.secret) for attempt in range(1, ATTEMPTS): logger.info('Attempting SmartConfig, attempt %d/%d', attempt, ATTEMPTS) smartconfig(options.bind_address, (options.password, options.ssid, options.region, options.token, options.secret)) logger.info('SmartConfig completed.') logger.info('Auto retry in 3s...') time.sleep(3)
def main(argv: Optional[Sequence[str]]=None) -> None: logging.basicConfig(format='%(asctime)-15s %(name)s %(levelname)-8s %(message)s', level=logging.INFO) options = parse_args(argv) logger.info('Put the device in EZ config mode (LED should blink fast)') logger.info('Sending SSID %s', options.ssid) logger.info('Sending Password %s', options.password) logger.info('Sending Region %s', options.region) logger.info('Sending Token %s', options.token) logger.info('Sending Secret %s', options.secret) for attempt in range(1, ATTEMPTS): logger.info('Attempting SmartConfig, attempt %d/%d', attempt, ATTEMPTS) smartconfig(options.bind_address, (options.password, options.ssid, options.region, options.token, options.secret)) logger.info('SmartConfig completed.') logger.info('Auto retry in 3s...') time.sleep(3)<|docstring|>Entry point for CLI users.<|endoftext|>
0c90334a0e804a2f85f7d2fc91340b78f3342d2744839ebd9b587e5235aa1f94
def __init__(self, address: str, gap: float=GAP): 'Initialize an instance: create a socket, setsockopt() and bind.' self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self._socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, MULTICAST_TTL) self._socket.bind((address, 0)) self._gap = gap
Initialize an instance: create a socket, setsockopt() and bind.
smarthack/smartconfig/smartconfig.py
__init__
paravoid/tuya-convert
0
python
def __init__(self, address: str, gap: float=GAP): self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self._socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, MULTICAST_TTL) self._socket.bind((address, 0)) self._gap = gap
def __init__(self, address: str, gap: float=GAP): self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self._socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, MULTICAST_TTL) self._socket.bind((address, 0)) self._gap = gap<|docstring|>Initialize an instance: create a socket, setsockopt() and bind.<|endoftext|>
0a40d249164a5518caf5b1b5c61c3b8d8ff6cee0b5b55eed7eb290a64ee86a8a
def send_broadcast(self, data: List[int]) -> None: 'Send broadcast packets for the given data.\n\n This encodes the data to the packet length.\n ' for length in data: self._socket.sendto((b'\x00' * length), ('255.255.255.255', 30011)) time.sleep(self._gap)
Send broadcast packets for the given data. This encodes the data to the packet length.
smarthack/smartconfig/smartconfig.py
send_broadcast
paravoid/tuya-convert
0
python
def send_broadcast(self, data: List[int]) -> None: 'Send broadcast packets for the given data.\n\n This encodes the data to the packet length.\n ' for length in data: self._socket.sendto((b'\x00' * length), ('255.255.255.255', 30011)) time.sleep(self._gap)
def send_broadcast(self, data: List[int]) -> None: 'Send broadcast packets for the given data.\n\n This encodes the data to the packet length.\n ' for length in data: self._socket.sendto((b'\x00' * length), ('255.255.255.255', 30011)) time.sleep(self._gap)<|docstring|>Send broadcast packets for the given data. This encodes the data to the packet length.<|endoftext|>