body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
b10bc73b0ee8a52d35df78776b9f5ccebd2b05aaa267af1717833c7feb077211
def __init__(self, name, attributes): 'XML key with attributes.\n\n :param name: key name\n :param attributes: dictionary of attributes {name: value, ...}\n ' self.name = name self.attributes = dict(attributes)
XML key with attributes. :param name: key name :param attributes: dictionary of attributes {name: value, ...}
tests/testflows/helpers/common.py
__init__
psyoblade/ClickHouse
1
python
def __init__(self, name, attributes): 'XML key with attributes.\n\n :param name: key name\n :param attributes: dictionary of attributes {name: value, ...}\n ' self.name = name self.attributes = dict(attributes)
def __init__(self, name, attributes): 'XML key with attributes.\n\n :param name: key name\n :param attributes: dictionary of attributes {name: value, ...}\n ' self.name = name self.attributes = dict(attributes)<|docstring|>XML key with attributes. :param name: key name :param attributes: dictionary of attributes {name: value, ...}<|endoftext|>
5d10b428d950bde8cd555de682ad4612ba5f8b5beb89c14aa6630af8db1a6925
def check_preprocessed_config_is_updated(after_removal=False): 'Check that preprocessed config is updated.' started = time.time() command = f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name} | grep {config.uid}{(' > /dev/null' if (not settings.debug) else '')}" while ((time.time() - started) < timeout): exitcode = node.command(command, steps=False).exitcode if after_removal: if (exitcode == 1): break elif (exitcode == 0): break time.sleep(1) if settings.debug: node.command(f'cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name}') if after_removal: assert (exitcode == 1), error() else: assert (exitcode == 0), error()
Check that preprocessed config is updated.
tests/testflows/helpers/common.py
check_preprocessed_config_is_updated
psyoblade/ClickHouse
1
python
def check_preprocessed_config_is_updated(after_removal=False): started = time.time() command = f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name} | grep {config.uid}{(' > /dev/null' if (not settings.debug) else )}" while ((time.time() - started) < timeout): exitcode = node.command(command, steps=False).exitcode if after_removal: if (exitcode == 1): break elif (exitcode == 0): break time.sleep(1) if settings.debug: node.command(f'cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name}') if after_removal: assert (exitcode == 1), error() else: assert (exitcode == 0), error()
def check_preprocessed_config_is_updated(after_removal=False): started = time.time() command = f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name} | grep {config.uid}{(' > /dev/null' if (not settings.debug) else )}" while ((time.time() - started) < timeout): exitcode = node.command(command, steps=False).exitcode if after_removal: if (exitcode == 1): break elif (exitcode == 0): break time.sleep(1) if settings.debug: node.command(f'cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name}') if after_removal: assert (exitcode == 1), error() else: assert (exitcode == 0), error()<|docstring|>Check that preprocessed config is updated.<|endoftext|>
00e51979aced21356ec35267bca86e4525865b56f61cdd85f403046a682198d2
def wait_for_config_to_be_loaded(user=None): 'Wait for config to be loaded.' if restart: with When('I close terminal to the node to be restarted'): bash.close() with And('I stop ClickHouse to apply the config changes'): node.stop_clickhouse(safe=False) with And('I get the current log size'): cmd = node.cluster.command(None, f"stat --format=%s {cluster.environ['CLICKHOUSE_TESTS_DIR']}/_instances/{node.name}/logs/clickhouse-server.log") logsize = cmd.output.split(' ')[0].strip() with And('I start ClickHouse back up'): node.start_clickhouse(user=user, wait_healthy=wait_healthy) with Then('I tail the log file from using previous log size as the offset'): bash.prompt = bash.__class__.prompt bash.open() bash.send(f'tail -c +{logsize} -f /var/log/clickhouse-server/clickhouse-server.log') with Then('I wait for config reload message in the log file'): if restart: bash.expect(f"ConfigReloader: Loaded config '/etc/clickhouse-server/config.xml', performed update on configuration", timeout=timeout) else: bash.expect(f"ConfigReloader: Loaded config '/etc/clickhouse-server/{config.preprocessed_name}', performed update on configuration", timeout=timeout)
Wait for config to be loaded.
tests/testflows/helpers/common.py
wait_for_config_to_be_loaded
psyoblade/ClickHouse
1
python
def wait_for_config_to_be_loaded(user=None): if restart: with When('I close terminal to the node to be restarted'): bash.close() with And('I stop ClickHouse to apply the config changes'): node.stop_clickhouse(safe=False) with And('I get the current log size'): cmd = node.cluster.command(None, f"stat --format=%s {cluster.environ['CLICKHOUSE_TESTS_DIR']}/_instances/{node.name}/logs/clickhouse-server.log") logsize = cmd.output.split(' ')[0].strip() with And('I start ClickHouse back up'): node.start_clickhouse(user=user, wait_healthy=wait_healthy) with Then('I tail the log file from using previous log size as the offset'): bash.prompt = bash.__class__.prompt bash.open() bash.send(f'tail -c +{logsize} -f /var/log/clickhouse-server/clickhouse-server.log') with Then('I wait for config reload message in the log file'): if restart: bash.expect(f"ConfigReloader: Loaded config '/etc/clickhouse-server/config.xml', performed update on configuration", timeout=timeout) else: bash.expect(f"ConfigReloader: Loaded config '/etc/clickhouse-server/{config.preprocessed_name}', performed update on configuration", timeout=timeout)
def wait_for_config_to_be_loaded(user=None): if restart: with When('I close terminal to the node to be restarted'): bash.close() with And('I stop ClickHouse to apply the config changes'): node.stop_clickhouse(safe=False) with And('I get the current log size'): cmd = node.cluster.command(None, f"stat --format=%s {cluster.environ['CLICKHOUSE_TESTS_DIR']}/_instances/{node.name}/logs/clickhouse-server.log") logsize = cmd.output.split(' ')[0].strip() with And('I start ClickHouse back up'): node.start_clickhouse(user=user, wait_healthy=wait_healthy) with Then('I tail the log file from using previous log size as the offset'): bash.prompt = bash.__class__.prompt bash.open() bash.send(f'tail -c +{logsize} -f /var/log/clickhouse-server/clickhouse-server.log') with Then('I wait for config reload message in the log file'): if restart: bash.expect(f"ConfigReloader: Loaded config '/etc/clickhouse-server/config.xml', performed update on configuration", timeout=timeout) else: bash.expect(f"ConfigReloader: Loaded config '/etc/clickhouse-server/{config.preprocessed_name}', performed update on configuration", timeout=timeout)<|docstring|>Wait for config to be loaded.<|endoftext|>
f88f7c87b375dbe97bf990053b588013b8eb736f20067d2e8a7ffd6d252bf4e6
def get(self, request: Request) -> Response: 'Get.' serializer = TradeDataParameterSerializer(data=request.query_params) serializer.is_valid(raise_exception=True) api(**serializer.validated_data) return Response({'ok': True})
Get.
cryptofeed_werks/views/trades.py
get
globophobe/fastapi-quant-candles
1
python
def get(self, request: Request) -> Response: serializer = TradeDataParameterSerializer(data=request.query_params) serializer.is_valid(raise_exception=True) api(**serializer.validated_data) return Response({'ok': True})
def get(self, request: Request) -> Response: serializer = TradeDataParameterSerializer(data=request.query_params) serializer.is_valid(raise_exception=True) api(**serializer.validated_data) return Response({'ok': True})<|docstring|>Get.<|endoftext|>
bb91afae5d884db50ae5c81dcb9e91b4a1fc54c15ef1c3b811602c8db85666b1
def async_trigger(hass, config, action): 'Listen for events based on configuration.' event = config.get(CONF_EVENT) offset = config.get(CONF_OFFSET) @callback def call_action(): 'Call action with right context.' hass.async_run_job(action, {'trigger': {'platform': 'sun', 'event': event, 'offset': offset}}) if (event == SUN_EVENT_SUNRISE): return async_track_sunrise(hass, call_action, offset) else: return async_track_sunset(hass, call_action, offset)
Listen for events based on configuration.
homeassistant/components/automation/sun.py
async_trigger
gwendalg/home-assistant
13
python
def async_trigger(hass, config, action): event = config.get(CONF_EVENT) offset = config.get(CONF_OFFSET) @callback def call_action(): 'Call action with right context.' hass.async_run_job(action, {'trigger': {'platform': 'sun', 'event': event, 'offset': offset}}) if (event == SUN_EVENT_SUNRISE): return async_track_sunrise(hass, call_action, offset) else: return async_track_sunset(hass, call_action, offset)
def async_trigger(hass, config, action): event = config.get(CONF_EVENT) offset = config.get(CONF_OFFSET) @callback def call_action(): 'Call action with right context.' hass.async_run_job(action, {'trigger': {'platform': 'sun', 'event': event, 'offset': offset}}) if (event == SUN_EVENT_SUNRISE): return async_track_sunrise(hass, call_action, offset) else: return async_track_sunset(hass, call_action, offset)<|docstring|>Listen for events based on configuration.<|endoftext|>
58d019a0fabc78f3e21b9912f8b57e215522f4b64cbd3ca2a9ebf2669d8afc0b
@callback def call_action(): 'Call action with right context.' hass.async_run_job(action, {'trigger': {'platform': 'sun', 'event': event, 'offset': offset}})
Call action with right context.
homeassistant/components/automation/sun.py
call_action
gwendalg/home-assistant
13
python
@callback def call_action(): hass.async_run_job(action, {'trigger': {'platform': 'sun', 'event': event, 'offset': offset}})
@callback def call_action(): hass.async_run_job(action, {'trigger': {'platform': 'sun', 'event': event, 'offset': offset}})<|docstring|>Call action with right context.<|endoftext|>
c9d0a0f61da1bf580fb3c12d8bc9adc4ef2e5ab57151d1da5ad1488b7c764e94
def _validate_syntax(quantsim_config: ConfigDictType): '\n Validate config dict syntax, ensuring keys and values are as expected. Throw an exception if anything is amiss.\n :param quantsim_config: Configuration dictionary to validate\n ' validate(quantsim_config, schema=QUANTSIM_CONFIG_SCHEMA)
Validate config dict syntax, ensuring keys and values are as expected. Throw an exception if anything is amiss. :param quantsim_config: Configuration dictionary to validate
TrainingExtensions/common/src/python/aimet_common/quantsim_config/json_config_importer.py
_validate_syntax
styler00dollar/Colab-docker-aimet
945
python
def _validate_syntax(quantsim_config: ConfigDictType): '\n Validate config dict syntax, ensuring keys and values are as expected. Throw an exception if anything is amiss.\n :param quantsim_config: Configuration dictionary to validate\n ' validate(quantsim_config, schema=QUANTSIM_CONFIG_SCHEMA)
def _validate_syntax(quantsim_config: ConfigDictType): '\n Validate config dict syntax, ensuring keys and values are as expected. Throw an exception if anything is amiss.\n :param quantsim_config: Configuration dictionary to validate\n ' validate(quantsim_config, schema=QUANTSIM_CONFIG_SCHEMA)<|docstring|>Validate config dict syntax, ensuring keys and values are as expected. Throw an exception if anything is amiss. :param quantsim_config: Configuration dictionary to validate<|endoftext|>
67339f15551b7adb174ed7e2cd79b2a60b788fcee945bf368b327a2aff9821c4
def _validate_semantics(quantsim_config: ConfigDictType): '\n Validate config dict syntax, ensuring keys and values are as expected. Throw an exception if anything is amiss.\n :param quantsim_config: Configuration dictionary to validate\n ' default_op_configs = quantsim_config[ConfigDictKeys.DEFAULTS][ConfigDictKeys.OPS] if (ConfigDictKeys.IS_INPUT_QUANTIZED in default_op_configs): logger.error('Currently IS_INPUT_QUANTIZED setting in default configs is not supported') raise NotImplementedError if ((ConfigDictKeys.IS_OUTPUT_QUANTIZED in default_op_configs) and (not default_op_configs[ConfigDictKeys.IS_OUTPUT_QUANTIZED])): logger.error('Currently IS_OUTPUT_QUANTIZED false setting in default configs is not supported') raise NotImplementedError op_type_configs = quantsim_config[ConfigDictKeys.OP_TYPE] for op_type_config in op_type_configs.values(): if ((ConfigDictKeys.IS_INPUT_QUANTIZED in op_type_config) and (not op_type_config[ConfigDictKeys.IS_INPUT_QUANTIZED])): logger.error('IS_INPUT_QUANTIZED false in op configs is currently unsupported.') raise NotImplementedError model_input_configs = quantsim_config[ConfigDictKeys.MODEL_INPUT] if (ConfigDictKeys.IS_INPUT_QUANTIZED in model_input_configs): if (not model_input_configs[ConfigDictKeys.IS_INPUT_QUANTIZED]): logger.error('IS_INPUT_QUANTIZED for model input can only be set to True') raise NotImplementedError model_output_configs = quantsim_config[ConfigDictKeys.MODEL_OUTPUT] if (ConfigDictKeys.IS_OUTPUT_QUANTIZED in model_output_configs): if (not model_output_configs[ConfigDictKeys.IS_OUTPUT_QUANTIZED]): logger.error('IS_OUTPUT_QUANTIZED for model output can only be set to True') raise NotImplementedError
Validate config dict syntax, ensuring keys and values are as expected. Throw an exception if anything is amiss. :param quantsim_config: Configuration dictionary to validate
TrainingExtensions/common/src/python/aimet_common/quantsim_config/json_config_importer.py
_validate_semantics
styler00dollar/Colab-docker-aimet
945
python
def _validate_semantics(quantsim_config: ConfigDictType): '\n Validate config dict syntax, ensuring keys and values are as expected. Throw an exception if anything is amiss.\n :param quantsim_config: Configuration dictionary to validate\n ' default_op_configs = quantsim_config[ConfigDictKeys.DEFAULTS][ConfigDictKeys.OPS] if (ConfigDictKeys.IS_INPUT_QUANTIZED in default_op_configs): logger.error('Currently IS_INPUT_QUANTIZED setting in default configs is not supported') raise NotImplementedError if ((ConfigDictKeys.IS_OUTPUT_QUANTIZED in default_op_configs) and (not default_op_configs[ConfigDictKeys.IS_OUTPUT_QUANTIZED])): logger.error('Currently IS_OUTPUT_QUANTIZED false setting in default configs is not supported') raise NotImplementedError op_type_configs = quantsim_config[ConfigDictKeys.OP_TYPE] for op_type_config in op_type_configs.values(): if ((ConfigDictKeys.IS_INPUT_QUANTIZED in op_type_config) and (not op_type_config[ConfigDictKeys.IS_INPUT_QUANTIZED])): logger.error('IS_INPUT_QUANTIZED false in op configs is currently unsupported.') raise NotImplementedError model_input_configs = quantsim_config[ConfigDictKeys.MODEL_INPUT] if (ConfigDictKeys.IS_INPUT_QUANTIZED in model_input_configs): if (not model_input_configs[ConfigDictKeys.IS_INPUT_QUANTIZED]): logger.error('IS_INPUT_QUANTIZED for model input can only be set to True') raise NotImplementedError model_output_configs = quantsim_config[ConfigDictKeys.MODEL_OUTPUT] if (ConfigDictKeys.IS_OUTPUT_QUANTIZED in model_output_configs): if (not model_output_configs[ConfigDictKeys.IS_OUTPUT_QUANTIZED]): logger.error('IS_OUTPUT_QUANTIZED for model output can only be set to True') raise NotImplementedError
def _validate_semantics(quantsim_config: ConfigDictType): '\n Validate config dict syntax, ensuring keys and values are as expected. Throw an exception if anything is amiss.\n :param quantsim_config: Configuration dictionary to validate\n ' default_op_configs = quantsim_config[ConfigDictKeys.DEFAULTS][ConfigDictKeys.OPS] if (ConfigDictKeys.IS_INPUT_QUANTIZED in default_op_configs): logger.error('Currently IS_INPUT_QUANTIZED setting in default configs is not supported') raise NotImplementedError if ((ConfigDictKeys.IS_OUTPUT_QUANTIZED in default_op_configs) and (not default_op_configs[ConfigDictKeys.IS_OUTPUT_QUANTIZED])): logger.error('Currently IS_OUTPUT_QUANTIZED false setting in default configs is not supported') raise NotImplementedError op_type_configs = quantsim_config[ConfigDictKeys.OP_TYPE] for op_type_config in op_type_configs.values(): if ((ConfigDictKeys.IS_INPUT_QUANTIZED in op_type_config) and (not op_type_config[ConfigDictKeys.IS_INPUT_QUANTIZED])): logger.error('IS_INPUT_QUANTIZED false in op configs is currently unsupported.') raise NotImplementedError model_input_configs = quantsim_config[ConfigDictKeys.MODEL_INPUT] if (ConfigDictKeys.IS_INPUT_QUANTIZED in model_input_configs): if (not model_input_configs[ConfigDictKeys.IS_INPUT_QUANTIZED]): logger.error('IS_INPUT_QUANTIZED for model input can only be set to True') raise NotImplementedError model_output_configs = quantsim_config[ConfigDictKeys.MODEL_OUTPUT] if (ConfigDictKeys.IS_OUTPUT_QUANTIZED in model_output_configs): if (not model_output_configs[ConfigDictKeys.IS_OUTPUT_QUANTIZED]): logger.error('IS_OUTPUT_QUANTIZED for model output can only be set to True') raise NotImplementedError<|docstring|>Validate config dict syntax, ensuring keys and values are as expected. Throw an exception if anything is amiss. :param quantsim_config: Configuration dictionary to validate<|endoftext|>
f354c7d19404f1feff476b2ea7093cac39fec29217b91f79b3129cebbbb9f417
def _convert_configs_values_to_bool(dictionary: Dict): '\n Recursively traverse all key value pairs in dictionary and set any string values representing booleans to\n booleans.\n :param dictionary: Dictionary to set values to True or False if applicable\n ' for (key, value) in dictionary.items(): if (value == 'True'): dictionary[key] = True elif (value == 'False'): dictionary[key] = False elif isinstance(value, List): for item in value: if isinstance(item, Dict): _convert_configs_values_to_bool(item) elif isinstance(value, Dict): _convert_configs_values_to_bool(value) else: pass
Recursively traverse all key value pairs in dictionary and set any string values representing booleans to booleans. :param dictionary: Dictionary to set values to True or False if applicable
TrainingExtensions/common/src/python/aimet_common/quantsim_config/json_config_importer.py
_convert_configs_values_to_bool
styler00dollar/Colab-docker-aimet
945
python
def _convert_configs_values_to_bool(dictionary: Dict): '\n Recursively traverse all key value pairs in dictionary and set any string values representing booleans to\n booleans.\n :param dictionary: Dictionary to set values to True or False if applicable\n ' for (key, value) in dictionary.items(): if (value == 'True'): dictionary[key] = True elif (value == 'False'): dictionary[key] = False elif isinstance(value, List): for item in value: if isinstance(item, Dict): _convert_configs_values_to_bool(item) elif isinstance(value, Dict): _convert_configs_values_to_bool(value) else: pass
def _convert_configs_values_to_bool(dictionary: Dict): '\n Recursively traverse all key value pairs in dictionary and set any string values representing booleans to\n booleans.\n :param dictionary: Dictionary to set values to True or False if applicable\n ' for (key, value) in dictionary.items(): if (value == 'True'): dictionary[key] = True elif (value == 'False'): dictionary[key] = False elif isinstance(value, List): for item in value: if isinstance(item, Dict): _convert_configs_values_to_bool(item) elif isinstance(value, Dict): _convert_configs_values_to_bool(value) else: pass<|docstring|>Recursively traverse all key value pairs in dictionary and set any string values representing booleans to booleans. :param dictionary: Dictionary to set values to True or False if applicable<|endoftext|>
49ccf25b3d8370d698c3eee2516cd0b70a538785e4e0623ae9320007d7c50f6c
@classmethod def import_json_config_file(cls, config_file: str) -> ConfigDictType: '\n Import json config file, run syntax and semantic validation, and return configs as a dictionary\n :param config_file: Config file to parse\n :return: Quantsim configs dictionary\n ' try: with open(config_file) as configs: try: quantsim_configs = json.load(configs) except json.decoder.JSONDecodeError: logger.error('Error parsing json config file') raise AssertionError except IOError: logger.error('Could not open json config file') raise AssertionError _validate_syntax(quantsim_configs) _convert_configs_values_to_bool(quantsim_configs) _validate_semantics(quantsim_configs) return quantsim_configs
Import json config file, run syntax and semantic validation, and return configs as a dictionary :param config_file: Config file to parse :return: Quantsim configs dictionary
TrainingExtensions/common/src/python/aimet_common/quantsim_config/json_config_importer.py
import_json_config_file
styler00dollar/Colab-docker-aimet
945
python
@classmethod def import_json_config_file(cls, config_file: str) -> ConfigDictType: '\n Import json config file, run syntax and semantic validation, and return configs as a dictionary\n :param config_file: Config file to parse\n :return: Quantsim configs dictionary\n ' try: with open(config_file) as configs: try: quantsim_configs = json.load(configs) except json.decoder.JSONDecodeError: logger.error('Error parsing json config file') raise AssertionError except IOError: logger.error('Could not open json config file') raise AssertionError _validate_syntax(quantsim_configs) _convert_configs_values_to_bool(quantsim_configs) _validate_semantics(quantsim_configs) return quantsim_configs
@classmethod def import_json_config_file(cls, config_file: str) -> ConfigDictType: '\n Import json config file, run syntax and semantic validation, and return configs as a dictionary\n :param config_file: Config file to parse\n :return: Quantsim configs dictionary\n ' try: with open(config_file) as configs: try: quantsim_configs = json.load(configs) except json.decoder.JSONDecodeError: logger.error('Error parsing json config file') raise AssertionError except IOError: logger.error('Could not open json config file') raise AssertionError _validate_syntax(quantsim_configs) _convert_configs_values_to_bool(quantsim_configs) _validate_semantics(quantsim_configs) return quantsim_configs<|docstring|>Import json config file, run syntax and semantic validation, and return configs as a dictionary :param config_file: Config file to parse :return: Quantsim configs dictionary<|endoftext|>
bd7068e46a35216a4f41e987039b099afea70f83a4296603cd5655a11229baf2
def simplify_graph(graph): 'Optional method to simplify graph by removing dummy points.' change = True while change: change = False for (pos, succs) in graph.items(): n = len(succs) if (n == 2): ((p1, d1), (p2, d2)) = succs.items() del graph[pos] del graph[p1][pos] del graph[p2][pos] graph[p1][p2] = (d1 + d2) graph[p2][p1] = (d1 + d2) change = True break return graph
Optional method to simplify graph by removing dummy points.
python/2019/day20.py
simplify_graph
SylvainDe/aoc
0
python
def simplify_graph(graph): change = True while change: change = False for (pos, succs) in graph.items(): n = len(succs) if (n == 2): ((p1, d1), (p2, d2)) = succs.items() del graph[pos] del graph[p1][pos] del graph[p2][pos] graph[p1][p2] = (d1 + d2) graph[p2][p1] = (d1 + d2) change = True break return graph
def simplify_graph(graph): change = True while change: change = False for (pos, succs) in graph.items(): n = len(succs) if (n == 2): ((p1, d1), (p2, d2)) = succs.items() del graph[pos] del graph[p1][pos] del graph[p2][pos] graph[p1][p2] = (d1 + d2) graph[p2][p1] = (d1 + d2) change = True break return graph<|docstring|>Optional method to simplify graph by removing dummy points.<|endoftext|>
17480eb93a7fa614f251b448c8f4159a815f92bee725b9181ddafd7e7760d089
@click.command('test_wheel') @click.option('--wheels-config', default=xdg_config_file(name='wheels.yml'), type=click.Path(file_okay=True, writable=False, resolve_path=True), help='Path to wheels config file') @click.option('-w', '--wheel-dir', default=getcwd(), type=click.Path(file_okay=False), help='Test wheels in WHEEL-DIR') @click.option('--osk', default=xdg_config_file(name='osk.txt'), type=click.Path(dir_okay=True, writable=False, resolve_path=False), help=('File containing OSK, if the guest requires it (default: %s)' % xdg_config_file(name='osk.txt'))) @click.option('-i', '--image', multiple=True, help='Name of image(s) (in wheels config) under which wheel is building') @click.option('--qemu-port', default=None, help='Connect to running QEMU instance on PORT') @click.argument('wheel') @pass_context def cli(ctx, wheels_config, wheel_dir, osk, image, qemu_port, wheel): ' Test a wheel.\n ' if (not image): fatal('At least one image must be specified') try: ran_tests = False wheel_dir = abspath(wheel_dir) for forge in build_forges(ctx.config, wheels_config, wheel, images=image, osk_file=osk, qemu_port=qemu_port): info("Testing wheel on image '%s': %s", forge.image.name, wheel) names = forge.get_expected_names() debug('Expecting wheel files in %s:\n %s', wheel_dir, '\n '.join(names)) for (py, name) in zip(forge.image.pythons, names): _test_wheel(forge, py, name, forge.wheel_config.skip_tests, wheel_dir) ran_tests = True assert ran_tests, 'No tests ran' except KeyError: fatal('Package not found in %s: %s', wheels_config, wheel, exception=True) except Exception: fatal('Tests failed', exception=True)
Test a wheel.
starforge/commands/cmd_test_wheel.py
cli
natefoo/starforge
8
python
@click.command('test_wheel') @click.option('--wheels-config', default=xdg_config_file(name='wheels.yml'), type=click.Path(file_okay=True, writable=False, resolve_path=True), help='Path to wheels config file') @click.option('-w', '--wheel-dir', default=getcwd(), type=click.Path(file_okay=False), help='Test wheels in WHEEL-DIR') @click.option('--osk', default=xdg_config_file(name='osk.txt'), type=click.Path(dir_okay=True, writable=False, resolve_path=False), help=('File containing OSK, if the guest requires it (default: %s)' % xdg_config_file(name='osk.txt'))) @click.option('-i', '--image', multiple=True, help='Name of image(s) (in wheels config) under which wheel is building') @click.option('--qemu-port', default=None, help='Connect to running QEMU instance on PORT') @click.argument('wheel') @pass_context def cli(ctx, wheels_config, wheel_dir, osk, image, qemu_port, wheel): ' \n ' if (not image): fatal('At least one image must be specified') try: ran_tests = False wheel_dir = abspath(wheel_dir) for forge in build_forges(ctx.config, wheels_config, wheel, images=image, osk_file=osk, qemu_port=qemu_port): info("Testing wheel on image '%s': %s", forge.image.name, wheel) names = forge.get_expected_names() debug('Expecting wheel files in %s:\n %s', wheel_dir, '\n '.join(names)) for (py, name) in zip(forge.image.pythons, names): _test_wheel(forge, py, name, forge.wheel_config.skip_tests, wheel_dir) ran_tests = True assert ran_tests, 'No tests ran' except KeyError: fatal('Package not found in %s: %s', wheels_config, wheel, exception=True) except Exception: fatal('Tests failed', exception=True)
@click.command('test_wheel') @click.option('--wheels-config', default=xdg_config_file(name='wheels.yml'), type=click.Path(file_okay=True, writable=False, resolve_path=True), help='Path to wheels config file') @click.option('-w', '--wheel-dir', default=getcwd(), type=click.Path(file_okay=False), help='Test wheels in WHEEL-DIR') @click.option('--osk', default=xdg_config_file(name='osk.txt'), type=click.Path(dir_okay=True, writable=False, resolve_path=False), help=('File containing OSK, if the guest requires it (default: %s)' % xdg_config_file(name='osk.txt'))) @click.option('-i', '--image', multiple=True, help='Name of image(s) (in wheels config) under which wheel is building') @click.option('--qemu-port', default=None, help='Connect to running QEMU instance on PORT') @click.argument('wheel') @pass_context def cli(ctx, wheels_config, wheel_dir, osk, image, qemu_port, wheel): ' \n ' if (not image): fatal('At least one image must be specified') try: ran_tests = False wheel_dir = abspath(wheel_dir) for forge in build_forges(ctx.config, wheels_config, wheel, images=image, osk_file=osk, qemu_port=qemu_port): info("Testing wheel on image '%s': %s", forge.image.name, wheel) names = forge.get_expected_names() debug('Expecting wheel files in %s:\n %s', wheel_dir, '\n '.join(names)) for (py, name) in zip(forge.image.pythons, names): _test_wheel(forge, py, name, forge.wheel_config.skip_tests, wheel_dir) ran_tests = True assert ran_tests, 'No tests ran' except KeyError: fatal('Package not found in %s: %s', wheels_config, wheel, exception=True) except Exception: fatal('Tests failed', exception=True)<|docstring|>Test a wheel.<|endoftext|>
e24a65e8346a0e3ebc5f20f5e8da332c4d3dd0c9abc1261166905f0ddc5f0bc7
def nan_to_num(prediction): ' Function converts nan values to numerical\n\n :return prediction: prediction without nans\n ' if np.array([np.isnan(_) for _ in prediction]).any(): prediction = np.nan_to_num(prediction) return prediction
Function converts nan values to numerical :return prediction: prediction without nans
fedot/core/operations/evaluation/operation_implementations/models/discriminant_analysis.py
nan_to_num
bahia14/Fedot_Times_Series_Forecast
358
python
def nan_to_num(prediction): ' Function converts nan values to numerical\n\n :return prediction: prediction without nans\n ' if np.array([np.isnan(_) for _ in prediction]).any(): prediction = np.nan_to_num(prediction) return prediction
def nan_to_num(prediction): ' Function converts nan values to numerical\n\n :return prediction: prediction without nans\n ' if np.array([np.isnan(_) for _ in prediction]).any(): prediction = np.nan_to_num(prediction) return prediction<|docstring|>Function converts nan values to numerical :return prediction: prediction without nans<|endoftext|>
4f78cdb2b37878d3a1860b59a988bbdb7494e370083dfaa89aa0685e43bbadb7
def fit(self, train_data): ' Method fit model on a dataset\n\n :param train_data: data to train the model\n ' self.model.fit(train_data.features, train_data.target) return self.model
Method fit model on a dataset :param train_data: data to train the model
fedot/core/operations/evaluation/operation_implementations/models/discriminant_analysis.py
fit
bahia14/Fedot_Times_Series_Forecast
358
python
def fit(self, train_data): ' Method fit model on a dataset\n\n :param train_data: data to train the model\n ' self.model.fit(train_data.features, train_data.target) return self.model
def fit(self, train_data): ' Method fit model on a dataset\n\n :param train_data: data to train the model\n ' self.model.fit(train_data.features, train_data.target) return self.model<|docstring|>Method fit model on a dataset :param train_data: data to train the model<|endoftext|>
934d0a671693c5cb2847d46a7215e4ebf747a805ac6d450df35ecc9e351563f6
def predict(self, input_data, is_fit_pipeline_stage: Optional[bool]=None): ' Method make prediction with labels of classes\n\n :param input_data: data with features to process\n :param is_fit_pipeline_stage: is this fit or predict stage for pipeline\n ' prediction = self.model.predict(input_data.features) prediction = nan_to_num(prediction) return prediction
Method make prediction with labels of classes :param input_data: data with features to process :param is_fit_pipeline_stage: is this fit or predict stage for pipeline
fedot/core/operations/evaluation/operation_implementations/models/discriminant_analysis.py
predict
bahia14/Fedot_Times_Series_Forecast
358
python
def predict(self, input_data, is_fit_pipeline_stage: Optional[bool]=None): ' Method make prediction with labels of classes\n\n :param input_data: data with features to process\n :param is_fit_pipeline_stage: is this fit or predict stage for pipeline\n ' prediction = self.model.predict(input_data.features) prediction = nan_to_num(prediction) return prediction
def predict(self, input_data, is_fit_pipeline_stage: Optional[bool]=None): ' Method make prediction with labels of classes\n\n :param input_data: data with features to process\n :param is_fit_pipeline_stage: is this fit or predict stage for pipeline\n ' prediction = self.model.predict(input_data.features) prediction = nan_to_num(prediction) return prediction<|docstring|>Method make prediction with labels of classes :param input_data: data with features to process :param is_fit_pipeline_stage: is this fit or predict stage for pipeline<|endoftext|>
3dba83b15140e7c92538b8e8220c7d752ff57c3c957ddcbe0f69e63094648600
def predict_proba(self, input_data): ' Method make prediction with probabilities of classes\n\n :param input_data: data with features to process\n ' prediction = self.model.predict_proba(input_data.features) prediction = nan_to_num(prediction) return prediction
Method make prediction with probabilities of classes :param input_data: data with features to process
fedot/core/operations/evaluation/operation_implementations/models/discriminant_analysis.py
predict_proba
bahia14/Fedot_Times_Series_Forecast
358
python
def predict_proba(self, input_data): ' Method make prediction with probabilities of classes\n\n :param input_data: data with features to process\n ' prediction = self.model.predict_proba(input_data.features) prediction = nan_to_num(prediction) return prediction
def predict_proba(self, input_data): ' Method make prediction with probabilities of classes\n\n :param input_data: data with features to process\n ' prediction = self.model.predict_proba(input_data.features) prediction = nan_to_num(prediction) return prediction<|docstring|>Method make prediction with probabilities of classes :param input_data: data with features to process<|endoftext|>
2eb697b7aa19d8a50ccdb0c16fc6e0fe2be889ba82e78fe5fbad0eb0ae6fe5ea
def get_params(self): ' Method return parameters, which can be optimized for particular\n operation\n ' return self.model.get_params()
Method return parameters, which can be optimized for particular operation
fedot/core/operations/evaluation/operation_implementations/models/discriminant_analysis.py
get_params
bahia14/Fedot_Times_Series_Forecast
358
python
def get_params(self): ' Method return parameters, which can be optimized for particular\n operation\n ' return self.model.get_params()
def get_params(self): ' Method return parameters, which can be optimized for particular\n operation\n ' return self.model.get_params()<|docstring|>Method return parameters, which can be optimized for particular operation<|endoftext|>
9c817774916e37d7da38cfbb662015cd5a00efebf98ff20cfc36582c5646606e
def l2_regularization(W, reg_strength): '\n Computes L2 regularization loss on weights and its gradient\n\n Arguments:\n W, np array - weights\n reg_strength - float value\n\n Returns:\n loss, single value - l2 regularization loss\n gradient, np.array same shape as W - gradient of weight by l2 loss\n ' raise Exception('Not implemented!') return (loss, grad)
Computes L2 regularization loss on weights and its gradient Arguments: W, np array - weights reg_strength - float value Returns: loss, single value - l2 regularization loss gradient, np.array same shape as W - gradient of weight by l2 loss
assignments/assignment3/layers.py
l2_regularization
Unattend/dlcourse_ai
0
python
def l2_regularization(W, reg_strength): '\n Computes L2 regularization loss on weights and its gradient\n\n Arguments:\n W, np array - weights\n reg_strength - float value\n\n Returns:\n loss, single value - l2 regularization loss\n gradient, np.array same shape as W - gradient of weight by l2 loss\n ' raise Exception('Not implemented!') return (loss, grad)
def l2_regularization(W, reg_strength): '\n Computes L2 regularization loss on weights and its gradient\n\n Arguments:\n W, np array - weights\n reg_strength - float value\n\n Returns:\n loss, single value - l2 regularization loss\n gradient, np.array same shape as W - gradient of weight by l2 loss\n ' raise Exception('Not implemented!') return (loss, grad)<|docstring|>Computes L2 regularization loss on weights and its gradient Arguments: W, np array - weights reg_strength - float value Returns: loss, single value - l2 regularization loss gradient, np.array same shape as W - gradient of weight by l2 loss<|endoftext|>
4c79543118320f49e0f5ae7f23a3ab4d8a9878cb09d6d148ce158976f35755e1
def softmax_with_cross_entropy(preds, target_index): '\n Computes softmax and cross-entropy loss for model predictions,\n including the gradient\n\n Arguments:\n predictions, np array, shape is either (N) or (batch_size, N) -\n classifier output\n target_index: np array of int, shape is (1) or (batch_size) -\n index of the true class for given sample(s)\n\n Returns:\n loss, single value - cross-entropy loss\n dprediction, np array same shape as predictions - gradient of predictions by loss value\n ' prob = (- np.exp((preds - np.max(preds, axis=1, keepdims=True)))) prob /= np.sum(prob, axis=1, keepdims=True) probs = prob.reshape(preds.shape) t1 = np.take_along_axis(probs, (target_index.reshape((- 1), 1) - 1), axis=1) loss = (- np.log(t1)) for (i, k) in enumerate(target_index): probs[(i, (k - 1))] -= 1 probs /= len(target_index) return (loss.mean(), probs)
Computes softmax and cross-entropy loss for model predictions, including the gradient Arguments: predictions, np array, shape is either (N) or (batch_size, N) - classifier output target_index: np array of int, shape is (1) or (batch_size) - index of the true class for given sample(s) Returns: loss, single value - cross-entropy loss dprediction, np array same shape as predictions - gradient of predictions by loss value
assignments/assignment3/layers.py
softmax_with_cross_entropy
Unattend/dlcourse_ai
0
python
def softmax_with_cross_entropy(preds, target_index): '\n Computes softmax and cross-entropy loss for model predictions,\n including the gradient\n\n Arguments:\n predictions, np array, shape is either (N) or (batch_size, N) -\n classifier output\n target_index: np array of int, shape is (1) or (batch_size) -\n index of the true class for given sample(s)\n\n Returns:\n loss, single value - cross-entropy loss\n dprediction, np array same shape as predictions - gradient of predictions by loss value\n ' prob = (- np.exp((preds - np.max(preds, axis=1, keepdims=True)))) prob /= np.sum(prob, axis=1, keepdims=True) probs = prob.reshape(preds.shape) t1 = np.take_along_axis(probs, (target_index.reshape((- 1), 1) - 1), axis=1) loss = (- np.log(t1)) for (i, k) in enumerate(target_index): probs[(i, (k - 1))] -= 1 probs /= len(target_index) return (loss.mean(), probs)
def softmax_with_cross_entropy(preds, target_index): '\n Computes softmax and cross-entropy loss for model predictions,\n including the gradient\n\n Arguments:\n predictions, np array, shape is either (N) or (batch_size, N) -\n classifier output\n target_index: np array of int, shape is (1) or (batch_size) -\n index of the true class for given sample(s)\n\n Returns:\n loss, single value - cross-entropy loss\n dprediction, np array same shape as predictions - gradient of predictions by loss value\n ' prob = (- np.exp((preds - np.max(preds, axis=1, keepdims=True)))) prob /= np.sum(prob, axis=1, keepdims=True) probs = prob.reshape(preds.shape) t1 = np.take_along_axis(probs, (target_index.reshape((- 1), 1) - 1), axis=1) loss = (- np.log(t1)) for (i, k) in enumerate(target_index): probs[(i, (k - 1))] -= 1 probs /= len(target_index) return (loss.mean(), probs)<|docstring|>Computes softmax and cross-entropy loss for model predictions, including the gradient Arguments: predictions, np array, shape is either (N) or (batch_size, N) - classifier output target_index: np array of int, shape is (1) or (batch_size) - index of the true class for given sample(s) Returns: loss, single value - cross-entropy loss dprediction, np array same shape as predictions - gradient of predictions by loss value<|endoftext|>
a9c902b4dc2ea293848e2806b990b2a87d98b81af8dc3935318476abf8cab26a
def __init__(self, in_channels, out_channels, filter_size, padding): "\n Initializes the layer\n \n Arguments:\n in_channels, int - number of input channels\n out_channels, int - number of output channels\n filter_size, int - size of the conv filter\n padding, int - number of 'pixels' to pad on each side\n " self.filter_size = filter_size self.in_channels = in_channels self.out_channels = out_channels self.W = Param(np.random.randn(filter_size, filter_size, in_channels, out_channels)) self.B = Param(np.zeros(out_channels)) self.padding = padding self.X_with_pad = None
Initializes the layer Arguments: in_channels, int - number of input channels out_channels, int - number of output channels filter_size, int - size of the conv filter padding, int - number of 'pixels' to pad on each side
assignments/assignment3/layers.py
__init__
Unattend/dlcourse_ai
0
python
def __init__(self, in_channels, out_channels, filter_size, padding): "\n Initializes the layer\n \n Arguments:\n in_channels, int - number of input channels\n out_channels, int - number of output channels\n filter_size, int - size of the conv filter\n padding, int - number of 'pixels' to pad on each side\n " self.filter_size = filter_size self.in_channels = in_channels self.out_channels = out_channels self.W = Param(np.random.randn(filter_size, filter_size, in_channels, out_channels)) self.B = Param(np.zeros(out_channels)) self.padding = padding self.X_with_pad = None
def __init__(self, in_channels, out_channels, filter_size, padding): "\n Initializes the layer\n \n Arguments:\n in_channels, int - number of input channels\n out_channels, int - number of output channels\n filter_size, int - size of the conv filter\n padding, int - number of 'pixels' to pad on each side\n " self.filter_size = filter_size self.in_channels = in_channels self.out_channels = out_channels self.W = Param(np.random.randn(filter_size, filter_size, in_channels, out_channels)) self.B = Param(np.zeros(out_channels)) self.padding = padding self.X_with_pad = None<|docstring|>Initializes the layer Arguments: in_channels, int - number of input channels out_channels, int - number of output channels filter_size, int - size of the conv filter padding, int - number of 'pixels' to pad on each side<|endoftext|>
a3f1998be95d724d5766b8a4f7728a37709576df897091336aa083e48e1d44d8
def __init__(self, pool_size, stride): '\n Initializes the max pool\n\n Arguments:\n pool_size, int - area to pool\n stride, int - step size between pooling windows\n ' self.pool_size = pool_size self.stride = stride self.X = None
Initializes the max pool Arguments: pool_size, int - area to pool stride, int - step size between pooling windows
assignments/assignment3/layers.py
__init__
Unattend/dlcourse_ai
0
python
def __init__(self, pool_size, stride): '\n Initializes the max pool\n\n Arguments:\n pool_size, int - area to pool\n stride, int - step size between pooling windows\n ' self.pool_size = pool_size self.stride = stride self.X = None
def __init__(self, pool_size, stride): '\n Initializes the max pool\n\n Arguments:\n pool_size, int - area to pool\n stride, int - step size between pooling windows\n ' self.pool_size = pool_size self.stride = stride self.X = None<|docstring|>Initializes the max pool Arguments: pool_size, int - area to pool stride, int - step size between pooling windows<|endoftext|>
4f43350bec69324df318b3d31052195fad81d78a2c81990d81636b050c99dec0
def __init__(self, genetic_table='default', start_codon=None, translation=None, location=None, boost=1.0): 'Initialize.' self.translation = translation location = Location.from_data(location) if ((location is not None) and (location.strand not in [(- 1), 1])): location = Location(location.start, location.end, 1) self.set_location(location) self.start_codon = start_codon self.boost = boost if (genetic_table == 'default'): genetic_table = self.default_genetic_table self.genetic_table = genetic_table self.initialize_translation_from_problem = (translation is None) self.initialize_location_from_problem = (location is None) self.backtranslation_table = get_backtranslation_table(genetic_table)
Initialize.
dnachisel/builtin_specifications/EnforceTranslation.py
__init__
Edinburgh-Genome-Foundry/DnaChisel
124
python
def __init__(self, genetic_table='default', start_codon=None, translation=None, location=None, boost=1.0): self.translation = translation location = Location.from_data(location) if ((location is not None) and (location.strand not in [(- 1), 1])): location = Location(location.start, location.end, 1) self.set_location(location) self.start_codon = start_codon self.boost = boost if (genetic_table == 'default'): genetic_table = self.default_genetic_table self.genetic_table = genetic_table self.initialize_translation_from_problem = (translation is None) self.initialize_location_from_problem = (location is None) self.backtranslation_table = get_backtranslation_table(genetic_table)
def __init__(self, genetic_table='default', start_codon=None, translation=None, location=None, boost=1.0): self.translation = translation location = Location.from_data(location) if ((location is not None) and (location.strand not in [(- 1), 1])): location = Location(location.start, location.end, 1) self.set_location(location) self.start_codon = start_codon self.boost = boost if (genetic_table == 'default'): genetic_table = self.default_genetic_table self.genetic_table = genetic_table self.initialize_translation_from_problem = (translation is None) self.initialize_location_from_problem = (location is None) self.backtranslation_table = get_backtranslation_table(genetic_table)<|docstring|>Initialize.<|endoftext|>
27702806f835654574db6d23412e468cd1c4200ec0d164cca5052121da2f1ce2
def set_location(self, location): 'Check that the location length is valid before setting it.' if (location is not None): if (len(location) % 3): raise ValueError(('Location length in Codon Specifications should be a 3x. Location %s has length %d' % (location, len(location)))) if ((self.translation is not None) and (len(location) != (3 * len(self.translation)))): raise ValueError(('Window size (%d bp) incompatible with translation (%d aa)' % (len(location), len(self.translation)))) self.location = location
Check that the location length is valid before setting it.
dnachisel/builtin_specifications/EnforceTranslation.py
set_location
Edinburgh-Genome-Foundry/DnaChisel
124
python
def set_location(self, location): if (location is not None): if (len(location) % 3): raise ValueError(('Location length in Codon Specifications should be a 3x. Location %s has length %d' % (location, len(location)))) if ((self.translation is not None) and (len(location) != (3 * len(self.translation)))): raise ValueError(('Window size (%d bp) incompatible with translation (%d aa)' % (len(location), len(self.translation)))) self.location = location
def set_location(self, location): if (location is not None): if (len(location) % 3): raise ValueError(('Location length in Codon Specifications should be a 3x. Location %s has length %d' % (location, len(location)))) if ((self.translation is not None) and (len(location) != (3 * len(self.translation)))): raise ValueError(('Window size (%d bp) incompatible with translation (%d aa)' % (len(location), len(self.translation)))) self.location = location<|docstring|>Check that the location length is valid before setting it.<|endoftext|>
467e6b4a33d9059ed9c34d85f1b6cc300760e6eca81e25abac1a2d15d30798b9
def initialized_on_problem(self, problem, role): 'Get translation from the sequence if it is not already set.' result = self._copy_with_full_span_if_no_location(problem) if (result.translation is None): subsequence = result.location.extract_sequence(problem.sequence) translation = translate(subsequence, table=result.genetic_table, assume_start_codon=(result.start_codon is not None)) result = result.copy_with_changes(translation=translation) if (len(result.location) != (3 * len(result.translation))): raise ValueError(('Window size (%d bp) incompatible with translation (%d aa)' % (len(result.location), len(result.translation)))) if ((result.start_codon is not None) and (result.translation[0] != 'M')): raise ValueError(("Spec. %s specificies a start_codon parameter, but the translation at this location does not start with Met. Maybe you should provide a 'translation' parameter or set up a different Genetic Code." % result.label(use_short_form=True))) return result
Get translation from the sequence if it is not already set.
dnachisel/builtin_specifications/EnforceTranslation.py
initialized_on_problem
Edinburgh-Genome-Foundry/DnaChisel
124
python
def initialized_on_problem(self, problem, role): result = self._copy_with_full_span_if_no_location(problem) if (result.translation is None): subsequence = result.location.extract_sequence(problem.sequence) translation = translate(subsequence, table=result.genetic_table, assume_start_codon=(result.start_codon is not None)) result = result.copy_with_changes(translation=translation) if (len(result.location) != (3 * len(result.translation))): raise ValueError(('Window size (%d bp) incompatible with translation (%d aa)' % (len(result.location), len(result.translation)))) if ((result.start_codon is not None) and (result.translation[0] != 'M')): raise ValueError(("Spec. %s specificies a start_codon parameter, but the translation at this location does not start with Met. Maybe you should provide a 'translation' parameter or set up a different Genetic Code." % result.label(use_short_form=True))) return result
def initialized_on_problem(self, problem, role): result = self._copy_with_full_span_if_no_location(problem) if (result.translation is None): subsequence = result.location.extract_sequence(problem.sequence) translation = translate(subsequence, table=result.genetic_table, assume_start_codon=(result.start_codon is not None)) result = result.copy_with_changes(translation=translation) if (len(result.location) != (3 * len(result.translation))): raise ValueError(('Window size (%d bp) incompatible with translation (%d aa)' % (len(result.location), len(result.translation)))) if ((result.start_codon is not None) and (result.translation[0] != 'M')): raise ValueError(("Spec. %s specificies a start_codon parameter, but the translation at this location does not start with Met. Maybe you should provide a 'translation' parameter or set up a different Genetic Code." % result.label(use_short_form=True))) return result<|docstring|>Get translation from the sequence if it is not already set.<|endoftext|>
c38e2ee43995641a190429b3573d107dadfee697cf8edd7f50c5c7e3e705fe53
def evaluate(self, problem): 'Score is the number of wrong-translation codons.' location = (self.location if (self.location is not None) else Location(0, len(problem.sequence), 1)) subsequence = location.extract_sequence(problem.sequence) translation = translate(subsequence, table=self.genetic_table, assume_start_codon=(self.start_codon is not None)) errors_locations = [self.codon_index_to_location(index) for (index, amino_acid) in enumerate(translation) if (amino_acid != self.translation[index])] return SpecEvaluation(self, problem, score=(- len(errors_locations)), locations=errors_locations, message=('All OK.' if (len(errors_locations) == 0) else ('Wrong translation at locations %s' % errors_locations)))
Score is the number of wrong-translation codons.
dnachisel/builtin_specifications/EnforceTranslation.py
evaluate
Edinburgh-Genome-Foundry/DnaChisel
124
python
def evaluate(self, problem): location = (self.location if (self.location is not None) else Location(0, len(problem.sequence), 1)) subsequence = location.extract_sequence(problem.sequence) translation = translate(subsequence, table=self.genetic_table, assume_start_codon=(self.start_codon is not None)) errors_locations = [self.codon_index_to_location(index) for (index, amino_acid) in enumerate(translation) if (amino_acid != self.translation[index])] return SpecEvaluation(self, problem, score=(- len(errors_locations)), locations=errors_locations, message=('All OK.' if (len(errors_locations) == 0) else ('Wrong translation at locations %s' % errors_locations)))
def evaluate(self, problem): location = (self.location if (self.location is not None) else Location(0, len(problem.sequence), 1)) subsequence = location.extract_sequence(problem.sequence) translation = translate(subsequence, table=self.genetic_table, assume_start_codon=(self.start_codon is not None)) errors_locations = [self.codon_index_to_location(index) for (index, amino_acid) in enumerate(translation) if (amino_acid != self.translation[index])] return SpecEvaluation(self, problem, score=(- len(errors_locations)), locations=errors_locations, message=('All OK.' if (len(errors_locations) == 0) else ('Wrong translation at locations %s' % errors_locations)))<|docstring|>Score is the number of wrong-translation codons.<|endoftext|>
7e50bbe3bdb6b40e06c0f996666dd9cb8bce4d6d480444574ff405498b206d6f
def __init__(self, sequence, heuristic, max_mutation_rate): ' This class represents a single gene sequence.\n \n A gene sequence is composed of tasks. Each spot in the sequence,\n known as an allele, can be one of several genes specified by the \n Genome class.' self.sequence = sequence self.length = len(sequence) self.heuristic = heuristic self.max_mutation_rate = max_mutation_rate self.fitness = self.heuristic.get_fitness(self.sequence)
This class represents a single gene sequence. A gene sequence is composed of tasks. Each spot in the sequence, known as an allele, can be one of several genes specified by the Genome class.
evoschedule/app/gene_sequence.py
__init__
TadayoshiCarvajal/EvoSchedule
0
python
def __init__(self, sequence, heuristic, max_mutation_rate): ' This class represents a single gene sequence.\n \n A gene sequence is composed of tasks. Each spot in the sequence,\n known as an allele, can be one of several genes specified by the \n Genome class.' self.sequence = sequence self.length = len(sequence) self.heuristic = heuristic self.max_mutation_rate = max_mutation_rate self.fitness = self.heuristic.get_fitness(self.sequence)
def __init__(self, sequence, heuristic, max_mutation_rate): ' This class represents a single gene sequence.\n \n A gene sequence is composed of tasks. Each spot in the sequence,\n known as an allele, can be one of several genes specified by the \n Genome class.' self.sequence = sequence self.length = len(sequence) self.heuristic = heuristic self.max_mutation_rate = max_mutation_rate self.fitness = self.heuristic.get_fitness(self.sequence)<|docstring|>This class represents a single gene sequence. A gene sequence is composed of tasks. Each spot in the sequence, known as an allele, can be one of several genes specified by the Genome class.<|endoftext|>
79c5a0e3dd019cde773ba7bb7ef8368b4c1467de1b878efe62b63da78cb086a4
def get_mutated(self): ' Generates the child sequence of this sequence.' from random import randint from math import ceil max_swaps = ceil((self.max_mutation_rate * self.length)) n_swaps = randint(0, max_swaps) mutated = list(self.sequence) for i in range(n_swaps): a = randint(0, (self.length - 1)) b = randint(0, (self.length - 1)) (mutated[a], mutated[b]) = (mutated[b], mutated[a]) mutated_gene_sequence = GeneSequence(mutated, self.heuristic, self.max_mutation_rate) return mutated_gene_sequence
Generates the child sequence of this sequence.
evoschedule/app/gene_sequence.py
get_mutated
TadayoshiCarvajal/EvoSchedule
0
python
def get_mutated(self): ' ' from random import randint from math import ceil max_swaps = ceil((self.max_mutation_rate * self.length)) n_swaps = randint(0, max_swaps) mutated = list(self.sequence) for i in range(n_swaps): a = randint(0, (self.length - 1)) b = randint(0, (self.length - 1)) (mutated[a], mutated[b]) = (mutated[b], mutated[a]) mutated_gene_sequence = GeneSequence(mutated, self.heuristic, self.max_mutation_rate) return mutated_gene_sequence
def get_mutated(self): ' ' from random import randint from math import ceil max_swaps = ceil((self.max_mutation_rate * self.length)) n_swaps = randint(0, max_swaps) mutated = list(self.sequence) for i in range(n_swaps): a = randint(0, (self.length - 1)) b = randint(0, (self.length - 1)) (mutated[a], mutated[b]) = (mutated[b], mutated[a]) mutated_gene_sequence = GeneSequence(mutated, self.heuristic, self.max_mutation_rate) return mutated_gene_sequence<|docstring|>Generates the child sequence of this sequence.<|endoftext|>
01aa751bf5110b9758a16b94ad4ccfddb771f924b8f1ef71225a6e7e722e4655
def consolidate_schedule(self): ' Combines same task in adjacent time windows to a single time frame.\n i.e., 12:00AM - 01:00PM - Sleep\n 01:00PM - 02:00PM - Sleep\n 02:00PM - 03:00PM - Sleep\n 03:00PM - 04:00PM - Sleep\n 04:00PM - 05:00PM - Sleep\n \n gets combined into\n 12:00AM - 05:00PM - Sleep' time_windows = self.heuristic.time_windows task = self.sequence[0] start = time_windows[0][0] time_frames = [] for i in range(len(time_windows)): if (self.sequence[i] != task): end = time_windows[i][0] time_frames.append((start, end, task)) task = self.sequence[i] start = time_windows[i][0] end = time_windows[0][0] time_frames.append((start, end, task)) return time_frames
Combines same task in adjacent time windows to a single time frame. i.e., 12:00AM - 01:00PM - Sleep 01:00PM - 02:00PM - Sleep 02:00PM - 03:00PM - Sleep 03:00PM - 04:00PM - Sleep 04:00PM - 05:00PM - Sleep gets combined into 12:00AM - 05:00PM - Sleep
evoschedule/app/gene_sequence.py
consolidate_schedule
TadayoshiCarvajal/EvoSchedule
0
python
def consolidate_schedule(self): ' Combines same task in adjacent time windows to a single time frame.\n i.e., 12:00AM - 01:00PM - Sleep\n 01:00PM - 02:00PM - Sleep\n 02:00PM - 03:00PM - Sleep\n 03:00PM - 04:00PM - Sleep\n 04:00PM - 05:00PM - Sleep\n \n gets combined into\n 12:00AM - 05:00PM - Sleep' time_windows = self.heuristic.time_windows task = self.sequence[0] start = time_windows[0][0] time_frames = [] for i in range(len(time_windows)): if (self.sequence[i] != task): end = time_windows[i][0] time_frames.append((start, end, task)) task = self.sequence[i] start = time_windows[i][0] end = time_windows[0][0] time_frames.append((start, end, task)) return time_frames
def consolidate_schedule(self): ' Combines same task in adjacent time windows to a single time frame.\n i.e., 12:00AM - 01:00PM - Sleep\n 01:00PM - 02:00PM - Sleep\n 02:00PM - 03:00PM - Sleep\n 03:00PM - 04:00PM - Sleep\n 04:00PM - 05:00PM - Sleep\n \n gets combined into\n 12:00AM - 05:00PM - Sleep' time_windows = self.heuristic.time_windows task = self.sequence[0] start = time_windows[0][0] time_frames = [] for i in range(len(time_windows)): if (self.sequence[i] != task): end = time_windows[i][0] time_frames.append((start, end, task)) task = self.sequence[i] start = time_windows[i][0] end = time_windows[0][0] time_frames.append((start, end, task)) return time_frames<|docstring|>Combines same task in adjacent time windows to a single time frame. i.e., 12:00AM - 01:00PM - Sleep 01:00PM - 02:00PM - Sleep 02:00PM - 03:00PM - Sleep 03:00PM - 04:00PM - Sleep 04:00PM - 05:00PM - Sleep gets combined into 12:00AM - 05:00PM - Sleep<|endoftext|>
e839d95d8d9b95d318332153a257e269654d0b72caf84125740f3ef82b0a6ec2
def get_schedule(self): ' Returns the consolidated schedule.' s = '' for time_frame in self.consolidate_schedule(): start = time_frame[0] end = time_frame[1] task = time_frame[2] s += f'''{start:10s} - {end:10s} === {task:10s} ''' return s
Returns the consolidated schedule.
evoschedule/app/gene_sequence.py
get_schedule
TadayoshiCarvajal/EvoSchedule
0
python
def get_schedule(self): ' ' s = for time_frame in self.consolidate_schedule(): start = time_frame[0] end = time_frame[1] task = time_frame[2] s += f'{start:10s} - {end:10s} === {task:10s} ' return s
def get_schedule(self): ' ' s = for time_frame in self.consolidate_schedule(): start = time_frame[0] end = time_frame[1] task = time_frame[2] s += f'{start:10s} - {end:10s} === {task:10s} ' return s<|docstring|>Returns the consolidated schedule.<|endoftext|>
cf4939c70e9a8c14a9eaa2357d7f179344108bcc0a40558134db27de1efb85b1
def replace_consistent_dropout(module: torch.nn.Module, inplace: Optional[bool]=True) -> torch.nn.Module: "\n Recursively replaces dropout modules in `module` such that dropout is performed\n regardless of the model's mode *and uses the same mask across batches*.\n The mask is refreshed each time `model.eval()` is invoked but the mask is guaranteed\n to be consistent across all batch (different masks for each item *within* the batch).\n\n Args:\n module (`torch.nn.Module`): PyTorch module object\n inplace (bool, optional): If `True`, the `model` is modified *in-place*. If `False`, `model` is not modified and a new model is cloned.\n\n Returns:\n `torch.nn.Module`: Same `module` instance if `inplace` is `False`, else a brand new module.\n " if (not inplace): module = copy.deepcopy(module) _replace_dropout(module, prefix='Consistent') return module
Recursively replaces dropout modules in `module` such that dropout is performed regardless of the model's mode *and uses the same mask across batches*. The mask is refreshed each time `model.eval()` is invoked but the mask is guaranteed to be consistent across all batch (different masks for each item *within* the batch). Args: module (`torch.nn.Module`): PyTorch module object inplace (bool, optional): If `True`, the `model` is modified *in-place*. If `False`, `model` is not modified and a new model is cloned. Returns: `torch.nn.Module`: Same `module` instance if `inplace` is `False`, else a brand new module.
experiments/thesis/exploration/very_consistent_presnet/train.py
replace_consistent_dropout
jiahfong/alr
2
python
def replace_consistent_dropout(module: torch.nn.Module, inplace: Optional[bool]=True) -> torch.nn.Module: "\n Recursively replaces dropout modules in `module` such that dropout is performed\n regardless of the model's mode *and uses the same mask across batches*.\n The mask is refreshed each time `model.eval()` is invoked but the mask is guaranteed\n to be consistent across all batch (different masks for each item *within* the batch).\n\n Args:\n module (`torch.nn.Module`): PyTorch module object\n inplace (bool, optional): If `True`, the `model` is modified *in-place*. If `False`, `model` is not modified and a new model is cloned.\n\n Returns:\n `torch.nn.Module`: Same `module` instance if `inplace` is `False`, else a brand new module.\n " if (not inplace): module = copy.deepcopy(module) _replace_dropout(module, prefix='Consistent') return module
def replace_consistent_dropout(module: torch.nn.Module, inplace: Optional[bool]=True) -> torch.nn.Module: "\n Recursively replaces dropout modules in `module` such that dropout is performed\n regardless of the model's mode *and uses the same mask across batches*.\n The mask is refreshed each time `model.eval()` is invoked but the mask is guaranteed\n to be consistent across all batch (different masks for each item *within* the batch).\n\n Args:\n module (`torch.nn.Module`): PyTorch module object\n inplace (bool, optional): If `True`, the `model` is modified *in-place*. If `False`, `model` is not modified and a new model is cloned.\n\n Returns:\n `torch.nn.Module`: Same `module` instance if `inplace` is `False`, else a brand new module.\n " if (not inplace): module = copy.deepcopy(module) _replace_dropout(module, prefix='Consistent') return module<|docstring|>Recursively replaces dropout modules in `module` such that dropout is performed regardless of the model's mode *and uses the same mask across batches*. The mask is refreshed each time `model.eval()` is invoked but the mask is guaranteed to be consistent across all batch (different masks for each item *within* the batch). Args: module (`torch.nn.Module`): PyTorch module object inplace (bool, optional): If `True`, the `model` is modified *in-place*. If `False`, `model` is not modified and a new model is cloned. Returns: `torch.nn.Module`: Same `module` instance if `inplace` is `False`, else a brand new module.<|endoftext|>
ca75d33a35a234f9c0dfd7a40db69eb0cf5168682c57878e38eb25b33de07714
def __init__(self, model: nn.Module, forward: Optional[int]=100, reduce: Optional[str]='logsumexp', inplace: Optional[bool]=True, output_transform: Optional[Callable[([torch.Tensor], torch.Tensor)]]=None, fast: Optional[bool]=False): '\n A wrapper that turns a regular PyTorch module into one that implements\n `Monte Carlo Dropout <https://arxiv.org/abs/1506.02142>`_ (Gal & Ghahramani, 2016).\n\n Args:\n model (`nn.Module`): `torch.nn.Module` object. This model\'s forward pass\n should return (log) probabilities. I.e. the final layer should\n be `softmax` or `log_softmax`. Otherwise, `output_transform` can\n be used to convert `model`\'s output into probabilities.\n forward (int, optional): number of stochastic forward passes\n reduce (str, optional): either `"logsumexp"` or `"mean"`. This is used to reduce the\n n `forward` stochastic passes during evaluation. If `model` or `output_transform`\n returns probabilities (i.e. `F.softmax`), this should be `"mean"`;\n otherwise it should be "logsumexp" if they return log-probabilities (i.e. `F.log_softmax`).\n [default = `"logsumexp"`]\n inplace (bool, optional): if `True`, the `model` is modified *in-place* when the dropout layers are\n replaced. If `False`, `model` is not modified and a new model is cloned.\n output_transform (callable, optional): model\'s output is given as input and the output of this\n callable is expected to return (log) probabilities.\n fast (bool, optional): if true, :meth:`stochastic_forward` will stack the batch dimension for faster\n MC dropout passes. If false, then forward passes are called in a for-loop. 
Note,\n the former will consume (`forward`) more memory.\n Attributes:\n base_model (`nn.Module`): provided base model (a clone if `inplace=True`)\n n_forward (int): number of forward passes (`forward`)\n ' super(MCDropout, self).__init__() self.base_model = replace_consistent_dropout(model, inplace=inplace) self.n_forward = forward self._output_transform = (output_transform if (output_transform is not None) else (lambda x: x)) self._reduce = reduce.lower() assert (self._reduce in {'logsumexp', 'mean'}) self._fast = fast self.snap()
A wrapper that turns a regular PyTorch module into one that implements `Monte Carlo Dropout <https://arxiv.org/abs/1506.02142>`_ (Gal & Ghahramani, 2016). Args: model (`nn.Module`): `torch.nn.Module` object. This model's forward pass should return (log) probabilities. I.e. the final layer should be `softmax` or `log_softmax`. Otherwise, `output_transform` can be used to convert `model`'s output into probabilities. forward (int, optional): number of stochastic forward passes reduce (str, optional): either `"logsumexp"` or `"mean"`. This is used to reduce the n `forward` stochastic passes during evaluation. If `model` or `output_transform` returns probabilities (i.e. `F.softmax`), this should be `"mean"`; otherwise it should be "logsumexp" if they return log-probabilities (i.e. `F.log_softmax`). [default = `"logsumexp"`] inplace (bool, optional): if `True`, the `model` is modified *in-place* when the dropout layers are replaced. If `False`, `model` is not modified and a new model is cloned. output_transform (callable, optional): model's output is given as input and the output of this callable is expected to return (log) probabilities. fast (bool, optional): if true, :meth:`stochastic_forward` will stack the batch dimension for faster MC dropout passes. If false, then forward passes are called in a for-loop. Note, the former will consume (`forward`) more memory. Attributes: base_model (`nn.Module`): provided base model (a clone if `inplace=True`) n_forward (int): number of forward passes (`forward`)
experiments/thesis/exploration/very_consistent_presnet/train.py
__init__
jiahfong/alr
2
python
def __init__(self, model: nn.Module, forward: Optional[int]=100, reduce: Optional[str]='logsumexp', inplace: Optional[bool]=True, output_transform: Optional[Callable[([torch.Tensor], torch.Tensor)]]=None, fast: Optional[bool]=False): '\n A wrapper that turns a regular PyTorch module into one that implements\n `Monte Carlo Dropout <https://arxiv.org/abs/1506.02142>`_ (Gal & Ghahramani, 2016).\n\n Args:\n model (`nn.Module`): `torch.nn.Module` object. This model\'s forward pass\n should return (log) probabilities. I.e. the final layer should\n be `softmax` or `log_softmax`. Otherwise, `output_transform` can\n be used to convert `model`\'s output into probabilities.\n forward (int, optional): number of stochastic forward passes\n reduce (str, optional): either `"logsumexp"` or `"mean"`. This is used to reduce the\n n `forward` stochastic passes during evaluation. If `model` or `output_transform`\n returns probabilities (i.e. `F.softmax`), this should be `"mean"`;\n otherwise it should be "logsumexp" if they return log-probabilities (i.e. `F.log_softmax`).\n [default = `"logsumexp"`]\n inplace (bool, optional): if `True`, the `model` is modified *in-place* when the dropout layers are\n replaced. If `False`, `model` is not modified and a new model is cloned.\n output_transform (callable, optional): model\'s output is given as input and the output of this\n callable is expected to return (log) probabilities.\n fast (bool, optional): if true, :meth:`stochastic_forward` will stack the batch dimension for faster\n MC dropout passes. If false, then forward passes are called in a for-loop. 
Note,\n the former will consume (`forward`) more memory.\n Attributes:\n base_model (`nn.Module`): provided base model (a clone if `inplace=True`)\n n_forward (int): number of forward passes (`forward`)\n ' super(MCDropout, self).__init__() self.base_model = replace_consistent_dropout(model, inplace=inplace) self.n_forward = forward self._output_transform = (output_transform if (output_transform is not None) else (lambda x: x)) self._reduce = reduce.lower() assert (self._reduce in {'logsumexp', 'mean'}) self._fast = fast self.snap()
def __init__(self, model: nn.Module, forward: Optional[int]=100, reduce: Optional[str]='logsumexp', inplace: Optional[bool]=True, output_transform: Optional[Callable[([torch.Tensor], torch.Tensor)]]=None, fast: Optional[bool]=False): '\n A wrapper that turns a regular PyTorch module into one that implements\n `Monte Carlo Dropout <https://arxiv.org/abs/1506.02142>`_ (Gal & Ghahramani, 2016).\n\n Args:\n model (`nn.Module`): `torch.nn.Module` object. This model\'s forward pass\n should return (log) probabilities. I.e. the final layer should\n be `softmax` or `log_softmax`. Otherwise, `output_transform` can\n be used to convert `model`\'s output into probabilities.\n forward (int, optional): number of stochastic forward passes\n reduce (str, optional): either `"logsumexp"` or `"mean"`. This is used to reduce the\n n `forward` stochastic passes during evaluation. If `model` or `output_transform`\n returns probabilities (i.e. `F.softmax`), this should be `"mean"`;\n otherwise it should be "logsumexp" if they return log-probabilities (i.e. `F.log_softmax`).\n [default = `"logsumexp"`]\n inplace (bool, optional): if `True`, the `model` is modified *in-place* when the dropout layers are\n replaced. If `False`, `model` is not modified and a new model is cloned.\n output_transform (callable, optional): model\'s output is given as input and the output of this\n callable is expected to return (log) probabilities.\n fast (bool, optional): if true, :meth:`stochastic_forward` will stack the batch dimension for faster\n MC dropout passes. If false, then forward passes are called in a for-loop. 
Note,\n the former will consume (`forward`) more memory.\n Attributes:\n base_model (`nn.Module`): provided base model (a clone if `inplace=True`)\n n_forward (int): number of forward passes (`forward`)\n ' super(MCDropout, self).__init__() self.base_model = replace_consistent_dropout(model, inplace=inplace) self.n_forward = forward self._output_transform = (output_transform if (output_transform is not None) else (lambda x: x)) self._reduce = reduce.lower() assert (self._reduce in {'logsumexp', 'mean'}) self._fast = fast self.snap()<|docstring|>A wrapper that turns a regular PyTorch module into one that implements `Monte Carlo Dropout <https://arxiv.org/abs/1506.02142>`_ (Gal & Ghahramani, 2016). Args: model (`nn.Module`): `torch.nn.Module` object. This model's forward pass should return (log) probabilities. I.e. the final layer should be `softmax` or `log_softmax`. Otherwise, `output_transform` can be used to convert `model`'s output into probabilities. forward (int, optional): number of stochastic forward passes reduce (str, optional): either `"logsumexp"` or `"mean"`. This is used to reduce the n `forward` stochastic passes during evaluation. If `model` or `output_transform` returns probabilities (i.e. `F.softmax`), this should be `"mean"`; otherwise it should be "logsumexp" if they return log-probabilities (i.e. `F.log_softmax`). [default = `"logsumexp"`] inplace (bool, optional): if `True`, the `model` is modified *in-place* when the dropout layers are replaced. If `False`, `model` is not modified and a new model is cloned. output_transform (callable, optional): model's output is given as input and the output of this callable is expected to return (log) probabilities. fast (bool, optional): if true, :meth:`stochastic_forward` will stack the batch dimension for faster MC dropout passes. If false, then forward passes are called in a for-loop. Note, the former will consume (`forward`) more memory. 
Attributes: base_model (`nn.Module`): provided base model (a clone if `inplace=True`) n_forward (int): number of forward passes (`forward`)<|endoftext|>
24be77d9dbd7b2d3744195a63c41b12ccf3b950644963a25543c57fa2f3eabb4
def forward(self, x: torch.Tensor) -> torch.Tensor: '\n Forward pass. *Note, this function has a different behaviour in eval mode*.\n It returns the (log) mean score of :meth:`stochastic_forward` passes. In other words,\n if `self.training` is `False`, the following is returned instead:\n\n .. code:: python\n\n # if reduce = "logsumexp"\n torch.logsumexp(self.stochastic_forward(x), dim=0) - log(self.n_forward)\n\n # if reduce = "mean"\n torch.mean(self.stochastic_forward(x), dim=0)\n\n\n Args:\n x (`torch.Tensor`): input tensor, any size\n\n Returns:\n `torch.Tensor`:\n output tensor of size :math:`N \\times C` where\n :math:`N` is the batch size and :math:`C` is the number of target classes.\n\n Note:\n if a single forward pass is required during eval mode, one could use the following\n instead: `base_model(x)`\n ' if self.training: return self._output_transform(self.base_model(x)) if (self._reduce == 'mean'): return torch.mean(self.stochastic_forward(x), dim=0) return (torch.logsumexp(self.stochastic_forward(x), dim=0) - math.log(self.n_forward))
Forward pass. *Note, this function has a different behaviour in eval mode*. It returns the (log) mean score of :meth:`stochastic_forward` passes. In other words, if `self.training` is `False`, the following is returned instead: .. code:: python # if reduce = "logsumexp" torch.logsumexp(self.stochastic_forward(x), dim=0) - log(self.n_forward) # if reduce = "mean" torch.mean(self.stochastic_forward(x), dim=0) Args: x (`torch.Tensor`): input tensor, any size Returns: `torch.Tensor`: output tensor of size :math:`N \times C` where :math:`N` is the batch size and :math:`C` is the number of target classes. Note: if a single forward pass is required during eval mode, one could use the following instead: `base_model(x)`
experiments/thesis/exploration/very_consistent_presnet/train.py
forward
jiahfong/alr
2
python
def forward(self, x: torch.Tensor) -> torch.Tensor: '\n Forward pass. *Note, this function has a different behaviour in eval mode*.\n It returns the (log) mean score of :meth:`stochastic_forward` passes. In other words,\n if `self.training` is `False`, the following is returned instead:\n\n .. code:: python\n\n # if reduce = "logsumexp"\n torch.logsumexp(self.stochastic_forward(x), dim=0) - log(self.n_forward)\n\n # if reduce = "mean"\n torch.mean(self.stochastic_forward(x), dim=0)\n\n\n Args:\n x (`torch.Tensor`): input tensor, any size\n\n Returns:\n `torch.Tensor`:\n output tensor of size :math:`N \\times C` where\n :math:`N` is the batch size and :math:`C` is the number of target classes.\n\n Note:\n if a single forward pass is required during eval mode, one could use the following\n instead: `base_model(x)`\n ' if self.training: return self._output_transform(self.base_model(x)) if (self._reduce == 'mean'): return torch.mean(self.stochastic_forward(x), dim=0) return (torch.logsumexp(self.stochastic_forward(x), dim=0) - math.log(self.n_forward))
def forward(self, x: torch.Tensor) -> torch.Tensor: '\n Forward pass. *Note, this function has a different behaviour in eval mode*.\n It returns the (log) mean score of :meth:`stochastic_forward` passes. In other words,\n if `self.training` is `False`, the following is returned instead:\n\n .. code:: python\n\n # if reduce = "logsumexp"\n torch.logsumexp(self.stochastic_forward(x), dim=0) - log(self.n_forward)\n\n # if reduce = "mean"\n torch.mean(self.stochastic_forward(x), dim=0)\n\n\n Args:\n x (`torch.Tensor`): input tensor, any size\n\n Returns:\n `torch.Tensor`:\n output tensor of size :math:`N \\times C` where\n :math:`N` is the batch size and :math:`C` is the number of target classes.\n\n Note:\n if a single forward pass is required during eval mode, one could use the following\n instead: `base_model(x)`\n ' if self.training: return self._output_transform(self.base_model(x)) if (self._reduce == 'mean'): return torch.mean(self.stochastic_forward(x), dim=0) return (torch.logsumexp(self.stochastic_forward(x), dim=0) - math.log(self.n_forward))<|docstring|>Forward pass. *Note, this function has a different behaviour in eval mode*. It returns the (log) mean score of :meth:`stochastic_forward` passes. In other words, if `self.training` is `False`, the following is returned instead: .. code:: python # if reduce = "logsumexp" torch.logsumexp(self.stochastic_forward(x), dim=0) - log(self.n_forward) # if reduce = "mean" torch.mean(self.stochastic_forward(x), dim=0) Args: x (`torch.Tensor`): input tensor, any size Returns: `torch.Tensor`: output tensor of size :math:`N \times C` where :math:`N` is the batch size and :math:`C` is the number of target classes. Note: if a single forward pass is required during eval mode, one could use the following instead: `base_model(x)`<|endoftext|>
0866cff8af6e27427394aa75391174cf901d2ebe373d82295ddf084d073e1072
def stochastic_forward(self, x: torch.Tensor) -> torch.Tensor: '\n Returns a :math:`m \\times N \\times C` `torch.Tensor` where:\n\n 1. :math:`m` is equal to `self.n_forward`\n 2. :math:`N` is the batch size, equal to `x.size(0)`\n 3. :math:`C` is the number of units in the final layer (e.g. number of classes in a classification model)\n\n Args:\n x (`torch.Tensor`): input tensor\n\n Returns:\n `torch.Tensor`: output tensor of shape :math:`m \\times N \\times C`\n\n Raises:\n RuntimeError: Occurs when the machine runs out of memory and `fast` was set to true.\n ' if self._fast: size = x.size() x = self._repeat_n(x, self.n_forward) assert (x.size() == ((size[0] * self.n_forward), *size[1:])) try: preds = self._output_transform(self.base_model(x)) preds = preds.view(self.n_forward, (- 1), *preds.size()[1:]) except RuntimeError as e: raise RuntimeError('Ran out of memory. Try reducing batch size orreducing the number of MC dropout samples. Alternatively, switch offfast MC dropout.') from e else: preds = torch.stack([self._output_transform(self.base_model(x)) for _ in range(self.n_forward)]) assert (preds.size(0) == self.n_forward) return preds
Returns a :math:`m \times N \times C` `torch.Tensor` where: 1. :math:`m` is equal to `self.n_forward` 2. :math:`N` is the batch size, equal to `x.size(0)` 3. :math:`C` is the number of units in the final layer (e.g. number of classes in a classification model) Args: x (`torch.Tensor`): input tensor Returns: `torch.Tensor`: output tensor of shape :math:`m \times N \times C` Raises: RuntimeError: Occurs when the machine runs out of memory and `fast` was set to true.
experiments/thesis/exploration/very_consistent_presnet/train.py
stochastic_forward
jiahfong/alr
2
python
def stochastic_forward(self, x: torch.Tensor) -> torch.Tensor: '\n Returns a :math:`m \\times N \\times C` `torch.Tensor` where:\n\n 1. :math:`m` is equal to `self.n_forward`\n 2. :math:`N` is the batch size, equal to `x.size(0)`\n 3. :math:`C` is the number of units in the final layer (e.g. number of classes in a classification model)\n\n Args:\n x (`torch.Tensor`): input tensor\n\n Returns:\n `torch.Tensor`: output tensor of shape :math:`m \\times N \\times C`\n\n Raises:\n RuntimeError: Occurs when the machine runs out of memory and `fast` was set to true.\n ' if self._fast: size = x.size() x = self._repeat_n(x, self.n_forward) assert (x.size() == ((size[0] * self.n_forward), *size[1:])) try: preds = self._output_transform(self.base_model(x)) preds = preds.view(self.n_forward, (- 1), *preds.size()[1:]) except RuntimeError as e: raise RuntimeError('Ran out of memory. Try reducing batch size orreducing the number of MC dropout samples. Alternatively, switch offfast MC dropout.') from e else: preds = torch.stack([self._output_transform(self.base_model(x)) for _ in range(self.n_forward)]) assert (preds.size(0) == self.n_forward) return preds
def stochastic_forward(self, x: torch.Tensor) -> torch.Tensor: '\n Returns a :math:`m \\times N \\times C` `torch.Tensor` where:\n\n 1. :math:`m` is equal to `self.n_forward`\n 2. :math:`N` is the batch size, equal to `x.size(0)`\n 3. :math:`C` is the number of units in the final layer (e.g. number of classes in a classification model)\n\n Args:\n x (`torch.Tensor`): input tensor\n\n Returns:\n `torch.Tensor`: output tensor of shape :math:`m \\times N \\times C`\n\n Raises:\n RuntimeError: Occurs when the machine runs out of memory and `fast` was set to true.\n ' if self._fast: size = x.size() x = self._repeat_n(x, self.n_forward) assert (x.size() == ((size[0] * self.n_forward), *size[1:])) try: preds = self._output_transform(self.base_model(x)) preds = preds.view(self.n_forward, (- 1), *preds.size()[1:]) except RuntimeError as e: raise RuntimeError('Ran out of memory. Try reducing batch size orreducing the number of MC dropout samples. Alternatively, switch offfast MC dropout.') from e else: preds = torch.stack([self._output_transform(self.base_model(x)) for _ in range(self.n_forward)]) assert (preds.size(0) == self.n_forward) return preds<|docstring|>Returns a :math:`m \times N \times C` `torch.Tensor` where: 1. :math:`m` is equal to `self.n_forward` 2. :math:`N` is the batch size, equal to `x.size(0)` 3. :math:`C` is the number of units in the final layer (e.g. number of classes in a classification model) Args: x (`torch.Tensor`): input tensor Returns: `torch.Tensor`: output tensor of shape :math:`m \times N \times C` Raises: RuntimeError: Occurs when the machine runs out of memory and `fast` was set to true.<|endoftext|>
7b4dcb64152285eee87aebc634aea0cf00e7fc6d83d9696b562a84cbc3f65760
@staticmethod def _repeat_n(x: torch.Tensor, n: int) -> torch.Tensor: '\n Repeat the data in x `n` times along the batch dimension.\n\n Args:\n x (torch.Tensor): input tensor, the batch dimension is assumed to be 0.\n n (int): number of repeats\n\n Returns:\n torch.Tensor: output tensor\n\n Raises:\n RuntimeError: Occurs when the machine runs out of memory.\n ' try: out = x.repeat(n, *([1] * (x.ndim - 1))) except RuntimeError as e: raise RuntimeError('Ran out of memory. Try reducing batch size orreducing the number of MC dropout samples. Alternatively, switch offfast MC dropout.') from e return out
Repeat the data in x `n` times along the batch dimension. Args: x (torch.Tensor): input tensor, the batch dimension is assumed to be 0. n (int): number of repeats Returns: torch.Tensor: output tensor Raises: RuntimeError: Occurs when the machine runs out of memory.
experiments/thesis/exploration/very_consistent_presnet/train.py
_repeat_n
jiahfong/alr
2
python
@staticmethod def _repeat_n(x: torch.Tensor, n: int) -> torch.Tensor: '\n Repeat the data in x `n` times along the batch dimension.\n\n Args:\n x (torch.Tensor): input tensor, the batch dimension is assumed to be 0.\n n (int): number of repeats\n\n Returns:\n torch.Tensor: output tensor\n\n Raises:\n RuntimeError: Occurs when the machine runs out of memory.\n ' try: out = x.repeat(n, *([1] * (x.ndim - 1))) except RuntimeError as e: raise RuntimeError('Ran out of memory. Try reducing batch size orreducing the number of MC dropout samples. Alternatively, switch offfast MC dropout.') from e return out
@staticmethod def _repeat_n(x: torch.Tensor, n: int) -> torch.Tensor: '\n Repeat the data in x `n` times along the batch dimension.\n\n Args:\n x (torch.Tensor): input tensor, the batch dimension is assumed to be 0.\n n (int): number of repeats\n\n Returns:\n torch.Tensor: output tensor\n\n Raises:\n RuntimeError: Occurs when the machine runs out of memory.\n ' try: out = x.repeat(n, *([1] * (x.ndim - 1))) except RuntimeError as e: raise RuntimeError('Ran out of memory. Try reducing batch size orreducing the number of MC dropout samples. Alternatively, switch offfast MC dropout.') from e return out<|docstring|>Repeat the data in x `n` times along the batch dimension. Args: x (torch.Tensor): input tensor, the batch dimension is assumed to be 0. n (int): number of repeats Returns: torch.Tensor: output tensor Raises: RuntimeError: Occurs when the machine runs out of memory.<|endoftext|>
ae396fc9be78cda44dcd681655c38521606cae57ec184aae3add3bd051359228
def __init__(self, id_f_score_blast_P=(- 1), pident=(- 1), length=(- 1), mismatch=(- 1), gapopen=(- 1), pstart=(- 1), pend=(- 1), bstart=(- 1), bend=(- 1), evalue=(- 1), bitscore=(- 1), plen=(- 1), blen=(- 1), FK_ppi_couple=(- 1)): "\n Constructor of the Score Blast P object. All the parameters have a default value\n\n :param id_f_score_blast_P: id of the bast score - -1 if unknown\n :param pident: blast score\n :param length: i don't know - -1 if unknown\n :param mismatch: i don't know - -1 if unknown\n :param gapopen: i don't know - -1 if unknown\n :param pstart: i don't know - -1 if unknown\n :param pend: i don't know - -1 if unknown\n :param bstart: i don't know - -1 if unknown\n :param bend: i don't know - -1 if unknown\n :param evalue: i don't know - -1 if unknown\n :param bitscore: i don't know - -1 if unknown\n :param plen: i don't know - -1 if unknown\n :param blen: i don't know - -1 if unknown\n :param FK_ppi_couple: id of the PPI couple - -1 if unknown\n\n\n :type id_f_score_blast_P: int - required\n :type pident: float - required \n :type length: int - required\n :type mismatch: int - required\n :type gapopen: int - required\n :type pstart: int - required\n :type pend: int - required\n :type bstart: int - required\n :type bend: int - required\n :type evalue: double - required\n :type bitscore: double - required\n :type plen: int - required\n :type blen: int - required\n :type FK_ppi_couple: int - required\n " self.id_f_score_blast_P = id_f_score_blast_P self.pident = pident self.length = length self.mismatch = mismatch self.gapopen = gapopen self.pstart = pstart self.pend = pend self.bstart = bstart self.bend = bend self.evalue = evalue self.bitscore = bitscore self.plen = plen self.blen = blen self.FK_ppi_couple = FK_ppi_couple
Constructor of the Score Blast P object. All the parameters have a default value :param id_f_score_blast_P: id of the bast score - -1 if unknown :param pident: blast score :param length: i don't know - -1 if unknown :param mismatch: i don't know - -1 if unknown :param gapopen: i don't know - -1 if unknown :param pstart: i don't know - -1 if unknown :param pend: i don't know - -1 if unknown :param bstart: i don't know - -1 if unknown :param bend: i don't know - -1 if unknown :param evalue: i don't know - -1 if unknown :param bitscore: i don't know - -1 if unknown :param plen: i don't know - -1 if unknown :param blen: i don't know - -1 if unknown :param FK_ppi_couple: id of the PPI couple - -1 if unknown :type id_f_score_blast_P: int - required :type pident: float - required :type length: int - required :type mismatch: int - required :type gapopen: int - required :type pstart: int - required :type pend: int - required :type bstart: int - required :type bend: int - required :type evalue: double - required :type bitscore: double - required :type plen: int - required :type blen: int - required :type FK_ppi_couple: int - required
objects_new/F_score_blast_P_new.py
__init__
VictorTruan/inphinity
1
python
def __init__(self, id_f_score_blast_P=(- 1), pident=(- 1), length=(- 1), mismatch=(- 1), gapopen=(- 1), pstart=(- 1), pend=(- 1), bstart=(- 1), bend=(- 1), evalue=(- 1), bitscore=(- 1), plen=(- 1), blen=(- 1), FK_ppi_couple=(- 1)): "\n Constructor of the Score Blast P object. All the parameters have a default value\n\n :param id_f_score_blast_P: id of the bast score - -1 if unknown\n :param pident: blast score\n :param length: i don't know - -1 if unknown\n :param mismatch: i don't know - -1 if unknown\n :param gapopen: i don't know - -1 if unknown\n :param pstart: i don't know - -1 if unknown\n :param pend: i don't know - -1 if unknown\n :param bstart: i don't know - -1 if unknown\n :param bend: i don't know - -1 if unknown\n :param evalue: i don't know - -1 if unknown\n :param bitscore: i don't know - -1 if unknown\n :param plen: i don't know - -1 if unknown\n :param blen: i don't know - -1 if unknown\n :param FK_ppi_couple: id of the PPI couple - -1 if unknown\n\n\n :type id_f_score_blast_P: int - required\n :type pident: float - required \n :type length: int - required\n :type mismatch: int - required\n :type gapopen: int - required\n :type pstart: int - required\n :type pend: int - required\n :type bstart: int - required\n :type bend: int - required\n :type evalue: double - required\n :type bitscore: double - required\n :type plen: int - required\n :type blen: int - required\n :type FK_ppi_couple: int - required\n " self.id_f_score_blast_P = id_f_score_blast_P self.pident = pident self.length = length self.mismatch = mismatch self.gapopen = gapopen self.pstart = pstart self.pend = pend self.bstart = bstart self.bend = bend self.evalue = evalue self.bitscore = bitscore self.plen = plen self.blen = blen self.FK_ppi_couple = FK_ppi_couple
def __init__(self, id_f_score_blast_P=(- 1), pident=(- 1), length=(- 1), mismatch=(- 1), gapopen=(- 1), pstart=(- 1), pend=(- 1), bstart=(- 1), bend=(- 1), evalue=(- 1), bitscore=(- 1), plen=(- 1), blen=(- 1), FK_ppi_couple=(- 1)): "\n Constructor of the Score Blast P object. All the parameters have a default value\n\n :param id_f_score_blast_P: id of the bast score - -1 if unknown\n :param pident: blast score\n :param length: i don't know - -1 if unknown\n :param mismatch: i don't know - -1 if unknown\n :param gapopen: i don't know - -1 if unknown\n :param pstart: i don't know - -1 if unknown\n :param pend: i don't know - -1 if unknown\n :param bstart: i don't know - -1 if unknown\n :param bend: i don't know - -1 if unknown\n :param evalue: i don't know - -1 if unknown\n :param bitscore: i don't know - -1 if unknown\n :param plen: i don't know - -1 if unknown\n :param blen: i don't know - -1 if unknown\n :param FK_ppi_couple: id of the PPI couple - -1 if unknown\n\n\n :type id_f_score_blast_P: int - required\n :type pident: float - required \n :type length: int - required\n :type mismatch: int - required\n :type gapopen: int - required\n :type pstart: int - required\n :type pend: int - required\n :type bstart: int - required\n :type bend: int - required\n :type evalue: double - required\n :type bitscore: double - required\n :type plen: int - required\n :type blen: int - required\n :type FK_ppi_couple: int - required\n " self.id_f_score_blast_P = id_f_score_blast_P self.pident = pident self.length = length self.mismatch = mismatch self.gapopen = gapopen self.pstart = pstart self.pend = pend self.bstart = bstart self.bend = bend self.evalue = evalue self.bitscore = bitscore self.plen = plen self.blen = blen self.FK_ppi_couple = FK_ppi_couple<|docstring|>Constructor of the Score Blast P object. 
All the parameters have a default value :param id_f_score_blast_P: id of the bast score - -1 if unknown :param pident: blast score :param length: i don't know - -1 if unknown :param mismatch: i don't know - -1 if unknown :param gapopen: i don't know - -1 if unknown :param pstart: i don't know - -1 if unknown :param pend: i don't know - -1 if unknown :param bstart: i don't know - -1 if unknown :param bend: i don't know - -1 if unknown :param evalue: i don't know - -1 if unknown :param bitscore: i don't know - -1 if unknown :param plen: i don't know - -1 if unknown :param blen: i don't know - -1 if unknown :param FK_ppi_couple: id of the PPI couple - -1 if unknown :type id_f_score_blast_P: int - required :type pident: float - required :type length: int - required :type mismatch: int - required :type gapopen: int - required :type pstart: int - required :type pend: int - required :type bstart: int - required :type bend: int - required :type evalue: double - required :type bitscore: double - required :type plen: int - required :type blen: int - required :type FK_ppi_couple: int - required<|endoftext|>
4514682f3cdce48ab25b165caecdc5e2271c1d4434bff5e1c02802ab4c2c46ac
def get_all_f_blast_p_scores(self): '\n return an array with all the baslt score in the database\n\n :return: array of score\n :rtype: array(F_score_blast)\n ' listOfScoreBlast = [] sqlObj = _F_score_blast_P_sql() results = sqlObj.select_all_score_blast_p_all_attributes() for element in results: listOfScoreBlast.append(F_score_blast_P(element[0], element[1], element[2], element[3], element[4], element[5], element[6], element[7], element[8], element[9], element[10], element[11], element[12], element[13])) return listOfScoreBlast
return an array with all the baslt score in the database :return: array of score :rtype: array(F_score_blast)
objects_new/F_score_blast_P_new.py
get_all_f_blast_p_scores
VictorTruan/inphinity
1
python
def get_all_f_blast_p_scores(self): '\n return an array with all the baslt score in the database\n\n :return: array of score\n :rtype: array(F_score_blast)\n ' listOfScoreBlast = [] sqlObj = _F_score_blast_P_sql() results = sqlObj.select_all_score_blast_p_all_attributes() for element in results: listOfScoreBlast.append(F_score_blast_P(element[0], element[1], element[2], element[3], element[4], element[5], element[6], element[7], element[8], element[9], element[10], element[11], element[12], element[13])) return listOfScoreBlast
def get_all_f_blast_p_scores(self): '\n return an array with all the baslt score in the database\n\n :return: array of score\n :rtype: array(F_score_blast)\n ' listOfScoreBlast = [] sqlObj = _F_score_blast_P_sql() results = sqlObj.select_all_score_blast_p_all_attributes() for element in results: listOfScoreBlast.append(F_score_blast_P(element[0], element[1], element[2], element[3], element[4], element[5], element[6], element[7], element[8], element[9], element[10], element[11], element[12], element[13])) return listOfScoreBlast<|docstring|>return an array with all the baslt score in the database :return: array of score :rtype: array(F_score_blast)<|endoftext|>
93ccc4a0480c47f33f6c589c4cc983c6a64a56ba91b9f05c13ed51b9e56de90a
def create_f_score_blast_p_no_verification(self): '\n Insert a f_score_blast in the database WITHOUT ANY VERIFICATION\n \n The id of the f_score_blase is updated\n\n :return: id of the f_score_blast created\n :rtype: int\n ' value_f_score_blast = None sqlObj = _F_score_blast_P_sql() value_f_score_blast = sqlObj.insert_f_score_blast_p(self.pident, self.length, self.mismatch, self.gapopen, self.pstart, self.pend, self.bstart, self.bend, self.evalue, self.bitscore, self.plen, self.blen, self.FK_ppi_couple) self.id_f_score_blast_P = value_f_score_blast return value_f_score_blast
Insert a f_score_blast in the database WITHOUT ANY VERIFICATION The id of the f_score_blase is updated :return: id of the f_score_blast created :rtype: int
objects_new/F_score_blast_P_new.py
create_f_score_blast_p_no_verification
VictorTruan/inphinity
1
python
def create_f_score_blast_p_no_verification(self): '\n Insert a f_score_blast in the database WITHOUT ANY VERIFICATION\n \n The id of the f_score_blase is updated\n\n :return: id of the f_score_blast created\n :rtype: int\n ' value_f_score_blast = None sqlObj = _F_score_blast_P_sql() value_f_score_blast = sqlObj.insert_f_score_blast_p(self.pident, self.length, self.mismatch, self.gapopen, self.pstart, self.pend, self.bstart, self.bend, self.evalue, self.bitscore, self.plen, self.blen, self.FK_ppi_couple) self.id_f_score_blast_P = value_f_score_blast return value_f_score_blast
def create_f_score_blast_p_no_verification(self): '\n Insert a f_score_blast in the database WITHOUT ANY VERIFICATION\n \n The id of the f_score_blase is updated\n\n :return: id of the f_score_blast created\n :rtype: int\n ' value_f_score_blast = None sqlObj = _F_score_blast_P_sql() value_f_score_blast = sqlObj.insert_f_score_blast_p(self.pident, self.length, self.mismatch, self.gapopen, self.pstart, self.pend, self.bstart, self.bend, self.evalue, self.bitscore, self.plen, self.blen, self.FK_ppi_couple) self.id_f_score_blast_P = value_f_score_blast return value_f_score_blast<|docstring|>Insert a f_score_blast in the database WITHOUT ANY VERIFICATION The id of the f_score_blase is updated :return: id of the f_score_blast created :rtype: int<|endoftext|>
6941ddee1859777a4013dcf83ac7025940b39fa4deb313f5207460473790f325
def delete_FK_score_bast_P_by_fk_PPI_couple(FK_ppi_couple): '\n remove a FK_score_bast_P given fk_protein\n :NOTE it verify for the phage and bacterium protein\n\n :param id_protein: id of the protein\n\n :type id_protein: int - required\n\n :return: COG_prot it removed\n :rtype: int\n ' sqlObj = _F_score_blast_P_sql() id_couple = sqlObj.remove_F_score_blast_p_by_prot_id(FK_ppi_couple) return id_couple
remove a FK_score_bast_P given fk_protein :NOTE it verify for the phage and bacterium protein :param id_protein: id of the protein :type id_protein: int - required :return: COG_prot it removed :rtype: int
objects_new/F_score_blast_P_new.py
delete_FK_score_bast_P_by_fk_PPI_couple
VictorTruan/inphinity
1
python
def delete_FK_score_bast_P_by_fk_PPI_couple(FK_ppi_couple): '\n remove a FK_score_bast_P given fk_protein\n :NOTE it verify for the phage and bacterium protein\n\n :param id_protein: id of the protein\n\n :type id_protein: int - required\n\n :return: COG_prot it removed\n :rtype: int\n ' sqlObj = _F_score_blast_P_sql() id_couple = sqlObj.remove_F_score_blast_p_by_prot_id(FK_ppi_couple) return id_couple
def delete_FK_score_bast_P_by_fk_PPI_couple(FK_ppi_couple): '\n remove a FK_score_bast_P given fk_protein\n :NOTE it verify for the phage and bacterium protein\n\n :param id_protein: id of the protein\n\n :type id_protein: int - required\n\n :return: COG_prot it removed\n :rtype: int\n ' sqlObj = _F_score_blast_P_sql() id_couple = sqlObj.remove_F_score_blast_p_by_prot_id(FK_ppi_couple) return id_couple<|docstring|>remove a FK_score_bast_P given fk_protein :NOTE it verify for the phage and bacterium protein :param id_protein: id of the protein :type id_protein: int - required :return: COG_prot it removed :rtype: int<|endoftext|>
e5f878e53e4eaa60d0dfe85ff96de1ecd8feb202521e946874c94ab38d48e467
def __str__(self): '\n Overwrite of the str method\n ' message_str = 'ID Blast P: {0:d}, FK PPI couple: {1:d}, Score {4:.3f}, pident: {5:.3f}, length: {6:d}, mismatch: {7:d}, gapopen: {8:d}, pstart: {9:d}, pend: {10:d}, bstart: {11:d}, bend: {12:d}, bitscore: {13:.3f}, plen: {14:d}, blen: {15:d}'.format(self.id_f_score_blast_P, self.FK_ppi_couple, self.evalue, self.pident, self.length, self.mismatch, self.gapopen, self.pstart, self.pend, self.bstart, self.bend, self.bitscore, self.plen, self.blen) return message_str
Overwrite of the str method
objects_new/F_score_blast_P_new.py
__str__
VictorTruan/inphinity
1
python
def __str__(self): '\n \n ' message_str = 'ID Blast P: {0:d}, FK PPI couple: {1:d}, Score {4:.3f}, pident: {5:.3f}, length: {6:d}, mismatch: {7:d}, gapopen: {8:d}, pstart: {9:d}, pend: {10:d}, bstart: {11:d}, bend: {12:d}, bitscore: {13:.3f}, plen: {14:d}, blen: {15:d}'.format(self.id_f_score_blast_P, self.FK_ppi_couple, self.evalue, self.pident, self.length, self.mismatch, self.gapopen, self.pstart, self.pend, self.bstart, self.bend, self.bitscore, self.plen, self.blen) return message_str
def __str__(self): '\n \n ' message_str = 'ID Blast P: {0:d}, FK PPI couple: {1:d}, Score {4:.3f}, pident: {5:.3f}, length: {6:d}, mismatch: {7:d}, gapopen: {8:d}, pstart: {9:d}, pend: {10:d}, bstart: {11:d}, bend: {12:d}, bitscore: {13:.3f}, plen: {14:d}, blen: {15:d}'.format(self.id_f_score_blast_P, self.FK_ppi_couple, self.evalue, self.pident, self.length, self.mismatch, self.gapopen, self.pstart, self.pend, self.bstart, self.bend, self.bitscore, self.plen, self.blen) return message_str<|docstring|>Overwrite of the str method<|endoftext|>
b8fe3cc77519eebcee6adfc1944344e5ef15d79b0d9dadbcac4c12c865452bc8
def initiate_fast_reaction(data_dir): '\n intiate file named "reaction_info_base.json" based on file named "reaction_labelling.csv"\n ' (new_old_index_dict, new_ind_reaction_dict) = psri.parse_reaction_and_its_index(data_dir) rxn_pair_dict = dict() unpaired = dict() for (_, val1) in enumerate(new_old_index_dict): this_value = int(new_old_index_dict[val1]) neg_value = ((- 1) * this_value) if (neg_value in unpaired): rxn_pair_dict.update({int(val1): unpaired[neg_value]}) rxn_pair_dict.update({unpaired[neg_value]: int(val1)}) unpaired.pop(neg_value) else: unpaired.update({this_value: int(val1)}) rxn_info = {} for (_, val1) in enumerate(new_ind_reaction_dict): entry = {str(val1): {'formula': new_ind_reaction_dict[val1], 'time_scale': (- 100), 'reverse_reaction': 'None'}} if (int(val1) in rxn_pair_dict): entry[str(val1)]['reverse_reaction'] = str(rxn_pair_dict[int(val1)]) rxn_info.update(entry) fn0 = os.path.join(data_dir, 'input', 'reaction_info_base_backup.json') fn1 = os.path.join(data_dir, 'input', 'reaction_info_base.json') if os.path.isfile(fn1): copy2(fn1, fn0) rwc.write_configuration(rxn_info, fn1)
intiate file named "reaction_info_base.json" based on file named "reaction_labelling.csv"
chattering.py
initiate_fast_reaction
AdamPI314/catalytic_cycle
0
python
def initiate_fast_reaction(data_dir): '\n \n ' (new_old_index_dict, new_ind_reaction_dict) = psri.parse_reaction_and_its_index(data_dir) rxn_pair_dict = dict() unpaired = dict() for (_, val1) in enumerate(new_old_index_dict): this_value = int(new_old_index_dict[val1]) neg_value = ((- 1) * this_value) if (neg_value in unpaired): rxn_pair_dict.update({int(val1): unpaired[neg_value]}) rxn_pair_dict.update({unpaired[neg_value]: int(val1)}) unpaired.pop(neg_value) else: unpaired.update({this_value: int(val1)}) rxn_info = {} for (_, val1) in enumerate(new_ind_reaction_dict): entry = {str(val1): {'formula': new_ind_reaction_dict[val1], 'time_scale': (- 100), 'reverse_reaction': 'None'}} if (int(val1) in rxn_pair_dict): entry[str(val1)]['reverse_reaction'] = str(rxn_pair_dict[int(val1)]) rxn_info.update(entry) fn0 = os.path.join(data_dir, 'input', 'reaction_info_base_backup.json') fn1 = os.path.join(data_dir, 'input', 'reaction_info_base.json') if os.path.isfile(fn1): copy2(fn1, fn0) rwc.write_configuration(rxn_info, fn1)
def initiate_fast_reaction(data_dir): '\n \n ' (new_old_index_dict, new_ind_reaction_dict) = psri.parse_reaction_and_its_index(data_dir) rxn_pair_dict = dict() unpaired = dict() for (_, val1) in enumerate(new_old_index_dict): this_value = int(new_old_index_dict[val1]) neg_value = ((- 1) * this_value) if (neg_value in unpaired): rxn_pair_dict.update({int(val1): unpaired[neg_value]}) rxn_pair_dict.update({unpaired[neg_value]: int(val1)}) unpaired.pop(neg_value) else: unpaired.update({this_value: int(val1)}) rxn_info = {} for (_, val1) in enumerate(new_ind_reaction_dict): entry = {str(val1): {'formula': new_ind_reaction_dict[val1], 'time_scale': (- 100), 'reverse_reaction': 'None'}} if (int(val1) in rxn_pair_dict): entry[str(val1)]['reverse_reaction'] = str(rxn_pair_dict[int(val1)]) rxn_info.update(entry) fn0 = os.path.join(data_dir, 'input', 'reaction_info_base_backup.json') fn1 = os.path.join(data_dir, 'input', 'reaction_info_base.json') if os.path.isfile(fn1): copy2(fn1, fn0) rwc.write_configuration(rxn_info, fn1)<|docstring|>intiate file named "reaction_info_base.json" based on file named "reaction_labelling.csv"<|endoftext|>
a9191a522b93d3f8386bbc42594464fa73601868cbf5ed643a4bf89c0282c972
def update_fast_reaction(data_dir, tau=0.7, end_t=1.0, tag='M'): '\n update fast reaction based on reference trajectory\n ' fn0 = os.path.join(data_dir, 'input', 'reaction_info_base_backup.json') fn1 = os.path.join(data_dir, 'input', 'reaction_info_base.json') rxn_info = rwc.read_configuration(fn1) time_v = np.loadtxt(os.path.join(data_dir, 'output', (('time_dlsode_' + str(tag)) + '.csv')), delimiter=',') rxn_rates = np.loadtxt(os.path.join(data_dir, 'output', (('reaction_rate_dlsode_' + str(tag)) + '.csv')), delimiter=',') actual_time = (float(tau) * float(end_t)) for (_, val) in enumerate(rxn_info): actual_rate = interpolation.interp1d(time_v, rxn_rates[(:, int(val))], actual_time) if (actual_rate != 0): time_scale = np.log10(actual_rate) if ((time_scale >= (- 100)) and (time_scale <= 100)): rxn_info[val]['time_scale'] = time_scale if os.path.isfile(fn1): copy2(fn1, fn0) rwc.write_configuration(rxn_info, fn1)
update fast reaction based on reference trajectory
chattering.py
update_fast_reaction
AdamPI314/catalytic_cycle
0
python
def update_fast_reaction(data_dir, tau=0.7, end_t=1.0, tag='M'): '\n \n ' fn0 = os.path.join(data_dir, 'input', 'reaction_info_base_backup.json') fn1 = os.path.join(data_dir, 'input', 'reaction_info_base.json') rxn_info = rwc.read_configuration(fn1) time_v = np.loadtxt(os.path.join(data_dir, 'output', (('time_dlsode_' + str(tag)) + '.csv')), delimiter=',') rxn_rates = np.loadtxt(os.path.join(data_dir, 'output', (('reaction_rate_dlsode_' + str(tag)) + '.csv')), delimiter=',') actual_time = (float(tau) * float(end_t)) for (_, val) in enumerate(rxn_info): actual_rate = interpolation.interp1d(time_v, rxn_rates[(:, int(val))], actual_time) if (actual_rate != 0): time_scale = np.log10(actual_rate) if ((time_scale >= (- 100)) and (time_scale <= 100)): rxn_info[val]['time_scale'] = time_scale if os.path.isfile(fn1): copy2(fn1, fn0) rwc.write_configuration(rxn_info, fn1)
def update_fast_reaction(data_dir, tau=0.7, end_t=1.0, tag='M'): '\n \n ' fn0 = os.path.join(data_dir, 'input', 'reaction_info_base_backup.json') fn1 = os.path.join(data_dir, 'input', 'reaction_info_base.json') rxn_info = rwc.read_configuration(fn1) time_v = np.loadtxt(os.path.join(data_dir, 'output', (('time_dlsode_' + str(tag)) + '.csv')), delimiter=',') rxn_rates = np.loadtxt(os.path.join(data_dir, 'output', (('reaction_rate_dlsode_' + str(tag)) + '.csv')), delimiter=',') actual_time = (float(tau) * float(end_t)) for (_, val) in enumerate(rxn_info): actual_rate = interpolation.interp1d(time_v, rxn_rates[(:, int(val))], actual_time) if (actual_rate != 0): time_scale = np.log10(actual_rate) if ((time_scale >= (- 100)) and (time_scale <= 100)): rxn_info[val]['time_scale'] = time_scale if os.path.isfile(fn1): copy2(fn1, fn0) rwc.write_configuration(rxn_info, fn1)<|docstring|>update fast reaction based on reference trajectory<|endoftext|>
15027d2bac890f10649e11c3cd7c500cbd696c344a57ec7a8b72ea0b865ae1c2
def fast_reaction_w2f(data_dir, threshold=(- 7)): '\n prepare "fast_reaction.json" file, this file will be\n 1) manually changed later\n 2) used to generate chattering information later\n ' fast_transition = {} counter = 0 fn_rib = os.path.join(data_dir, 'input', 'reaction_info_base.json') rxn_info = rwc.read_configuration(fn_rib) unpaired_fast_rxn = set() for (_, val) in enumerate(rxn_info): if ((rxn_info[val]['reverse_reaction'] != 'None') and (float(rxn_info[val]['time_scale']) >= threshold)): this_rxn = str(val) paired_rxn = rxn_info[val]['reverse_reaction'] if (paired_rxn not in unpaired_fast_rxn): unpaired_fast_rxn.add(this_rxn) else: entry = {str(counter): {'formula1': rxn_info[paired_rxn]['formula'], 'formula2': rxn_info[this_rxn]['formula'], 'reaction1': int(paired_rxn), 'reaction2': int(this_rxn)}} fast_transition.update(entry) counter += 1 fn_frb0 = os.path.join(data_dir, 'input', 'fast_reaction_base_backup.json') fn_frb1 = os.path.join(data_dir, 'input', 'fast_reaction_base.json') if os.path.isfile(fn_frb1): copy2(fn_frb1, fn_frb0) rwc.write_configuration(fast_transition, fn_frb1)
prepare "fast_reaction.json" file, this file will be 1) manually changed later 2) used to generate chattering information later
chattering.py
fast_reaction_w2f
AdamPI314/catalytic_cycle
0
python
def fast_reaction_w2f(data_dir, threshold=(- 7)): '\n prepare "fast_reaction.json" file, this file will be\n 1) manually changed later\n 2) used to generate chattering information later\n ' fast_transition = {} counter = 0 fn_rib = os.path.join(data_dir, 'input', 'reaction_info_base.json') rxn_info = rwc.read_configuration(fn_rib) unpaired_fast_rxn = set() for (_, val) in enumerate(rxn_info): if ((rxn_info[val]['reverse_reaction'] != 'None') and (float(rxn_info[val]['time_scale']) >= threshold)): this_rxn = str(val) paired_rxn = rxn_info[val]['reverse_reaction'] if (paired_rxn not in unpaired_fast_rxn): unpaired_fast_rxn.add(this_rxn) else: entry = {str(counter): {'formula1': rxn_info[paired_rxn]['formula'], 'formula2': rxn_info[this_rxn]['formula'], 'reaction1': int(paired_rxn), 'reaction2': int(this_rxn)}} fast_transition.update(entry) counter += 1 fn_frb0 = os.path.join(data_dir, 'input', 'fast_reaction_base_backup.json') fn_frb1 = os.path.join(data_dir, 'input', 'fast_reaction_base.json') if os.path.isfile(fn_frb1): copy2(fn_frb1, fn_frb0) rwc.write_configuration(fast_transition, fn_frb1)
def fast_reaction_w2f(data_dir, threshold=(- 7)): '\n prepare "fast_reaction.json" file, this file will be\n 1) manually changed later\n 2) used to generate chattering information later\n ' fast_transition = {} counter = 0 fn_rib = os.path.join(data_dir, 'input', 'reaction_info_base.json') rxn_info = rwc.read_configuration(fn_rib) unpaired_fast_rxn = set() for (_, val) in enumerate(rxn_info): if ((rxn_info[val]['reverse_reaction'] != 'None') and (float(rxn_info[val]['time_scale']) >= threshold)): this_rxn = str(val) paired_rxn = rxn_info[val]['reverse_reaction'] if (paired_rxn not in unpaired_fast_rxn): unpaired_fast_rxn.add(this_rxn) else: entry = {str(counter): {'formula1': rxn_info[paired_rxn]['formula'], 'formula2': rxn_info[this_rxn]['formula'], 'reaction1': int(paired_rxn), 'reaction2': int(this_rxn)}} fast_transition.update(entry) counter += 1 fn_frb0 = os.path.join(data_dir, 'input', 'fast_reaction_base_backup.json') fn_frb1 = os.path.join(data_dir, 'input', 'fast_reaction_base.json') if os.path.isfile(fn_frb1): copy2(fn_frb1, fn_frb0) rwc.write_configuration(fast_transition, fn_frb1)<|docstring|>prepare "fast_reaction.json" file, this file will be 1) manually changed later 2) used to generate chattering information later<|endoftext|>
8b54f2b56d8880b3df451d2ae1a5a73e523236b0d43e21b0778a3114a6651eb9
def generate_fast_rxn_chattering_spe(data_dir): '\n generate fast reaction and chattering species based on four files\n 0) species_information.json\n 1) reaction_information.json\n 2) atom_scheme.json\n 3) fast_reaction_base.json\n\n save file named "fast_transition.json", this file will be used to update file\n "local_settings.py" manually\n ' f_n_si = os.path.join(data_dir, 'input', 'species_information.json') f_n_ri = os.path.join(data_dir, 'input', 'reaction_information.json') f_n_as = os.path.join(data_dir, 'input', 'atom_scheme.json') f_n_frb = os.path.join(data_dir, 'input', 'fast_reaction_base.json') spe_info = rwc.read_configuration(f_n_si) rxn_info = rwc.read_configuration(f_n_ri) atom_scheme = rwc.read_configuration(f_n_as) fast_rxn_base = rwc.read_configuration(f_n_frb) fast_transition = [] for (_, val1) in enumerate(fast_rxn_base): entry = {} rxn_1_idx = fast_rxn_base[val1]['reaction1'] rxn_2_idx = fast_rxn_base[val1]['reaction2'] reactant1 = rxn_info[str(rxn_1_idx)]['net_reactant'] reactant2 = rxn_info[str(rxn_2_idx)]['net_reactant'] entry.update({'formula1': fast_rxn_base[val1]['formula1']}) entry.update({'formula2': fast_rxn_base[val1]['formula2']}) entry.update({'rxn': [int(rxn_1_idx), int(rxn_2_idx)]}) entry.update({'spe': {}}) s_1_idx = 'None' s_2_idx = 'None' for atom_followed in atom_scheme: for (_, val2) in enumerate(reactant1): spe_idx = reactant1[val2]['species_index'] spe_name = spe_info[spe_idx]['name'] if (spe_name in atom_scheme[atom_followed]): s_1_idx = str(spe_idx) break for (_, val2) in enumerate(reactant2): spe_idx = reactant2[val2]['species_index'] spe_name = spe_info[spe_idx]['name'] if (spe_name in atom_scheme[atom_followed]): s_2_idx = str(spe_idx) break if ((s_1_idx != 'None') and (s_2_idx != 'None')): entry['spe'].update({atom_followed: [int(s_1_idx), int(s_2_idx)]}) fast_transition.append(entry) fn_ft0 = os.path.join(data_dir, 'input', 'fast_transition_backup.json') fn_ft1 = os.path.join(data_dir, 'input', 
'fast_transition.json') if os.path.isfile(fn_ft1): copy2(fn_ft1, fn_ft0) rwc.write_configuration(fast_transition, fn_ft1)
generate fast reaction and chattering species based on four files 0) species_information.json 1) reaction_information.json 2) atom_scheme.json 3) fast_reaction_base.json save file named "fast_transition.json", this file will be used to update file "local_settings.py" manually
chattering.py
generate_fast_rxn_chattering_spe
AdamPI314/catalytic_cycle
0
python
def generate_fast_rxn_chattering_spe(data_dir): '\n generate fast reaction and chattering species based on four files\n 0) species_information.json\n 1) reaction_information.json\n 2) atom_scheme.json\n 3) fast_reaction_base.json\n\n save file named "fast_transition.json", this file will be used to update file\n "local_settings.py" manually\n ' f_n_si = os.path.join(data_dir, 'input', 'species_information.json') f_n_ri = os.path.join(data_dir, 'input', 'reaction_information.json') f_n_as = os.path.join(data_dir, 'input', 'atom_scheme.json') f_n_frb = os.path.join(data_dir, 'input', 'fast_reaction_base.json') spe_info = rwc.read_configuration(f_n_si) rxn_info = rwc.read_configuration(f_n_ri) atom_scheme = rwc.read_configuration(f_n_as) fast_rxn_base = rwc.read_configuration(f_n_frb) fast_transition = [] for (_, val1) in enumerate(fast_rxn_base): entry = {} rxn_1_idx = fast_rxn_base[val1]['reaction1'] rxn_2_idx = fast_rxn_base[val1]['reaction2'] reactant1 = rxn_info[str(rxn_1_idx)]['net_reactant'] reactant2 = rxn_info[str(rxn_2_idx)]['net_reactant'] entry.update({'formula1': fast_rxn_base[val1]['formula1']}) entry.update({'formula2': fast_rxn_base[val1]['formula2']}) entry.update({'rxn': [int(rxn_1_idx), int(rxn_2_idx)]}) entry.update({'spe': {}}) s_1_idx = 'None' s_2_idx = 'None' for atom_followed in atom_scheme: for (_, val2) in enumerate(reactant1): spe_idx = reactant1[val2]['species_index'] spe_name = spe_info[spe_idx]['name'] if (spe_name in atom_scheme[atom_followed]): s_1_idx = str(spe_idx) break for (_, val2) in enumerate(reactant2): spe_idx = reactant2[val2]['species_index'] spe_name = spe_info[spe_idx]['name'] if (spe_name in atom_scheme[atom_followed]): s_2_idx = str(spe_idx) break if ((s_1_idx != 'None') and (s_2_idx != 'None')): entry['spe'].update({atom_followed: [int(s_1_idx), int(s_2_idx)]}) fast_transition.append(entry) fn_ft0 = os.path.join(data_dir, 'input', 'fast_transition_backup.json') fn_ft1 = os.path.join(data_dir, 'input', 
'fast_transition.json') if os.path.isfile(fn_ft1): copy2(fn_ft1, fn_ft0) rwc.write_configuration(fast_transition, fn_ft1)
def generate_fast_rxn_chattering_spe(data_dir): '\n generate fast reaction and chattering species based on four files\n 0) species_information.json\n 1) reaction_information.json\n 2) atom_scheme.json\n 3) fast_reaction_base.json\n\n save file named "fast_transition.json", this file will be used to update file\n "local_settings.py" manually\n ' f_n_si = os.path.join(data_dir, 'input', 'species_information.json') f_n_ri = os.path.join(data_dir, 'input', 'reaction_information.json') f_n_as = os.path.join(data_dir, 'input', 'atom_scheme.json') f_n_frb = os.path.join(data_dir, 'input', 'fast_reaction_base.json') spe_info = rwc.read_configuration(f_n_si) rxn_info = rwc.read_configuration(f_n_ri) atom_scheme = rwc.read_configuration(f_n_as) fast_rxn_base = rwc.read_configuration(f_n_frb) fast_transition = [] for (_, val1) in enumerate(fast_rxn_base): entry = {} rxn_1_idx = fast_rxn_base[val1]['reaction1'] rxn_2_idx = fast_rxn_base[val1]['reaction2'] reactant1 = rxn_info[str(rxn_1_idx)]['net_reactant'] reactant2 = rxn_info[str(rxn_2_idx)]['net_reactant'] entry.update({'formula1': fast_rxn_base[val1]['formula1']}) entry.update({'formula2': fast_rxn_base[val1]['formula2']}) entry.update({'rxn': [int(rxn_1_idx), int(rxn_2_idx)]}) entry.update({'spe': {}}) s_1_idx = 'None' s_2_idx = 'None' for atom_followed in atom_scheme: for (_, val2) in enumerate(reactant1): spe_idx = reactant1[val2]['species_index'] spe_name = spe_info[spe_idx]['name'] if (spe_name in atom_scheme[atom_followed]): s_1_idx = str(spe_idx) break for (_, val2) in enumerate(reactant2): spe_idx = reactant2[val2]['species_index'] spe_name = spe_info[spe_idx]['name'] if (spe_name in atom_scheme[atom_followed]): s_2_idx = str(spe_idx) break if ((s_1_idx != 'None') and (s_2_idx != 'None')): entry['spe'].update({atom_followed: [int(s_1_idx), int(s_2_idx)]}) fast_transition.append(entry) fn_ft0 = os.path.join(data_dir, 'input', 'fast_transition_backup.json') fn_ft1 = os.path.join(data_dir, 'input', 
'fast_transition.json') if os.path.isfile(fn_ft1): copy2(fn_ft1, fn_ft0) rwc.write_configuration(fast_transition, fn_ft1)<|docstring|>generate fast reaction and chattering species based on four files 0) species_information.json 1) reaction_information.json 2) atom_scheme.json 3) fast_reaction_base.json save file named "fast_transition.json", this file will be used to update file "local_settings.py" manually<|endoftext|>
69bd15726f77e59536908b362607a70bfc7ce92a9aa20b241c2ecab8750c2a82
@mock.patch('cumulusci.core.config.OrgConfig._fetch_community_info') def test_community_info(self, mock_fetch): 'Verify that get_community_info returns data from the cache' config = OrgConfig({}, 'test') config._community_info_cache = {'Kōkua': {'name': 'Kōkua'}} info = config.get_community_info('Kōkua') self.assertEqual(info['name'], 'Kōkua') mock_fetch.assert_not_called()
Verify that get_community_info returns data from the cache
cumulusci/core/tests/test_config.py
test_community_info
umeditor/CumulusCI
0
python
@mock.patch('cumulusci.core.config.OrgConfig._fetch_community_info') def test_community_info(self, mock_fetch): config = OrgConfig({}, 'test') config._community_info_cache = {'Kōkua': {'name': 'Kōkua'}} info = config.get_community_info('Kōkua') self.assertEqual(info['name'], 'Kōkua') mock_fetch.assert_not_called()
@mock.patch('cumulusci.core.config.OrgConfig._fetch_community_info') def test_community_info(self, mock_fetch): config = OrgConfig({}, 'test') config._community_info_cache = {'Kōkua': {'name': 'Kōkua'}} info = config.get_community_info('Kōkua') self.assertEqual(info['name'], 'Kōkua') mock_fetch.assert_not_called()<|docstring|>Verify that get_community_info returns data from the cache<|endoftext|>
8ae7373fa240137a710534a98c2d29229285316e73afac3fc22040e84f457e79
@mock.patch('cumulusci.core.config.OrgConfig._fetch_community_info') def test_community_info_auto_refresh_cache(self, mock_fetch): 'Verify that the internal cache is automatically refreshed\n\n The cache should be refreshed automatically if the requested community\n is not in the cache.\n ' mock_fetch.return_value = {'Kōkua': {'name': 'Kōkua'}} config = OrgConfig({}, 'test') config._community_info_cache = {} info = config.get_community_info('Kōkua') mock_fetch.assert_called() self.assertEqual(info['name'], 'Kōkua')
Verify that the internal cache is automatically refreshed The cache should be refreshed automatically if the requested community is not in the cache.
cumulusci/core/tests/test_config.py
test_community_info_auto_refresh_cache
umeditor/CumulusCI
0
python
@mock.patch('cumulusci.core.config.OrgConfig._fetch_community_info') def test_community_info_auto_refresh_cache(self, mock_fetch): 'Verify that the internal cache is automatically refreshed\n\n The cache should be refreshed automatically if the requested community\n is not in the cache.\n ' mock_fetch.return_value = {'Kōkua': {'name': 'Kōkua'}} config = OrgConfig({}, 'test') config._community_info_cache = {} info = config.get_community_info('Kōkua') mock_fetch.assert_called() self.assertEqual(info['name'], 'Kōkua')
@mock.patch('cumulusci.core.config.OrgConfig._fetch_community_info') def test_community_info_auto_refresh_cache(self, mock_fetch): 'Verify that the internal cache is automatically refreshed\n\n The cache should be refreshed automatically if the requested community\n is not in the cache.\n ' mock_fetch.return_value = {'Kōkua': {'name': 'Kōkua'}} config = OrgConfig({}, 'test') config._community_info_cache = {} info = config.get_community_info('Kōkua') mock_fetch.assert_called() self.assertEqual(info['name'], 'Kōkua')<|docstring|>Verify that the internal cache is automatically refreshed The cache should be refreshed automatically if the requested community is not in the cache.<|endoftext|>
054229845869646a1174de896882da57969d586ae38576c5be526c07dde47e5a
@mock.patch('cumulusci.core.config.OrgConfig._fetch_community_info') def test_community_info_force_refresh(self, mock_fetch): 'Verify that the force_refresh parameter has an effect' mock_fetch.return_value = {'Kōkua': {'name': 'Kōkua'}} config = OrgConfig({}, 'test') config._community_info_cache = {'Kōkua': {'name': 'Kōkua'}} config.get_community_info('Kōkua') mock_fetch.assert_not_called() config.get_community_info('Kōkua', force_refresh=True) mock_fetch.assert_called()
Verify that the force_refresh parameter has an effect
cumulusci/core/tests/test_config.py
test_community_info_force_refresh
umeditor/CumulusCI
0
python
@mock.patch('cumulusci.core.config.OrgConfig._fetch_community_info') def test_community_info_force_refresh(self, mock_fetch): mock_fetch.return_value = {'Kōkua': {'name': 'Kōkua'}} config = OrgConfig({}, 'test') config._community_info_cache = {'Kōkua': {'name': 'Kōkua'}} config.get_community_info('Kōkua') mock_fetch.assert_not_called() config.get_community_info('Kōkua', force_refresh=True) mock_fetch.assert_called()
@mock.patch('cumulusci.core.config.OrgConfig._fetch_community_info') def test_community_info_force_refresh(self, mock_fetch): mock_fetch.return_value = {'Kōkua': {'name': 'Kōkua'}} config = OrgConfig({}, 'test') config._community_info_cache = {'Kōkua': {'name': 'Kōkua'}} config.get_community_info('Kōkua') mock_fetch.assert_not_called() config.get_community_info('Kōkua', force_refresh=True) mock_fetch.assert_called()<|docstring|>Verify that the force_refresh parameter has an effect<|endoftext|>
e2ac3bedf1b2493fd49b1b40b156a53132f05f2d0fc97de47e1164a60a9d6b0b
@mock.patch('cumulusci.core.config.OrgConfig._fetch_community_info') def test_community_info_exception(self, mock_fetch): "Verify an exception is thrown when the community doesn't exist" config = OrgConfig({}, 'test') expected_exception = "Unable to find community information for 'bogus'" with self.assertRaisesRegex(Exception, expected_exception): config.get_community_info('bogus')
Verify an exception is thrown when the community doesn't exist
cumulusci/core/tests/test_config.py
test_community_info_exception
umeditor/CumulusCI
0
python
@mock.patch('cumulusci.core.config.OrgConfig._fetch_community_info') def test_community_info_exception(self, mock_fetch): config = OrgConfig({}, 'test') expected_exception = "Unable to find community information for 'bogus'" with self.assertRaisesRegex(Exception, expected_exception): config.get_community_info('bogus')
@mock.patch('cumulusci.core.config.OrgConfig._fetch_community_info') def test_community_info_exception(self, mock_fetch): config = OrgConfig({}, 'test') expected_exception = "Unable to find community information for 'bogus'" with self.assertRaisesRegex(Exception, expected_exception): config.get_community_info('bogus')<|docstring|>Verify an exception is thrown when the community doesn't exist<|endoftext|>
664b539b8223fc592fd46c2cb04b2bf8fbf869000a9c3e45e8acdd494aaf8ae7
def make_encoder(base_depth, activation, latent_size, code_size): 'Creates the encoder function.\n\n Args:\n base_depth: Layer base depth in encoder net.\n activation: Activation function in hidden layers.\n latent_size: The number of latent variables in the code.\n code_size: The dimensionality of each latent variable.\n\n Returns:\n encoder: A `callable` mapping a `Tensor` of images to a `Tensor` of shape\n `[..., latent_size, code_size]`.\n ' conv = functools.partial(tf.keras.layers.Conv2D, padding='SAME', activation=activation) encoder_net = tf.keras.Sequential([conv(base_depth, 5, 1), conv(base_depth, 5, 2), conv((2 * base_depth), 5, 1), conv((2 * base_depth), 5, 2), conv((4 * latent_size), 7, padding='VALID'), tf.keras.layers.Flatten(), tf.keras.layers.Dense((latent_size * code_size), activation=None), tf.keras.layers.Reshape([latent_size, code_size])]) def encoder(images): 'Encodes a batch of images.\n\n Args:\n images: A `Tensor` representing the inputs to be encoded, of shape `[...,\n channels]`.\n\n Returns:\n codes: A `float`-like `Tensor` of shape `[..., latent_size, code_size]`.\n It represents latent vectors to be matched with the codebook.\n ' images = ((2 * tf.cast(images, dtype=tf.float32)) - 1) codes = encoder_net(images) return codes return encoder
Creates the encoder function. Args: base_depth: Layer base depth in encoder net. activation: Activation function in hidden layers. latent_size: The number of latent variables in the code. code_size: The dimensionality of each latent variable. Returns: encoder: A `callable` mapping a `Tensor` of images to a `Tensor` of shape `[..., latent_size, code_size]`.
tensorflow_probability/examples/vq_vae.py
make_encoder
joshchang/probability
3,670
python
def make_encoder(base_depth, activation, latent_size, code_size): 'Creates the encoder function.\n\n Args:\n base_depth: Layer base depth in encoder net.\n activation: Activation function in hidden layers.\n latent_size: The number of latent variables in the code.\n code_size: The dimensionality of each latent variable.\n\n Returns:\n encoder: A `callable` mapping a `Tensor` of images to a `Tensor` of shape\n `[..., latent_size, code_size]`.\n ' conv = functools.partial(tf.keras.layers.Conv2D, padding='SAME', activation=activation) encoder_net = tf.keras.Sequential([conv(base_depth, 5, 1), conv(base_depth, 5, 2), conv((2 * base_depth), 5, 1), conv((2 * base_depth), 5, 2), conv((4 * latent_size), 7, padding='VALID'), tf.keras.layers.Flatten(), tf.keras.layers.Dense((latent_size * code_size), activation=None), tf.keras.layers.Reshape([latent_size, code_size])]) def encoder(images): 'Encodes a batch of images.\n\n Args:\n images: A `Tensor` representing the inputs to be encoded, of shape `[...,\n channels]`.\n\n Returns:\n codes: A `float`-like `Tensor` of shape `[..., latent_size, code_size]`.\n It represents latent vectors to be matched with the codebook.\n ' images = ((2 * tf.cast(images, dtype=tf.float32)) - 1) codes = encoder_net(images) return codes return encoder
def make_encoder(base_depth, activation, latent_size, code_size): 'Creates the encoder function.\n\n Args:\n base_depth: Layer base depth in encoder net.\n activation: Activation function in hidden layers.\n latent_size: The number of latent variables in the code.\n code_size: The dimensionality of each latent variable.\n\n Returns:\n encoder: A `callable` mapping a `Tensor` of images to a `Tensor` of shape\n `[..., latent_size, code_size]`.\n ' conv = functools.partial(tf.keras.layers.Conv2D, padding='SAME', activation=activation) encoder_net = tf.keras.Sequential([conv(base_depth, 5, 1), conv(base_depth, 5, 2), conv((2 * base_depth), 5, 1), conv((2 * base_depth), 5, 2), conv((4 * latent_size), 7, padding='VALID'), tf.keras.layers.Flatten(), tf.keras.layers.Dense((latent_size * code_size), activation=None), tf.keras.layers.Reshape([latent_size, code_size])]) def encoder(images): 'Encodes a batch of images.\n\n Args:\n images: A `Tensor` representing the inputs to be encoded, of shape `[...,\n channels]`.\n\n Returns:\n codes: A `float`-like `Tensor` of shape `[..., latent_size, code_size]`.\n It represents latent vectors to be matched with the codebook.\n ' images = ((2 * tf.cast(images, dtype=tf.float32)) - 1) codes = encoder_net(images) return codes return encoder<|docstring|>Creates the encoder function. Args: base_depth: Layer base depth in encoder net. activation: Activation function in hidden layers. latent_size: The number of latent variables in the code. code_size: The dimensionality of each latent variable. Returns: encoder: A `callable` mapping a `Tensor` of images to a `Tensor` of shape `[..., latent_size, code_size]`.<|endoftext|>
ef0fefb155d8af46466b5dc79b4941010c68e9870db1a9ed1239152dd1692529
def make_decoder(base_depth, activation, input_size, output_shape):
  """Builds the decoder mapping latent codes to a distribution over images.

  Args:
    base_depth: Base number of filters in the (de)convolutional stack.
    activation: Activation function used in the hidden layers.
    input_size: Flattened size of the latent input, as an int.
    output_shape: Shape of the output image, as a list.

  Returns:
    A callable mapping a `Tensor` of codes to an independent `Bernoulli`
    distribution over images.
  """
  deconv2d = functools.partial(
      tf.keras.layers.Conv2DTranspose, padding='SAME', activation=activation)
  conv2d = functools.partial(
      tf.keras.layers.Conv2D, padding='SAME', activation=activation)
  net = tf.keras.Sequential([
      tf.keras.layers.Reshape((1, 1, input_size)),
      deconv2d(2 * base_depth, 7, padding='VALID'),
      deconv2d(2 * base_depth, 5),
      deconv2d(2 * base_depth, 5, 2),
      deconv2d(base_depth, 5),
      deconv2d(base_depth, 5, 2),
      deconv2d(base_depth, 5),
      # Final layer emits raw logits, so no activation here.
      conv2d(output_shape[-1], 5, activation=None),
      tf.keras.layers.Reshape(output_shape),
  ])

  def decoder(codes):
    """Builds a pixel-independent Bernoulli distribution given codes.

    Args:
      codes: A `Tensor` of inputs to decode, of shape `[..., code_size]`.

    Returns:
      A multivariate `Bernoulli` distribution over images.
    """
    logits = net(codes)
    return tfd.Independent(
        tfd.Bernoulli(logits=logits),
        reinterpreted_batch_ndims=len(output_shape),
        name='decoder_distribution')

  return decoder
Creates the decoder function. Args: base_depth: Layer base depth in decoder net. activation: Activation function in hidden layers. input_size: The flattened latent input shape as an int. output_shape: The output image shape as a list. Returns: decoder: A `callable` mapping a `Tensor` of encodings to a `tfd.Distribution` instance over images.
tensorflow_probability/examples/vq_vae.py
make_decoder
joshchang/probability
3,670
python
def make_decoder(base_depth, activation, input_size, output_shape): 'Creates the decoder function.\n\n Args:\n base_depth: Layer base depth in decoder net.\n activation: Activation function in hidden layers.\n input_size: The flattened latent input shape as an int.\n output_shape: The output image shape as a list.\n\n Returns:\n decoder: A `callable` mapping a `Tensor` of encodings to a\n `tfd.Distribution` instance over images.\n ' deconv = functools.partial(tf.keras.layers.Conv2DTranspose, padding='SAME', activation=activation) conv = functools.partial(tf.keras.layers.Conv2D, padding='SAME', activation=activation) decoder_net = tf.keras.Sequential([tf.keras.layers.Reshape((1, 1, input_size)), deconv((2 * base_depth), 7, padding='VALID'), deconv((2 * base_depth), 5), deconv((2 * base_depth), 5, 2), deconv(base_depth, 5), deconv(base_depth, 5, 2), deconv(base_depth, 5), conv(output_shape[(- 1)], 5, activation=None), tf.keras.layers.Reshape(output_shape)]) def decoder(codes): 'Builds a distribution over images given codes.\n\n Args:\n codes: A `Tensor` representing the inputs to be decoded, of shape `[...,\n code_size]`.\n\n Returns:\n decoder_distribution: A multivariate `Bernoulli` distribution.\n ' logits = decoder_net(codes) return tfd.Independent(tfd.Bernoulli(logits=logits), reinterpreted_batch_ndims=len(output_shape), name='decoder_distribution') return decoder
def make_decoder(base_depth, activation, input_size, output_shape): 'Creates the decoder function.\n\n Args:\n base_depth: Layer base depth in decoder net.\n activation: Activation function in hidden layers.\n input_size: The flattened latent input shape as an int.\n output_shape: The output image shape as a list.\n\n Returns:\n decoder: A `callable` mapping a `Tensor` of encodings to a\n `tfd.Distribution` instance over images.\n ' deconv = functools.partial(tf.keras.layers.Conv2DTranspose, padding='SAME', activation=activation) conv = functools.partial(tf.keras.layers.Conv2D, padding='SAME', activation=activation) decoder_net = tf.keras.Sequential([tf.keras.layers.Reshape((1, 1, input_size)), deconv((2 * base_depth), 7, padding='VALID'), deconv((2 * base_depth), 5), deconv((2 * base_depth), 5, 2), deconv(base_depth, 5), deconv(base_depth, 5, 2), deconv(base_depth, 5), conv(output_shape[(- 1)], 5, activation=None), tf.keras.layers.Reshape(output_shape)]) def decoder(codes): 'Builds a distribution over images given codes.\n\n Args:\n codes: A `Tensor` representing the inputs to be decoded, of shape `[...,\n code_size]`.\n\n Returns:\n decoder_distribution: A multivariate `Bernoulli` distribution.\n ' logits = decoder_net(codes) return tfd.Independent(tfd.Bernoulli(logits=logits), reinterpreted_batch_ndims=len(output_shape), name='decoder_distribution') return decoder<|docstring|>Creates the decoder function. Args: base_depth: Layer base depth in decoder net. activation: Activation function in hidden layers. input_size: The flattened latent input shape as an int. output_shape: The output image shape as a list. Returns: decoder: A `callable` mapping a `Tensor` of encodings to a `tfd.Distribution` instance over images.<|endoftext|>
2b6863548693e74f7db1049f824a3a9c47e476006abdda13c6eb6db15c85ad40
def add_ema_control_dependencies(vector_quantizer, one_hot_assignments, codes,
                                 commitment_loss, decay):
  """Adds control dependencies to the commitment loss to update the codebook.

  The codebook entries are refreshed from exponential moving averages (EMA)
  of the assignment counts and of the assigned codes, and the update is
  sequenced after the commitment loss via control dependencies.

  Args:
    vector_quantizer: An instance of the VectorQuantizer class.
    one_hot_assignments: The one-hot vectors corresponding to the matched
      codebook entry for each code in the batch.
    codes: A `float`-like `Tensor` containing the latent vectors to be
      compared to the codebook.
    commitment_loss: The commitment loss from comparing the encoder outputs
      to their neighboring codebook entries.
    decay: Decay factor for the exponential moving average.

  Returns:
    commitment_loss: Commitment loss with control dependencies.
  """
  # EMA of how many codes in the batch were assigned to each codebook entry.
  updated_ema_count = moving_averages.assign_moving_average(
      vector_quantizer.ema_count,
      tf.reduce_sum(input_tensor=one_hot_assignments, axis=[0, 1]),
      decay, zero_debias=False)
  # EMA of the (unnormalized) sum of codes assigned to each entry.
  updated_ema_means = moving_averages.assign_moving_average(
      vector_quantizer.ema_means,
      tf.reduce_sum(
          input_tensor=(tf.expand_dims(codes, 2) *
                        tf.expand_dims(one_hot_assignments, 3)),
          axis=[0, 1]),
      decay, zero_debias=False)
  # Small epsilon so entries with a zero count do not divide by zero.
  perturbed_ema_count = (updated_ema_count + 1e-05)
  # Sequence the ops: compute the loss first, then write the new codebook
  # (EMA mean per entry), and only then return the loss.
  with tf.control_dependencies([commitment_loss]):
    update_means = tf.compat.v1.assign(
        vector_quantizer.codebook,
        (updated_ema_means / perturbed_ema_count[(..., tf.newaxis)]))
  with tf.control_dependencies([update_means]):
    return tf.identity(commitment_loss)
Add control dependencies to the commitment loss to update the codebook. Args: vector_quantizer: An instance of the VectorQuantizer class. one_hot_assignments: The one-hot vectors corresponding to the matched codebook entry for each code in the batch. codes: A `float`-like `Tensor` containing the latent vectors to be compared to the codebook. commitment_loss: The commitment loss from comparing the encoder outputs to their neighboring codebook entries. decay: Decay factor for exponential moving average. 
tensorflow_probability/examples/vq_vae.py
add_ema_control_dependencies
joshchang/probability
3,670
python
def add_ema_control_dependencies(vector_quantizer, one_hot_assignments, codes, commitment_loss, decay): 'Add control dependencies to the commmitment loss to update the codebook.\n\n Args:\n vector_quantizer: An instance of the VectorQuantizer class.\n one_hot_assignments: The one-hot vectors corresponding to the matched\n codebook entry for each code in the batch.\n codes: A `float`-like `Tensor` containing the latent vectors to be compared\n to the codebook.\n commitment_loss: The commitment loss from comparing the encoder outputs to\n their neighboring codebook entries.\n decay: Decay factor for exponential moving average.\n\n Returns:\n commitment_loss: Commitment loss with control dependencies.\n ' updated_ema_count = moving_averages.assign_moving_average(vector_quantizer.ema_count, tf.reduce_sum(input_tensor=one_hot_assignments, axis=[0, 1]), decay, zero_debias=False) updated_ema_means = moving_averages.assign_moving_average(vector_quantizer.ema_means, tf.reduce_sum(input_tensor=(tf.expand_dims(codes, 2) * tf.expand_dims(one_hot_assignments, 3)), axis=[0, 1]), decay, zero_debias=False) perturbed_ema_count = (updated_ema_count + 1e-05) with tf.control_dependencies([commitment_loss]): update_means = tf.compat.v1.assign(vector_quantizer.codebook, (updated_ema_means / perturbed_ema_count[(..., tf.newaxis)])) with tf.control_dependencies([update_means]): return tf.identity(commitment_loss)
def add_ema_control_dependencies(vector_quantizer, one_hot_assignments, codes, commitment_loss, decay): 'Add control dependencies to the commmitment loss to update the codebook.\n\n Args:\n vector_quantizer: An instance of the VectorQuantizer class.\n one_hot_assignments: The one-hot vectors corresponding to the matched\n codebook entry for each code in the batch.\n codes: A `float`-like `Tensor` containing the latent vectors to be compared\n to the codebook.\n commitment_loss: The commitment loss from comparing the encoder outputs to\n their neighboring codebook entries.\n decay: Decay factor for exponential moving average.\n\n Returns:\n commitment_loss: Commitment loss with control dependencies.\n ' updated_ema_count = moving_averages.assign_moving_average(vector_quantizer.ema_count, tf.reduce_sum(input_tensor=one_hot_assignments, axis=[0, 1]), decay, zero_debias=False) updated_ema_means = moving_averages.assign_moving_average(vector_quantizer.ema_means, tf.reduce_sum(input_tensor=(tf.expand_dims(codes, 2) * tf.expand_dims(one_hot_assignments, 3)), axis=[0, 1]), decay, zero_debias=False) perturbed_ema_count = (updated_ema_count + 1e-05) with tf.control_dependencies([commitment_loss]): update_means = tf.compat.v1.assign(vector_quantizer.codebook, (updated_ema_means / perturbed_ema_count[(..., tf.newaxis)])) with tf.control_dependencies([update_means]): return tf.identity(commitment_loss)<|docstring|>Add control dependencies to the commmitment loss to update the codebook. Args: vector_quantizer: An instance of the VectorQuantizer class. one_hot_assignments: The one-hot vectors corresponding to the matched codebook entry for each code in the batch. codes: A `float`-like `Tensor` containing the latent vectors to be compared to the codebook. commitment_loss: The commitment loss from comparing the encoder outputs to their neighboring codebook entries. decay: Decay factor for exponential moving average. 
Returns: commitment_loss: Commitment loss with control dependencies.<|endoftext|>
302bfa9a9afb405d410a4e7b00d08191f0294029947b2e90dbd870161eb06ed2
def save_imgs(x, fname):
  """Saves a horizontal grid of images to a PNG file.

  Args:
    x: A numpy array of shape [n_images, height, width].
    fname: Output filename to write to, including the extension.
  """
  num_images = x.shape[0]
  fig = figure.Figure(figsize=(num_images, 1), frameon=False)
  canvas = backend_agg.FigureCanvasAgg(fig)
  for idx, img in enumerate(x):
    axes = fig.add_subplot(1, num_images, idx + 1)
    axes.imshow(
        img.squeeze(), interpolation='none', cmap=cm.get_cmap('binary'))
    axes.axis('off')
  canvas.print_figure(fname, format='png')
  print('saved %s' % fname)
Helper method to save a grid of images to a PNG file. Args: x: A numpy array of shape [n_images, height, width]. fname: The filename to write to (including extension).
tensorflow_probability/examples/vq_vae.py
save_imgs
joshchang/probability
3,670
python
def save_imgs(x, fname): 'Helper method to save a grid of images to a PNG file.\n\n Args:\n x: A numpy array of shape [n_images, height, width].\n fname: The filename to write to (including extension).\n ' n = x.shape[0] fig = figure.Figure(figsize=(n, 1), frameon=False) canvas = backend_agg.FigureCanvasAgg(fig) for i in range(n): ax = fig.add_subplot(1, n, (i + 1)) ax.imshow(x[i].squeeze(), interpolation='none', cmap=cm.get_cmap('binary')) ax.axis('off') canvas.print_figure(fname, format='png') print(('saved %s' % fname))
def save_imgs(x, fname): 'Helper method to save a grid of images to a PNG file.\n\n Args:\n x: A numpy array of shape [n_images, height, width].\n fname: The filename to write to (including extension).\n ' n = x.shape[0] fig = figure.Figure(figsize=(n, 1), frameon=False) canvas = backend_agg.FigureCanvasAgg(fig) for i in range(n): ax = fig.add_subplot(1, n, (i + 1)) ax.imshow(x[i].squeeze(), interpolation='none', cmap=cm.get_cmap('binary')) ax.axis('off') canvas.print_figure(fname, format='png') print(('saved %s' % fname))<|docstring|>Helper method to save a grid of images to a PNG file. Args: x: A numpy array of shape [n_images, height, width]. fname: The filename to write to (including extension).<|endoftext|>
cff8bb1b9d42de871129e45159bd6bb3e41d06ac558bcc9d53ab6cadd4b799af
def visualize_training(images_val, reconstructed_images_val, random_images_val,
                       log_dir, prefix, viz_n=10):
  """Saves images visualizing model reconstructions.

  Args:
    images_val: Numpy array containing a batch of input images.
    reconstructed_images_val: Numpy array giving the expected output (mean)
      of the decoder.
    random_images_val: Optionally, a Numpy array giving the expected output
      (mean) of decoding samples from the prior, or `None`.
    log_dir: The directory to write images to (Python `str`).
    prefix: A label for the saved visualizations, which determines their
      filenames (Python `str`).
    viz_n: Number of images from each batch to visualize (Python `int`).
  """
  inputs_path = os.path.join(log_dir, '{}_inputs.png'.format(prefix))
  save_imgs(images_val[:viz_n], inputs_path)

  recon_path = os.path.join(log_dir, '{}_reconstructions.png'.format(prefix))
  save_imgs(reconstructed_images_val[:viz_n], recon_path)

  # Prior samples are only visualized when provided.
  if random_images_val is not None:
    prior_path = os.path.join(log_dir, '{}_prior_samples.png'.format(prefix))
    save_imgs(random_images_val[:viz_n], prior_path)
Helper method to save images visualizing model reconstructions. Args: images_val: Numpy array containing a batch of input images. reconstructed_images_val: Numpy array giving the expected output (mean) of the decoder. random_images_val: Optionally, a Numpy array giving the expected output (mean) of decoding samples from the prior, or `None`. log_dir: The directory to write images (Python `str`). prefix: A specific label for the saved visualizations, which determines their filenames (Python `str`). viz_n: The number of images from each batch to visualize (Python `int`).
tensorflow_probability/examples/vq_vae.py
visualize_training
joshchang/probability
3,670
python
def visualize_training(images_val, reconstructed_images_val, random_images_val, log_dir, prefix, viz_n=10): 'Helper method to save images visualizing model reconstructions.\n\n Args:\n images_val: Numpy array containing a batch of input images.\n reconstructed_images_val: Numpy array giving the expected output\n (mean) of the decoder.\n random_images_val: Optionally, a Numpy array giving the expected output\n (mean) of decoding samples from the prior, or `None`.\n log_dir: The directory to write images (Python `str`).\n prefix: A specific label for the saved visualizations, which\n determines their filenames (Python `str`).\n viz_n: The number of images from each batch to visualize (Python `int`).\n ' save_imgs(images_val[:viz_n], os.path.join(log_dir, '{}_inputs.png'.format(prefix))) save_imgs(reconstructed_images_val[:viz_n], os.path.join(log_dir, '{}_reconstructions.png'.format(prefix))) if (random_images_val is not None): save_imgs(random_images_val[:viz_n], os.path.join(log_dir, '{}_prior_samples.png'.format(prefix)))
def visualize_training(images_val, reconstructed_images_val, random_images_val, log_dir, prefix, viz_n=10): 'Helper method to save images visualizing model reconstructions.\n\n Args:\n images_val: Numpy array containing a batch of input images.\n reconstructed_images_val: Numpy array giving the expected output\n (mean) of the decoder.\n random_images_val: Optionally, a Numpy array giving the expected output\n (mean) of decoding samples from the prior, or `None`.\n log_dir: The directory to write images (Python `str`).\n prefix: A specific label for the saved visualizations, which\n determines their filenames (Python `str`).\n viz_n: The number of images from each batch to visualize (Python `int`).\n ' save_imgs(images_val[:viz_n], os.path.join(log_dir, '{}_inputs.png'.format(prefix))) save_imgs(reconstructed_images_val[:viz_n], os.path.join(log_dir, '{}_reconstructions.png'.format(prefix))) if (random_images_val is not None): save_imgs(random_images_val[:viz_n], os.path.join(log_dir, '{}_prior_samples.png'.format(prefix)))<|docstring|>Helper method to save images visualizing model reconstructions. Args: images_val: Numpy array containing a batch of input images. reconstructed_images_val: Numpy array giving the expected output (mean) of the decoder. random_images_val: Optionally, a Numpy array giving the expected output (mean) of decoding samples from the prior, or `None`. log_dir: The directory to write images (Python `str`). prefix: A specific label for the saved visualizations, which determines their filenames (Python `str`). viz_n: The number of images from each batch to visualize (Python `int`).<|endoftext|>
37e41fded47481161e369e1d0dcea9220dde646e4160d5aa9733d77c4c98d5d1
def build_fake_data(num_examples=10):
  """Builds fake MNIST-style data for unit testing.

  Args:
    num_examples: Number of fake examples in each of the train and
      validation splits. (Previously this parameter was silently
      overwritten by a hard-coded `num_examples = 10` inside the body,
      making it dead; that assignment is removed so the parameter works.)

  Returns:
    An object mimicking the MNIST dataset interface, with `train` and
    `validation` attributes each exposing `images`, `labels`, and
    `num_examples`.
  """
  class Dummy(object):
    pass

  def _fake_split():
    # One row of random pixels per example; labels are a permutation of
    # [0, num_examples) so each label value appears exactly once.
    split = Dummy()
    split.images = np.float32(
        np.random.randn(num_examples, np.prod(IMAGE_SHAPE)))
    split.labels = np.int32(np.random.permutation(np.arange(num_examples)))
    split.num_examples = num_examples
    return split

  mnist_data = Dummy()
  mnist_data.train = _fake_split()
  mnist_data.validation = _fake_split()
  return mnist_data
Builds fake MNIST-style data for unit testing.
tensorflow_probability/examples/vq_vae.py
build_fake_data
joshchang/probability
3,670
python
def build_fake_data(num_examples=10): class Dummy(object): pass num_examples = 10 mnist_data = Dummy() mnist_data.train = Dummy() mnist_data.train.images = np.float32(np.random.randn(num_examples, np.prod(IMAGE_SHAPE))) mnist_data.train.labels = np.int32(np.random.permutation(np.arange(num_examples))) mnist_data.train.num_examples = num_examples mnist_data.validation = Dummy() mnist_data.validation.images = np.float32(np.random.randn(num_examples, np.prod(IMAGE_SHAPE))) mnist_data.validation.labels = np.int32(np.random.permutation(np.arange(num_examples))) mnist_data.validation.num_examples = num_examples return mnist_data
def build_fake_data(num_examples=10): class Dummy(object): pass num_examples = 10 mnist_data = Dummy() mnist_data.train = Dummy() mnist_data.train.images = np.float32(np.random.randn(num_examples, np.prod(IMAGE_SHAPE))) mnist_data.train.labels = np.int32(np.random.permutation(np.arange(num_examples))) mnist_data.train.num_examples = num_examples mnist_data.validation = Dummy() mnist_data.validation.images = np.float32(np.random.randn(num_examples, np.prod(IMAGE_SHAPE))) mnist_data.validation.labels = np.int32(np.random.permutation(np.arange(num_examples))) mnist_data.validation.num_examples = num_examples return mnist_data<|docstring|>Builds fake MNIST-style data for unit testing.<|endoftext|>
e5f91f491ad25edf44f4091c5ed706e6a8cd59f036b353f554f619d37feb9e5f
def download(directory, filename):
  """Downloads a file into `directory`, skipping it if it already exists.

  Args:
    directory: Local directory to store the file in; created if missing.
    filename: Name of the file, both remotely (under `BERNOULLI_PATH`)
      and locally.

  Returns:
    The local path of the (possibly freshly downloaded) file.
  """
  filepath = os.path.join(directory, filename)
  if tf.io.gfile.exists(filepath):
    return filepath
  if not tf.io.gfile.exists(directory):
    tf.io.gfile.makedirs(directory)
  # Build the URL with an explicit '/' separator: os.path.join would use
  # '\\' on Windows, which is not a valid URL path separator.
  url = '{}/{}'.format(BERNOULLI_PATH.rstrip('/'), filename)
  print('Downloading %s to %s' % (url, filepath))
  urllib.request.urlretrieve(url, filepath)
  return filepath
Downloads a file.
tensorflow_probability/examples/vq_vae.py
download
joshchang/probability
3,670
python
def download(directory, filename): filepath = os.path.join(directory, filename) if tf.io.gfile.exists(filepath): return filepath if (not tf.io.gfile.exists(directory)): tf.io.gfile.makedirs(directory) url = os.path.join(BERNOULLI_PATH, filename) print(('Downloading %s to %s' % (url, filepath))) urllib.request.urlretrieve(url, filepath) return filepath
def download(directory, filename): filepath = os.path.join(directory, filename) if tf.io.gfile.exists(filepath): return filepath if (not tf.io.gfile.exists(directory)): tf.io.gfile.makedirs(directory) url = os.path.join(BERNOULLI_PATH, filename) print(('Downloading %s to %s' % (url, filepath))) urllib.request.urlretrieve(url, filepath) return filepath<|docstring|>Downloads a file.<|endoftext|>
5268614d55021babf29036e4be6fa710987f070da370adf853f75c6651bcead9
def load_bernoulli_mnist_dataset(directory, split_name):
  """Returns Hugo Larochelle's binary static MNIST tf.data.Dataset."""
  amat_file = download(directory, FILE_TEMPLATE.format(split=split_name))
  dataset = tf.data.TextLineDataset(amat_file)

  def str_to_arr(string):
    # Each line is a space-separated row of '0'/'1' characters.
    return np.array([c == b'1' for c in string.split()])

  def _parser(s):
    booltensor = tf.compat.v1.py_func(str_to_arr, [s], tf.bool)
    image = tf.cast(tf.reshape(booltensor, [28, 28, 1]), dtype=tf.float32)
    # Labels are unused for this dataset; emit a constant zero.
    return image, tf.constant(0, tf.int32)

  return dataset.map(_parser)
Returns Hugo Larochelle's binary static MNIST tf.data.Dataset.
tensorflow_probability/examples/vq_vae.py
load_bernoulli_mnist_dataset
joshchang/probability
3,670
python
def load_bernoulli_mnist_dataset(directory, split_name): amat_file = download(directory, FILE_TEMPLATE.format(split=split_name)) dataset = tf.data.TextLineDataset(amat_file) str_to_arr = (lambda string: np.array([(c == b'1') for c in string.split()])) def _parser(s): booltensor = tf.compat.v1.py_func(str_to_arr, [s], tf.bool) reshaped = tf.reshape(booltensor, [28, 28, 1]) return (tf.cast(reshaped, dtype=tf.float32), tf.constant(0, tf.int32)) return dataset.map(_parser)
def load_bernoulli_mnist_dataset(directory, split_name): amat_file = download(directory, FILE_TEMPLATE.format(split=split_name)) dataset = tf.data.TextLineDataset(amat_file) str_to_arr = (lambda string: np.array([(c == b'1') for c in string.split()])) def _parser(s): booltensor = tf.compat.v1.py_func(str_to_arr, [s], tf.bool) reshaped = tf.reshape(booltensor, [28, 28, 1]) return (tf.cast(reshaped, dtype=tf.float32), tf.constant(0, tf.int32)) return dataset.map(_parser)<|docstring|>Returns Hugo Larochelle's binary static MNIST tf.data.Dataset.<|endoftext|>
2f1f8cd0bdc83251b811d5b2bc095577675085659fa2cd979241017d2a933af6
def build_input_pipeline(data_dir, batch_size, heldout_size, mnist_type):
  """Builds an Iterator switching between train and heldout data.

  Args:
    data_dir: Directory holding (or receiving) the MNIST data.
    batch_size: Number of training examples per batch.
    heldout_size: Number of heldout (validation) examples frozen into a
      single repeated batch.
    mnist_type: One of the `MnistType` variants selecting the data source.

  Returns:
    images: Image batch `Tensor` from the feedable iterator.
    labels: Label batch `Tensor` from the feedable iterator.
    handle: `tf.string` placeholder selecting which iterator feeds
      `images`/`labels` at session-run time.
    training_iterator: One-shot iterator over repeated training batches.
    heldout_iterator: One-shot iterator over the frozen heldout batch.

  Raises:
    ValueError: If `mnist_type` is not a known `MnistType`.
  """
  if (mnist_type in [MnistType.FAKE_DATA, MnistType.THRESHOLD]):
    if (mnist_type == MnistType.FAKE_DATA):
      mnist_data = build_fake_data()
    else:
      mnist_data = mnist.read_data_sets(data_dir)
    training_dataset = tf.data.Dataset.from_tensor_slices(
        (mnist_data.train.images, np.int32(mnist_data.train.labels)))
    heldout_dataset = tf.data.Dataset.from_tensor_slices(
        (mnist_data.validation.images,
         np.int32(mnist_data.validation.labels)))
  elif (mnist_type == MnistType.BERNOULLI):
    training_dataset = load_bernoulli_mnist_dataset(data_dir, 'train')
    heldout_dataset = load_bernoulli_mnist_dataset(data_dir, 'valid')
  else:
    raise ValueError('Unknown MNIST type.')
  training_batches = training_dataset.repeat().batch(batch_size)
  training_iterator = tf.compat.v1.data.make_one_shot_iterator(
      training_batches)
  # take(...).repeat() freezes one fixed heldout batch that is re-served
  # every time the heldout iterator is pulled.
  heldout_frozen = heldout_dataset.take(heldout_size).repeat().batch(
      heldout_size)
  heldout_iterator = tf.compat.v1.data.make_one_shot_iterator(heldout_frozen)
  # Feedable iterator: the string `handle` chooses (per session.run) which
  # of the two iterators actually flows into `images`/`labels`.
  handle = tf.compat.v1.placeholder(tf.string, shape=[])
  feedable_iterator = tf.compat.v1.data.Iterator.from_string_handle(
      handle, training_batches.output_types, training_batches.output_shapes)
  (images, labels) = feedable_iterator.get_next()
  images = tf.reshape(images, shape=([(-1)] + IMAGE_SHAPE))
  if (mnist_type in [MnistType.FAKE_DATA, MnistType.THRESHOLD]):
    # Binarize real-valued pixels by thresholding at 0.5.
    images = tf.cast((images > 0.5), dtype=tf.int32)
  return (images, labels, handle, training_iterator, heldout_iterator)
Builds an Iterator switching between train and heldout data.
tensorflow_probability/examples/vq_vae.py
build_input_pipeline
joshchang/probability
3,670
python
def build_input_pipeline(data_dir, batch_size, heldout_size, mnist_type): if (mnist_type in [MnistType.FAKE_DATA, MnistType.THRESHOLD]): if (mnist_type == MnistType.FAKE_DATA): mnist_data = build_fake_data() else: mnist_data = mnist.read_data_sets(data_dir) training_dataset = tf.data.Dataset.from_tensor_slices((mnist_data.train.images, np.int32(mnist_data.train.labels))) heldout_dataset = tf.data.Dataset.from_tensor_slices((mnist_data.validation.images, np.int32(mnist_data.validation.labels))) elif (mnist_type == MnistType.BERNOULLI): training_dataset = load_bernoulli_mnist_dataset(data_dir, 'train') heldout_dataset = load_bernoulli_mnist_dataset(data_dir, 'valid') else: raise ValueError('Unknown MNIST type.') training_batches = training_dataset.repeat().batch(batch_size) training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches) heldout_frozen = heldout_dataset.take(heldout_size).repeat().batch(heldout_size) heldout_iterator = tf.compat.v1.data.make_one_shot_iterator(heldout_frozen) handle = tf.compat.v1.placeholder(tf.string, shape=[]) feedable_iterator = tf.compat.v1.data.Iterator.from_string_handle(handle, training_batches.output_types, training_batches.output_shapes) (images, labels) = feedable_iterator.get_next() images = tf.reshape(images, shape=([(- 1)] + IMAGE_SHAPE)) if (mnist_type in [MnistType.FAKE_DATA, MnistType.THRESHOLD]): images = tf.cast((images > 0.5), dtype=tf.int32) return (images, labels, handle, training_iterator, heldout_iterator)
def build_input_pipeline(data_dir, batch_size, heldout_size, mnist_type): if (mnist_type in [MnistType.FAKE_DATA, MnistType.THRESHOLD]): if (mnist_type == MnistType.FAKE_DATA): mnist_data = build_fake_data() else: mnist_data = mnist.read_data_sets(data_dir) training_dataset = tf.data.Dataset.from_tensor_slices((mnist_data.train.images, np.int32(mnist_data.train.labels))) heldout_dataset = tf.data.Dataset.from_tensor_slices((mnist_data.validation.images, np.int32(mnist_data.validation.labels))) elif (mnist_type == MnistType.BERNOULLI): training_dataset = load_bernoulli_mnist_dataset(data_dir, 'train') heldout_dataset = load_bernoulli_mnist_dataset(data_dir, 'valid') else: raise ValueError('Unknown MNIST type.') training_batches = training_dataset.repeat().batch(batch_size) training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches) heldout_frozen = heldout_dataset.take(heldout_size).repeat().batch(heldout_size) heldout_iterator = tf.compat.v1.data.make_one_shot_iterator(heldout_frozen) handle = tf.compat.v1.placeholder(tf.string, shape=[]) feedable_iterator = tf.compat.v1.data.Iterator.from_string_handle(handle, training_batches.output_types, training_batches.output_shapes) (images, labels) = feedable_iterator.get_next() images = tf.reshape(images, shape=([(- 1)] + IMAGE_SHAPE)) if (mnist_type in [MnistType.FAKE_DATA, MnistType.THRESHOLD]): images = tf.cast((images > 0.5), dtype=tf.int32) return (images, labels, handle, training_iterator, heldout_iterator)<|docstring|>Builds an Iterator switching between train and heldout data.<|endoftext|>
6a560d177689fe811f12a09cfc96c9d0762d2811103877126736f98528d1d923
def __call__(self, codes):
  """Uses the codebook to find the nearest neighbor for each code.

  Args:
    codes: A `float`-like `Tensor` of shape
      `[batch_size, latent_size, code_size]` containing the latent vectors
      to be compared to the codebook.

  Returns:
    nearest_codebook_entries: The 1-nearest neighbor in Euclidean distance
      for each code in the batch.
    one_hot_assignments: The one-hot vectors corresponding to the matched
      codebook entry for each code in the batch.
  """
  # Reshape once so the codebook broadcasts against every code in the batch.
  codebook = tf.reshape(
      self.codebook, [1, 1, self.num_codes, self.code_size])
  distances = tf.norm(
      tensor=tf.expand_dims(codes, 2) - codebook, axis=3)
  assignments = tf.argmin(input=distances, axis=2)
  one_hot_assignments = tf.one_hot(assignments, depth=self.num_codes)
  # Summing over the one-hot axis selects exactly the matched entry.
  nearest_codebook_entries = tf.reduce_sum(
      input_tensor=tf.expand_dims(one_hot_assignments, -1) * codebook,
      axis=2)
  return nearest_codebook_entries, one_hot_assignments
Uses codebook to find nearest neighbor for each code. Args: codes: A `float`-like `Tensor` containing the latent vectors to be compared to the codebook. These are rank-3 with shape `[batch_size, latent_size, code_size]`. Returns: nearest_codebook_entries: The 1-nearest neighbor in Euclidean distance for each code in the batch. one_hot_assignments: The one-hot vectors corresponding to the matched codebook entry for each code in the batch.
tensorflow_probability/examples/vq_vae.py
__call__
joshchang/probability
3,670
python
def __call__(self, codes): 'Uses codebook to find nearest neighbor for each code.\n\n Args:\n codes: A `float`-like `Tensor` containing the latent\n vectors to be compared to the codebook. These are rank-3 with shape\n `[batch_size, latent_size, code_size]`.\n\n Returns:\n nearest_codebook_entries: The 1-nearest neighbor in Euclidean distance for\n each code in the batch.\n one_hot_assignments: The one-hot vectors corresponding to the matched\n codebook entry for each code in the batch.\n ' distances = tf.norm(tensor=(tf.expand_dims(codes, 2) - tf.reshape(self.codebook, [1, 1, self.num_codes, self.code_size])), axis=3) assignments = tf.argmin(input=distances, axis=2) one_hot_assignments = tf.one_hot(assignments, depth=self.num_codes) nearest_codebook_entries = tf.reduce_sum(input_tensor=(tf.expand_dims(one_hot_assignments, (- 1)) * tf.reshape(self.codebook, [1, 1, self.num_codes, self.code_size])), axis=2) return (nearest_codebook_entries, one_hot_assignments)
def __call__(self, codes): 'Uses codebook to find nearest neighbor for each code.\n\n Args:\n codes: A `float`-like `Tensor` containing the latent\n vectors to be compared to the codebook. These are rank-3 with shape\n `[batch_size, latent_size, code_size]`.\n\n Returns:\n nearest_codebook_entries: The 1-nearest neighbor in Euclidean distance for\n each code in the batch.\n one_hot_assignments: The one-hot vectors corresponding to the matched\n codebook entry for each code in the batch.\n ' distances = tf.norm(tensor=(tf.expand_dims(codes, 2) - tf.reshape(self.codebook, [1, 1, self.num_codes, self.code_size])), axis=3) assignments = tf.argmin(input=distances, axis=2) one_hot_assignments = tf.one_hot(assignments, depth=self.num_codes) nearest_codebook_entries = tf.reduce_sum(input_tensor=(tf.expand_dims(one_hot_assignments, (- 1)) * tf.reshape(self.codebook, [1, 1, self.num_codes, self.code_size])), axis=2) return (nearest_codebook_entries, one_hot_assignments)<|docstring|>Uses codebook to find nearest neighbor for each code. Args: codes: A `float`-like `Tensor` containing the latent vectors to be compared to the codebook. These are rank-3 with shape `[batch_size, latent_size, code_size]`. Returns: nearest_codebook_entries: The 1-nearest neighbor in Euclidean distance for each code in the batch. one_hot_assignments: The one-hot vectors corresponding to the matched codebook entry for each code in the batch.<|endoftext|>
929d8f2023ae390ced15e2bdc4a5191231e3b706fb758e512fc081170d31de28
def encoder(images): 'Encodes a batch of images.\n\n Args:\n images: A `Tensor` representing the inputs to be encoded, of shape `[...,\n channels]`.\n\n Returns:\n codes: A `float`-like `Tensor` of shape `[..., latent_size, code_size]`.\n It represents latent vectors to be matched with the codebook.\n ' images = ((2 * tf.cast(images, dtype=tf.float32)) - 1) codes = encoder_net(images) return codes
Encodes a batch of images. Args: images: A `Tensor` representing the inputs to be encoded, of shape `[..., channels]`. Returns: codes: A `float`-like `Tensor` of shape `[..., latent_size, code_size]`. It represents latent vectors to be matched with the codebook.
tensorflow_probability/examples/vq_vae.py
encoder
joshchang/probability
3,670
python
def encoder(images): 'Encodes a batch of images.\n\n Args:\n images: A `Tensor` representing the inputs to be encoded, of shape `[...,\n channels]`.\n\n Returns:\n codes: A `float`-like `Tensor` of shape `[..., latent_size, code_size]`.\n It represents latent vectors to be matched with the codebook.\n ' images = ((2 * tf.cast(images, dtype=tf.float32)) - 1) codes = encoder_net(images) return codes
def encoder(images): 'Encodes a batch of images.\n\n Args:\n images: A `Tensor` representing the inputs to be encoded, of shape `[...,\n channels]`.\n\n Returns:\n codes: A `float`-like `Tensor` of shape `[..., latent_size, code_size]`.\n It represents latent vectors to be matched with the codebook.\n ' images = ((2 * tf.cast(images, dtype=tf.float32)) - 1) codes = encoder_net(images) return codes<|docstring|>Encodes a batch of images. Args: images: A `Tensor` representing the inputs to be encoded, of shape `[..., channels]`. Returns: codes: A `float`-like `Tensor` of shape `[..., latent_size, code_size]`. It represents latent vectors to be matched with the codebook.<|endoftext|>
654aaabd5b8de162a9e240e2743fa201eae2a0b9462048f02a4a154cc9651fe5
def decoder(codes): 'Builds a distribution over images given codes.\n\n Args:\n codes: A `Tensor` representing the inputs to be decoded, of shape `[...,\n code_size]`.\n\n Returns:\n decoder_distribution: A multivariate `Bernoulli` distribution.\n ' logits = decoder_net(codes) return tfd.Independent(tfd.Bernoulli(logits=logits), reinterpreted_batch_ndims=len(output_shape), name='decoder_distribution')
Builds a distribution over images given codes. Args: codes: A `Tensor` representing the inputs to be decoded, of shape `[..., code_size]`. Returns: decoder_distribution: A multivariate `Bernoulli` distribution.
tensorflow_probability/examples/vq_vae.py
decoder
joshchang/probability
3,670
python
def decoder(codes): 'Builds a distribution over images given codes.\n\n Args:\n codes: A `Tensor` representing the inputs to be decoded, of shape `[...,\n code_size]`.\n\n Returns:\n decoder_distribution: A multivariate `Bernoulli` distribution.\n ' logits = decoder_net(codes) return tfd.Independent(tfd.Bernoulli(logits=logits), reinterpreted_batch_ndims=len(output_shape), name='decoder_distribution')
def decoder(codes): 'Builds a distribution over images given codes.\n\n Args:\n codes: A `Tensor` representing the inputs to be decoded, of shape `[...,\n code_size]`.\n\n Returns:\n decoder_distribution: A multivariate `Bernoulli` distribution.\n ' logits = decoder_net(codes) return tfd.Independent(tfd.Bernoulli(logits=logits), reinterpreted_batch_ndims=len(output_shape), name='decoder_distribution')<|docstring|>Builds a distribution over images given codes. Args: codes: A `Tensor` representing the inputs to be decoded, of shape `[..., code_size]`. Returns: decoder_distribution: A multivariate `Bernoulli` distribution.<|endoftext|>
8e29fb61b676415c884fd94a9006d769edfdc4cd49635d4b0b564e15ce320796
def main(): '\n Just tell each group to log itself to the outputs.\n\n ' for group_name in functional_groups: functional_groups[group_name].log_info()
Just tell each group to log itself to the outputs.
bin/list_groups.py
main
tdaff/fapswitch
0
python
def main(): '\n \n\n ' for group_name in functional_groups: functional_groups[group_name].log_info()
def main(): '\n \n\n ' for group_name in functional_groups: functional_groups[group_name].log_info()<|docstring|>Just tell each group to log itself to the outputs.<|endoftext|>
ed3e24702a168ec0efbe96beeb6a2c0075f0dfff22c003f9aef316fe5bc502dc
async def begin_create_or_update(self, resource_group_name: str, workspace_name: str, linked_service_name: str, parameters: '_models.LinkedService', **kwargs: Any) -> AsyncLROPoller['_models.LinkedService']: 'Create or update a linked service.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param workspace_name: The name of the workspace.\n :type workspace_name: str\n :param linked_service_name: Name of the linkedServices resource.\n :type linked_service_name: str\n :param parameters: The parameters required to create or update a linked service.\n :type parameters: ~azure.mgmt.loganalytics.models.LinkedService\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either LinkedService or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.loganalytics.models.LinkedService]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._create_or_update_initial(resource_group_name=resource_group_name, workspace_name=workspace_name, linked_service_name=linked_service_name, parameters=parameters, cls=(lambda x, y, z: x), **kwargs)) 
kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('LinkedService', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', max_length=90, min_length=1), 'workspaceName': self._serialize.url('workspace_name', workspace_name, 'str', max_length=63, min_length=4, pattern='^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'), 'linkedServiceName': self._serialize.url('linked_service_name', linked_service_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str', min_length=1)} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
Create or update a linked service. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :param linked_service_name: Name of the linkedServices resource. :type linked_service_name: str :param parameters: The parameters required to create or update a linked service. :type parameters: ~azure.mgmt.loganalytics.models.LinkedService :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either LinkedService or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.loganalytics.models.LinkedService] :raises ~azure.core.exceptions.HttpResponseError:
sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/aio/operations/_linked_services_operations.py
begin_create_or_update
jayachithra/azure-sdk-for-python
2,728
python
async def begin_create_or_update(self, resource_group_name: str, workspace_name: str, linked_service_name: str, parameters: '_models.LinkedService', **kwargs: Any) -> AsyncLROPoller['_models.LinkedService']: 'Create or update a linked service.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param workspace_name: The name of the workspace.\n :type workspace_name: str\n :param linked_service_name: Name of the linkedServices resource.\n :type linked_service_name: str\n :param parameters: The parameters required to create or update a linked service.\n :type parameters: ~azure.mgmt.loganalytics.models.LinkedService\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either LinkedService or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.loganalytics.models.LinkedService]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._create_or_update_initial(resource_group_name=resource_group_name, workspace_name=workspace_name, linked_service_name=linked_service_name, parameters=parameters, cls=(lambda x, y, z: x), **kwargs)) 
kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('LinkedService', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', max_length=90, min_length=1), 'workspaceName': self._serialize.url('workspace_name', workspace_name, 'str', max_length=63, min_length=4, pattern='^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'), 'linkedServiceName': self._serialize.url('linked_service_name', linked_service_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str', min_length=1)} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
async def begin_create_or_update(self, resource_group_name: str, workspace_name: str, linked_service_name: str, parameters: '_models.LinkedService', **kwargs: Any) -> AsyncLROPoller['_models.LinkedService']: 'Create or update a linked service.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param workspace_name: The name of the workspace.\n :type workspace_name: str\n :param linked_service_name: Name of the linkedServices resource.\n :type linked_service_name: str\n :param parameters: The parameters required to create or update a linked service.\n :type parameters: ~azure.mgmt.loganalytics.models.LinkedService\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either LinkedService or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.loganalytics.models.LinkedService]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._create_or_update_initial(resource_group_name=resource_group_name, workspace_name=workspace_name, linked_service_name=linked_service_name, parameters=parameters, cls=(lambda x, y, z: x), **kwargs)) 
kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('LinkedService', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', max_length=90, min_length=1), 'workspaceName': self._serialize.url('workspace_name', workspace_name, 'str', max_length=63, min_length=4, pattern='^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'), 'linkedServiceName': self._serialize.url('linked_service_name', linked_service_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str', min_length=1)} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)<|docstring|>Create or update a linked service. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :param linked_service_name: Name of the linkedServices resource. :type linked_service_name: str :param parameters: The parameters required to create or update a linked service. :type parameters: ~azure.mgmt.loganalytics.models.LinkedService :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. 
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either LinkedService or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.loganalytics.models.LinkedService] :raises ~azure.core.exceptions.HttpResponseError:<|endoftext|>
35db9beb08ec00aead35dfd25c8d0006bee0d3a0667383146d42d5d1270f4048
async def begin_delete(self, resource_group_name: str, workspace_name: str, linked_service_name: str, **kwargs: Any) -> AsyncLROPoller['_models.LinkedService']: 'Deletes a linked service instance.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param workspace_name: The name of the workspace.\n :type workspace_name: str\n :param linked_service_name: Name of the linked service.\n :type linked_service_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either LinkedService or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.loganalytics.models.LinkedService]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._delete_initial(resource_group_name=resource_group_name, workspace_name=workspace_name, linked_service_name=linked_service_name, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('LinkedService', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return 
deserialized path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', max_length=90, min_length=1), 'workspaceName': self._serialize.url('workspace_name', workspace_name, 'str', max_length=63, min_length=4, pattern='^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'), 'linkedServiceName': self._serialize.url('linked_service_name', linked_service_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str', min_length=1)} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
Deletes a linked service instance. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :param linked_service_name: Name of the linked service. :type linked_service_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either LinkedService or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.loganalytics.models.LinkedService] :raises ~azure.core.exceptions.HttpResponseError:
sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/aio/operations/_linked_services_operations.py
begin_delete
jayachithra/azure-sdk-for-python
2,728
python
async def begin_delete(self, resource_group_name: str, workspace_name: str, linked_service_name: str, **kwargs: Any) -> AsyncLROPoller['_models.LinkedService']: 'Deletes a linked service instance.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param workspace_name: The name of the workspace.\n :type workspace_name: str\n :param linked_service_name: Name of the linked service.\n :type linked_service_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either LinkedService or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.loganalytics.models.LinkedService]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._delete_initial(resource_group_name=resource_group_name, workspace_name=workspace_name, linked_service_name=linked_service_name, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('LinkedService', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return 
deserialized path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', max_length=90, min_length=1), 'workspaceName': self._serialize.url('workspace_name', workspace_name, 'str', max_length=63, min_length=4, pattern='^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'), 'linkedServiceName': self._serialize.url('linked_service_name', linked_service_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str', min_length=1)} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
async def begin_delete(self, resource_group_name: str, workspace_name: str, linked_service_name: str, **kwargs: Any) -> AsyncLROPoller['_models.LinkedService']: 'Deletes a linked service instance.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param workspace_name: The name of the workspace.\n :type workspace_name: str\n :param linked_service_name: Name of the linked service.\n :type linked_service_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either LinkedService or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.loganalytics.models.LinkedService]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._delete_initial(resource_group_name=resource_group_name, workspace_name=workspace_name, linked_service_name=linked_service_name, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('LinkedService', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return 
deserialized path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', max_length=90, min_length=1), 'workspaceName': self._serialize.url('workspace_name', workspace_name, 'str', max_length=63, min_length=4, pattern='^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'), 'linkedServiceName': self._serialize.url('linked_service_name', linked_service_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str', min_length=1)} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)<|docstring|>Deletes a linked service instance. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :param linked_service_name: Name of the linked service. :type linked_service_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either LinkedService or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.loganalytics.models.LinkedService] :raises ~azure.core.exceptions.HttpResponseError:<|endoftext|>
1b29a780c1c9899d4539883776f64c1f4be86ab7df2e4582eb1063b1796e987c
async def get(self, resource_group_name: str, workspace_name: str, linked_service_name: str, **kwargs: Any) -> '_models.LinkedService': 'Gets a linked service instance.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param workspace_name: The name of the workspace.\n :type workspace_name: str\n :param linked_service_name: Name of the linked service.\n :type linked_service_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: LinkedService, or the result of cls(response)\n :rtype: ~azure.mgmt.loganalytics.models.LinkedService\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2020-08-01' accept = 'application/json' url = self.get.metadata['url'] path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', max_length=90, min_length=1), 'workspaceName': self._serialize.url('workspace_name', workspace_name, 'str', max_length=63, min_length=4, pattern='^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'), 'linkedServiceName': self._serialize.url('linked_service_name', linked_service_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str', min_length=1)} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if 
(response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('LinkedService', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
Gets a linked service instance. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :param linked_service_name: Name of the linked service. :type linked_service_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: LinkedService, or the result of cls(response) :rtype: ~azure.mgmt.loganalytics.models.LinkedService :raises: ~azure.core.exceptions.HttpResponseError
sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/aio/operations/_linked_services_operations.py
get
jayachithra/azure-sdk-for-python
2,728
python
async def get(self, resource_group_name: str, workspace_name: str, linked_service_name: str, **kwargs: Any) -> '_models.LinkedService': 'Gets a linked service instance.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param workspace_name: The name of the workspace.\n :type workspace_name: str\n :param linked_service_name: Name of the linked service.\n :type linked_service_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: LinkedService, or the result of cls(response)\n :rtype: ~azure.mgmt.loganalytics.models.LinkedService\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2020-08-01' accept = 'application/json' url = self.get.metadata['url'] path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', max_length=90, min_length=1), 'workspaceName': self._serialize.url('workspace_name', workspace_name, 'str', max_length=63, min_length=4, pattern='^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'), 'linkedServiceName': self._serialize.url('linked_service_name', linked_service_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str', min_length=1)} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if 
(response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('LinkedService', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
async def get(self, resource_group_name: str, workspace_name: str, linked_service_name: str, **kwargs: Any) -> '_models.LinkedService': 'Gets a linked service instance.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param workspace_name: The name of the workspace.\n :type workspace_name: str\n :param linked_service_name: Name of the linked service.\n :type linked_service_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: LinkedService, or the result of cls(response)\n :rtype: ~azure.mgmt.loganalytics.models.LinkedService\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2020-08-01' accept = 'application/json' url = self.get.metadata['url'] path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', max_length=90, min_length=1), 'workspaceName': self._serialize.url('workspace_name', workspace_name, 'str', max_length=63, min_length=4, pattern='^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'), 'linkedServiceName': self._serialize.url('linked_service_name', linked_service_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str', min_length=1)} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if 
(response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('LinkedService', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized<|docstring|>Gets a linked service instance. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :param linked_service_name: Name of the linked service. :type linked_service_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: LinkedService, or the result of cls(response) :rtype: ~azure.mgmt.loganalytics.models.LinkedService :raises: ~azure.core.exceptions.HttpResponseError<|endoftext|>
e3cd3999393ed6c43d646477c4303626ad4f243aada32c19f2842c675a17352e
def list_by_workspace(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> AsyncIterable['_models.LinkedServiceListResult']: 'Gets the linked services instances in a workspace.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param workspace_name: The name of the workspace.\n :type workspace_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either LinkedServiceListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.loganalytics.models.LinkedServiceListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2020-08-01' accept = 'application/json' def prepare_request(next_link=None): header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') if (not next_link): url = self.list_by_workspace.metadata['url'] path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', max_length=90, min_length=1), 'workspaceName': self._serialize.url('workspace_name', workspace_name, 'str', max_length=63, min_length=4, pattern='^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str', min_length=1)} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} request = self._client.get(url, query_parameters, header_parameters) return request async def 
extract_data(pipeline_response): deserialized = self._deserialize('LinkedServiceListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return (None, AsyncList(list_of_elem)) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data)
Gets the linked services instances in a workspace. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either LinkedServiceListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.loganalytics.models.LinkedServiceListResult] :raises: ~azure.core.exceptions.HttpResponseError
sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/aio/operations/_linked_services_operations.py
list_by_workspace
jayachithra/azure-sdk-for-python
2,728
python
def list_by_workspace(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> AsyncIterable['_models.LinkedServiceListResult']: 'Gets the linked services instances in a workspace.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param workspace_name: The name of the workspace.\n :type workspace_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either LinkedServiceListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.loganalytics.models.LinkedServiceListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2020-08-01' accept = 'application/json' def prepare_request(next_link=None): header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') if (not next_link): url = self.list_by_workspace.metadata['url'] path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', max_length=90, min_length=1), 'workspaceName': self._serialize.url('workspace_name', workspace_name, 'str', max_length=63, min_length=4, pattern='^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str', min_length=1)} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} request = self._client.get(url, query_parameters, header_parameters) return request async def 
extract_data(pipeline_response): deserialized = self._deserialize('LinkedServiceListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return (None, AsyncList(list_of_elem)) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data)
def list_by_workspace(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> AsyncIterable['_models.LinkedServiceListResult']: 'Gets the linked services instances in a workspace.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param workspace_name: The name of the workspace.\n :type workspace_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either LinkedServiceListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.loganalytics.models.LinkedServiceListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2020-08-01' accept = 'application/json' def prepare_request(next_link=None): header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') if (not next_link): url = self.list_by_workspace.metadata['url'] path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', max_length=90, min_length=1), 'workspaceName': self._serialize.url('workspace_name', workspace_name, 'str', max_length=63, min_length=4, pattern='^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str', min_length=1)} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} request = self._client.get(url, query_parameters, header_parameters) return request async def 
extract_data(pipeline_response): deserialized = self._deserialize('LinkedServiceListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return (None, AsyncList(list_of_elem)) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data)<|docstring|>Gets the linked services instances in a workspace. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either LinkedServiceListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.loganalytics.models.LinkedServiceListResult] :raises: ~azure.core.exceptions.HttpResponseError<|endoftext|>
06c40c7291bcb0dc79bf398da36bf5650bd2acb084e410e55a451e90a0cd4870
def extract_regexes_from_urlpatterns(urlpatterns, base=''): ' Extract a list of all regexes from the given urlpatterns ' regexes = [] for p in urlpatterns: if (isinstance(p, URLPattern) or hasattr(p, '_get_callback')): if (django.VERSION < (2, 0)): regexes.append((base + p.regex.pattern)) else: regexes.append((base + p.pattern.regex.pattern)) elif (isinstance(p, URLResolver) or hasattr(p, 'url_patterns') or hasattr(p, '_get_url_patterns')): patterns = p.url_patterns if (django.VERSION < (2, 0)): regexes.extend(extract_regexes_from_urlpatterns(patterns, (base + p.regex.pattern))) else: regexes.extend(extract_regexes_from_urlpatterns(patterns, (base + p.pattern.regex.pattern))) else: raise TypeError(('%s does not appear to be a urlpattern object' % p)) return regexes
Extract a list of all regexes from the given urlpatterns
cfgov/cfgov/tests/test_urls.py
extract_regexes_from_urlpatterns
flacoman91/consumerfinance.gov
0
python
def extract_regexes_from_urlpatterns(urlpatterns, base=): ' ' regexes = [] for p in urlpatterns: if (isinstance(p, URLPattern) or hasattr(p, '_get_callback')): if (django.VERSION < (2, 0)): regexes.append((base + p.regex.pattern)) else: regexes.append((base + p.pattern.regex.pattern)) elif (isinstance(p, URLResolver) or hasattr(p, 'url_patterns') or hasattr(p, '_get_url_patterns')): patterns = p.url_patterns if (django.VERSION < (2, 0)): regexes.extend(extract_regexes_from_urlpatterns(patterns, (base + p.regex.pattern))) else: regexes.extend(extract_regexes_from_urlpatterns(patterns, (base + p.pattern.regex.pattern))) else: raise TypeError(('%s does not appear to be a urlpattern object' % p)) return regexes
def extract_regexes_from_urlpatterns(urlpatterns, base=): ' ' regexes = [] for p in urlpatterns: if (isinstance(p, URLPattern) or hasattr(p, '_get_callback')): if (django.VERSION < (2, 0)): regexes.append((base + p.regex.pattern)) else: regexes.append((base + p.pattern.regex.pattern)) elif (isinstance(p, URLResolver) or hasattr(p, 'url_patterns') or hasattr(p, '_get_url_patterns')): patterns = p.url_patterns if (django.VERSION < (2, 0)): regexes.extend(extract_regexes_from_urlpatterns(patterns, (base + p.regex.pattern))) else: regexes.extend(extract_regexes_from_urlpatterns(patterns, (base + p.pattern.regex.pattern))) else: raise TypeError(('%s does not appear to be a urlpattern object' % p)) return regexes<|docstring|>Extract a list of all regexes from the given urlpatterns<|endoftext|>
076cf6b49df79067a74c229059616b8aa6705d36e3b1a7ebc2f676d3ea5abe30
def test_admin_url_allowlist(self): ' Test to ensure admin urls match our allowlist ' non_matching_urls = [u for u in self.admin_urls if (not any((u.startswith(w) for w in ADMIN_URL_ALLOWLIST)))] self.assertEqual(len(non_matching_urls), 0, msg='Non-allowlisted admin URLs:\n\t{}\n'.format(',\n\t'.join(non_matching_urls)))
Test to ensure admin urls match our allowlist
cfgov/cfgov/tests/test_urls.py
test_admin_url_allowlist
flacoman91/consumerfinance.gov
0
python
def test_admin_url_allowlist(self): ' ' non_matching_urls = [u for u in self.admin_urls if (not any((u.startswith(w) for w in ADMIN_URL_ALLOWLIST)))] self.assertEqual(len(non_matching_urls), 0, msg='Non-allowlisted admin URLs:\n\t{}\n'.format(',\n\t'.join(non_matching_urls)))
def test_admin_url_allowlist(self): ' ' non_matching_urls = [u for u in self.admin_urls if (not any((u.startswith(w) for w in ADMIN_URL_ALLOWLIST)))] self.assertEqual(len(non_matching_urls), 0, msg='Non-allowlisted admin URLs:\n\t{}\n'.format(',\n\t'.join(non_matching_urls)))<|docstring|>Test to ensure admin urls match our allowlist<|endoftext|>
a5219e0cd96293744439298a6ad2bc0055361e7503d6525cc55e43dd6f8900cd
@override_settings(FLAGS={'MY_TEST_FLAG': [('boolean', True)]}) def test_flag_set_returns_view_that_calls_wagtail_serve_view(self): 'When flag is set, request should be routed to Wagtail.\n\n This test checks for text that is known to exist in the template for\n the Wagtail page that gets served from the site root.\n ' response = self.client.get('/') self.assertContains(response, 'Consumer Financial Protection Bureau')
When flag is set, request should be routed to Wagtail. This test checks for text that is known to exist in the template for the Wagtail page that gets served from the site root.
cfgov/cfgov/tests/test_urls.py
test_flag_set_returns_view_that_calls_wagtail_serve_view
flacoman91/consumerfinance.gov
0
python
@override_settings(FLAGS={'MY_TEST_FLAG': [('boolean', True)]}) def test_flag_set_returns_view_that_calls_wagtail_serve_view(self): 'When flag is set, request should be routed to Wagtail.\n\n This test checks for text that is known to exist in the template for\n the Wagtail page that gets served from the site root.\n ' response = self.client.get('/') self.assertContains(response, 'Consumer Financial Protection Bureau')
@override_settings(FLAGS={'MY_TEST_FLAG': [('boolean', True)]}) def test_flag_set_returns_view_that_calls_wagtail_serve_view(self): 'When flag is set, request should be routed to Wagtail.\n\n This test checks for text that is known to exist in the template for\n the Wagtail page that gets served from the site root.\n ' response = self.client.get('/') self.assertContains(response, 'Consumer Financial Protection Bureau')<|docstring|>When flag is set, request should be routed to Wagtail. This test checks for text that is known to exist in the template for the Wagtail page that gets served from the site root.<|endoftext|>
128dd7edb435482f2220246217e3be8237a552febdb0420631e3fdae02f77ab3
def fit(self, data_instances): '\n Apply binning method for both data instances in local party as well as the other one. Afterwards, calculate\n the specific metric value for specific columns.\n ' self._abnormal_detection(data_instances) self._setup_bin_inner_param(data_instances, self.model_param) split_points = self.binning_obj.fit_split_points(data_instances) if (not self.model_param.local_only): self._sync_init_bucket(data_instances, split_points) if (self.model_param.method == consts.OPTIMAL): self.optimal_binning_sync() if (self.transform_type != 'woe'): data_instances = self.transform(data_instances) self.set_schema(data_instances) self.data_output = data_instances return data_instances
Apply binning method for both data instances in local party as well as the other one. Afterwards, calculate the specific metric value for specific columns.
federatedml/feature/hetero_feature_binning/hetero_binning_host.py
fit
bentanust/FedRec
32
python
def fit(self, data_instances): '\n Apply binning method for both data instances in local party as well as the other one. Afterwards, calculate\n the specific metric value for specific columns.\n ' self._abnormal_detection(data_instances) self._setup_bin_inner_param(data_instances, self.model_param) split_points = self.binning_obj.fit_split_points(data_instances) if (not self.model_param.local_only): self._sync_init_bucket(data_instances, split_points) if (self.model_param.method == consts.OPTIMAL): self.optimal_binning_sync() if (self.transform_type != 'woe'): data_instances = self.transform(data_instances) self.set_schema(data_instances) self.data_output = data_instances return data_instances
def fit(self, data_instances): '\n Apply binning method for both data instances in local party as well as the other one. Afterwards, calculate\n the specific metric value for specific columns.\n ' self._abnormal_detection(data_instances) self._setup_bin_inner_param(data_instances, self.model_param) split_points = self.binning_obj.fit_split_points(data_instances) if (not self.model_param.local_only): self._sync_init_bucket(data_instances, split_points) if (self.model_param.method == consts.OPTIMAL): self.optimal_binning_sync() if (self.transform_type != 'woe'): data_instances = self.transform(data_instances) self.set_schema(data_instances) self.data_output = data_instances return data_instances<|docstring|>Apply binning method for both data instances in local party as well as the other one. Afterwards, calculate the specific metric value for specific columns.<|endoftext|>
e8cff4a1e4a7ff7ee6c35a84ddc825fdab7158be01aa73ace0b83c0c81b402b9
def findUnsortedSubarray(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' (_len, _nums) = (len(nums), sorted(nums)) if (nums == _nums): return 0 l = min([i for i in range(_len) if (nums[i] != _nums[i])]) r = max([i for i in range(_len) if (nums[i] != _nums[i])]) return ((r - l) + 1)
:type nums: List[int] :rtype: int
leetcode_python/Array/shortest-unsorted-continuous-subarray.py
findUnsortedSubarray
yennanliu/Python_basics
18
python
def findUnsortedSubarray(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' (_len, _nums) = (len(nums), sorted(nums)) if (nums == _nums): return 0 l = min([i for i in range(_len) if (nums[i] != _nums[i])]) r = max([i for i in range(_len) if (nums[i] != _nums[i])]) return ((r - l) + 1)
def findUnsortedSubarray(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' (_len, _nums) = (len(nums), sorted(nums)) if (nums == _nums): return 0 l = min([i for i in range(_len) if (nums[i] != _nums[i])]) r = max([i for i in range(_len) if (nums[i] != _nums[i])]) return ((r - l) + 1)<|docstring|>:type nums: List[int] :rtype: int<|endoftext|>
3183b848ee5c5e20b2e2066481e1bfc87a412b83f665b14464311bcc2bd73ee1
def findUnsortedSubarray(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' snums = sorted(nums) s = e = (- 1) for i in range(len(nums)): if (nums[i] != snums[i]): if (s == (- 1)): s = i e = i return (((e - s) + 1) if (e != s) else 0)
:type nums: List[int] :rtype: int
leetcode_python/Array/shortest-unsorted-continuous-subarray.py
findUnsortedSubarray
yennanliu/Python_basics
18
python
def findUnsortedSubarray(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' snums = sorted(nums) s = e = (- 1) for i in range(len(nums)): if (nums[i] != snums[i]): if (s == (- 1)): s = i e = i return (((e - s) + 1) if (e != s) else 0)
def findUnsortedSubarray(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' snums = sorted(nums) s = e = (- 1) for i in range(len(nums)): if (nums[i] != snums[i]): if (s == (- 1)): s = i e = i return (((e - s) + 1) if (e != s) else 0)<|docstring|>:type nums: List[int] :rtype: int<|endoftext|>
156bc01b670d41f0e96f3e13bdba999bd0248c3990bd3532da44139acbfd6b6f
def findUnsortedSubarray(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' n = len(nums) (left, right) = ((- 1), (- 2)) (min_from_right, max_from_left) = (nums[(- 1)], nums[0]) for i in range(1, n): max_from_left = max(max_from_left, nums[i]) min_from_right = min(min_from_right, nums[((n - 1) - i)]) if (nums[i] < max_from_left): right = i if (nums[((n - 1) - i)] > min_from_right): left = ((n - 1) - i)
:type nums: List[int] :rtype: int
leetcode_python/Array/shortest-unsorted-continuous-subarray.py
findUnsortedSubarray
yennanliu/Python_basics
18
python
def findUnsortedSubarray(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' n = len(nums) (left, right) = ((- 1), (- 2)) (min_from_right, max_from_left) = (nums[(- 1)], nums[0]) for i in range(1, n): max_from_left = max(max_from_left, nums[i]) min_from_right = min(min_from_right, nums[((n - 1) - i)]) if (nums[i] < max_from_left): right = i if (nums[((n - 1) - i)] > min_from_right): left = ((n - 1) - i)
def findUnsortedSubarray(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' n = len(nums) (left, right) = ((- 1), (- 2)) (min_from_right, max_from_left) = (nums[(- 1)], nums[0]) for i in range(1, n): max_from_left = max(max_from_left, nums[i]) min_from_right = min(min_from_right, nums[((n - 1) - i)]) if (nums[i] < max_from_left): right = i if (nums[((n - 1) - i)] > min_from_right): left = ((n - 1) - i)<|docstring|>:type nums: List[int] :rtype: int<|endoftext|>
f18655c0e5d5fc4d9d2a69e7024a739ccf7073f31a227adecc92e30faaf617b1
def findUnsortedSubarray(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' a = sorted(nums) (left, right) = (0, (len(nums) - 1)) while ((nums[left] == a[left]) or (nums[right] == a[right])): if ((right - left) <= 1): return 0 if (nums[left] == a[left]): left += 1 if (nums[right] == a[right]): right -= 1 return ((right - left) + 1)
:type nums: List[int] :rtype: int
leetcode_python/Array/shortest-unsorted-continuous-subarray.py
findUnsortedSubarray
yennanliu/Python_basics
18
python
def findUnsortedSubarray(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' a = sorted(nums) (left, right) = (0, (len(nums) - 1)) while ((nums[left] == a[left]) or (nums[right] == a[right])): if ((right - left) <= 1): return 0 if (nums[left] == a[left]): left += 1 if (nums[right] == a[right]): right -= 1 return ((right - left) + 1)
def findUnsortedSubarray(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' a = sorted(nums) (left, right) = (0, (len(nums) - 1)) while ((nums[left] == a[left]) or (nums[right] == a[right])): if ((right - left) <= 1): return 0 if (nums[left] == a[left]): left += 1 if (nums[right] == a[right]): right -= 1 return ((right - left) + 1)<|docstring|>:type nums: List[int] :rtype: int<|endoftext|>
3d771b06eb939f45bac8a36536c3288410cf2b49391fb4768ae8d2ef3868f1e8
def run(coroutine): '\n Runs and returns the data from the couroutine passed in. This is to\n only be used in unittesting.\n\n coroutine : asyncio coroutine\n\n -> coroutine return\n ' return asyncio.get_event_loop().run_until_complete(coroutine)
Runs and returns the data from the couroutine passed in. This is to only be used in unittesting. coroutine : asyncio coroutine -> coroutine return
tests/test_structures.py
run
ephreal/rollbot
2
python
def run(coroutine): '\n Runs and returns the data from the couroutine passed in. This is to\n only be used in unittesting.\n\n coroutine : asyncio coroutine\n\n -> coroutine return\n ' return asyncio.get_event_loop().run_until_complete(coroutine)
def run(coroutine): '\n Runs and returns the data from the couroutine passed in. This is to\n only be used in unittesting.\n\n coroutine : asyncio coroutine\n\n -> coroutine return\n ' return asyncio.get_event_loop().run_until_complete(coroutine)<|docstring|>Runs and returns the data from the couroutine passed in. This is to only be used in unittesting. coroutine : asyncio coroutine -> coroutine return<|endoftext|>
d09d4caf2decaef27363e12b4250640ac607309d1aa828c5f900a1af62e60779
def test_add(self): '\n Verifies that adding things to the queue works\n ' run(self.queue.add('test')) self.assertEqual('test', self.queue.items[0])
Verifies that adding things to the queue works
tests/test_structures.py
test_add
ephreal/rollbot
2
python
def test_add(self): '\n \n ' run(self.queue.add('test')) self.assertEqual('test', self.queue.items[0])
def test_add(self): '\n \n ' run(self.queue.add('test')) self.assertEqual('test', self.queue.items[0])<|docstring|>Verifies that adding things to the queue works<|endoftext|>
178e625403f14e519107bc5db39b3b3d7d62737bffcff686897f912c4c9f44df
def test_clear(self): '\n Verifies the queue can be cleared proprely\n ' run(self.queue.add(1)) self.assertEqual(len(self.queue.items), 1) run(self.queue.clear()) self.assertEqual(len(self.queue.items), 0)
Verifies the queue can be cleared proprely
tests/test_structures.py
test_clear
ephreal/rollbot
2
python
def test_clear(self): '\n \n ' run(self.queue.add(1)) self.assertEqual(len(self.queue.items), 1) run(self.queue.clear()) self.assertEqual(len(self.queue.items), 0)
def test_clear(self): '\n \n ' run(self.queue.add(1)) self.assertEqual(len(self.queue.items), 1) run(self.queue.clear()) self.assertEqual(len(self.queue.items), 0)<|docstring|>Verifies the queue can be cleared proprely<|endoftext|>
4efcdf26002eddb25b023d9c22fe3f91c026b72f3cf32eebfc90c3b622e39c50
def test_empty(self): "\n Verifies the queue can test whether it's empty or not correctly\n " self.assertTrue(run(self.queue.is_empty())) run(self.queue.add(5)) self.assertFalse(run(self.queue.is_empty()))
Verifies the queue can test whether it's empty or not correctly
tests/test_structures.py
test_empty
ephreal/rollbot
2
python
def test_empty(self): "\n \n " self.assertTrue(run(self.queue.is_empty())) run(self.queue.add(5)) self.assertFalse(run(self.queue.is_empty()))
def test_empty(self): "\n \n " self.assertTrue(run(self.queue.is_empty())) run(self.queue.add(5)) self.assertFalse(run(self.queue.is_empty()))<|docstring|>Verifies the queue can test whether it's empty or not correctly<|endoftext|>
5ddfb7efbc47fecd77bff7b72b9fc9a4d0918902b6d7eacd6ec8356ea0a7752e
def test_full(self): "\n Verifies that full checks it's array properly.\n " self.assertFalse(run(self.queue.full())) for i in range(0, 6): run(self.queue.add(i)) self.assertTrue(run(self.queue.full()))
Verifies that full checks it's array properly.
tests/test_structures.py
test_full
ephreal/rollbot
2
python
def test_full(self): "\n \n " self.assertFalse(run(self.queue.full())) for i in range(0, 6): run(self.queue.add(i)) self.assertTrue(run(self.queue.full()))
def test_full(self): "\n \n " self.assertFalse(run(self.queue.full())) for i in range(0, 6): run(self.queue.add(i)) self.assertTrue(run(self.queue.full()))<|docstring|>Verifies that full checks it's array properly.<|endoftext|>
df66ff0a85c9b1b851ff9875fbff34d93c8e7036a7c1607b95b1d7763c52d28b
def test_peek(self): '\n Verifies that peek shows the first element and does not remove any\n elements from the queue.\n ' run(self.queue.add(3)) self.assertTrue(3, run(self.queue.peek())) self.assertTrue(1, len(self.queue.items))
Verifies that peek shows the first element and does not remove any elements from the queue.
tests/test_structures.py
test_peek
ephreal/rollbot
2
python
def test_peek(self): '\n Verifies that peek shows the first element and does not remove any\n elements from the queue.\n ' run(self.queue.add(3)) self.assertTrue(3, run(self.queue.peek())) self.assertTrue(1, len(self.queue.items))
def test_peek(self): '\n Verifies that peek shows the first element and does not remove any\n elements from the queue.\n ' run(self.queue.add(3)) self.assertTrue(3, run(self.queue.peek())) self.assertTrue(1, len(self.queue.items))<|docstring|>Verifies that peek shows the first element and does not remove any elements from the queue.<|endoftext|>
b35c09ebb7c7a5724d39da52453e8965b090940d292e909a7b6acc32ec4dfed1
def test_remove(self): '\n Verifies that the queue is able to remove things from it.\n ' run(self.queue.add('test')) item = run(self.queue.remove()) self.assertEqual('test', item)
Verifies that the queue is able to remove things from it.
tests/test_structures.py
test_remove
ephreal/rollbot
2
python
def test_remove(self): '\n \n ' run(self.queue.add('test')) item = run(self.queue.remove()) self.assertEqual('test', item)
def test_remove(self): '\n \n ' run(self.queue.add('test')) item = run(self.queue.remove()) self.assertEqual('test', item)<|docstring|>Verifies that the queue is able to remove things from it.<|endoftext|>
dc536a36ea8cbda4b7809fc6e68fc162c8755fdaecddf647c1ee88712e48a294
def parse_content(self, content): '\n Parse the lines given into a list of dictionaries for each row. This\n is stored in the ``rows`` attribute.\n\n If the ``key_field`` property is set, use this to key a ``data``\n dictionary attribute.\n ' if (not (self.key_field and self.attr_name)): raise NotImplementedError("'key_field' or 'attr_name' is not defined") if any((l for l in content if l.startswith('Usage: '))): raise SkipException('No data only help output.') self.rows = parse_fixed_table(content, heading_ignore=self.heading_ignore, header_substitute=self.substitutions) if (not self.rows): raise SkipException('No data.') data = {} for row in self.rows: k = row.get(self.key_field) for sub in self.substitutions: row[sub[0]] = (row.pop(sub[1]) if (sub[1] in row) else None) if ((k is not None) and (k != '<none>')): data[k] = row setattr(self, self.attr_name, data)
Parse the lines given into a list of dictionaries for each row. This is stored in the ``rows`` attribute. If the ``key_field`` property is set, use this to key a ``data`` dictionary attribute.
insights/parsers/docker_list.py
parse_content
maxamillion/insights-core
121
python
def parse_content(self, content): '\n Parse the lines given into a list of dictionaries for each row. This\n is stored in the ``rows`` attribute.\n\n If the ``key_field`` property is set, use this to key a ``data``\n dictionary attribute.\n ' if (not (self.key_field and self.attr_name)): raise NotImplementedError("'key_field' or 'attr_name' is not defined") if any((l for l in content if l.startswith('Usage: '))): raise SkipException('No data only help output.') self.rows = parse_fixed_table(content, heading_ignore=self.heading_ignore, header_substitute=self.substitutions) if (not self.rows): raise SkipException('No data.') data = {} for row in self.rows: k = row.get(self.key_field) for sub in self.substitutions: row[sub[0]] = (row.pop(sub[1]) if (sub[1] in row) else None) if ((k is not None) and (k != '<none>')): data[k] = row setattr(self, self.attr_name, data)
def parse_content(self, content): '\n Parse the lines given into a list of dictionaries for each row. This\n is stored in the ``rows`` attribute.\n\n If the ``key_field`` property is set, use this to key a ``data``\n dictionary attribute.\n ' if (not (self.key_field and self.attr_name)): raise NotImplementedError("'key_field' or 'attr_name' is not defined") if any((l for l in content if l.startswith('Usage: '))): raise SkipException('No data only help output.') self.rows = parse_fixed_table(content, heading_ignore=self.heading_ignore, header_substitute=self.substitutions) if (not self.rows): raise SkipException('No data.') data = {} for row in self.rows: k = row.get(self.key_field) for sub in self.substitutions: row[sub[0]] = (row.pop(sub[1]) if (sub[1] in row) else None) if ((k is not None) and (k != '<none>')): data[k] = row setattr(self, self.attr_name, data)<|docstring|>Parse the lines given into a list of dictionaries for each row. This is stored in the ``rows`` attribute. If the ``key_field`` property is set, use this to key a ``data`` dictionary attribute.<|endoftext|>
24615001a8675d3a40da11361c5d346b5df83140a98e263e57fa1d6dd9157ede
def json_serial(obj): 'JSON serializer for objects not serializable by default json code' if isinstance(obj, datetime): serial = obj.isoformat() return serial raise TypeError('Type not serializable')
JSON serializer for objects not serializable by default json code
manual-scans/aws-alias-eb.py
json_serial
Mellis3489/domain-protect
0
python
def json_serial(obj): if isinstance(obj, datetime): serial = obj.isoformat() return serial raise TypeError('Type not serializable')
def json_serial(obj): if isinstance(obj, datetime): serial = obj.isoformat() return serial raise TypeError('Type not serializable')<|docstring|>JSON serializer for objects not serializable by default json code<|endoftext|>
8993bfb0acfe9c071668b9c2b8ef53bb4fa3e243478e9de61ad7af29f4c36767
def delete_matching_records_from_model(model_id, record_key, record_test): '\n This function provides an easy way to remove information from within a specific key of a model.\n\n - model_id: the id of the model that we should be removing information from\n - record_key: the key of the model that we should look in to remove data (ie "parameters", "outputs")\n - record_test: a function that will run on each of the records within the record_key to see whether\n they should be deleted. record_test() should return True if this record is to be deleted\n ' from src.models import get_model, modify_model record_count = 0 model = get_model(model_id) records = model.get(record_key, []) records_to_delete = [] for record in records: if record_test(record): records_to_delete.append(record) for record in records_to_delete: record_count += 1 records.remove(record) update = {record_key: records} modify_model(model_id, ModelSchema.ModelMetadataPatchSchema(**update)) return record_count
This function provides an easy way to remove information from within a specific key of a model. - model_id: the id of the model that we should be removing information from - record_key: the key of the model that we should look in to remove data (ie "parameters", "outputs") - record_test: a function that will run on each of the records within the record_key to see whether they should be deleted. record_test() should return True if this record is to be deleted
api/src/utils.py
delete_matching_records_from_model
jataware/dojo
2
python
def delete_matching_records_from_model(model_id, record_key, record_test): '\n This function provides an easy way to remove information from within a specific key of a model.\n\n - model_id: the id of the model that we should be removing information from\n - record_key: the key of the model that we should look in to remove data (ie "parameters", "outputs")\n - record_test: a function that will run on each of the records within the record_key to see whether\n they should be deleted. record_test() should return True if this record is to be deleted\n ' from src.models import get_model, modify_model record_count = 0 model = get_model(model_id) records = model.get(record_key, []) records_to_delete = [] for record in records: if record_test(record): records_to_delete.append(record) for record in records_to_delete: record_count += 1 records.remove(record) update = {record_key: records} modify_model(model_id, ModelSchema.ModelMetadataPatchSchema(**update)) return record_count
def delete_matching_records_from_model(model_id, record_key, record_test): '\n This function provides an easy way to remove information from within a specific key of a model.\n\n - model_id: the id of the model that we should be removing information from\n - record_key: the key of the model that we should look in to remove data (ie "parameters", "outputs")\n - record_test: a function that will run on each of the records within the record_key to see whether\n they should be deleted. record_test() should return True if this record is to be deleted\n ' from src.models import get_model, modify_model record_count = 0 model = get_model(model_id) records = model.get(record_key, []) records_to_delete = [] for record in records: if record_test(record): records_to_delete.append(record) for record in records_to_delete: record_count += 1 records.remove(record) update = {record_key: records} modify_model(model_id, ModelSchema.ModelMetadataPatchSchema(**update)) return record_count<|docstring|>This function provides an easy way to remove information from within a specific key of a model. - model_id: the id of the model that we should be removing information from - record_key: the key of the model that we should look in to remove data (ie "parameters", "outputs") - record_test: a function that will run on each of the records within the record_key to see whether they should be deleted. record_test() should return True if this record is to be deleted<|endoftext|>
3c69cdef919965066edec1a1fd4867e1ea0aa80bf555c909506ca28913d3427c
def test_widths_simple(self): 'Get mean and std from simple.' (m, s) = plan.compute_channel_width(self.simple_cm, section=self.trace) assert (m == ((((1 + 2) + 4) + 1) / 4)) assert (s == pytest.approx(1.22474487))
Get mean and std from simple.
tests/test_plan.py
test_widths_simple
DeltaRCM/DeltaMetrics
0
python
def test_widths_simple(self): (m, s) = plan.compute_channel_width(self.simple_cm, section=self.trace) assert (m == ((((1 + 2) + 4) + 1) / 4)) assert (s == pytest.approx(1.22474487))
def test_widths_simple(self): (m, s) = plan.compute_channel_width(self.simple_cm, section=self.trace) assert (m == ((((1 + 2) + 4) + 1) / 4)) assert (s == pytest.approx(1.22474487))<|docstring|>Get mean and std from simple.<|endoftext|>
043956522cb0910fc91deeb2de5e9bf7b2976aa67bb1de42bbcfce6f671a7923
def test_widths_simple_list_equal(self): 'Get mean, std, list from simple, check that same.' (m1, s1) = plan.compute_channel_width(self.simple_cm, section=self.trace) (m2, s2, w) = plan.compute_channel_width(self.simple_cm, section=self.trace, return_widths=True) assert (m1 == ((((1 + 2) + 4) + 1) / 4)) assert (m1 == m2) assert (s1 == s2) assert (len(w) == 4)
Get mean, std, list from simple, check that same.
tests/test_plan.py
test_widths_simple_list_equal
DeltaRCM/DeltaMetrics
0
python
def test_widths_simple_list_equal(self): (m1, s1) = plan.compute_channel_width(self.simple_cm, section=self.trace) (m2, s2, w) = plan.compute_channel_width(self.simple_cm, section=self.trace, return_widths=True) assert (m1 == ((((1 + 2) + 4) + 1) / 4)) assert (m1 == m2) assert (s1 == s2) assert (len(w) == 4)
def test_widths_simple_list_equal(self): (m1, s1) = plan.compute_channel_width(self.simple_cm, section=self.trace) (m2, s2, w) = plan.compute_channel_width(self.simple_cm, section=self.trace, return_widths=True) assert (m1 == ((((1 + 2) + 4) + 1) / 4)) assert (m1 == m2) assert (s1 == s2) assert (len(w) == 4)<|docstring|>Get mean, std, list from simple, check that same.<|endoftext|>
64181f5590834ed514adc75b81bb3073197814da9f4d78fbc3ca02ed0a0b8a35
def test_widths_example(self): 'Get mean and std from example.\n\n This test does not actually test the computation, just that something\n valid is returned, i.e., the function takes the input.\n ' (m, s) = plan.compute_channel_width(self.cm, section=self.sec) assert (m > 0) assert (s > 0)
Get mean and std from example. This test does not actually test the computation, just that something valid is returned, i.e., the function takes the input.
tests/test_plan.py
test_widths_example
DeltaRCM/DeltaMetrics
0
python
def test_widths_example(self): 'Get mean and std from example.\n\n This test does not actually test the computation, just that something\n valid is returned, i.e., the function takes the input.\n ' (m, s) = plan.compute_channel_width(self.cm, section=self.sec) assert (m > 0) assert (s > 0)
def test_widths_example(self): 'Get mean and std from example.\n\n This test does not actually test the computation, just that something\n valid is returned, i.e., the function takes the input.\n ' (m, s) = plan.compute_channel_width(self.cm, section=self.sec) assert (m > 0) assert (s > 0)<|docstring|>Get mean and std from example. This test does not actually test the computation, just that something valid is returned, i.e., the function takes the input.<|endoftext|>
de834df6a83725edfad9a1748d8dceacf843d00eec4ae2f16063ea04164a77bd
def test_depths_simple_thalweg(self): 'Get mean and std from simple.' (m, s) = plan.compute_channel_depth(self.simple_cm, self.simple_depth, section=self.trace) assert (m == ((((0.5 + 0.4) + 1) + 9) / 4)) assert (s == pytest.approx(3.6299965564))
Get mean and std from simple.
tests/test_plan.py
test_depths_simple_thalweg
DeltaRCM/DeltaMetrics
0
python
def test_depths_simple_thalweg(self): (m, s) = plan.compute_channel_depth(self.simple_cm, self.simple_depth, section=self.trace) assert (m == ((((0.5 + 0.4) + 1) + 9) / 4)) assert (s == pytest.approx(3.6299965564))
def test_depths_simple_thalweg(self): (m, s) = plan.compute_channel_depth(self.simple_cm, self.simple_depth, section=self.trace) assert (m == ((((0.5 + 0.4) + 1) + 9) / 4)) assert (s == pytest.approx(3.6299965564))<|docstring|>Get mean and std from simple.<|endoftext|>
38386bc2b4309ff921ae9acab61e6f9a8f01c9a4830067d3a9c69d784107a39a
def test_depths_simple_mean(self): 'Get mean and std from simple.' (m, s) = plan.compute_channel_depth(self.simple_cm, self.simple_depth, section=self.trace, depth_type='mean') assert (m == ((((0.5 + 0.3) + 1) + 9) / 4)) assert (s == pytest.approx(3.6462309307009066))
Get mean and std from simple.
tests/test_plan.py
test_depths_simple_mean
DeltaRCM/DeltaMetrics
0
python
def test_depths_simple_mean(self): (m, s) = plan.compute_channel_depth(self.simple_cm, self.simple_depth, section=self.trace, depth_type='mean') assert (m == ((((0.5 + 0.3) + 1) + 9) / 4)) assert (s == pytest.approx(3.6462309307009066))
def test_depths_simple_mean(self): (m, s) = plan.compute_channel_depth(self.simple_cm, self.simple_depth, section=self.trace, depth_type='mean') assert (m == ((((0.5 + 0.3) + 1) + 9) / 4)) assert (s == pytest.approx(3.6462309307009066))<|docstring|>Get mean and std from simple.<|endoftext|>
59d8ca0aadc834a92ca707b5534c230466ca66d0ecf0b5d518d886155bd4aa5c
def test_depths_simple_list_equal(self): 'Get mean, std, list from simple, check that same.' (m1, s1) = plan.compute_channel_depth(self.simple_cm, self.simple_depth, section=self.trace) (m2, s2, w) = plan.compute_channel_depth(self.simple_cm, self.simple_depth, section=self.trace, return_depths=True) assert (m1 == ((((0.5 + 0.4) + 1) + 9) / 4)) assert (m1 == m2) assert (s1 == s2) assert (len(w) == 4)
Get mean, std, list from simple, check that same.
tests/test_plan.py
test_depths_simple_list_equal
DeltaRCM/DeltaMetrics
0
python
def test_depths_simple_list_equal(self): (m1, s1) = plan.compute_channel_depth(self.simple_cm, self.simple_depth, section=self.trace) (m2, s2, w) = plan.compute_channel_depth(self.simple_cm, self.simple_depth, section=self.trace, return_depths=True) assert (m1 == ((((0.5 + 0.4) + 1) + 9) / 4)) assert (m1 == m2) assert (s1 == s2) assert (len(w) == 4)
def test_depths_simple_list_equal(self): (m1, s1) = plan.compute_channel_depth(self.simple_cm, self.simple_depth, section=self.trace) (m2, s2, w) = plan.compute_channel_depth(self.simple_cm, self.simple_depth, section=self.trace, return_depths=True) assert (m1 == ((((0.5 + 0.4) + 1) + 9) / 4)) assert (m1 == m2) assert (s1 == s2) assert (len(w) == 4)<|docstring|>Get mean, std, list from simple, check that same.<|endoftext|>
685f76464b80ef34e295bca8fe7400dcdba56d1192799c0083461b25ee01bd51
def test_depths_example_thalweg(self): 'Get mean and std from example.\n\n This test does not actually test the computation, just that something\n valid is returned, i.e., the function takes the input.\n ' (m, s) = plan.compute_channel_depth(self.cm, self.golf['depth'][((- 1), :, :)], section=self.sec) assert (m > 0) assert (s > 0)
Get mean and std from example. This test does not actually test the computation, just that something valid is returned, i.e., the function takes the input.
tests/test_plan.py
test_depths_example_thalweg
DeltaRCM/DeltaMetrics
0
python
def test_depths_example_thalweg(self): 'Get mean and std from example.\n\n This test does not actually test the computation, just that something\n valid is returned, i.e., the function takes the input.\n ' (m, s) = plan.compute_channel_depth(self.cm, self.golf['depth'][((- 1), :, :)], section=self.sec) assert (m > 0) assert (s > 0)
def test_depths_example_thalweg(self): 'Get mean and std from example.\n\n This test does not actually test the computation, just that something\n valid is returned, i.e., the function takes the input.\n ' (m, s) = plan.compute_channel_depth(self.cm, self.golf['depth'][((- 1), :, :)], section=self.sec) assert (m > 0) assert (s > 0)<|docstring|>Get mean and std from example. This test does not actually test the computation, just that something valid is returned, i.e., the function takes the input.<|endoftext|>
00c284b54b8c6f6e24529f5236ee76e22a7cdd45c590f18042b8e74ba3ad1ff9
def __init__(self, V=None, M=None): '\n Constrói um objeto do tipo Grafo. Se nenhum parâmetro for passado, cria um Grafo vazio.\n Se houver alguma aresta ou algum vértice inválido, uma exceção é lançada.\n :param V: Uma lista dos vértices (ou nodos) do grafo.\n :param V: Uma matriz de adjacência que guarda as arestas do grafo. Cada entrada da matriz tem um inteiro que indica a quantidade de arestas que ligam aqueles vértices\n ' if (V == None): V = list() if (M == None): M = list() for v in V: if (not Grafo.verticeValido(v)): raise VerticeInvalidoException((('O vértice ' + v) + ' é inválido')) if (len(v) > self.__maior_vertice): self.__maior_vertice = len(v) self.N = list(V) self.pesos = {} if (M == []): for k in range(len(V)): M.append(list()) for l in range(len(V)): if (k > l): M[k].append('-') else: M[k].append(0) if (len(M) != len(V)): raise MatrizInvalidaException('A matriz passada como parâmetro não tem o tamanho correto') for c in M: if (len(c) != len(V)): raise MatrizInvalidaException('A matriz passada como parâmetro não tem o tamanho correto') for i in range(len(V)): for j in range(len(V)): '\n Verifica se os índices passados como parâmetro representam um elemento da matriz abaixo da diagonal principal.\n Além disso, verifica se o referido elemento é um traço "-". Isso indica que a matriz é não direcionada e foi construída corretamente.\n ' if ((i > j) and (not (M[i][j] == '-'))): raise MatrizInvalidaException('A matriz não representa uma matriz não direcionada') aresta = ((V[i] + Grafo.SEPARADOR_ARESTA) + V[j]) if (not self.arestaValida(aresta)): raise ArestaInvalidaException((('A aresta ' + aresta) + ' é inválida')) self.M = list(M)
Constrói um objeto do tipo Grafo. Se nenhum parâmetro for passado, cria um Grafo vazio. Se houver alguma aresta ou algum vértice inválido, uma exceção é lançada. :param V: Uma lista dos vértices (ou nodos) do grafo. :param V: Uma matriz de adjacência que guarda as arestas do grafo. Cada entrada da matriz tem um inteiro que indica a quantidade de arestas que ligam aqueles vértices
Graphs/adjacent undirected graph/grafo_adj_nao_dir.py
__init__
lucasEngdComp/graphs
0
python
def __init__(self, V=None, M=None): '\n Constrói um objeto do tipo Grafo. Se nenhum parâmetro for passado, cria um Grafo vazio.\n Se houver alguma aresta ou algum vértice inválido, uma exceção é lançada.\n :param V: Uma lista dos vértices (ou nodos) do grafo.\n :param V: Uma matriz de adjacência que guarda as arestas do grafo. Cada entrada da matriz tem um inteiro que indica a quantidade de arestas que ligam aqueles vértices\n ' if (V == None): V = list() if (M == None): M = list() for v in V: if (not Grafo.verticeValido(v)): raise VerticeInvalidoException((('O vértice ' + v) + ' é inválido')) if (len(v) > self.__maior_vertice): self.__maior_vertice = len(v) self.N = list(V) self.pesos = {} if (M == []): for k in range(len(V)): M.append(list()) for l in range(len(V)): if (k > l): M[k].append('-') else: M[k].append(0) if (len(M) != len(V)): raise MatrizInvalidaException('A matriz passada como parâmetro não tem o tamanho correto') for c in M: if (len(c) != len(V)): raise MatrizInvalidaException('A matriz passada como parâmetro não tem o tamanho correto') for i in range(len(V)): for j in range(len(V)): '\n Verifica se os índices passados como parâmetro representam um elemento da matriz abaixo da diagonal principal.\n Além disso, verifica se o referido elemento é um traço "-". Isso indica que a matriz é não direcionada e foi construída corretamente.\n ' if ((i > j) and (not (M[i][j] == '-'))): raise MatrizInvalidaException('A matriz não representa uma matriz não direcionada') aresta = ((V[i] + Grafo.SEPARADOR_ARESTA) + V[j]) if (not self.arestaValida(aresta)): raise ArestaInvalidaException((('A aresta ' + aresta) + ' é inválida')) self.M = list(M)
def __init__(self, V=None, M=None): '\n Constrói um objeto do tipo Grafo. Se nenhum parâmetro for passado, cria um Grafo vazio.\n Se houver alguma aresta ou algum vértice inválido, uma exceção é lançada.\n :param V: Uma lista dos vértices (ou nodos) do grafo.\n :param V: Uma matriz de adjacência que guarda as arestas do grafo. Cada entrada da matriz tem um inteiro que indica a quantidade de arestas que ligam aqueles vértices\n ' if (V == None): V = list() if (M == None): M = list() for v in V: if (not Grafo.verticeValido(v)): raise VerticeInvalidoException((('O vértice ' + v) + ' é inválido')) if (len(v) > self.__maior_vertice): self.__maior_vertice = len(v) self.N = list(V) self.pesos = {} if (M == []): for k in range(len(V)): M.append(list()) for l in range(len(V)): if (k > l): M[k].append('-') else: M[k].append(0) if (len(M) != len(V)): raise MatrizInvalidaException('A matriz passada como parâmetro não tem o tamanho correto') for c in M: if (len(c) != len(V)): raise MatrizInvalidaException('A matriz passada como parâmetro não tem o tamanho correto') for i in range(len(V)): for j in range(len(V)): '\n Verifica se os índices passados como parâmetro representam um elemento da matriz abaixo da diagonal principal.\n Além disso, verifica se o referido elemento é um traço "-". Isso indica que a matriz é não direcionada e foi construída corretamente.\n ' if ((i > j) and (not (M[i][j] == '-'))): raise MatrizInvalidaException('A matriz não representa uma matriz não direcionada') aresta = ((V[i] + Grafo.SEPARADOR_ARESTA) + V[j]) if (not self.arestaValida(aresta)): raise ArestaInvalidaException((('A aresta ' + aresta) + ' é inválida')) self.M = list(M)<|docstring|>Constrói um objeto do tipo Grafo. Se nenhum parâmetro for passado, cria um Grafo vazio. Se houver alguma aresta ou algum vértice inválido, uma exceção é lançada. :param V: Uma lista dos vértices (ou nodos) do grafo. :param V: Uma matriz de adjacência que guarda as arestas do grafo. 
Cada entrada da matriz tem um inteiro que indica a quantidade de arestas que ligam aqueles vértices<|endoftext|>
f08cd16337232b0c8347b03fc76af935504bd76b66b9cda54d75424bd1763242
def arestaValida(self, aresta=''): '\n Verifica se uma aresta passada como parâmetro está dentro do padrão estabelecido.\n Uma aresta é representada por um string com o formato a-b, onde:\n a é um substring de aresta que é o nome de um vértice adjacente à aresta.\n - é um caractere separador. Uma aresta só pode ter um único caractere como esse.\n b é um substring de aresta que é o nome do outro vértice adjacente à aresta.\n Além disso, uma aresta só é válida se conectar dois vértices existentes no grafo.\n :param aresta: A aresta que se quer verificar se está no formato correto.\n :return: Um valor booleano que indica se a aresta está no formato correto.\n ' if (aresta.count(Grafo.SEPARADOR_ARESTA) != Grafo.QTDE_MAX_SEPARADOR): return False i_traco = aresta.index(Grafo.SEPARADOR_ARESTA) if ((i_traco == 0) or (aresta[(- 1)] == Grafo.SEPARADOR_ARESTA)): return False if ((not self.existeVertice(aresta[:i_traco])) or (not self.existeVertice(aresta[(i_traco + 1):]))): return False return True
Verifica se uma aresta passada como parâmetro está dentro do padrão estabelecido. Uma aresta é representada por um string com o formato a-b, onde: a é um substring de aresta que é o nome de um vértice adjacente à aresta. - é um caractere separador. Uma aresta só pode ter um único caractere como esse. b é um substring de aresta que é o nome do outro vértice adjacente à aresta. Além disso, uma aresta só é válida se conectar dois vértices existentes no grafo. :param aresta: A aresta que se quer verificar se está no formato correto. :return: Um valor booleano que indica se a aresta está no formato correto.
Graphs/adjacent undirected graph/grafo_adj_nao_dir.py
arestaValida
lucasEngdComp/graphs
0
python
def arestaValida(self, aresta=): '\n Verifica se uma aresta passada como parâmetro está dentro do padrão estabelecido.\n Uma aresta é representada por um string com o formato a-b, onde:\n a é um substring de aresta que é o nome de um vértice adjacente à aresta.\n - é um caractere separador. Uma aresta só pode ter um único caractere como esse.\n b é um substring de aresta que é o nome do outro vértice adjacente à aresta.\n Além disso, uma aresta só é válida se conectar dois vértices existentes no grafo.\n :param aresta: A aresta que se quer verificar se está no formato correto.\n :return: Um valor booleano que indica se a aresta está no formato correto.\n ' if (aresta.count(Grafo.SEPARADOR_ARESTA) != Grafo.QTDE_MAX_SEPARADOR): return False i_traco = aresta.index(Grafo.SEPARADOR_ARESTA) if ((i_traco == 0) or (aresta[(- 1)] == Grafo.SEPARADOR_ARESTA)): return False if ((not self.existeVertice(aresta[:i_traco])) or (not self.existeVertice(aresta[(i_traco + 1):]))): return False return True
def arestaValida(self, aresta=): '\n Verifica se uma aresta passada como parâmetro está dentro do padrão estabelecido.\n Uma aresta é representada por um string com o formato a-b, onde:\n a é um substring de aresta que é o nome de um vértice adjacente à aresta.\n - é um caractere separador. Uma aresta só pode ter um único caractere como esse.\n b é um substring de aresta que é o nome do outro vértice adjacente à aresta.\n Além disso, uma aresta só é válida se conectar dois vértices existentes no grafo.\n :param aresta: A aresta que se quer verificar se está no formato correto.\n :return: Um valor booleano que indica se a aresta está no formato correto.\n ' if (aresta.count(Grafo.SEPARADOR_ARESTA) != Grafo.QTDE_MAX_SEPARADOR): return False i_traco = aresta.index(Grafo.SEPARADOR_ARESTA) if ((i_traco == 0) or (aresta[(- 1)] == Grafo.SEPARADOR_ARESTA)): return False if ((not self.existeVertice(aresta[:i_traco])) or (not self.existeVertice(aresta[(i_traco + 1):]))): return False return True<|docstring|>Verifica se uma aresta passada como parâmetro está dentro do padrão estabelecido. Uma aresta é representada por um string com o formato a-b, onde: a é um substring de aresta que é o nome de um vértice adjacente à aresta. - é um caractere separador. Uma aresta só pode ter um único caractere como esse. b é um substring de aresta que é o nome do outro vértice adjacente à aresta. Além disso, uma aresta só é válida se conectar dois vértices existentes no grafo. :param aresta: A aresta que se quer verificar se está no formato correto. :return: Um valor booleano que indica se a aresta está no formato correto.<|endoftext|>
45d24f7097cbe9c84cca5f9331dd6c2e15816dc20a7b758184ec3390532eea9e
@classmethod
def verticeValido(self, vertice: str):
    """Check whether a vertex name is well-formed.

    A valid vertex name is any non-empty string that does not contain the
    edge separator character.

    :param vertice: vertex name to validate.
    :return: True if the name is non-empty and separator-free.
    """
    if vertice == '':
        return False
    return Grafo.SEPARADOR_ARESTA not in vertice
Verifica se um vértice passado como parâmetro está dentro do padrão estabelecido. Um vértice é um string qualquer que não pode ser vazio e nem conter o caractere separador. :param vertice: Um string que representa o vértice a ser analisado. :return: Um valor booleano que indica se o vértice está no formato correto.
Graphs/adjacent undirected graph/grafo_adj_nao_dir.py
verticeValido
lucasEngdComp/graphs
0
python
@classmethod def verticeValido(self, vertice: str): '\n Verifica se um vértice passado como parâmetro está dentro do padrão estabelecido.\n Um vértice é um string qualquer que não pode ser vazio e nem conter o caractere separador.\n :param vertice: Um string que representa o vértice a ser analisado.\n :return: Um valor booleano que indica se o vértice está no formato correto.\n ' return ((vertice != ) and (vertice.count(Grafo.SEPARADOR_ARESTA) == 0))
@classmethod def verticeValido(self, vertice: str): '\n Verifica se um vértice passado como parâmetro está dentro do padrão estabelecido.\n Um vértice é um string qualquer que não pode ser vazio e nem conter o caractere separador.\n :param vertice: Um string que representa o vértice a ser analisado.\n :return: Um valor booleano que indica se o vértice está no formato correto.\n ' return ((vertice != ) and (vertice.count(Grafo.SEPARADOR_ARESTA) == 0))<|docstring|>Verifica se um vértice passado como parâmetro está dentro do padrão estabelecido. Um vértice é um string qualquer que não pode ser vazio e nem conter o caractere separador. :param vertice: Um string que representa o vértice a ser analisado. :return: Um valor booleano que indica se o vértice está no formato correto.<|endoftext|>
018768d447df8b487174baee556b61686b98a42e46d466329b018c4058dc99f3
def existeVertice(self, vertice: str):
    """Check whether a vertex belongs to the graph.

    :param vertice: vertex name to look up.
    :return: True if the name is valid and present in the vertex list.
    """
    # `in` short-circuits on the first match, unlike list.count, which
    # always scans the whole vertex list before comparing against 0.
    return Grafo.verticeValido(vertice) and vertice in self.N
Verifica se um vértice passado como parâmetro pertence ao grafo. :param vertice: O vértice que deve ser verificado. :return: Um valor booleano que indica se o vértice existe no grafo.
Graphs/adjacent undirected graph/grafo_adj_nao_dir.py
existeVertice
lucasEngdComp/graphs
0
python
def existeVertice(self, vertice: str): '\n Verifica se um vértice passado como parâmetro pertence ao grafo.\n :param vertice: O vértice que deve ser verificado.\n :return: Um valor booleano que indica se o vértice existe no grafo.\n ' return (Grafo.verticeValido(vertice) and (self.N.count(vertice) > 0))
def existeVertice(self, vertice: str): '\n Verifica se um vértice passado como parâmetro pertence ao grafo.\n :param vertice: O vértice que deve ser verificado.\n :return: Um valor booleano que indica se o vértice existe no grafo.\n ' return (Grafo.verticeValido(vertice) and (self.N.count(vertice) > 0))<|docstring|>Verifica se um vértice passado como parâmetro pertence ao grafo. :param vertice: O vértice que deve ser verificado. :return: Um valor booleano que indica se o vértice existe no grafo.<|endoftext|>
82b4019056ff1e0d12682209b1e38e95a56c8fde420a01a444aa75ec59fdd09a
def __primeiro_vertice_aresta(self, a: str):
    """Return the first endpoint (the ``X`` in ``X-Y``) of an edge string.

    :param a: edge string in the ``X-Y`` format.
    :return: the substring before the first separator.
    """
    pos_separador = a.index(Grafo.SEPARADOR_ARESTA)
    return a[:pos_separador]
Dada uma aresta no formato X-Y, retorna o vértice X :param a: a aresta a ser analisada :return: O primeiro vértice da aresta
Graphs/adjacent undirected graph/grafo_adj_nao_dir.py
__primeiro_vertice_aresta
lucasEngdComp/graphs
0
python
def __primeiro_vertice_aresta(self, a: str): '\n Dada uma aresta no formato X-Y, retorna o vértice X\n :param a: a aresta a ser analisada\n :return: O primeiro vértice da aresta\n ' return a[0:a.index(Grafo.SEPARADOR_ARESTA)]
def __primeiro_vertice_aresta(self, a: str): '\n Dada uma aresta no formato X-Y, retorna o vértice X\n :param a: a aresta a ser analisada\n :return: O primeiro vértice da aresta\n ' return a[0:a.index(Grafo.SEPARADOR_ARESTA)]<|docstring|>Dada uma aresta no formato X-Y, retorna o vértice X :param a: a aresta a ser analisada :return: O primeiro vértice da aresta<|endoftext|>
fc92b57bdb4a34b196fd86ab982c183ca6719d7c8b07f89afdc29285910b9a21
def __segundo_vertice_aresta(self, a: str):
    """Return the second endpoint (the ``Y`` in ``X-Y``) of an edge string.

    :param a: edge string in the ``X-Y`` format.
    :return: the substring after the first separator.
    """
    pos_separador = a.index(Grafo.SEPARADOR_ARESTA)
    return a[pos_separador + 1:]
Dada uma aresta no formato X-Y, retorna o vértice Y :param a: A aresta a ser analisada :return: O segundo vértice da aresta
Graphs/adjacent undirected graph/grafo_adj_nao_dir.py
__segundo_vertice_aresta
lucasEngdComp/graphs
0
python
def __segundo_vertice_aresta(self, a: str): '\n Dada uma aresta no formato X-Y, retorna o vértice Y\n :param a: A aresta a ser analisada\n :return: O segundo vértice da aresta\n ' return a[(a.index(Grafo.SEPARADOR_ARESTA) + 1):]
def __segundo_vertice_aresta(self, a: str): '\n Dada uma aresta no formato X-Y, retorna o vértice Y\n :param a: A aresta a ser analisada\n :return: O segundo vértice da aresta\n ' return a[(a.index(Grafo.SEPARADOR_ARESTA) + 1):]<|docstring|>Dada uma aresta no formato X-Y, retorna o vértice Y :param a: A aresta a ser analisada :return: O segundo vértice da aresta<|endoftext|>
533e4515082dab47c439b1baf59bd54eba7292e18aa123aac24b9ab94cbb5715
def __indice_primeiro_vertice_aresta(self, a: str):
    """Return the position, in the vertex list, of the first endpoint of ``a``.

    :param a: edge string in the ``X-Y`` format.
    :return: index of vertex ``X`` in ``self.N``.
    """
    vertice = self.__primeiro_vertice_aresta(a)
    return self.N.index(vertice)
Dada uma aresta no formato X-Y, retorna o índice do vértice X na lista de vértices :param a: A aresta a ser analisada :return: O índice do primeiro vértice da aresta na lista de vértices
Graphs/adjacent undirected graph/grafo_adj_nao_dir.py
__indice_primeiro_vertice_aresta
lucasEngdComp/graphs
0
python
def __indice_primeiro_vertice_aresta(self, a: str): '\n Dada uma aresta no formato X-Y, retorna o índice do vértice X na lista de vértices\n :param a: A aresta a ser analisada\n :return: O índice do primeiro vértice da aresta na lista de vértices\n ' return self.N.index(self.__primeiro_vertice_aresta(a))
def __indice_primeiro_vertice_aresta(self, a: str): '\n Dada uma aresta no formato X-Y, retorna o índice do vértice X na lista de vértices\n :param a: A aresta a ser analisada\n :return: O índice do primeiro vértice da aresta na lista de vértices\n ' return self.N.index(self.__primeiro_vertice_aresta(a))<|docstring|>Dada uma aresta no formato X-Y, retorna o índice do vértice X na lista de vértices :param a: A aresta a ser analisada :return: O índice do primeiro vértice da aresta na lista de vértices<|endoftext|>
ef1472dc3a638a6571a3687cd16ff13700c74d995117e588b4297a340e0a5745
def __indice_segundo_vertice_aresta(self, a: str):
    """Return the position, in the vertex list, of the second endpoint of ``a``.

    :param a: edge string in the ``X-Y`` format.
    :return: index of vertex ``Y`` in ``self.N``.
    """
    vertice = self.__segundo_vertice_aresta(a)
    return self.N.index(vertice)
Dada uma aresta no formato X-Y, retorna o índice do vértice Y na lista de vértices :param a: A aresta a ser analisada :return: O índice do segundo vértice da aresta na lista de vértices
Graphs/adjacent undirected graph/grafo_adj_nao_dir.py
__indice_segundo_vertice_aresta
lucasEngdComp/graphs
0
python
def __indice_segundo_vertice_aresta(self, a: str): '\n Dada uma aresta no formato X-Y, retorna o índice do vértice Y na lista de vértices\n :param a: A aresta a ser analisada\n :return: O índice do segundo vértice da aresta na lista de vértices\n ' return self.N.index(self.__segundo_vertice_aresta(a))
def __indice_segundo_vertice_aresta(self, a: str): '\n Dada uma aresta no formato X-Y, retorna o índice do vértice Y na lista de vértices\n :param a: A aresta a ser analisada\n :return: O índice do segundo vértice da aresta na lista de vértices\n ' return self.N.index(self.__segundo_vertice_aresta(a))<|docstring|>Dada uma aresta no formato X-Y, retorna o índice do vértice Y na lista de vértices :param a: A aresta a ser analisada :return: O índice do segundo vértice da aresta na lista de vértices<|endoftext|>
b647d17e9c2e95735fdc6e54dad8d5d819aa4add906766869a9d84d8394bf45a
def existeAresta(self, a: str):
    """Check whether an edge belongs to the graph.

    Edge counters live only in the upper triangle of the adjacency matrix
    (row index < column index); cells below the diagonal hold the filler
    string ``'-'`` (see how rows are built when a vertex is added).  The
    lookup therefore normalises the endpoint indices before reading.

    :param a: edge string in the ``X-Y`` format.
    :return: True if the edge is valid and its counter is positive.
    """
    if not Grafo.arestaValida(self, a):
        return False
    i1 = self.__indice_primeiro_vertice_aresta(a)
    i2 = self.__indice_segundo_vertice_aresta(a)
    # The original nested O(n^2) scan re-read the same cell M[i1][i2] on
    # every iteration; worse, for i1 > i2 that cell is the '-' filler — a
    # truthy string — so absent edges given as 'Y-X' were reported present.
    if i1 > i2:
        i1, i2 = i2, i1
    return self.M[i1][i2] > 0
Verifica se uma aresta passada como parâmetro pertence ao grafo. :param aresta: A aresta a ser verificada :return: Um valor booleano que indica se a aresta existe no grafo.
Graphs/adjacent undirected graph/grafo_adj_nao_dir.py
existeAresta
lucasEngdComp/graphs
0
python
def existeAresta(self, a: str): '\n Verifica se uma aresta passada como parâmetro pertence ao grafo.\n :param aresta: A aresta a ser verificada\n :return: Um valor booleano que indica se a aresta existe no grafo.\n ' existe = False if Grafo.arestaValida(self, a): for i in range(len(self.M)): for j in range(len(self.M)): if self.M[self.__indice_primeiro_vertice_aresta(a)][self.__indice_segundo_vertice_aresta(a)]: existe = True return existe
def existeAresta(self, a: str): '\n Verifica se uma aresta passada como parâmetro pertence ao grafo.\n :param aresta: A aresta a ser verificada\n :return: Um valor booleano que indica se a aresta existe no grafo.\n ' existe = False if Grafo.arestaValida(self, a): for i in range(len(self.M)): for j in range(len(self.M)): if self.M[self.__indice_primeiro_vertice_aresta(a)][self.__indice_segundo_vertice_aresta(a)]: existe = True return existe<|docstring|>Verifica se uma aresta passada como parâmetro pertence ao grafo. :param aresta: A aresta a ser verificada :return: Um valor booleano que indica se a aresta existe no grafo.<|endoftext|>
215f1deaa02e601e2563d0dac2491604de450c3d4d325dfec62d562fcf984903
def adicionaVertice(self, v):
    """Add a vertex to the graph if its name is well-formed.

    :param v: the vertex name to add.
    :raises VerticeInvalidoException: if the vertex already exists or its
        name is not in a valid format.
    """
    if (v in self.N):
        raise VerticeInvalidoException('O vértice {} já existe'.format(v))
    if self.verticeValido(v):
        # Track the longest vertex name — presumably used when
        # pretty-printing the matrix; confirm against __str__.
        if (len(v) > self.__maior_vertice):
            self.__maior_vertice = len(v)
        self.N.append(v)
        self.M.append([])  # new (last) row of the adjacency matrix
        for k in range(len(self.N)):
            if (k != (len(self.N) - 1)):
                # Existing row k gains a zeroed column for the new vertex;
                # the new row gets the '-' filler below the diagonal.
                self.M[k].append(0)
                self.M[self.N.index(v)].append('-')
            else:
                # Diagonal cell of the new row starts at zero.
                self.M[self.N.index(v)].append(0)
    else:
        raise VerticeInvalidoException((('O vértice ' + v) + ' é inválido'))
Inclui um vértice no grafo se ele estiver no formato correto. :param v: O vértice a ser incluído no grafo. :raises VerticeInvalidoException se o vértice já existe ou se ele não estiver no formato válido.
Graphs/adjacent undirected graph/grafo_adj_nao_dir.py
adicionaVertice
lucasEngdComp/graphs
0
python
def adicionaVertice(self, v): '\n Inclui um vértice no grafo se ele estiver no formato correto.\n :param v: O vértice a ser incluído no grafo.\n :raises VerticeInvalidoException se o vértice já existe ou se ele não estiver no formato válido.\n ' if (v in self.N): raise VerticeInvalidoException('O vértice {} já existe'.format(v)) if self.verticeValido(v): if (len(v) > self.__maior_vertice): self.__maior_vertice = len(v) self.N.append(v) self.M.append([]) for k in range(len(self.N)): if (k != (len(self.N) - 1)): self.M[k].append(0) self.M[self.N.index(v)].append('-') else: self.M[self.N.index(v)].append(0) else: raise VerticeInvalidoException((('O vértice ' + v) + ' é inválido'))
def adicionaVertice(self, v): '\n Inclui um vértice no grafo se ele estiver no formato correto.\n :param v: O vértice a ser incluído no grafo.\n :raises VerticeInvalidoException se o vértice já existe ou se ele não estiver no formato válido.\n ' if (v in self.N): raise VerticeInvalidoException('O vértice {} já existe'.format(v)) if self.verticeValido(v): if (len(v) > self.__maior_vertice): self.__maior_vertice = len(v) self.N.append(v) self.M.append([]) for k in range(len(self.N)): if (k != (len(self.N) - 1)): self.M[k].append(0) self.M[self.N.index(v)].append('-') else: self.M[self.N.index(v)].append(0) else: raise VerticeInvalidoException((('O vértice ' + v) + ' é inválido'))<|docstring|>Inclui um vértice no grafo se ele estiver no formato correto. :param v: O vértice a ser incluído no grafo. :raises VerticeInvalidoException se o vértice já existe ou se ele não estiver no formato válido.<|endoftext|>
71c7ffbc55434703632a1e821353b7e3f16f765f1bf23e2632db800a8dfdea28
def adicionaAresta(self, a, peso):
    """Add an edge given as ``X-Y`` to the graph, with an associated weight.

    :param a: edge string in the ``X-Y`` format.
    :param peso: weight stored for the edge.
    :raise: ArestaInvalidaException if the edge string is not valid.
    """
    if not self.arestaValida(a):
        raise ArestaInvalidaException('A aresta {} é inválida'.format(a))
    idx_u = self.__indice_primeiro_vertice_aresta(a)
    idx_v = self.__indice_segundo_vertice_aresta(a)
    # Counters live in the upper triangle of the matrix (row < column).
    linha, coluna = (idx_u, idx_v) if idx_u < idx_v else (idx_v, idx_u)
    self.M[linha][coluna] += 1
    # NOTE(review): the weight key is the raw edge string, so 'a-b' and
    # 'b-a' are stored under different keys — confirm this is intended.
    self.pesos[a] = peso
Adiciona uma aresta ao grafo no formato X-Y, onde X é o primeiro vértice e Y é o segundo vértice :param a: a aresta no formato correto :raise: lança uma exceção caso a aresta não estiver em um formato válido
Graphs/adjacent undirected graph/grafo_adj_nao_dir.py
adicionaAresta
lucasEngdComp/graphs
0
python
def adicionaAresta(self, a, peso): '\n Adiciona uma aresta ao grafo no formato X-Y, onde X é o primeiro vértice e Y é o segundo vértice\n :param a: a aresta no formato correto\n :raise: lança uma exceção caso a aresta não estiver em um formato válido\n ' if self.arestaValida(a): i_a1 = self.__indice_primeiro_vertice_aresta(a) i_a2 = self.__indice_segundo_vertice_aresta(a) if (i_a1 < i_a2): self.M[i_a1][i_a2] += 1 else: self.M[i_a2][i_a1] += 1 else: raise ArestaInvalidaException('A aresta {} é inválida'.format(a)) self.pesos[a] = peso
def adicionaAresta(self, a, peso): '\n Adiciona uma aresta ao grafo no formato X-Y, onde X é o primeiro vértice e Y é o segundo vértice\n :param a: a aresta no formato correto\n :raise: lança uma exceção caso a aresta não estiver em um formato válido\n ' if self.arestaValida(a): i_a1 = self.__indice_primeiro_vertice_aresta(a) i_a2 = self.__indice_segundo_vertice_aresta(a) if (i_a1 < i_a2): self.M[i_a1][i_a2] += 1 else: self.M[i_a2][i_a1] += 1 else: raise ArestaInvalidaException('A aresta {} é inválida'.format(a)) self.pesos[a] = peso<|docstring|>Adiciona uma aresta ao grafo no formato X-Y, onde X é o primeiro vértice e Y é o segundo vértice :param a: a aresta no formato correto :raise: lança uma exceção caso a aresta não estiver em um formato válido<|endoftext|>