body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
23108840baeaeaac30044900aa2829647b1da8a11b63dec8d54bab29cda2f490
def resolve_hostname(hostname): 'Resolves host name to IP address.' res = socket.getaddrinfo(hostname, None)[0] (family, socktype, proto, canonname, sockaddr) = res return sockaddr[0]
Resolves host name to IP address.
cinder/volume/drivers/netapp/utils.py
resolve_hostname
bswartz/cinder
11
python
def resolve_hostname(hostname): res = socket.getaddrinfo(hostname, None)[0] (family, socktype, proto, canonname, sockaddr) = res return sockaddr[0]
def resolve_hostname(hostname): res = socket.getaddrinfo(hostname, None)[0] (family, socktype, proto, canonname, sockaddr) = res return sockaddr[0]<|docstring|>Resolves host name to IP address.<|endoftext|>
151aba6cc1e313575a1ab79090bfd8a3e22d87c13bdb0bda17948674736750dd
def validate_qos_spec(qos_spec): 'Check validity of Cinder qos spec for our backend.' if (qos_spec is None): return normalized_qos_keys = [key.lower() for key in QOS_KEYS] keylist = [] for (key, value) in qos_spec.items(): lower_case_key = key.lower() if (lower_case_key not in normalized_qos_keys): msg = (_('Unrecognized QOS keyword: "%s"') % key) raise exception.Invalid(msg) keylist.append(lower_case_key) if (len(keylist) > 1): msg = _('Only one limit can be set in a QoS spec.') raise exception.Invalid(msg)
Check validity of Cinder qos spec for our backend.
cinder/volume/drivers/netapp/utils.py
validate_qos_spec
bswartz/cinder
11
python
def validate_qos_spec(qos_spec): if (qos_spec is None): return normalized_qos_keys = [key.lower() for key in QOS_KEYS] keylist = [] for (key, value) in qos_spec.items(): lower_case_key = key.lower() if (lower_case_key not in normalized_qos_keys): msg = (_('Unrecognized QOS keyword: "%s"') % key) raise exception.Invalid(msg) keylist.append(lower_case_key) if (len(keylist) > 1): msg = _('Only one limit can be set in a QoS spec.') raise exception.Invalid(msg)
def validate_qos_spec(qos_spec): if (qos_spec is None): return normalized_qos_keys = [key.lower() for key in QOS_KEYS] keylist = [] for (key, value) in qos_spec.items(): lower_case_key = key.lower() if (lower_case_key not in normalized_qos_keys): msg = (_('Unrecognized QOS keyword: "%s"') % key) raise exception.Invalid(msg) keylist.append(lower_case_key) if (len(keylist) > 1): msg = _('Only one limit can be set in a QoS spec.') raise exception.Invalid(msg)<|docstring|>Check validity of Cinder qos spec for our backend.<|endoftext|>
9fac7511d765daabc2c77bdf68bb5d0338efca78b564788b779a1c3553f13073
def get_volume_type_from_volume(volume): 'Provides volume type associated with volume.' type_id = volume.get('volume_type_id') if (type_id is None): return {} ctxt = context.get_admin_context() return volume_types.get_volume_type(ctxt, type_id)
Provides volume type associated with volume.
cinder/volume/drivers/netapp/utils.py
get_volume_type_from_volume
bswartz/cinder
11
python
def get_volume_type_from_volume(volume): type_id = volume.get('volume_type_id') if (type_id is None): return {} ctxt = context.get_admin_context() return volume_types.get_volume_type(ctxt, type_id)
def get_volume_type_from_volume(volume): type_id = volume.get('volume_type_id') if (type_id is None): return {} ctxt = context.get_admin_context() return volume_types.get_volume_type(ctxt, type_id)<|docstring|>Provides volume type associated with volume.<|endoftext|>
22ccefff904d39571617c126fe6ded26f66056c950db89298f7382340be12511
def map_qos_spec(qos_spec, volume): 'Map Cinder QOS spec to limit/throughput-value as used in client API.' if (qos_spec is None): return None qos_spec = map_dict_to_lower(qos_spec) spec = dict(policy_name=get_qos_policy_group_name(volume), max_throughput=None) if ('maxiops' in qos_spec): spec['max_throughput'] = ('%siops' % qos_spec['maxiops']) elif ('maxiopspergib' in qos_spec): spec['max_throughput'] = ('%siops' % six.text_type((int(qos_spec['maxiopspergib']) * int(volume['size'])))) elif ('maxbps' in qos_spec): spec['max_throughput'] = ('%sB/s' % qos_spec['maxbps']) elif ('maxbpspergib' in qos_spec): spec['max_throughput'] = ('%sB/s' % six.text_type((int(qos_spec['maxbpspergib']) * int(volume['size'])))) return spec
Map Cinder QOS spec to limit/throughput-value as used in client API.
cinder/volume/drivers/netapp/utils.py
map_qos_spec
bswartz/cinder
11
python
def map_qos_spec(qos_spec, volume): if (qos_spec is None): return None qos_spec = map_dict_to_lower(qos_spec) spec = dict(policy_name=get_qos_policy_group_name(volume), max_throughput=None) if ('maxiops' in qos_spec): spec['max_throughput'] = ('%siops' % qos_spec['maxiops']) elif ('maxiopspergib' in qos_spec): spec['max_throughput'] = ('%siops' % six.text_type((int(qos_spec['maxiopspergib']) * int(volume['size'])))) elif ('maxbps' in qos_spec): spec['max_throughput'] = ('%sB/s' % qos_spec['maxbps']) elif ('maxbpspergib' in qos_spec): spec['max_throughput'] = ('%sB/s' % six.text_type((int(qos_spec['maxbpspergib']) * int(volume['size'])))) return spec
def map_qos_spec(qos_spec, volume): if (qos_spec is None): return None qos_spec = map_dict_to_lower(qos_spec) spec = dict(policy_name=get_qos_policy_group_name(volume), max_throughput=None) if ('maxiops' in qos_spec): spec['max_throughput'] = ('%siops' % qos_spec['maxiops']) elif ('maxiopspergib' in qos_spec): spec['max_throughput'] = ('%siops' % six.text_type((int(qos_spec['maxiopspergib']) * int(volume['size'])))) elif ('maxbps' in qos_spec): spec['max_throughput'] = ('%sB/s' % qos_spec['maxbps']) elif ('maxbpspergib' in qos_spec): spec['max_throughput'] = ('%sB/s' % six.text_type((int(qos_spec['maxbpspergib']) * int(volume['size'])))) return spec<|docstring|>Map Cinder QOS spec to limit/throughput-value as used in client API.<|endoftext|>
d599f2082b44c17ebc9b50b7a2fbf0661855a6d6ce63dd77b00b71d783fd9c1c
def map_dict_to_lower(input_dict): 'Return an equivalent to the input dictionary with lower-case keys.' lower_case_dict = {} for key in input_dict: lower_case_dict[key.lower()] = input_dict[key] return lower_case_dict
Return an equivalent to the input dictionary with lower-case keys.
cinder/volume/drivers/netapp/utils.py
map_dict_to_lower
bswartz/cinder
11
python
def map_dict_to_lower(input_dict): lower_case_dict = {} for key in input_dict: lower_case_dict[key.lower()] = input_dict[key] return lower_case_dict
def map_dict_to_lower(input_dict): lower_case_dict = {} for key in input_dict: lower_case_dict[key.lower()] = input_dict[key] return lower_case_dict<|docstring|>Return an equivalent to the input dictionary with lower-case keys.<|endoftext|>
c0b174b038fe3e380959369af54db69dfb81c4778a23610db84807ce99d8635a
def get_qos_policy_group_name(volume): 'Return the name of backend QOS policy group based on its volume id.' if ('id' in volume): return (OPENSTACK_PREFIX + volume['id']) return None
Return the name of backend QOS policy group based on its volume id.
cinder/volume/drivers/netapp/utils.py
get_qos_policy_group_name
bswartz/cinder
11
python
def get_qos_policy_group_name(volume): if ('id' in volume): return (OPENSTACK_PREFIX + volume['id']) return None
def get_qos_policy_group_name(volume): if ('id' in volume): return (OPENSTACK_PREFIX + volume['id']) return None<|docstring|>Return the name of backend QOS policy group based on its volume id.<|endoftext|>
0395dece3459949ca10468d21b70be35ca450b5f8a5e8b4551b41b8b41433d0b
def get_qos_policy_group_name_from_info(qos_policy_group_info): 'Return the name of a QOS policy group given qos policy group info.' if (qos_policy_group_info is None): return None legacy = qos_policy_group_info.get('legacy') if (legacy is not None): return legacy['policy_name'] spec = qos_policy_group_info.get('spec') if (spec is not None): return spec['policy_name'] return None
Return the name of a QOS policy group given qos policy group info.
cinder/volume/drivers/netapp/utils.py
get_qos_policy_group_name_from_info
bswartz/cinder
11
python
def get_qos_policy_group_name_from_info(qos_policy_group_info): if (qos_policy_group_info is None): return None legacy = qos_policy_group_info.get('legacy') if (legacy is not None): return legacy['policy_name'] spec = qos_policy_group_info.get('spec') if (spec is not None): return spec['policy_name'] return None
def get_qos_policy_group_name_from_info(qos_policy_group_info): if (qos_policy_group_info is None): return None legacy = qos_policy_group_info.get('legacy') if (legacy is not None): return legacy['policy_name'] spec = qos_policy_group_info.get('spec') if (spec is not None): return spec['policy_name'] return None<|docstring|>Return the name of a QOS policy group given qos policy group info.<|endoftext|>
3c5ebc60688548b497556a35f5bd7758acc0086a3df9823c8ff8a36b204bd9e5
def get_pool_name_filter_regex(configuration): 'Build the regex for filtering pools by name\n\n :param configuration: The volume driver configuration\n :raise InvalidConfigurationValue: if configured regex pattern is invalid\n :returns: A compiled regex for filtering pool names\n ' pool_patterns = (configuration.netapp_pool_name_search_pattern or '(.+)') pool_patterns = '|'.join([(('^' + pool_pattern.strip('^$ \t')) + '$') for pool_pattern in pool_patterns.split(',')]) try: return re.compile(pool_patterns) except re.error: raise exception.InvalidConfigurationValue(option='netapp_pool_name_search_pattern', value=configuration.netapp_pool_name_search_pattern)
Build the regex for filtering pools by name :param configuration: The volume driver configuration :raise InvalidConfigurationValue: if configured regex pattern is invalid :returns: A compiled regex for filtering pool names
cinder/volume/drivers/netapp/utils.py
get_pool_name_filter_regex
bswartz/cinder
11
python
def get_pool_name_filter_regex(configuration): 'Build the regex for filtering pools by name\n\n :param configuration: The volume driver configuration\n :raise InvalidConfigurationValue: if configured regex pattern is invalid\n :returns: A compiled regex for filtering pool names\n ' pool_patterns = (configuration.netapp_pool_name_search_pattern or '(.+)') pool_patterns = '|'.join([(('^' + pool_pattern.strip('^$ \t')) + '$') for pool_pattern in pool_patterns.split(',')]) try: return re.compile(pool_patterns) except re.error: raise exception.InvalidConfigurationValue(option='netapp_pool_name_search_pattern', value=configuration.netapp_pool_name_search_pattern)
def get_pool_name_filter_regex(configuration): 'Build the regex for filtering pools by name\n\n :param configuration: The volume driver configuration\n :raise InvalidConfigurationValue: if configured regex pattern is invalid\n :returns: A compiled regex for filtering pool names\n ' pool_patterns = (configuration.netapp_pool_name_search_pattern or '(.+)') pool_patterns = '|'.join([(('^' + pool_pattern.strip('^$ \t')) + '$') for pool_pattern in pool_patterns.split(',')]) try: return re.compile(pool_patterns) except re.error: raise exception.InvalidConfigurationValue(option='netapp_pool_name_search_pattern', value=configuration.netapp_pool_name_search_pattern)<|docstring|>Build the regex for filtering pools by name :param configuration: The volume driver configuration :raise InvalidConfigurationValue: if configured regex pattern is invalid :returns: A compiled regex for filtering pool names<|endoftext|>
3af5866798274840156c5890ac80111cf89f2bbdc4d4968746b454abc19c25a3
def get_valid_qos_policy_group_info(volume, extra_specs=None): 'Given a volume, return information for QOS provisioning.' info = dict(legacy=None, spec=None) try: volume_type = get_volume_type_from_volume(volume) except KeyError: LOG.exception(_LE('Cannot get QoS spec for volume %s.'), volume['id']) return info if (volume_type is None): return info if (extra_specs is None): extra_specs = volume_type.get('extra_specs', {}) info['legacy'] = get_legacy_qos_policy(extra_specs) info['spec'] = get_valid_backend_qos_spec_from_volume_type(volume, volume_type) msg = 'QoS policy group info for volume %(vol)s: %(info)s' LOG.debug(msg, {'vol': volume['name'], 'info': info}) check_for_invalid_qos_spec_combination(info, volume_type) return info
Given a volume, return information for QOS provisioning.
cinder/volume/drivers/netapp/utils.py
get_valid_qos_policy_group_info
bswartz/cinder
11
python
def get_valid_qos_policy_group_info(volume, extra_specs=None): info = dict(legacy=None, spec=None) try: volume_type = get_volume_type_from_volume(volume) except KeyError: LOG.exception(_LE('Cannot get QoS spec for volume %s.'), volume['id']) return info if (volume_type is None): return info if (extra_specs is None): extra_specs = volume_type.get('extra_specs', {}) info['legacy'] = get_legacy_qos_policy(extra_specs) info['spec'] = get_valid_backend_qos_spec_from_volume_type(volume, volume_type) msg = 'QoS policy group info for volume %(vol)s: %(info)s' LOG.debug(msg, {'vol': volume['name'], 'info': info}) check_for_invalid_qos_spec_combination(info, volume_type) return info
def get_valid_qos_policy_group_info(volume, extra_specs=None): info = dict(legacy=None, spec=None) try: volume_type = get_volume_type_from_volume(volume) except KeyError: LOG.exception(_LE('Cannot get QoS spec for volume %s.'), volume['id']) return info if (volume_type is None): return info if (extra_specs is None): extra_specs = volume_type.get('extra_specs', {}) info['legacy'] = get_legacy_qos_policy(extra_specs) info['spec'] = get_valid_backend_qos_spec_from_volume_type(volume, volume_type) msg = 'QoS policy group info for volume %(vol)s: %(info)s' LOG.debug(msg, {'vol': volume['name'], 'info': info}) check_for_invalid_qos_spec_combination(info, volume_type) return info<|docstring|>Given a volume, return information for QOS provisioning.<|endoftext|>
307fbbcdb3275b49df40eba455891047dcc3ce1c063f8ebf8037ab5ac86ecb80
def get_valid_backend_qos_spec_from_volume_type(volume, volume_type): 'Given a volume type, return the associated Cinder QoS spec.' spec_key_values = get_backend_qos_spec_from_volume_type(volume_type) if (spec_key_values is None): return None validate_qos_spec(spec_key_values) return map_qos_spec(spec_key_values, volume)
Given a volume type, return the associated Cinder QoS spec.
cinder/volume/drivers/netapp/utils.py
get_valid_backend_qos_spec_from_volume_type
bswartz/cinder
11
python
def get_valid_backend_qos_spec_from_volume_type(volume, volume_type): spec_key_values = get_backend_qos_spec_from_volume_type(volume_type) if (spec_key_values is None): return None validate_qos_spec(spec_key_values) return map_qos_spec(spec_key_values, volume)
def get_valid_backend_qos_spec_from_volume_type(volume, volume_type): spec_key_values = get_backend_qos_spec_from_volume_type(volume_type) if (spec_key_values is None): return None validate_qos_spec(spec_key_values) return map_qos_spec(spec_key_values, volume)<|docstring|>Given a volume type, return the associated Cinder QoS spec.<|endoftext|>
f5b2b26d1dcef9b13394d8f14c4b97e4b6bd63cc8e79c8f4eae11a1a2f70736b
def check_for_invalid_qos_spec_combination(info, volume_type): 'Invalidate QOS spec if both legacy and non-legacy info is present.' if (info['legacy'] and info['spec']): msg = (_('Conflicting QoS specifications in volume type %s: when QoS spec is associated to volume type, legacy "netapp:qos_policy_group" is not allowed in the volume type extra specs.') % volume_type['id']) raise exception.Invalid(msg)
Invalidate QOS spec if both legacy and non-legacy info is present.
cinder/volume/drivers/netapp/utils.py
check_for_invalid_qos_spec_combination
bswartz/cinder
11
python
def check_for_invalid_qos_spec_combination(info, volume_type): if (info['legacy'] and info['spec']): msg = (_('Conflicting QoS specifications in volume type %s: when QoS spec is associated to volume type, legacy "netapp:qos_policy_group" is not allowed in the volume type extra specs.') % volume_type['id']) raise exception.Invalid(msg)
def check_for_invalid_qos_spec_combination(info, volume_type): if (info['legacy'] and info['spec']): msg = (_('Conflicting QoS specifications in volume type %s: when QoS spec is associated to volume type, legacy "netapp:qos_policy_group" is not allowed in the volume type extra specs.') % volume_type['id']) raise exception.Invalid(msg)<|docstring|>Invalidate QOS spec if both legacy and non-legacy info is present.<|endoftext|>
d26fcf3d5e790faf00f2994cdef7766c79469012c3ac1866349af6f61b603d67
def get_legacy_qos_policy(extra_specs): 'Return legacy qos policy information if present in extra specs.' external_policy_name = extra_specs.get('netapp:qos_policy_group') if (external_policy_name is None): return None return dict(policy_name=external_policy_name)
Return legacy qos policy information if present in extra specs.
cinder/volume/drivers/netapp/utils.py
get_legacy_qos_policy
bswartz/cinder
11
python
def get_legacy_qos_policy(extra_specs): external_policy_name = extra_specs.get('netapp:qos_policy_group') if (external_policy_name is None): return None return dict(policy_name=external_policy_name)
def get_legacy_qos_policy(extra_specs): external_policy_name = extra_specs.get('netapp:qos_policy_group') if (external_policy_name is None): return None return dict(policy_name=external_policy_name)<|docstring|>Return legacy qos policy information if present in extra specs.<|endoftext|>
a27dce6d32ebbe65213457c2be4a89a1d5ac9c05274cae599804e973df9fc86f
def __init__(self, supported=True, minimum_version=None): 'Represents the current state of enablement for a Feature\n\n :param supported: True if supported, false otherwise\n :param minimum_version: The minimum version that this feature is\n supported at\n ' self.supported = supported self.minimum_version = minimum_version
Represents the current state of enablement for a Feature :param supported: True if supported, false otherwise :param minimum_version: The minimum version that this feature is supported at
cinder/volume/drivers/netapp/utils.py
__init__
bswartz/cinder
11
python
def __init__(self, supported=True, minimum_version=None): 'Represents the current state of enablement for a Feature\n\n :param supported: True if supported, false otherwise\n :param minimum_version: The minimum version that this feature is\n supported at\n ' self.supported = supported self.minimum_version = minimum_version
def __init__(self, supported=True, minimum_version=None): 'Represents the current state of enablement for a Feature\n\n :param supported: True if supported, false otherwise\n :param minimum_version: The minimum version that this feature is\n supported at\n ' self.supported = supported self.minimum_version = minimum_version<|docstring|>Represents the current state of enablement for a Feature :param supported: True if supported, false otherwise :param minimum_version: The minimum version that this feature is supported at<|endoftext|>
0cf21d81bed614ac77d325f81ae1e0afbf3eb5be5d28cda07a6332544053a90b
def __nonzero__(self): 'Allow a FeatureState object to be tested for truth value\n\n :returns: True if the feature is supported, otherwise False\n ' return self.supported
Allow a FeatureState object to be tested for truth value :returns: True if the feature is supported, otherwise False
cinder/volume/drivers/netapp/utils.py
__nonzero__
bswartz/cinder
11
python
def __nonzero__(self): 'Allow a FeatureState object to be tested for truth value\n\n :returns: True if the feature is supported, otherwise False\n ' return self.supported
def __nonzero__(self): 'Allow a FeatureState object to be tested for truth value\n\n :returns: True if the feature is supported, otherwise False\n ' return self.supported<|docstring|>Allow a FeatureState object to be tested for truth value :returns: True if the feature is supported, otherwise False<|endoftext|>
302955950d9eba0bc5b6daf1fa2028c78980a7c2b3e51c995289774df23c0c81
def __bool__(self): 'py3 Allow a FeatureState object to be tested for truth value\n\n :returns: True if the feature is supported, otherwise False\n ' return self.supported
py3 Allow a FeatureState object to be tested for truth value :returns: True if the feature is supported, otherwise False
cinder/volume/drivers/netapp/utils.py
__bool__
bswartz/cinder
11
python
def __bool__(self): 'py3 Allow a FeatureState object to be tested for truth value\n\n :returns: True if the feature is supported, otherwise False\n ' return self.supported
def __bool__(self): 'py3 Allow a FeatureState object to be tested for truth value\n\n :returns: True if the feature is supported, otherwise False\n ' return self.supported<|docstring|>py3 Allow a FeatureState object to be tested for truth value :returns: True if the feature is supported, otherwise False<|endoftext|>
6c9aeec3c90fb8a695bac4360351c43ef2b07878bb68c06909c35d5ee2db0a77
def apply_multi_defaults(file_config: _ConfigDict, cli_config: _ConfigDict) -> _ConfigDict: "\n Returns all options where multiple=True that\n appeared in the config file, but weren't passed\n via the command line.\n " cli_paths = cli_config.get('path') conf_file_paths = file_config.get('path', '.') file_config_only = {} if (conf_file_paths and (not cli_paths)): file_config_only['path'] = as_list(conf_file_paths) multiple_options = ('exclude', 'hook_module') for param in multiple_options: from_cli = cli_config.get(param) from_conf_file = file_config.get(param, '') if (from_conf_file and (not from_cli)): file_config_only[param] = as_list(from_conf_file) return file_config_only
Returns all options where multiple=True that appeared in the config file, but weren't passed via the command line.
ward/_config.py
apply_multi_defaults
mcous/ward
877
python
def apply_multi_defaults(file_config: _ConfigDict, cli_config: _ConfigDict) -> _ConfigDict: "\n Returns all options where multiple=True that\n appeared in the config file, but weren't passed\n via the command line.\n " cli_paths = cli_config.get('path') conf_file_paths = file_config.get('path', '.') file_config_only = {} if (conf_file_paths and (not cli_paths)): file_config_only['path'] = as_list(conf_file_paths) multiple_options = ('exclude', 'hook_module') for param in multiple_options: from_cli = cli_config.get(param) from_conf_file = file_config.get(param, ) if (from_conf_file and (not from_cli)): file_config_only[param] = as_list(from_conf_file) return file_config_only
def apply_multi_defaults(file_config: _ConfigDict, cli_config: _ConfigDict) -> _ConfigDict: "\n Returns all options where multiple=True that\n appeared in the config file, but weren't passed\n via the command line.\n " cli_paths = cli_config.get('path') conf_file_paths = file_config.get('path', '.') file_config_only = {} if (conf_file_paths and (not cli_paths)): file_config_only['path'] = as_list(conf_file_paths) multiple_options = ('exclude', 'hook_module') for param in multiple_options: from_cli = cli_config.get(param) from_conf_file = file_config.get(param, ) if (from_conf_file and (not from_cli)): file_config_only[param] = as_list(from_conf_file) return file_config_only<|docstring|>Returns all options where multiple=True that appeared in the config file, but weren't passed via the command line.<|endoftext|>
9c1648a38254dda0a6e48a17ce31f4976ba4b091535cd4bb6deacd8571171287
def add_data_for_name(name_data, year, rank, name): '\n Adds the given year and rank to the associated name in the name_data dict.\n\n Input:\n name_data (dict): dict holding baby name data\n year (str): the year of the data entry to add\n rank (str): the rank of the data entry to add\n name (str): the name of the data entry to add\n\n Output:\n This function modifies the name_data dict to store the provided\n name, year, and rank. This function does not return any values.\n\n ' if (name in name_data): if (year in name_data[name]): new_rank = int(name_data[name][year]) if (int(rank) < new_rank): name_data[name][year] = rank else: name_data[name][year] = rank else: name_data[name] = {year: rank}
Adds the given year and rank to the associated name in the name_data dict. Input: name_data (dict): dict holding baby name data year (str): the year of the data entry to add rank (str): the rank of the data entry to add name (str): the name of the data entry to add Output: This function modifies the name_data dict to store the provided name, year, and rank. This function does not return any values.
stanCode_projects/SC101_name_searching/babynames.py
add_data_for_name
joe850226/sc-projects
1
python
def add_data_for_name(name_data, year, rank, name): '\n Adds the given year and rank to the associated name in the name_data dict.\n\n Input:\n name_data (dict): dict holding baby name data\n year (str): the year of the data entry to add\n rank (str): the rank of the data entry to add\n name (str): the name of the data entry to add\n\n Output:\n This function modifies the name_data dict to store the provided\n name, year, and rank. This function does not return any values.\n\n ' if (name in name_data): if (year in name_data[name]): new_rank = int(name_data[name][year]) if (int(rank) < new_rank): name_data[name][year] = rank else: name_data[name][year] = rank else: name_data[name] = {year: rank}
def add_data_for_name(name_data, year, rank, name): '\n Adds the given year and rank to the associated name in the name_data dict.\n\n Input:\n name_data (dict): dict holding baby name data\n year (str): the year of the data entry to add\n rank (str): the rank of the data entry to add\n name (str): the name of the data entry to add\n\n Output:\n This function modifies the name_data dict to store the provided\n name, year, and rank. This function does not return any values.\n\n ' if (name in name_data): if (year in name_data[name]): new_rank = int(name_data[name][year]) if (int(rank) < new_rank): name_data[name][year] = rank else: name_data[name][year] = rank else: name_data[name] = {year: rank}<|docstring|>Adds the given year and rank to the associated name in the name_data dict. Input: name_data (dict): dict holding baby name data year (str): the year of the data entry to add rank (str): the rank of the data entry to add name (str): the name of the data entry to add Output: This function modifies the name_data dict to store the provided name, year, and rank. This function does not return any values.<|endoftext|>
f10d7240789fef670273778f7341984866aacca7fc08edef68abe1befc8fc315
def add_file(name_data, filename): '\n Reads the information from the specified file and populates the name_data\n dict with the data found in the file.\n\n Input:\n name_data (dict): dict holding baby name data\n filename (str): name of the file holding baby name data\n\n Output:\n This function modifies the name_data dict to store information from\n the provided file name. This function does not return any value.\n\n ' with open(filename, 'r') as f: for line in f: info_list = line.split(',') if (len(info_list) == 1): year = info_list[0].strip() else: rank = info_list[0].strip() name1 = info_list[1].strip() name2 = info_list[2].strip() add_data_for_name(name_data, year, rank, name1) add_data_for_name(name_data, year, rank, name2) pass
Reads the information from the specified file and populates the name_data dict with the data found in the file. Input: name_data (dict): dict holding baby name data filename (str): name of the file holding baby name data Output: This function modifies the name_data dict to store information from the provided file name. This function does not return any value.
stanCode_projects/SC101_name_searching/babynames.py
add_file
joe850226/sc-projects
1
python
def add_file(name_data, filename): '\n Reads the information from the specified file and populates the name_data\n dict with the data found in the file.\n\n Input:\n name_data (dict): dict holding baby name data\n filename (str): name of the file holding baby name data\n\n Output:\n This function modifies the name_data dict to store information from\n the provided file name. This function does not return any value.\n\n ' with open(filename, 'r') as f: for line in f: info_list = line.split(',') if (len(info_list) == 1): year = info_list[0].strip() else: rank = info_list[0].strip() name1 = info_list[1].strip() name2 = info_list[2].strip() add_data_for_name(name_data, year, rank, name1) add_data_for_name(name_data, year, rank, name2) pass
def add_file(name_data, filename): '\n Reads the information from the specified file and populates the name_data\n dict with the data found in the file.\n\n Input:\n name_data (dict): dict holding baby name data\n filename (str): name of the file holding baby name data\n\n Output:\n This function modifies the name_data dict to store information from\n the provided file name. This function does not return any value.\n\n ' with open(filename, 'r') as f: for line in f: info_list = line.split(',') if (len(info_list) == 1): year = info_list[0].strip() else: rank = info_list[0].strip() name1 = info_list[1].strip() name2 = info_list[2].strip() add_data_for_name(name_data, year, rank, name1) add_data_for_name(name_data, year, rank, name2) pass<|docstring|>Reads the information from the specified file and populates the name_data dict with the data found in the file. Input: name_data (dict): dict holding baby name data filename (str): name of the file holding baby name data Output: This function modifies the name_data dict to store information from the provided file name. This function does not return any value.<|endoftext|>
0ecff34405a97ea346e044e46bf38f18ff39a4d08b39a261a6ee0c0c5caa3bef
def read_files(filenames): '\n Reads the data from all files specified in the provided list\n into a single name_data dict and then returns that dict.\n\n Input:\n filenames (List[str]): a list of filenames containing baby name data\n\n Returns:\n name_data (dict): the dict storing all baby name data in a structured manner\n ' name_data = {} for filename in filenames: add_file(name_data, filename) return name_data
Reads the data from all files specified in the provided list into a single name_data dict and then returns that dict. Input: filenames (List[str]): a list of filenames containing baby name data Returns: name_data (dict): the dict storing all baby name data in a structured manner
stanCode_projects/SC101_name_searching/babynames.py
read_files
joe850226/sc-projects
1
python
def read_files(filenames): '\n Reads the data from all files specified in the provided list\n into a single name_data dict and then returns that dict.\n\n Input:\n filenames (List[str]): a list of filenames containing baby name data\n\n Returns:\n name_data (dict): the dict storing all baby name data in a structured manner\n ' name_data = {} for filename in filenames: add_file(name_data, filename) return name_data
def read_files(filenames): '\n Reads the data from all files specified in the provided list\n into a single name_data dict and then returns that dict.\n\n Input:\n filenames (List[str]): a list of filenames containing baby name data\n\n Returns:\n name_data (dict): the dict storing all baby name data in a structured manner\n ' name_data = {} for filename in filenames: add_file(name_data, filename) return name_data<|docstring|>Reads the data from all files specified in the provided list into a single name_data dict and then returns that dict. Input: filenames (List[str]): a list of filenames containing baby name data Returns: name_data (dict): the dict storing all baby name data in a structured manner<|endoftext|>
519df7507a573231caeb87296dd0a684978dfd9f170e719fb3a5053f73c6d9cd
def search_names(name_data, target): '\n Given a name_data dict that stores baby name information and a target string,\n returns a list of all names in the dict that contain the target string. This\n function should be case-insensitive with respect to the target string.\n\n Input:\n name_data (dict): a dict containing baby name data organized by name\n target (str): a string to look for in the names contained within name_data\n\n Returns:\n matching_names (List[str]): a list of all names from name_data that contain\n the target string\n\n ' names = [] for (name, d_name) in name_data.items(): name_lower = name.lower() target = target.lower() if (target in name_lower): names.append(name) return names
Given a name_data dict that stores baby name information and a target string, returns a list of all names in the dict that contain the target string. This function should be case-insensitive with respect to the target string. Input: name_data (dict): a dict containing baby name data organized by name target (str): a string to look for in the names contained within name_data Returns: matching_names (List[str]): a list of all names from name_data that contain the target string
stanCode_projects/SC101_name_searching/babynames.py
search_names
joe850226/sc-projects
1
python
def search_names(name_data, target): '\n Given a name_data dict that stores baby name information and a target string,\n returns a list of all names in the dict that contain the target string. This\n function should be case-insensitive with respect to the target string.\n\n Input:\n name_data (dict): a dict containing baby name data organized by name\n target (str): a string to look for in the names contained within name_data\n\n Returns:\n matching_names (List[str]): a list of all names from name_data that contain\n the target string\n\n ' names = [] for (name, d_name) in name_data.items(): name_lower = name.lower() target = target.lower() if (target in name_lower): names.append(name) return names
def search_names(name_data, target): '\n Given a name_data dict that stores baby name information and a target string,\n returns a list of all names in the dict that contain the target string. This\n function should be case-insensitive with respect to the target string.\n\n Input:\n name_data (dict): a dict containing baby name data organized by name\n target (str): a string to look for in the names contained within name_data\n\n Returns:\n matching_names (List[str]): a list of all names from name_data that contain\n the target string\n\n ' names = [] for (name, d_name) in name_data.items(): name_lower = name.lower() target = target.lower() if (target in name_lower): names.append(name) return names<|docstring|>Given a name_data dict that stores baby name information and a target string, returns a list of all names in the dict that contain the target string. This function should be case-insensitive with respect to the target string. Input: name_data (dict): a dict containing baby name data organized by name target (str): a string to look for in the names contained within name_data Returns: matching_names (List[str]): a list of all names from name_data that contain the target string<|endoftext|>
900cd11a9165d0a6f96aaa079a2db97ceba8b6e7dff6e7dd284edbdd927de3cf
def print_names(name_data): '\n (provided, DO NOT MODIFY)\n Given a name_data dict, print out all its data, one name per line.\n The names are printed in alphabetical order,\n with the corresponding years data displayed in increasing order.\n\n Input:\n name_data (dict): a dict containing baby name data organized by name\n Returns:\n This function does not return anything\n ' for (key, value) in sorted(name_data.items()): print(key, sorted(value.items()))
(provided, DO NOT MODIFY) Given a name_data dict, print out all its data, one name per line. The names are printed in alphabetical order, with the corresponding years data displayed in increasing order. Input: name_data (dict): a dict containing baby name data organized by name Returns: This function does not return anything
stanCode_projects/SC101_name_searching/babynames.py
print_names
joe850226/sc-projects
1
python
def print_names(name_data): '\n (provided, DO NOT MODIFY)\n Given a name_data dict, print out all its data, one name per line.\n The names are printed in alphabetical order,\n with the corresponding years data displayed in increasing order.\n\n Input:\n name_data (dict): a dict containing baby name data organized by name\n Returns:\n This function does not return anything\n ' for (key, value) in sorted(name_data.items()): print(key, sorted(value.items()))
def print_names(name_data): '\n (provided, DO NOT MODIFY)\n Given a name_data dict, print out all its data, one name per line.\n The names are printed in alphabetical order,\n with the corresponding years data displayed in increasing order.\n\n Input:\n name_data (dict): a dict containing baby name data organized by name\n Returns:\n This function does not return anything\n ' for (key, value) in sorted(name_data.items()): print(key, sorted(value.items()))<|docstring|>(provided, DO NOT MODIFY) Given a name_data dict, print out all its data, one name per line. The names are printed in alphabetical order, with the corresponding years data displayed in increasing order. Input: name_data (dict): a dict containing baby name data organized by name Returns: This function does not return anything<|endoftext|>
6c7a5bb4ef917207cead86fd229828877cf47aca8c3ab25411c2a4471d944451
def get_version(version=None): 'Derives a PEP386-compliant version number from VERSION.' if (version is None): version = VERSION assert (len(version) == 5) assert (version[3] in ('alpha', 'beta', 'rc', 'final')) parts = (2 if (version[2] == 0) else 3) main = '.'.join((str(x) for x in version[:parts])) sub = '' if ((version[3] == 'alpha') and (version[4] == 0)): from django.utils.version import get_svn_revision svn_revision = get_svn_revision()[4:] if (svn_revision != 'unknown'): sub = ('.dev%s' % svn_revision) elif (version[3] != 'final'): mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'} sub = (mapping[version[3]] + str(version[4])) return (main + sub)
Derives a PEP386-compliant version number from VERSION.
Lib/django-1.4/django/__init__.py
get_version
plooploops/rosterrun
790
python
def get_version(version=None): if (version is None): version = VERSION assert (len(version) == 5) assert (version[3] in ('alpha', 'beta', 'rc', 'final')) parts = (2 if (version[2] == 0) else 3) main = '.'.join((str(x) for x in version[:parts])) sub = if ((version[3] == 'alpha') and (version[4] == 0)): from django.utils.version import get_svn_revision svn_revision = get_svn_revision()[4:] if (svn_revision != 'unknown'): sub = ('.dev%s' % svn_revision) elif (version[3] != 'final'): mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'} sub = (mapping[version[3]] + str(version[4])) return (main + sub)
def get_version(version=None): if (version is None): version = VERSION assert (len(version) == 5) assert (version[3] in ('alpha', 'beta', 'rc', 'final')) parts = (2 if (version[2] == 0) else 3) main = '.'.join((str(x) for x in version[:parts])) sub = if ((version[3] == 'alpha') and (version[4] == 0)): from django.utils.version import get_svn_revision svn_revision = get_svn_revision()[4:] if (svn_revision != 'unknown'): sub = ('.dev%s' % svn_revision) elif (version[3] != 'final'): mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'} sub = (mapping[version[3]] + str(version[4])) return (main + sub)<|docstring|>Derives a PEP386-compliant version number from VERSION.<|endoftext|>
a8299b56a0b81084b1e969b49abe93fa689b1a34979984d1a7fd1719c5890f98
def get_master_slave_server_groups(ids: Optional[Sequence[str]]=None, load_balancer_id: Optional[str]=None, name_regex: Optional[str]=None, output_file: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetMasterSlaveServerGroupsResult: '\n This data source provides the master slave server groups related to a server load balancer.\n\n > **NOTE:** Available in 1.54.0+\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_alicloud as alicloud\n\n default_zones = alicloud.get_zones(available_disk_category="cloud_efficiency",\n available_resource_creation="VSwitch")\n default_instance_types = alicloud.ecs.get_instance_types(availability_zone=default_zones.zones[0].id,\n eni_amount=2)\n image = alicloud.ecs.get_images(name_regex="^ubuntu_18.*64",\n most_recent=True,\n owners="system")\n config = pulumi.Config()\n name = config.get("name")\n if name is None:\n name = "tf-testAccSlbMasterSlaveServerGroupVpc"\n number = config.get("number")\n if number is None:\n number = "1"\n main_network = alicloud.vpc.Network("mainNetwork", cidr_block="172.16.0.0/16")\n main_switch = alicloud.vpc.Switch("mainSwitch",\n vpc_id=main_network.id,\n zone_id=default_zones.zones[0].id,\n vswitch_name=name,\n cidr_block="172.16.0.0/16")\n group_security_group = alicloud.ecs.SecurityGroup("groupSecurityGroup", vpc_id=main_network.id)\n instance_instance = []\n for range in [{"value": i} for i in range(0, 2)]:\n instance_instance.append(alicloud.ecs.Instance(f"instanceInstance-{range[\'value\']}",\n image_id=image.images[0].id,\n instance_type=default_instance_types.instance_types[0].id,\n instance_name=name,\n security_groups=[group_security_group.id],\n internet_charge_type="PayByTraffic",\n internet_max_bandwidth_out=10,\n availability_zone=default_zones.zones[0].id,\n instance_charge_type="PostPaid",\n system_disk_category="cloud_efficiency",\n vswitch_id=main_switch.id))\n instance_application_load_balancer = 
alicloud.slb.ApplicationLoadBalancer("instanceApplicationLoadBalancer",\n load_balancer_name=name,\n vswitch_id=main_switch.id,\n load_balancer_spec="slb.s2.small")\n group_master_slave_server_group = alicloud.slb.MasterSlaveServerGroup("groupMasterSlaveServerGroup",\n load_balancer_id=instance_application_load_balancer.id,\n servers=[\n alicloud.slb.MasterSlaveServerGroupServerArgs(\n server_id=instance_instance[0].id,\n port=100,\n weight=100,\n server_type="Master",\n ),\n alicloud.slb.MasterSlaveServerGroupServerArgs(\n server_id=instance_instance[1].id,\n port=100,\n weight=100,\n server_type="Slave",\n ),\n ])\n sample_ds = instance_application_load_balancer.id.apply(lambda id: alicloud.slb.get_master_slave_server_groups(load_balancer_id=id))\n pulumi.export("firstSlbServerGroupId", sample_ds.groups[0].id)\n ```\n\n\n :param Sequence[str] ids: A list of master slave server group IDs to filter results.\n :param str load_balancer_id: ID of the SLB.\n :param str name_regex: A regex string to filter results by master slave server group name.\n ' __args__ = dict() __args__['ids'] = ids __args__['loadBalancerId'] = load_balancer_id __args__['nameRegex'] = name_regex __args__['outputFile'] = output_file if (opts is None): opts = pulumi.InvokeOptions() if (opts.version is None): opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('alicloud:slb/getMasterSlaveServerGroups:getMasterSlaveServerGroups', __args__, opts=opts, typ=GetMasterSlaveServerGroupsResult).value return AwaitableGetMasterSlaveServerGroupsResult(groups=__ret__.groups, id=__ret__.id, ids=__ret__.ids, load_balancer_id=__ret__.load_balancer_id, name_regex=__ret__.name_regex, names=__ret__.names, output_file=__ret__.output_file)
This data source provides the master slave server groups related to a server load balancer. > **NOTE:** Available in 1.54.0+ ## Example Usage ```python import pulumi import pulumi_alicloud as alicloud default_zones = alicloud.get_zones(available_disk_category="cloud_efficiency", available_resource_creation="VSwitch") default_instance_types = alicloud.ecs.get_instance_types(availability_zone=default_zones.zones[0].id, eni_amount=2) image = alicloud.ecs.get_images(name_regex="^ubuntu_18.*64", most_recent=True, owners="system") config = pulumi.Config() name = config.get("name") if name is None: name = "tf-testAccSlbMasterSlaveServerGroupVpc" number = config.get("number") if number is None: number = "1" main_network = alicloud.vpc.Network("mainNetwork", cidr_block="172.16.0.0/16") main_switch = alicloud.vpc.Switch("mainSwitch", vpc_id=main_network.id, zone_id=default_zones.zones[0].id, vswitch_name=name, cidr_block="172.16.0.0/16") group_security_group = alicloud.ecs.SecurityGroup("groupSecurityGroup", vpc_id=main_network.id) instance_instance = [] for range in [{"value": i} for i in range(0, 2)]: instance_instance.append(alicloud.ecs.Instance(f"instanceInstance-{range['value']}", image_id=image.images[0].id, instance_type=default_instance_types.instance_types[0].id, instance_name=name, security_groups=[group_security_group.id], internet_charge_type="PayByTraffic", internet_max_bandwidth_out=10, availability_zone=default_zones.zones[0].id, instance_charge_type="PostPaid", system_disk_category="cloud_efficiency", vswitch_id=main_switch.id)) instance_application_load_balancer = alicloud.slb.ApplicationLoadBalancer("instanceApplicationLoadBalancer", load_balancer_name=name, vswitch_id=main_switch.id, load_balancer_spec="slb.s2.small") group_master_slave_server_group = alicloud.slb.MasterSlaveServerGroup("groupMasterSlaveServerGroup", load_balancer_id=instance_application_load_balancer.id, servers=[ alicloud.slb.MasterSlaveServerGroupServerArgs( 
server_id=instance_instance[0].id, port=100, weight=100, server_type="Master", ), alicloud.slb.MasterSlaveServerGroupServerArgs( server_id=instance_instance[1].id, port=100, weight=100, server_type="Slave", ), ]) sample_ds = instance_application_load_balancer.id.apply(lambda id: alicloud.slb.get_master_slave_server_groups(load_balancer_id=id)) pulumi.export("firstSlbServerGroupId", sample_ds.groups[0].id) ``` :param Sequence[str] ids: A list of master slave server group IDs to filter results. :param str load_balancer_id: ID of the SLB. :param str name_regex: A regex string to filter results by master slave server group name.
sdk/python/pulumi_alicloud/slb/get_master_slave_server_groups.py
get_master_slave_server_groups
pulumi/pulumi-alicloud
42
python
def get_master_slave_server_groups(ids: Optional[Sequence[str]]=None, load_balancer_id: Optional[str]=None, name_regex: Optional[str]=None, output_file: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetMasterSlaveServerGroupsResult: '\n This data source provides the master slave server groups related to a server load balancer.\n\n > **NOTE:** Available in 1.54.0+\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_alicloud as alicloud\n\n default_zones = alicloud.get_zones(available_disk_category="cloud_efficiency",\n available_resource_creation="VSwitch")\n default_instance_types = alicloud.ecs.get_instance_types(availability_zone=default_zones.zones[0].id,\n eni_amount=2)\n image = alicloud.ecs.get_images(name_regex="^ubuntu_18.*64",\n most_recent=True,\n owners="system")\n config = pulumi.Config()\n name = config.get("name")\n if name is None:\n name = "tf-testAccSlbMasterSlaveServerGroupVpc"\n number = config.get("number")\n if number is None:\n number = "1"\n main_network = alicloud.vpc.Network("mainNetwork", cidr_block="172.16.0.0/16")\n main_switch = alicloud.vpc.Switch("mainSwitch",\n vpc_id=main_network.id,\n zone_id=default_zones.zones[0].id,\n vswitch_name=name,\n cidr_block="172.16.0.0/16")\n group_security_group = alicloud.ecs.SecurityGroup("groupSecurityGroup", vpc_id=main_network.id)\n instance_instance = []\n for range in [{"value": i} for i in range(0, 2)]:\n instance_instance.append(alicloud.ecs.Instance(f"instanceInstance-{range[\'value\']}",\n image_id=image.images[0].id,\n instance_type=default_instance_types.instance_types[0].id,\n instance_name=name,\n security_groups=[group_security_group.id],\n internet_charge_type="PayByTraffic",\n internet_max_bandwidth_out=10,\n availability_zone=default_zones.zones[0].id,\n instance_charge_type="PostPaid",\n system_disk_category="cloud_efficiency",\n vswitch_id=main_switch.id))\n instance_application_load_balancer = 
alicloud.slb.ApplicationLoadBalancer("instanceApplicationLoadBalancer",\n load_balancer_name=name,\n vswitch_id=main_switch.id,\n load_balancer_spec="slb.s2.small")\n group_master_slave_server_group = alicloud.slb.MasterSlaveServerGroup("groupMasterSlaveServerGroup",\n load_balancer_id=instance_application_load_balancer.id,\n servers=[\n alicloud.slb.MasterSlaveServerGroupServerArgs(\n server_id=instance_instance[0].id,\n port=100,\n weight=100,\n server_type="Master",\n ),\n alicloud.slb.MasterSlaveServerGroupServerArgs(\n server_id=instance_instance[1].id,\n port=100,\n weight=100,\n server_type="Slave",\n ),\n ])\n sample_ds = instance_application_load_balancer.id.apply(lambda id: alicloud.slb.get_master_slave_server_groups(load_balancer_id=id))\n pulumi.export("firstSlbServerGroupId", sample_ds.groups[0].id)\n ```\n\n\n :param Sequence[str] ids: A list of master slave server group IDs to filter results.\n :param str load_balancer_id: ID of the SLB.\n :param str name_regex: A regex string to filter results by master slave server group name.\n ' __args__ = dict() __args__['ids'] = ids __args__['loadBalancerId'] = load_balancer_id __args__['nameRegex'] = name_regex __args__['outputFile'] = output_file if (opts is None): opts = pulumi.InvokeOptions() if (opts.version is None): opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('alicloud:slb/getMasterSlaveServerGroups:getMasterSlaveServerGroups', __args__, opts=opts, typ=GetMasterSlaveServerGroupsResult).value return AwaitableGetMasterSlaveServerGroupsResult(groups=__ret__.groups, id=__ret__.id, ids=__ret__.ids, load_balancer_id=__ret__.load_balancer_id, name_regex=__ret__.name_regex, names=__ret__.names, output_file=__ret__.output_file)
def get_master_slave_server_groups(ids: Optional[Sequence[str]]=None, load_balancer_id: Optional[str]=None, name_regex: Optional[str]=None, output_file: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetMasterSlaveServerGroupsResult: '\n This data source provides the master slave server groups related to a server load balancer.\n\n > **NOTE:** Available in 1.54.0+\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_alicloud as alicloud\n\n default_zones = alicloud.get_zones(available_disk_category="cloud_efficiency",\n available_resource_creation="VSwitch")\n default_instance_types = alicloud.ecs.get_instance_types(availability_zone=default_zones.zones[0].id,\n eni_amount=2)\n image = alicloud.ecs.get_images(name_regex="^ubuntu_18.*64",\n most_recent=True,\n owners="system")\n config = pulumi.Config()\n name = config.get("name")\n if name is None:\n name = "tf-testAccSlbMasterSlaveServerGroupVpc"\n number = config.get("number")\n if number is None:\n number = "1"\n main_network = alicloud.vpc.Network("mainNetwork", cidr_block="172.16.0.0/16")\n main_switch = alicloud.vpc.Switch("mainSwitch",\n vpc_id=main_network.id,\n zone_id=default_zones.zones[0].id,\n vswitch_name=name,\n cidr_block="172.16.0.0/16")\n group_security_group = alicloud.ecs.SecurityGroup("groupSecurityGroup", vpc_id=main_network.id)\n instance_instance = []\n for range in [{"value": i} for i in range(0, 2)]:\n instance_instance.append(alicloud.ecs.Instance(f"instanceInstance-{range[\'value\']}",\n image_id=image.images[0].id,\n instance_type=default_instance_types.instance_types[0].id,\n instance_name=name,\n security_groups=[group_security_group.id],\n internet_charge_type="PayByTraffic",\n internet_max_bandwidth_out=10,\n availability_zone=default_zones.zones[0].id,\n instance_charge_type="PostPaid",\n system_disk_category="cloud_efficiency",\n vswitch_id=main_switch.id))\n instance_application_load_balancer = 
alicloud.slb.ApplicationLoadBalancer("instanceApplicationLoadBalancer",\n load_balancer_name=name,\n vswitch_id=main_switch.id,\n load_balancer_spec="slb.s2.small")\n group_master_slave_server_group = alicloud.slb.MasterSlaveServerGroup("groupMasterSlaveServerGroup",\n load_balancer_id=instance_application_load_balancer.id,\n servers=[\n alicloud.slb.MasterSlaveServerGroupServerArgs(\n server_id=instance_instance[0].id,\n port=100,\n weight=100,\n server_type="Master",\n ),\n alicloud.slb.MasterSlaveServerGroupServerArgs(\n server_id=instance_instance[1].id,\n port=100,\n weight=100,\n server_type="Slave",\n ),\n ])\n sample_ds = instance_application_load_balancer.id.apply(lambda id: alicloud.slb.get_master_slave_server_groups(load_balancer_id=id))\n pulumi.export("firstSlbServerGroupId", sample_ds.groups[0].id)\n ```\n\n\n :param Sequence[str] ids: A list of master slave server group IDs to filter results.\n :param str load_balancer_id: ID of the SLB.\n :param str name_regex: A regex string to filter results by master slave server group name.\n ' __args__ = dict() __args__['ids'] = ids __args__['loadBalancerId'] = load_balancer_id __args__['nameRegex'] = name_regex __args__['outputFile'] = output_file if (opts is None): opts = pulumi.InvokeOptions() if (opts.version is None): opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('alicloud:slb/getMasterSlaveServerGroups:getMasterSlaveServerGroups', __args__, opts=opts, typ=GetMasterSlaveServerGroupsResult).value return AwaitableGetMasterSlaveServerGroupsResult(groups=__ret__.groups, id=__ret__.id, ids=__ret__.ids, load_balancer_id=__ret__.load_balancer_id, name_regex=__ret__.name_regex, names=__ret__.names, output_file=__ret__.output_file)<|docstring|>This data source provides the master slave server groups related to a server load balancer. 
> **NOTE:** Available in 1.54.0+ ## Example Usage ```python import pulumi import pulumi_alicloud as alicloud default_zones = alicloud.get_zones(available_disk_category="cloud_efficiency", available_resource_creation="VSwitch") default_instance_types = alicloud.ecs.get_instance_types(availability_zone=default_zones.zones[0].id, eni_amount=2) image = alicloud.ecs.get_images(name_regex="^ubuntu_18.*64", most_recent=True, owners="system") config = pulumi.Config() name = config.get("name") if name is None: name = "tf-testAccSlbMasterSlaveServerGroupVpc" number = config.get("number") if number is None: number = "1" main_network = alicloud.vpc.Network("mainNetwork", cidr_block="172.16.0.0/16") main_switch = alicloud.vpc.Switch("mainSwitch", vpc_id=main_network.id, zone_id=default_zones.zones[0].id, vswitch_name=name, cidr_block="172.16.0.0/16") group_security_group = alicloud.ecs.SecurityGroup("groupSecurityGroup", vpc_id=main_network.id) instance_instance = [] for range in [{"value": i} for i in range(0, 2)]: instance_instance.append(alicloud.ecs.Instance(f"instanceInstance-{range['value']}", image_id=image.images[0].id, instance_type=default_instance_types.instance_types[0].id, instance_name=name, security_groups=[group_security_group.id], internet_charge_type="PayByTraffic", internet_max_bandwidth_out=10, availability_zone=default_zones.zones[0].id, instance_charge_type="PostPaid", system_disk_category="cloud_efficiency", vswitch_id=main_switch.id)) instance_application_load_balancer = alicloud.slb.ApplicationLoadBalancer("instanceApplicationLoadBalancer", load_balancer_name=name, vswitch_id=main_switch.id, load_balancer_spec="slb.s2.small") group_master_slave_server_group = alicloud.slb.MasterSlaveServerGroup("groupMasterSlaveServerGroup", load_balancer_id=instance_application_load_balancer.id, servers=[ alicloud.slb.MasterSlaveServerGroupServerArgs( server_id=instance_instance[0].id, port=100, weight=100, server_type="Master", ), 
alicloud.slb.MasterSlaveServerGroupServerArgs( server_id=instance_instance[1].id, port=100, weight=100, server_type="Slave", ), ]) sample_ds = instance_application_load_balancer.id.apply(lambda id: alicloud.slb.get_master_slave_server_groups(load_balancer_id=id)) pulumi.export("firstSlbServerGroupId", sample_ds.groups[0].id) ``` :param Sequence[str] ids: A list of master slave server group IDs to filter results. :param str load_balancer_id: ID of the SLB. :param str name_regex: A regex string to filter results by master slave server group name.<|endoftext|>
99fc7c6e595e6c82af7161f04fc3452b4222d3b7960f8495393f11a5a376d48d
@property @pulumi.getter def groups(self) -> Sequence['outputs.GetMasterSlaveServerGroupsGroupResult']: '\n A list of SLB master slave server groups. Each element contains the following attributes:\n ' return pulumi.get(self, 'groups')
A list of SLB master slave server groups. Each element contains the following attributes:
sdk/python/pulumi_alicloud/slb/get_master_slave_server_groups.py
groups
pulumi/pulumi-alicloud
42
python
@property @pulumi.getter def groups(self) -> Sequence['outputs.GetMasterSlaveServerGroupsGroupResult']: '\n \n ' return pulumi.get(self, 'groups')
@property @pulumi.getter def groups(self) -> Sequence['outputs.GetMasterSlaveServerGroupsGroupResult']: '\n \n ' return pulumi.get(self, 'groups')<|docstring|>A list of SLB master slave server groups. Each element contains the following attributes:<|endoftext|>
bcf5b51a327014088b63f706e1dc3987198031e1f0241bd10b06cf4dd5bcb53c
@property @pulumi.getter def id(self) -> str: '\n The provider-assigned unique ID for this managed resource.\n ' return pulumi.get(self, 'id')
The provider-assigned unique ID for this managed resource.
sdk/python/pulumi_alicloud/slb/get_master_slave_server_groups.py
id
pulumi/pulumi-alicloud
42
python
@property @pulumi.getter def id(self) -> str: '\n \n ' return pulumi.get(self, 'id')
@property @pulumi.getter def id(self) -> str: '\n \n ' return pulumi.get(self, 'id')<|docstring|>The provider-assigned unique ID for this managed resource.<|endoftext|>
09296f0142560310a425fc9c6494e92a140c9c23b140eeda159683d526a43261
@property @pulumi.getter def ids(self) -> Sequence[str]: '\n A list of SLB master slave server groups IDs.\n ' return pulumi.get(self, 'ids')
A list of SLB master slave server groups IDs.
sdk/python/pulumi_alicloud/slb/get_master_slave_server_groups.py
ids
pulumi/pulumi-alicloud
42
python
@property @pulumi.getter def ids(self) -> Sequence[str]: '\n \n ' return pulumi.get(self, 'ids')
@property @pulumi.getter def ids(self) -> Sequence[str]: '\n \n ' return pulumi.get(self, 'ids')<|docstring|>A list of SLB master slave server groups IDs.<|endoftext|>
a3e182ffd9c352c8775cb4127de838ef5cc414b2d3ed1e6bdf68820a21a74167
@property @pulumi.getter def names(self) -> Sequence[str]: '\n A list of SLB master slave server groups names.\n ' return pulumi.get(self, 'names')
A list of SLB master slave server groups names.
sdk/python/pulumi_alicloud/slb/get_master_slave_server_groups.py
names
pulumi/pulumi-alicloud
42
python
@property @pulumi.getter def names(self) -> Sequence[str]: '\n \n ' return pulumi.get(self, 'names')
@property @pulumi.getter def names(self) -> Sequence[str]: '\n \n ' return pulumi.get(self, 'names')<|docstring|>A list of SLB master slave server groups names.<|endoftext|>
b6470c375e5582a85af28c0222af751942fd933f5535990c0555da75f3793842
def run(command: str, working_dir: str=Path.cwd()) -> None: 'Run command using subprocess.run()' subprocess.run(command, cwd=working_dir, check=True)
Run command using subprocess.run()
scripts/check_swiftshader_runtime.py
run
whitemike889/VK-GL-CTS
354
python
def run(command: str, working_dir: str=Path.cwd()) -> None: subprocess.run(command, cwd=working_dir, check=True)
def run(command: str, working_dir: str=Path.cwd()) -> None: subprocess.run(command, cwd=working_dir, check=True)<|docstring|>Run command using subprocess.run()<|endoftext|>
1bd4ec1e4e041e9677d9b31f9509ad12ed45555fded9faf4f5c0c00f354b1f6d
def META_BUILDER_3(name: str, fun: str, input_cls: Type[Element], output_cls: Type[Element], skips: List[str]) -> DEF: '\n formal inputs : True\n formal outputs : True\n has_skips: False\n reflexive: False\n\n :param name: the wrapper name\n :param fun: the wrapped name\n :param input_cls: the input class of elements\n :param output_cls: the output class of elements\n :param skips: the values to skip\n :return:\n ' SRC = VAR('src') CUR = VAR('cur') OLD = VAR('old') NEW = VAR('new') FUN = VAR(fun) CLS = VAR(output_cls.__name__) POS = VAR('pos') def CURSOR(i): return CLS.CALL(ARG('at', i), ARG('to', i), ARG('value', 0)) GENERATOR = CUR.METH('develop', FUN.CALL(CUR, OLD), OLD) def UPDATE_CUR(obj): return CUR.ASSIGN(obj) NON_TERMINAL_CLAUSE = IF(NEW.GETATTR('is_terminal').NOT(), BLOCK(UPDATE_CUR(NEW), CONTINUE)) VALID_CLAUSE = IF(NEW.GETATTR('is_valid'), BLOCK(UPDATE_CUR(CURSOR(NEW.GETATTR('to'))), IF(NEW.GETATTR('value').IN(LIST(list(map(STR, skips)))), CONTINUE).ELSE(NEW.ASSIGN(VAR('replace').CALL(NEW, ARG('at', POS), ARG('to', POS.ADD(1)))), POS.IADD(1)), NEW.YIELD(), CONTINUE)) EOF_CLAUSE = IF(OLD.GETATTR('value').EQ(STR('EOF')), BLOCK(CLS.METH('EOF', POS).YIELD(), BREAK)) EACH_ITEM = BLOCK(WHILE(CUR.GETATTR('to').EQ(OLD.GETATTR('at')), BLOCK(NEW.ASSIGN(GENERATOR, t=output_cls), NON_TERMINAL_CLAUSE, VALID_CLAUSE, EOF_CLAUSE, VAR('SyntaxError').CALL(TUPLE((CUR, OLD, NEW))).RAISE()))) return DEF(name=name, args=SRC.ARG(t=f'Iterator[{input_cls.__name__}]'), t=f'Iterator[{output_cls.__name__}]', block=BLOCK(IMPORT.FROM('typing', 'Iterator'), IMPORT.FROM(input_cls.__module__, input_cls.__name__), IMPORT.FROM(output_cls.__module__, output_cls.__name__), IMPORT.FROM('dataclasses', 'replace'), CUR.ASSIGN(CURSOR(0), t=output_cls), POS.ASSIGN(0, t=int), FOR(OLD, SRC, EACH_ITEM)))
formal inputs : True formal outputs : True has_skips: False reflexive: False :param name: the wrapper name :param fun: the wrapped name :param input_cls: the input class of elements :param output_cls: the output class of elements :param skips: the values to skip :return:
item_engine/builders/MB3.py
META_BUILDER_3
GabrielAmare/TextEngine
0
python
def META_BUILDER_3(name: str, fun: str, input_cls: Type[Element], output_cls: Type[Element], skips: List[str]) -> DEF: '\n formal inputs : True\n formal outputs : True\n has_skips: False\n reflexive: False\n\n :param name: the wrapper name\n :param fun: the wrapped name\n :param input_cls: the input class of elements\n :param output_cls: the output class of elements\n :param skips: the values to skip\n :return:\n ' SRC = VAR('src') CUR = VAR('cur') OLD = VAR('old') NEW = VAR('new') FUN = VAR(fun) CLS = VAR(output_cls.__name__) POS = VAR('pos') def CURSOR(i): return CLS.CALL(ARG('at', i), ARG('to', i), ARG('value', 0)) GENERATOR = CUR.METH('develop', FUN.CALL(CUR, OLD), OLD) def UPDATE_CUR(obj): return CUR.ASSIGN(obj) NON_TERMINAL_CLAUSE = IF(NEW.GETATTR('is_terminal').NOT(), BLOCK(UPDATE_CUR(NEW), CONTINUE)) VALID_CLAUSE = IF(NEW.GETATTR('is_valid'), BLOCK(UPDATE_CUR(CURSOR(NEW.GETATTR('to'))), IF(NEW.GETATTR('value').IN(LIST(list(map(STR, skips)))), CONTINUE).ELSE(NEW.ASSIGN(VAR('replace').CALL(NEW, ARG('at', POS), ARG('to', POS.ADD(1)))), POS.IADD(1)), NEW.YIELD(), CONTINUE)) EOF_CLAUSE = IF(OLD.GETATTR('value').EQ(STR('EOF')), BLOCK(CLS.METH('EOF', POS).YIELD(), BREAK)) EACH_ITEM = BLOCK(WHILE(CUR.GETATTR('to').EQ(OLD.GETATTR('at')), BLOCK(NEW.ASSIGN(GENERATOR, t=output_cls), NON_TERMINAL_CLAUSE, VALID_CLAUSE, EOF_CLAUSE, VAR('SyntaxError').CALL(TUPLE((CUR, OLD, NEW))).RAISE()))) return DEF(name=name, args=SRC.ARG(t=f'Iterator[{input_cls.__name__}]'), t=f'Iterator[{output_cls.__name__}]', block=BLOCK(IMPORT.FROM('typing', 'Iterator'), IMPORT.FROM(input_cls.__module__, input_cls.__name__), IMPORT.FROM(output_cls.__module__, output_cls.__name__), IMPORT.FROM('dataclasses', 'replace'), CUR.ASSIGN(CURSOR(0), t=output_cls), POS.ASSIGN(0, t=int), FOR(OLD, SRC, EACH_ITEM)))
def META_BUILDER_3(name: str, fun: str, input_cls: Type[Element], output_cls: Type[Element], skips: List[str]) -> DEF: '\n formal inputs : True\n formal outputs : True\n has_skips: False\n reflexive: False\n\n :param name: the wrapper name\n :param fun: the wrapped name\n :param input_cls: the input class of elements\n :param output_cls: the output class of elements\n :param skips: the values to skip\n :return:\n ' SRC = VAR('src') CUR = VAR('cur') OLD = VAR('old') NEW = VAR('new') FUN = VAR(fun) CLS = VAR(output_cls.__name__) POS = VAR('pos') def CURSOR(i): return CLS.CALL(ARG('at', i), ARG('to', i), ARG('value', 0)) GENERATOR = CUR.METH('develop', FUN.CALL(CUR, OLD), OLD) def UPDATE_CUR(obj): return CUR.ASSIGN(obj) NON_TERMINAL_CLAUSE = IF(NEW.GETATTR('is_terminal').NOT(), BLOCK(UPDATE_CUR(NEW), CONTINUE)) VALID_CLAUSE = IF(NEW.GETATTR('is_valid'), BLOCK(UPDATE_CUR(CURSOR(NEW.GETATTR('to'))), IF(NEW.GETATTR('value').IN(LIST(list(map(STR, skips)))), CONTINUE).ELSE(NEW.ASSIGN(VAR('replace').CALL(NEW, ARG('at', POS), ARG('to', POS.ADD(1)))), POS.IADD(1)), NEW.YIELD(), CONTINUE)) EOF_CLAUSE = IF(OLD.GETATTR('value').EQ(STR('EOF')), BLOCK(CLS.METH('EOF', POS).YIELD(), BREAK)) EACH_ITEM = BLOCK(WHILE(CUR.GETATTR('to').EQ(OLD.GETATTR('at')), BLOCK(NEW.ASSIGN(GENERATOR, t=output_cls), NON_TERMINAL_CLAUSE, VALID_CLAUSE, EOF_CLAUSE, VAR('SyntaxError').CALL(TUPLE((CUR, OLD, NEW))).RAISE()))) return DEF(name=name, args=SRC.ARG(t=f'Iterator[{input_cls.__name__}]'), t=f'Iterator[{output_cls.__name__}]', block=BLOCK(IMPORT.FROM('typing', 'Iterator'), IMPORT.FROM(input_cls.__module__, input_cls.__name__), IMPORT.FROM(output_cls.__module__, output_cls.__name__), IMPORT.FROM('dataclasses', 'replace'), CUR.ASSIGN(CURSOR(0), t=output_cls), POS.ASSIGN(0, t=int), FOR(OLD, SRC, EACH_ITEM)))<|docstring|>formal inputs : True formal outputs : True has_skips: False reflexive: False :param name: the wrapper name :param fun: the wrapped name :param input_cls: the input class of elements 
:param output_cls: the output class of elements :param skips: the values to skip :return:<|endoftext|>
180f432690a0fd952d857835c9a8f5a7f10b3ea11c7f2eae74ebd7b8cfdb8bfb
def run(self): 'Run all tests!' errno = call(['py.test', '--verbose', '--cov=plugnpy', '--cov-report=term-missing', '--cov-config=.coveragerc', '--junitxml=.junit.xml']) raise SystemExit(errno)
Run all tests!
setup.py
run
bt-thiago/plugnpy
0
python
def run(self): errno = call(['py.test', '--verbose', '--cov=plugnpy', '--cov-report=term-missing', '--cov-config=.coveragerc', '--junitxml=.junit.xml']) raise SystemExit(errno)
def run(self): errno = call(['py.test', '--verbose', '--cov=plugnpy', '--cov-report=term-missing', '--cov-config=.coveragerc', '--junitxml=.junit.xml']) raise SystemExit(errno)<|docstring|>Run all tests!<|endoftext|>
a32c9e30f6bab6cb61b5f18668fbb55e43d409536bd7f9bdcaa273e119613611
@click.command(context_settings=dict(max_content_width=400), name='get', help='Gets the specified property.') @click.argument('prop', required=True, default='') @click.argument('jail', required=True, default='') @click.option('--header', '-h', '-H', is_flag=True, default=True, help='For scripting, use tabs for separators.') @click.option('--recursive', '-r', help='Get the specified property for all jails.', flag_value='recursive') @click.option('--plugin', '-P', help='Get the specified key for a plugin jail, if accessing a nested key use . as a separator.\n\x08 Example: iocage get -P foo.bar.baz PLUGIN', is_flag=True) @click.option('--all', '-a', '_type', help='Get all properties for the specified jail.', flag_value='all') @click.option('--pool', '-p', '_pool', help='Get the currently activated zpool.', is_flag=True) @click.option('--state', '-s', '_type', help='Get the jails state', flag_value='state') @click.option('--jid', '-j', '_type', help='Get the jails jid', flag_value='jid') @click.option('--force', '-f', default=False, is_flag=True, help="Start the jail for plugin properties if it's not running.") def cli(prop, _type, _pool, jail, recursive, header, plugin, force): 'Get a list of jails and print the property.' table = texttable.Texttable(max_width=0) if _type: jail = prop prop = _type elif _pool: pool = ioc.IOCage(skip_jails=True).get('', pool=True) ioc_common.logit({'level': 'INFO', 'message': pool}) exit() elif ((not jail) and (not recursive)): ioc_common.logit({'level': 'EXCEPTION', 'message': 'You must specify a jail!'}) if ((_type == 'all') and recursive): ioc_common.logit({'level': 'EXCEPTION', 'message': 'You cannot use --all (-a) and --recursive (-r) together. 
'}) if ((_type == 'all') and (not jail)): ioc_common.logit({'level': 'EXCEPTION', 'message': 'Please specify a jail name when using -a flag.'}) if (not recursive): if ((prop == 'state') or (_type == 'state')): state = ioc.IOCage(jail=jail).get(prop) ioc_common.logit({'level': 'INFO', 'message': state}) elif ((prop == 'jid') or (_type == 'jid')): jid = ioc.IOCage(jail=jail).list('jid', uuid=jail)[1] ioc_common.logit({'level': 'INFO', 'message': jid}) elif plugin: _plugin = ioc.IOCage(jail=jail, skip_jails=True).get(prop, plugin=True, start_jail=force) ioc_common.logit({'level': 'INFO', 'message': _plugin}) elif (prop == 'all'): props = ioc.IOCage(jail=jail, skip_jails=True).get(prop) for (p, v) in props.items(): ioc_common.logit({'level': 'INFO', 'message': f'{p}:{v}'}) else: p = ioc.IOCage(jail=jail, skip_jails=True).get(prop) ioc_common.logit({'level': 'INFO', 'message': p}) else: jails = ioc.IOCage().get(prop, recursive=True) table.header(['NAME', f'PROP - {prop}']) for jail_dict in jails: for (jail, prop) in jail_dict.items(): if header: table.add_row([jail, prop]) else: ioc_common.logit({'level': 'INFO', 'message': f'{jail} {prop}'}) if header: ioc_common.logit({'level': 'INFO', 'message': table.draw()})
Get a list of jails and print the property.
iocage_cli/get.py
cli
Corvan/iocage
827
python
@click.command(context_settings=dict(max_content_width=400), name='get', help='Gets the specified property.') @click.argument('prop', required=True, default=) @click.argument('jail', required=True, default=) @click.option('--header', '-h', '-H', is_flag=True, default=True, help='For scripting, use tabs for separators.') @click.option('--recursive', '-r', help='Get the specified property for all jails.', flag_value='recursive') @click.option('--plugin', '-P', help='Get the specified key for a plugin jail, if accessing a nested key use . as a separator.\n\x08 Example: iocage get -P foo.bar.baz PLUGIN', is_flag=True) @click.option('--all', '-a', '_type', help='Get all properties for the specified jail.', flag_value='all') @click.option('--pool', '-p', '_pool', help='Get the currently activated zpool.', is_flag=True) @click.option('--state', '-s', '_type', help='Get the jails state', flag_value='state') @click.option('--jid', '-j', '_type', help='Get the jails jid', flag_value='jid') @click.option('--force', '-f', default=False, is_flag=True, help="Start the jail for plugin properties if it's not running.") def cli(prop, _type, _pool, jail, recursive, header, plugin, force): table = texttable.Texttable(max_width=0) if _type: jail = prop prop = _type elif _pool: pool = ioc.IOCage(skip_jails=True).get(, pool=True) ioc_common.logit({'level': 'INFO', 'message': pool}) exit() elif ((not jail) and (not recursive)): ioc_common.logit({'level': 'EXCEPTION', 'message': 'You must specify a jail!'}) if ((_type == 'all') and recursive): ioc_common.logit({'level': 'EXCEPTION', 'message': 'You cannot use --all (-a) and --recursive (-r) together. 
'}) if ((_type == 'all') and (not jail)): ioc_common.logit({'level': 'EXCEPTION', 'message': 'Please specify a jail name when using -a flag.'}) if (not recursive): if ((prop == 'state') or (_type == 'state')): state = ioc.IOCage(jail=jail).get(prop) ioc_common.logit({'level': 'INFO', 'message': state}) elif ((prop == 'jid') or (_type == 'jid')): jid = ioc.IOCage(jail=jail).list('jid', uuid=jail)[1] ioc_common.logit({'level': 'INFO', 'message': jid}) elif plugin: _plugin = ioc.IOCage(jail=jail, skip_jails=True).get(prop, plugin=True, start_jail=force) ioc_common.logit({'level': 'INFO', 'message': _plugin}) elif (prop == 'all'): props = ioc.IOCage(jail=jail, skip_jails=True).get(prop) for (p, v) in props.items(): ioc_common.logit({'level': 'INFO', 'message': f'{p}:{v}'}) else: p = ioc.IOCage(jail=jail, skip_jails=True).get(prop) ioc_common.logit({'level': 'INFO', 'message': p}) else: jails = ioc.IOCage().get(prop, recursive=True) table.header(['NAME', f'PROP - {prop}']) for jail_dict in jails: for (jail, prop) in jail_dict.items(): if header: table.add_row([jail, prop]) else: ioc_common.logit({'level': 'INFO', 'message': f'{jail} {prop}'}) if header: ioc_common.logit({'level': 'INFO', 'message': table.draw()})
@click.command(context_settings=dict(max_content_width=400), name='get', help='Gets the specified property.') @click.argument('prop', required=True, default=) @click.argument('jail', required=True, default=) @click.option('--header', '-h', '-H', is_flag=True, default=True, help='For scripting, use tabs for separators.') @click.option('--recursive', '-r', help='Get the specified property for all jails.', flag_value='recursive') @click.option('--plugin', '-P', help='Get the specified key for a plugin jail, if accessing a nested key use . as a separator.\n\x08 Example: iocage get -P foo.bar.baz PLUGIN', is_flag=True) @click.option('--all', '-a', '_type', help='Get all properties for the specified jail.', flag_value='all') @click.option('--pool', '-p', '_pool', help='Get the currently activated zpool.', is_flag=True) @click.option('--state', '-s', '_type', help='Get the jails state', flag_value='state') @click.option('--jid', '-j', '_type', help='Get the jails jid', flag_value='jid') @click.option('--force', '-f', default=False, is_flag=True, help="Start the jail for plugin properties if it's not running.") def cli(prop, _type, _pool, jail, recursive, header, plugin, force): table = texttable.Texttable(max_width=0) if _type: jail = prop prop = _type elif _pool: pool = ioc.IOCage(skip_jails=True).get(, pool=True) ioc_common.logit({'level': 'INFO', 'message': pool}) exit() elif ((not jail) and (not recursive)): ioc_common.logit({'level': 'EXCEPTION', 'message': 'You must specify a jail!'}) if ((_type == 'all') and recursive): ioc_common.logit({'level': 'EXCEPTION', 'message': 'You cannot use --all (-a) and --recursive (-r) together. 
'}) if ((_type == 'all') and (not jail)): ioc_common.logit({'level': 'EXCEPTION', 'message': 'Please specify a jail name when using -a flag.'}) if (not recursive): if ((prop == 'state') or (_type == 'state')): state = ioc.IOCage(jail=jail).get(prop) ioc_common.logit({'level': 'INFO', 'message': state}) elif ((prop == 'jid') or (_type == 'jid')): jid = ioc.IOCage(jail=jail).list('jid', uuid=jail)[1] ioc_common.logit({'level': 'INFO', 'message': jid}) elif plugin: _plugin = ioc.IOCage(jail=jail, skip_jails=True).get(prop, plugin=True, start_jail=force) ioc_common.logit({'level': 'INFO', 'message': _plugin}) elif (prop == 'all'): props = ioc.IOCage(jail=jail, skip_jails=True).get(prop) for (p, v) in props.items(): ioc_common.logit({'level': 'INFO', 'message': f'{p}:{v}'}) else: p = ioc.IOCage(jail=jail, skip_jails=True).get(prop) ioc_common.logit({'level': 'INFO', 'message': p}) else: jails = ioc.IOCage().get(prop, recursive=True) table.header(['NAME', f'PROP - {prop}']) for jail_dict in jails: for (jail, prop) in jail_dict.items(): if header: table.add_row([jail, prop]) else: ioc_common.logit({'level': 'INFO', 'message': f'{jail} {prop}'}) if header: ioc_common.logit({'level': 'INFO', 'message': table.draw()})<|docstring|>Get a list of jails and print the property.<|endoftext|>
5e99172c6c26526eabfce5e64a9859857b5ab2316d4bb6e620aa19a329fae194
def word_vector(utterances, corpus): '\n Este es el caso los modelos no secuenciales; se devuelve el vector disperso\n con tantos valores como tenga la bolsa de palabras pero con las frecuencias de las palabras\n que aparezcan en la consulta en cuestion\n ' words = [] training = [] for text in utterances: w = re.findall('\\w+', text.lower(), flags=re.UNICODE) words = w words = [word for word in words if (word not in stopwords.words('english'))] words = [word for word in words if (not word.isdigit())] words = [stemmer.stem(w) for w in words] words = [(x if (x in corpus) else 'UNK') for x in words] bag_new = [] bag_final = [] [bag_new.append([w, 0]) for w in corpus] df_bag = pd.DataFrame(bag_new) df_bag.columns = ['palabras', 'ocurrencias'] df_pattern = pd.DataFrame(words) df_pattern['ocurrencias'] = 1 df_pattern.columns = ['palabras', 'ocurrencias'] df_pattern = df_pattern.groupby(['palabras'])['ocurrencias'].sum() df_pattern = df_pattern.reset_index(level=['palabras']) df = pd.merge(df_bag, df_pattern, on='palabras', how='left').fillna(0) bag_final = df['ocurrencias_y'].tolist() training.append(bag_final) return training
Este es el caso los modelos no secuenciales; se devuelve el vector disperso con tantos valores como tenga la bolsa de palabras pero con las frecuencias de las palabras que aparezcan en la consulta en cuestion
data_preprocessing.py
word_vector
AlbertoBarbado/nlp_trabajo_final
0
python
def word_vector(utterances, corpus): '\n Este es el caso los modelos no secuenciales; se devuelve el vector disperso\n con tantos valores como tenga la bolsa de palabras pero con las frecuencias de las palabras\n que aparezcan en la consulta en cuestion\n ' words = [] training = [] for text in utterances: w = re.findall('\\w+', text.lower(), flags=re.UNICODE) words = w words = [word for word in words if (word not in stopwords.words('english'))] words = [word for word in words if (not word.isdigit())] words = [stemmer.stem(w) for w in words] words = [(x if (x in corpus) else 'UNK') for x in words] bag_new = [] bag_final = [] [bag_new.append([w, 0]) for w in corpus] df_bag = pd.DataFrame(bag_new) df_bag.columns = ['palabras', 'ocurrencias'] df_pattern = pd.DataFrame(words) df_pattern['ocurrencias'] = 1 df_pattern.columns = ['palabras', 'ocurrencias'] df_pattern = df_pattern.groupby(['palabras'])['ocurrencias'].sum() df_pattern = df_pattern.reset_index(level=['palabras']) df = pd.merge(df_bag, df_pattern, on='palabras', how='left').fillna(0) bag_final = df['ocurrencias_y'].tolist() training.append(bag_final) return training
def word_vector(utterances, corpus): '\n Este es el caso los modelos no secuenciales; se devuelve el vector disperso\n con tantos valores como tenga la bolsa de palabras pero con las frecuencias de las palabras\n que aparezcan en la consulta en cuestion\n ' words = [] training = [] for text in utterances: w = re.findall('\\w+', text.lower(), flags=re.UNICODE) words = w words = [word for word in words if (word not in stopwords.words('english'))] words = [word for word in words if (not word.isdigit())] words = [stemmer.stem(w) for w in words] words = [(x if (x in corpus) else 'UNK') for x in words] bag_new = [] bag_final = [] [bag_new.append([w, 0]) for w in corpus] df_bag = pd.DataFrame(bag_new) df_bag.columns = ['palabras', 'ocurrencias'] df_pattern = pd.DataFrame(words) df_pattern['ocurrencias'] = 1 df_pattern.columns = ['palabras', 'ocurrencias'] df_pattern = df_pattern.groupby(['palabras'])['ocurrencias'].sum() df_pattern = df_pattern.reset_index(level=['palabras']) df = pd.merge(df_bag, df_pattern, on='palabras', how='left').fillna(0) bag_final = df['ocurrencias_y'].tolist() training.append(bag_final) return training<|docstring|>Este es el caso los modelos no secuenciales; se devuelve el vector disperso con tantos valores como tenga la bolsa de palabras pero con las frecuencias de las palabras que aparezcan en la consulta en cuestion<|endoftext|>
16201997f60983e16e7ef18e45e398aa43a82d8ea57236aac4fdb730bf7a035e
def word2idx_creation(utterances, corpus): '\n Caso secuencial en el que cada palabra la codifico segun un valor unico asociado\n a la misma en el diccionario\n ' words = [] X = [] word2idx = {'START': 0, 'END': 1} current_idx = 2 for text in utterances: w = re.findall('\\w+', text.lower(), flags=re.UNICODE) words = w words = [word for word in words if (word not in stopwords.words('english'))] words = [word for word in words if (not word.isdigit())] words = [stemmer.stem(w) for w in words] words = [(x if (x in corpus) else 'UNK') for x in words] sentence = [] for t in words: if (t not in word2idx): word2idx[t] = current_idx current_idx += 1 idx = word2idx[t] sentence.append(idx) X.append(np.array(sentence)) return (word2idx, X)
Caso secuencial en el que cada palabra la codifico segun un valor unico asociado a la misma en el diccionario
data_preprocessing.py
word2idx_creation
AlbertoBarbado/nlp_trabajo_final
0
python
def word2idx_creation(utterances, corpus): '\n Caso secuencial en el que cada palabra la codifico segun un valor unico asociado\n a la misma en el diccionario\n ' words = [] X = [] word2idx = {'START': 0, 'END': 1} current_idx = 2 for text in utterances: w = re.findall('\\w+', text.lower(), flags=re.UNICODE) words = w words = [word for word in words if (word not in stopwords.words('english'))] words = [word for word in words if (not word.isdigit())] words = [stemmer.stem(w) for w in words] words = [(x if (x in corpus) else 'UNK') for x in words] sentence = [] for t in words: if (t not in word2idx): word2idx[t] = current_idx current_idx += 1 idx = word2idx[t] sentence.append(idx) X.append(np.array(sentence)) return (word2idx, X)
def word2idx_creation(utterances, corpus): '\n Caso secuencial en el que cada palabra la codifico segun un valor unico asociado\n a la misma en el diccionario\n ' words = [] X = [] word2idx = {'START': 0, 'END': 1} current_idx = 2 for text in utterances: w = re.findall('\\w+', text.lower(), flags=re.UNICODE) words = w words = [word for word in words if (word not in stopwords.words('english'))] words = [word for word in words if (not word.isdigit())] words = [stemmer.stem(w) for w in words] words = [(x if (x in corpus) else 'UNK') for x in words] sentence = [] for t in words: if (t not in word2idx): word2idx[t] = current_idx current_idx += 1 idx = word2idx[t] sentence.append(idx) X.append(np.array(sentence)) return (word2idx, X)<|docstring|>Caso secuencial en el que cada palabra la codifico segun un valor unico asociado a la misma en el diccionario<|endoftext|>
36b3544b2aca4f4883489b63292ac5d32937656b1b3b1253a33e6dc1095314ce
def display_divider(separator: str=HORIZONTAL_DIVIDER, length: int=CONSOLE_LENGTH) -> None: 'Displays a horizonal divider for CLI of separaters repeated length times.' print((separator * length))
Displays a horizonal divider for CLI of separaters repeated length times.
final/final_part3/cli.py
display_divider
MrDDaye/cna_cp1855
0
python
def display_divider(separator: str=HORIZONTAL_DIVIDER, length: int=CONSOLE_LENGTH) -> None: print((separator * length))
def display_divider(separator: str=HORIZONTAL_DIVIDER, length: int=CONSOLE_LENGTH) -> None: print((separator * length))<|docstring|>Displays a horizonal divider for CLI of separaters repeated length times.<|endoftext|>
de6ad16c953f5fe5bb87319f5f0832f45f19972c077dcba0cf6d4f2129afcabb
def display_title(title: str=PROGRAM_TITLE, length: int=CONSOLE_LENGTH) -> None: 'Display the program title in the center of CLI window.' print(((((length - len(title)) // 2) * ' ') + title))
Display the program title in the center of CLI window.
final/final_part3/cli.py
display_title
MrDDaye/cna_cp1855
0
python
def display_title(title: str=PROGRAM_TITLE, length: int=CONSOLE_LENGTH) -> None: print(((((length - len(title)) // 2) * ' ') + title))
def display_title(title: str=PROGRAM_TITLE, length: int=CONSOLE_LENGTH) -> None: print(((((length - len(title)) // 2) * ' ') + title))<|docstring|>Display the program title in the center of CLI window.<|endoftext|>
5658537a85334a85cf0f26153f4fb1eb045e7d95c1cf1769140c07608efa22a5
def display_menu(menu_heading: str=MENU_TITLE, options: list[str]=MENU_OPTIONS) -> None: 'Display the options menu for program.' print(menu_heading) for (index, option) in enumerate(options): print(f'{(index + 1)} - {option}') print()
Display the options menu for program.
final/final_part3/cli.py
display_menu
MrDDaye/cna_cp1855
0
python
def display_menu(menu_heading: str=MENU_TITLE, options: list[str]=MENU_OPTIONS) -> None: print(menu_heading) for (index, option) in enumerate(options): print(f'{(index + 1)} - {option}') print()
def display_menu(menu_heading: str=MENU_TITLE, options: list[str]=MENU_OPTIONS) -> None: print(menu_heading) for (index, option) in enumerate(options): print(f'{(index + 1)} - {option}') print()<|docstring|>Display the options menu for program.<|endoftext|>
46d4aaa128503227e12cb972ff39a6ce6784dbaee57de5161638fc5649061cf2
def display_positions(position_heading: str=POSITION_TITLE, positions: list[str]=POSITIONS): 'Displays the possible positions for the baseball team.' print(position_heading) print(', '.join(positions))
Displays the possible positions for the baseball team.
final/final_part3/cli.py
display_positions
MrDDaye/cna_cp1855
0
python
def display_positions(position_heading: str=POSITION_TITLE, positions: list[str]=POSITIONS): print(position_heading) print(', '.join(positions))
def display_positions(position_heading: str=POSITION_TITLE, positions: list[str]=POSITIONS): print(position_heading) print(', '.join(positions))<|docstring|>Displays the possible positions for the baseball team.<|endoftext|>
38954d325270e62455a000999b228ba8fe5db0b4f7bef9e503c58a1bbc56627a
def display_splash(separator: str=HORIZONTAL_DIVIDER, length: int=CONSOLE_LENGTH, title: str=PROGRAM_TITLE, menu_heading: str=MENU_TITLE, options: list[str]=MENU_OPTIONS, position_heading: str=POSITION_TITLE, positions: list[str]=POSITIONS) -> None: 'Display the splash screen composing of dividers, program title, options menu, and available positions.' display_divider(separator, length) display_title(title, length) display_menu(menu_heading, options) display_positions(position_heading, positions) display_divider(separator, length)
Display the splash screen composing of dividers, program title, options menu, and available positions.
final/final_part3/cli.py
display_splash
MrDDaye/cna_cp1855
0
python
def display_splash(separator: str=HORIZONTAL_DIVIDER, length: int=CONSOLE_LENGTH, title: str=PROGRAM_TITLE, menu_heading: str=MENU_TITLE, options: list[str]=MENU_OPTIONS, position_heading: str=POSITION_TITLE, positions: list[str]=POSITIONS) -> None: display_divider(separator, length) display_title(title, length) display_menu(menu_heading, options) display_positions(position_heading, positions) display_divider(separator, length)
def display_splash(separator: str=HORIZONTAL_DIVIDER, length: int=CONSOLE_LENGTH, title: str=PROGRAM_TITLE, menu_heading: str=MENU_TITLE, options: list[str]=MENU_OPTIONS, position_heading: str=POSITION_TITLE, positions: list[str]=POSITIONS) -> None: display_divider(separator, length) display_title(title, length) display_menu(menu_heading, options) display_positions(position_heading, positions) display_divider(separator, length)<|docstring|>Display the splash screen composing of dividers, program title, options menu, and available positions.<|endoftext|>
9ab8db9fc9e8e0efc443014f30a8a17dc7680ee83e4f62a614ccf17c9353dc12
def display_farewell() -> None: 'Display a farewell message on program exit.' print(FAREWELL)
Display a farewell message on program exit.
final/final_part3/cli.py
display_farewell
MrDDaye/cna_cp1855
0
python
def display_farewell() -> None: print(FAREWELL)
def display_farewell() -> None: print(FAREWELL)<|docstring|>Display a farewell message on program exit.<|endoftext|>
08815bd93b73ff09d1dd5a8365b35367ecc3a839b83fff360693d47a2a869ce4
def display_lineup_header(number_heading: str=LINEUP_NUMBER_TITLE, number_length: int=LINEUP_NUMBER_LENGTH, player_heading: str=LINEUP_PLAYER_TITLE, player_length: int=LINEUP_PLAYER_LENGTH, position_heading: str=LINEUP_POSITION_TITLE, position_length: int=LINEUP_POSITION_LENGTH, at_bats_heading: str=LINEUP_AT_BATS_TITLE, at_bats_length: int=LINEUP_AT_BATS_LENGTH, hits_heading: str=LINEUP_HITS_TITLE, hits_length: int=LINEUP_HITS_LENGTH, average_heading: str=LINEUP_AVERAGE_TITLE, average_length: int=LINEUP_AVERAGE_LENGTH, seperator: str=LINEUP_DIVIDER) -> None: 'Display the table header for team lineup.' number_heading += ((number_length - len(number_heading)) * ' ') player_heading += ((player_length - len(player_heading)) * ' ') position_heading += ((position_length - len(position_heading)) * ' ') at_bats_heading += ((at_bats_length - len(at_bats_heading)) * ' ') hits_heading += ((hits_length - len(hits_heading)) * ' ') average_heading += ((average_length - len(average_heading)) * ' ') print((((((number_heading + player_heading) + position_heading) + at_bats_heading) + hits_heading) + average_heading)) print((seperator * (((((number_length + player_length) + position_length) + at_bats_length) + hits_length) + average_length)))
Display the table header for team lineup.
final/final_part3/cli.py
display_lineup_header
MrDDaye/cna_cp1855
0
python
def display_lineup_header(number_heading: str=LINEUP_NUMBER_TITLE, number_length: int=LINEUP_NUMBER_LENGTH, player_heading: str=LINEUP_PLAYER_TITLE, player_length: int=LINEUP_PLAYER_LENGTH, position_heading: str=LINEUP_POSITION_TITLE, position_length: int=LINEUP_POSITION_LENGTH, at_bats_heading: str=LINEUP_AT_BATS_TITLE, at_bats_length: int=LINEUP_AT_BATS_LENGTH, hits_heading: str=LINEUP_HITS_TITLE, hits_length: int=LINEUP_HITS_LENGTH, average_heading: str=LINEUP_AVERAGE_TITLE, average_length: int=LINEUP_AVERAGE_LENGTH, seperator: str=LINEUP_DIVIDER) -> None: number_heading += ((number_length - len(number_heading)) * ' ') player_heading += ((player_length - len(player_heading)) * ' ') position_heading += ((position_length - len(position_heading)) * ' ') at_bats_heading += ((at_bats_length - len(at_bats_heading)) * ' ') hits_heading += ((hits_length - len(hits_heading)) * ' ') average_heading += ((average_length - len(average_heading)) * ' ') print((((((number_heading + player_heading) + position_heading) + at_bats_heading) + hits_heading) + average_heading)) print((seperator * (((((number_length + player_length) + position_length) + at_bats_length) + hits_length) + average_length)))
def display_lineup_header(number_heading: str=LINEUP_NUMBER_TITLE, number_length: int=LINEUP_NUMBER_LENGTH, player_heading: str=LINEUP_PLAYER_TITLE, player_length: int=LINEUP_PLAYER_LENGTH, position_heading: str=LINEUP_POSITION_TITLE, position_length: int=LINEUP_POSITION_LENGTH, at_bats_heading: str=LINEUP_AT_BATS_TITLE, at_bats_length: int=LINEUP_AT_BATS_LENGTH, hits_heading: str=LINEUP_HITS_TITLE, hits_length: int=LINEUP_HITS_LENGTH, average_heading: str=LINEUP_AVERAGE_TITLE, average_length: int=LINEUP_AVERAGE_LENGTH, seperator: str=LINEUP_DIVIDER) -> None: number_heading += ((number_length - len(number_heading)) * ' ') player_heading += ((player_length - len(player_heading)) * ' ') position_heading += ((position_length - len(position_heading)) * ' ') at_bats_heading += ((at_bats_length - len(at_bats_heading)) * ' ') hits_heading += ((hits_length - len(hits_heading)) * ' ') average_heading += ((average_length - len(average_heading)) * ' ') print((((((number_heading + player_heading) + position_heading) + at_bats_heading) + hits_heading) + average_heading)) print((seperator * (((((number_length + player_length) + position_length) + at_bats_length) + hits_length) + average_length)))<|docstring|>Display the table header for team lineup.<|endoftext|>
8a9129102c0222382e8d0817021087b22d69c8fb69ed8d5580e6a3863fa61708
def display_player(lineup_number: int, player: list[str], number_length: int=LINEUP_NUMBER_LENGTH, player_length: int=LINEUP_PLAYER_LENGTH, position_length: int=LINEUP_POSITION_LENGTH, at_bats_length: int=LINEUP_AT_BATS_LENGTH, hits_length: int=LINEUP_HITS_LENGTH, average_length: int=LINEUP_AVERAGE_LENGTH) -> None: 'Display the player in tabulated form for team lineup' number_column = (f'{lineup_number}' + ((number_length - len(str(lineup_number))) * ' ')) player_column = (f'{player[0]}' + ((player_length - len(player[0])) * ' ')) position_column = (f'{player[1]}' + ((position_length - len(player[1])) * ' ')) at_bats_column = (f'{player[2]}' + ((at_bats_length - len(str(player[2]))) * ' ')) hits_column = (f'{player[3]}' + ((hits_length - len(str(player[3]))) * ' ')) average_column = (f'{player[4]}' + ((average_length - len(str(player[4]))) * ' ')) print((((((number_column + player_column) + position_column) + at_bats_column) + hits_column) + average_column))
Display the player in tabulated form for team lineup
final/final_part3/cli.py
display_player
MrDDaye/cna_cp1855
0
python
def display_player(lineup_number: int, player: list[str], number_length: int=LINEUP_NUMBER_LENGTH, player_length: int=LINEUP_PLAYER_LENGTH, position_length: int=LINEUP_POSITION_LENGTH, at_bats_length: int=LINEUP_AT_BATS_LENGTH, hits_length: int=LINEUP_HITS_LENGTH, average_length: int=LINEUP_AVERAGE_LENGTH) -> None: number_column = (f'{lineup_number}' + ((number_length - len(str(lineup_number))) * ' ')) player_column = (f'{player[0]}' + ((player_length - len(player[0])) * ' ')) position_column = (f'{player[1]}' + ((position_length - len(player[1])) * ' ')) at_bats_column = (f'{player[2]}' + ((at_bats_length - len(str(player[2]))) * ' ')) hits_column = (f'{player[3]}' + ((hits_length - len(str(player[3]))) * ' ')) average_column = (f'{player[4]}' + ((average_length - len(str(player[4]))) * ' ')) print((((((number_column + player_column) + position_column) + at_bats_column) + hits_column) + average_column))
def display_player(lineup_number: int, player: list[str], number_length: int=LINEUP_NUMBER_LENGTH, player_length: int=LINEUP_PLAYER_LENGTH, position_length: int=LINEUP_POSITION_LENGTH, at_bats_length: int=LINEUP_AT_BATS_LENGTH, hits_length: int=LINEUP_HITS_LENGTH, average_length: int=LINEUP_AVERAGE_LENGTH) -> None: number_column = (f'{lineup_number}' + ((number_length - len(str(lineup_number))) * ' ')) player_column = (f'{player[0]}' + ((player_length - len(player[0])) * ' ')) position_column = (f'{player[1]}' + ((position_length - len(player[1])) * ' ')) at_bats_column = (f'{player[2]}' + ((at_bats_length - len(str(player[2]))) * ' ')) hits_column = (f'{player[3]}' + ((hits_length - len(str(player[3]))) * ' ')) average_column = (f'{player[4]}' + ((average_length - len(str(player[4]))) * ' ')) print((((((number_column + player_column) + position_column) + at_bats_column) + hits_column) + average_column))<|docstring|>Display the player in tabulated form for team lineup<|endoftext|>
1c5a81d602bdff7331d453ace524d2896f75a8fda48d77146f4b4a02899b55be
def display_player_stats(player: list[str]) -> None: 'Display statistics of selected player' print(f'You selected {player[0]} AB={player[2]} H={player[3]}')
Display statistics of selected player
final/final_part3/cli.py
display_player_stats
MrDDaye/cna_cp1855
0
python
def display_player_stats(player: list[str]) -> None: print(f'You selected {player[0]} AB={player[2]} H={player[3]}')
def display_player_stats(player: list[str]) -> None: print(f'You selected {player[0]} AB={player[2]} H={player[3]}')<|docstring|>Display statistics of selected player<|endoftext|>
0a9eb2bd0225df1a2eeb94803a26333135884847719200f41faadaebced206a2
def display_player_position(player: list[str]) -> None: 'Display position of selected player' print(f'You selected {player[0]} POS={player[1]}')
Display position of selected player
final/final_part3/cli.py
display_player_position
MrDDaye/cna_cp1855
0
python
def display_player_position(player: list[str]) -> None: print(f'You selected {player[0]} POS={player[1]}')
def display_player_position(player: list[str]) -> None: print(f'You selected {player[0]} POS={player[1]}')<|docstring|>Display position of selected player<|endoftext|>
c5e54461ec5ff9a22fe400fc8e815f5426cbc5a11eabf9c16cb17bb63693c0e9
def display_player_updated(player: list[str]) -> None: 'Display which player was updated' print(f'{player[0]} was updated.')
Display which player was updated
final/final_part3/cli.py
display_player_updated
MrDDaye/cna_cp1855
0
python
def display_player_updated(player: list[str]) -> None: print(f'{player[0]} was updated.')
def display_player_updated(player: list[str]) -> None: print(f'{player[0]} was updated.')<|docstring|>Display which player was updated<|endoftext|>
097f4d143eb9fdbd9c6c8c7066baada836f35ec07f7855d055cf925c74b01179
def main(): '\n Main function.\n\n :return:\n None.\n ' try: src_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) if (src_path not in sys.path): sys.path.append(src_path) from aoiklivereload import LiveReloader reloader = LiveReloader() reloader.start_watcher_thread() server_host = '0.0.0.0' server_port = 8000 msg = '# ----- Run server -----\nHost: {}\nPort: {}'.format(server_host, server_port) print(msg) sanic_app = Sanic() @sanic_app.route('/') async def hello_handler(request): '\n Request handler.\n\n :return:\n Response body.\n ' return text('hello') sanic_app.run(host=server_host, port=server_port) except KeyboardInterrupt: pass
Main function. :return: None.
src/aoiklivereload/demo/sanic_demo.py
main
AoiKuiyuyou/AoikLiveReload
25
python
def main(): '\n Main function.\n\n :return:\n None.\n ' try: src_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) if (src_path not in sys.path): sys.path.append(src_path) from aoiklivereload import LiveReloader reloader = LiveReloader() reloader.start_watcher_thread() server_host = '0.0.0.0' server_port = 8000 msg = '# ----- Run server -----\nHost: {}\nPort: {}'.format(server_host, server_port) print(msg) sanic_app = Sanic() @sanic_app.route('/') async def hello_handler(request): '\n Request handler.\n\n :return:\n Response body.\n ' return text('hello') sanic_app.run(host=server_host, port=server_port) except KeyboardInterrupt: pass
def main(): '\n Main function.\n\n :return:\n None.\n ' try: src_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) if (src_path not in sys.path): sys.path.append(src_path) from aoiklivereload import LiveReloader reloader = LiveReloader() reloader.start_watcher_thread() server_host = '0.0.0.0' server_port = 8000 msg = '# ----- Run server -----\nHost: {}\nPort: {}'.format(server_host, server_port) print(msg) sanic_app = Sanic() @sanic_app.route('/') async def hello_handler(request): '\n Request handler.\n\n :return:\n Response body.\n ' return text('hello') sanic_app.run(host=server_host, port=server_port) except KeyboardInterrupt: pass<|docstring|>Main function. :return: None.<|endoftext|>
af368d97a4d86a1a56bc784541ff97f71ccd750f7f5c8a8f20c8dc95931ce576
@sanic_app.route('/') async def hello_handler(request): '\n Request handler.\n\n :return:\n Response body.\n ' return text('hello')
Request handler. :return: Response body.
src/aoiklivereload/demo/sanic_demo.py
hello_handler
AoiKuiyuyou/AoikLiveReload
25
python
@sanic_app.route('/') async def hello_handler(request): '\n Request handler.\n\n :return:\n Response body.\n ' return text('hello')
@sanic_app.route('/') async def hello_handler(request): '\n Request handler.\n\n :return:\n Response body.\n ' return text('hello')<|docstring|>Request handler. :return: Response body.<|endoftext|>
061d4ef709d5c5e81a9825a64dc9bebab5cc7321b4163dde3ddd10f5275e37d5
def __init__(self, task: Callable, interval_in_sec: int): '\n It inherits from `proton.MessagingHandler` in order to take advantage of its event\n scheduling functionality\n\n :param task:\n :param interval_in_sec:\n ' MessagingHandler.__init__(self) self.task = task self.interval_in_sec = interval_in_sec
It inherits from `proton.MessagingHandler` in order to take advantage of its event scheduling functionality :param task: :param interval_in_sec:
swim_proton/messaging_handlers.py
__init__
eurocontrol-swim/swim-qpid-proton
0
python
def __init__(self, task: Callable, interval_in_sec: int): '\n It inherits from `proton.MessagingHandler` in order to take advantage of its event\n scheduling functionality\n\n :param task:\n :param interval_in_sec:\n ' MessagingHandler.__init__(self) self.task = task self.interval_in_sec = interval_in_sec
def __init__(self, task: Callable, interval_in_sec: int): '\n It inherits from `proton.MessagingHandler` in order to take advantage of its event\n scheduling functionality\n\n :param task:\n :param interval_in_sec:\n ' MessagingHandler.__init__(self) self.task = task self.interval_in_sec = interval_in_sec<|docstring|>It inherits from `proton.MessagingHandler` in order to take advantage of its event scheduling functionality :param task: :param interval_in_sec:<|endoftext|>
2133a410dccf6bfcbaf4ae5b92dbef24028626ad24ea63055ac44b59cd058370
def on_timer_task(self, event: proton.Event): '\n Is triggered upon a scheduled action. The first scheduling will be done by the broker\n handler and then the topic will be re-scheduling itself\n\n :param event:\n ' self.task() event.container.schedule(self.interval_in_sec, self)
Is triggered upon a scheduled action. The first scheduling will be done by the broker handler and then the topic will be re-scheduling itself :param event:
swim_proton/messaging_handlers.py
on_timer_task
eurocontrol-swim/swim-qpid-proton
0
python
def on_timer_task(self, event: proton.Event): '\n Is triggered upon a scheduled action. The first scheduling will be done by the broker\n handler and then the topic will be re-scheduling itself\n\n :param event:\n ' self.task() event.container.schedule(self.interval_in_sec, self)
def on_timer_task(self, event: proton.Event): '\n Is triggered upon a scheduled action. The first scheduling will be done by the broker\n handler and then the topic will be re-scheduling itself\n\n :param event:\n ' self.task() event.container.schedule(self.interval_in_sec, self)<|docstring|>Is triggered upon a scheduled action. The first scheduling will be done by the broker handler and then the topic will be re-scheduling itself :param event:<|endoftext|>
8dbf8ceef2862c26d9f37b33fb3b4d2d41a6593dac33aa253aa90686772356f4
def __init__(self, connector: Connector) -> None: '\n Base class acting a MessagingHandler to a `proton.Container`. Any custom handler should\n inherit from this class.\n\n :param connector: takes care of the connection .i.e TSL, SASL etc\n ' MessagingHandler.__init__(self) self.connector = connector self.container = None self.connection = None self.config = None
Base class acting a MessagingHandler to a `proton.Container`. Any custom handler should inherit from this class. :param connector: takes care of the connection .i.e TSL, SASL etc
swim_proton/messaging_handlers.py
__init__
eurocontrol-swim/swim-qpid-proton
0
python
def __init__(self, connector: Connector) -> None: '\n Base class acting a MessagingHandler to a `proton.Container`. Any custom handler should\n inherit from this class.\n\n :param connector: takes care of the connection .i.e TSL, SASL etc\n ' MessagingHandler.__init__(self) self.connector = connector self.container = None self.connection = None self.config = None
def __init__(self, connector: Connector) -> None: '\n Base class acting a MessagingHandler to a `proton.Container`. Any custom handler should\n inherit from this class.\n\n :param connector: takes care of the connection .i.e TSL, SASL etc\n ' MessagingHandler.__init__(self) self.connector = connector self.container = None self.connection = None self.config = None<|docstring|>Base class acting a MessagingHandler to a `proton.Container`. Any custom handler should inherit from this class. :param connector: takes care of the connection .i.e TSL, SASL etc<|endoftext|>
f39742951e6c6f9b2637a4b4832ae531a1991b72e6ec8b3270ba588a48caffbe
def on_start(self, event: proton.Event): '\n Is triggered upon running the `proton.Container` that uses this handler. It creates a\n connection to the broker and can be overridden for further startup functionality.\n\n :param event:\n ' self.connect(event.container)
Is triggered upon running the `proton.Container` that uses this handler. It creates a connection to the broker and can be overridden for further startup functionality. :param event:
swim_proton/messaging_handlers.py
on_start
eurocontrol-swim/swim-qpid-proton
0
python
def on_start(self, event: proton.Event): '\n Is triggered upon running the `proton.Container` that uses this handler. It creates a\n connection to the broker and can be overridden for further startup functionality.\n\n :param event:\n ' self.connect(event.container)
def on_start(self, event: proton.Event): '\n Is triggered upon running the `proton.Container` that uses this handler. It creates a\n connection to the broker and can be overridden for further startup functionality.\n\n :param event:\n ' self.connect(event.container)<|docstring|>Is triggered upon running the `proton.Container` that uses this handler. It creates a connection to the broker and can be overridden for further startup functionality. :param event:<|endoftext|>
3e3805bd4f9f19d581eb8b64c752f64ac61659a5b180a42e51d3f9bef230eb77
@classmethod def create(cls, host: Optional[str]=None, cert_db: Optional[str]=None, cert_file: Optional[str]=None, cert_key: Optional[str]=None, cert_password: Optional[str]=None, sasl_user: Optional[str]=None, sasl_password: Optional[str]=None, allowed_mechs: Optional[str]='PLAIN', **kwargs): '\n\n :param host:\n :param cert_db:\n :param cert_file:\n :param cert_key:\n :param cert_password:\n :param sasl_user:\n :param sasl_password:\n :param allowed_mechs:\n :return:\n ' if (cert_file and cert_key): connector = TLSConnector(host=host, cert_file=cert_file, cert_key=cert_key, cert_db=cert_db, cert_password=cert_password) elif (cert_db and sasl_user and sasl_password): connector = SASLConnector(host=host, user=sasl_user, password=sasl_password, cert_db=cert_db, allowed_mechs=allowed_mechs) else: connector = Connector(host=host) return cls(connector=connector)
:param host: :param cert_db: :param cert_file: :param cert_key: :param cert_password: :param sasl_user: :param sasl_password: :param allowed_mechs: :return:
swim_proton/messaging_handlers.py
create
eurocontrol-swim/swim-qpid-proton
0
python
@classmethod def create(cls, host: Optional[str]=None, cert_db: Optional[str]=None, cert_file: Optional[str]=None, cert_key: Optional[str]=None, cert_password: Optional[str]=None, sasl_user: Optional[str]=None, sasl_password: Optional[str]=None, allowed_mechs: Optional[str]='PLAIN', **kwargs): '\n\n :param host:\n :param cert_db:\n :param cert_file:\n :param cert_key:\n :param cert_password:\n :param sasl_user:\n :param sasl_password:\n :param allowed_mechs:\n :return:\n ' if (cert_file and cert_key): connector = TLSConnector(host=host, cert_file=cert_file, cert_key=cert_key, cert_db=cert_db, cert_password=cert_password) elif (cert_db and sasl_user and sasl_password): connector = SASLConnector(host=host, user=sasl_user, password=sasl_password, cert_db=cert_db, allowed_mechs=allowed_mechs) else: connector = Connector(host=host) return cls(connector=connector)
@classmethod def create(cls, host: Optional[str]=None, cert_db: Optional[str]=None, cert_file: Optional[str]=None, cert_key: Optional[str]=None, cert_password: Optional[str]=None, sasl_user: Optional[str]=None, sasl_password: Optional[str]=None, allowed_mechs: Optional[str]='PLAIN', **kwargs): '\n\n :param host:\n :param cert_db:\n :param cert_file:\n :param cert_key:\n :param cert_password:\n :param sasl_user:\n :param sasl_password:\n :param allowed_mechs:\n :return:\n ' if (cert_file and cert_key): connector = TLSConnector(host=host, cert_file=cert_file, cert_key=cert_key, cert_db=cert_db, cert_password=cert_password) elif (cert_db and sasl_user and sasl_password): connector = SASLConnector(host=host, user=sasl_user, password=sasl_password, cert_db=cert_db, allowed_mechs=allowed_mechs) else: connector = Connector(host=host) return cls(connector=connector)<|docstring|>:param host: :param cert_db: :param cert_file: :param cert_key: :param cert_password: :param sasl_user: :param sasl_password: :param allowed_mechs: :return:<|endoftext|>
0e1a4d669273d57d8ac63f452d2c7b0baf434dfcf1ecc4f325297c0ea9712c7a
@classmethod def create_from_config(cls, config: ConfigDict): '\n Factory method for creating an instance from config values\n\n :param config:\n :return: BrokerHandler\n ' return cls.create(**config)
Factory method for creating an instance from config values :param config: :return: BrokerHandler
swim_proton/messaging_handlers.py
create_from_config
eurocontrol-swim/swim-qpid-proton
0
python
@classmethod def create_from_config(cls, config: ConfigDict): '\n Factory method for creating an instance from config values\n\n :param config:\n :return: BrokerHandler\n ' return cls.create(**config)
@classmethod def create_from_config(cls, config: ConfigDict): '\n Factory method for creating an instance from config values\n\n :param config:\n :return: BrokerHandler\n ' return cls.create(**config)<|docstring|>Factory method for creating an instance from config values :param config: :return: BrokerHandler<|endoftext|>
ade247d8f0ded18b92decabe31f214cb55c1fc5f9c1e7fd78cd74f31657ef393
def __init__(self, connector: Connector) -> None: '\n An implementation of a broker handler that is supposed to act as a publisher. It keeps a\n list of message producers which will generate respective messages which will be routed in\n the broker via a `proton.Sender` instance\n\n :param connector: takes care of the connection .i.e TSL, SASL etc\n ' PubSubMessagingHandler.__init__(self, connector) self.endpoint: str = '/exchange/amq.topic' self._sender: Optional[proton.Sender] = None self._to_schedule: list[Messenger] = []
An implementation of a broker handler that is supposed to act as a publisher. It keeps a list of message producers which will generate respective messages which will be routed in the broker via a `proton.Sender` instance :param connector: takes care of the connection .i.e TSL, SASL etc
swim_proton/messaging_handlers.py
__init__
eurocontrol-swim/swim-qpid-proton
0
python
def __init__(self, connector: Connector) -> None: '\n An implementation of a broker handler that is supposed to act as a publisher. It keeps a\n list of message producers which will generate respective messages which will be routed in\n the broker via a `proton.Sender` instance\n\n :param connector: takes care of the connection .i.e TSL, SASL etc\n ' PubSubMessagingHandler.__init__(self, connector) self.endpoint: str = '/exchange/amq.topic' self._sender: Optional[proton.Sender] = None self._to_schedule: list[Messenger] = []
def __init__(self, connector: Connector) -> None: '\n An implementation of a broker handler that is supposed to act as a publisher. It keeps a\n list of message producers which will generate respective messages which will be routed in\n the broker via a `proton.Sender` instance\n\n :param connector: takes care of the connection .i.e TSL, SASL etc\n ' PubSubMessagingHandler.__init__(self, connector) self.endpoint: str = '/exchange/amq.topic' self._sender: Optional[proton.Sender] = None self._to_schedule: list[Messenger] = []<|docstring|>An implementation of a broker handler that is supposed to act as a publisher. It keeps a list of message producers which will generate respective messages which will be routed in the broker via a `proton.Sender` instance :param connector: takes care of the connection .i.e TSL, SASL etc<|endoftext|>
6b21262561af766bb39e4808ffd8c183778da0ccac1be2380d40355b9dacdd68
def on_start(self, event: proton.Event) -> None: '\n Is triggered upon running the `proton.Container` that uses this handler.\n If it has ScheduledMessageProducers items in its list they will be initialized and\n scheduled accordingly.\n\n :param event:\n ' super().on_start(event) if (not self.connection): return self._sender = self._create_sender_link(self.endpoint) if (not self._sender): return while self._to_schedule: messenger = self._to_schedule.pop(0) self._schedule_messenger(messenger)
Is triggered upon running the `proton.Container` that uses this handler. If it has ScheduledMessageProducers items in its list they will be initialized and scheduled accordingly. :param event:
swim_proton/messaging_handlers.py
on_start
eurocontrol-swim/swim-qpid-proton
0
python
def on_start(self, event: proton.Event) -> None: '\n Is triggered upon running the `proton.Container` that uses this handler.\n If it has ScheduledMessageProducers items in its list they will be initialized and\n scheduled accordingly.\n\n :param event:\n ' super().on_start(event) if (not self.connection): return self._sender = self._create_sender_link(self.endpoint) if (not self._sender): return while self._to_schedule: messenger = self._to_schedule.pop(0) self._schedule_messenger(messenger)
def on_start(self, event: proton.Event) -> None: '\n Is triggered upon running the `proton.Container` that uses this handler.\n If it has ScheduledMessageProducers items in its list they will be initialized and\n scheduled accordingly.\n\n :param event:\n ' super().on_start(event) if (not self.connection): return self._sender = self._create_sender_link(self.endpoint) if (not self._sender): return while self._to_schedule: messenger = self._to_schedule.pop(0) self._schedule_messenger(messenger)<|docstring|>Is triggered upon running the `proton.Container` that uses this handler. If it has ScheduledMessageProducers items in its list they will be initialized and scheduled accordingly. :param event:<|endoftext|>
742799bde826160de1b5a6e462522bfa3a0d65e0b5d360b9fc20f4733bb12d87
def trigger_messenger(self, messenger: Messenger, context: Optional[Any]=None) -> None: '\n Generates a message via the messenger.message_producer and sends it in the broker.\n\n :param messenger:\n :param context:\n ' _logger.info(f'Producing message for messenger {messenger.id}') message = messenger.get_message(context=context) if message: try: _logger.info(f'Attempting to send message for messenger `{messenger.id}`: {message}') self._send_message(message=message) _logger.info('Message sent') except Exception as e: traceback.print_exc() _logger.error(f'Error while sending message: {str(e)}') else: for callback in messenger.after_send: try: callback() except Exception as e: _logger.error(f'Error while running after send callback for messenger {{messenger.id}}')
Generates a message via the messenger.message_producer and sends it in the broker. :param messenger: :param context:
swim_proton/messaging_handlers.py
trigger_messenger
eurocontrol-swim/swim-qpid-proton
0
python
def trigger_messenger(self, messenger: Messenger, context: Optional[Any]=None) -> None: '\n Generates a message via the messenger.message_producer and sends it in the broker.\n\n :param messenger:\n :param context:\n ' _logger.info(f'Producing message for messenger {messenger.id}') message = messenger.get_message(context=context) if message: try: _logger.info(f'Attempting to send message for messenger `{messenger.id}`: {message}') self._send_message(message=message) _logger.info('Message sent') except Exception as e: traceback.print_exc() _logger.error(f'Error while sending message: {str(e)}') else: for callback in messenger.after_send: try: callback() except Exception as e: _logger.error(f'Error while running after send callback for messenger {{messenger.id}}')
def trigger_messenger(self, messenger: Messenger, context: Optional[Any]=None) -> None: '\n Generates a message via the messenger.message_producer and sends it in the broker.\n\n :param messenger:\n :param context:\n ' _logger.info(f'Producing message for messenger {messenger.id}') message = messenger.get_message(context=context) if message: try: _logger.info(f'Attempting to send message for messenger `{messenger.id}`: {message}') self._send_message(message=message) _logger.info('Message sent') except Exception as e: traceback.print_exc() _logger.error(f'Error while sending message: {str(e)}') else: for callback in messenger.after_send: try: callback() except Exception as e: _logger.error(f'Error while running after send callback for messenger {{messenger.id}}')<|docstring|>Generates a message via the messenger.message_producer and sends it in the broker. :param messenger: :param context:<|endoftext|>
1824d1be35dfdc8ea1a9b5456c8754e0d66828c99339c0ffccae4624dbb4b304
def _send_message(self, message: proton.Message) -> None: '\n Sends the provided message via the broker.\n\n :param message:\n ' if (not self._sender): raise AssertionError('Sender has not been defined yet') if (not self._sender.credit): raise ValueError('Not enough credit to send message') self._sender.send(message)
Sends the provided message via the broker. :param message:
swim_proton/messaging_handlers.py
_send_message
eurocontrol-swim/swim-qpid-proton
0
python
def _send_message(self, message: proton.Message) -> None: '\n Sends the provided message via the broker.\n\n :param message:\n ' if (not self._sender): raise AssertionError('Sender has not been defined yet') if (not self._sender.credit): raise ValueError('Not enough credit to send message') self._sender.send(message)
def _send_message(self, message: proton.Message) -> None: '\n Sends the provided message via the broker.\n\n :param message:\n ' if (not self._sender): raise AssertionError('Sender has not been defined yet') if (not self._sender.credit): raise ValueError('Not enough credit to send message') self._sender.send(message)<|docstring|>Sends the provided message via the broker. :param message:<|endoftext|>
432911ee13903049d36bd13e3573ab7e931ca3b584bb51d1affb9c638121e709
def __init__(self, connector: Connector) -> None: '\n An implementation of a broker client that is supposed to act as subscriber.\n It subscribes to endpoints of the broker by creating instances of `proton.Receiver`\n for each one of them.\n\n :param connector: takes care of the connection .i.e TSL, SASL etc\n ' PubSubMessagingHandler.__init__(self, connector) self.endpoints_registry: Dict[(str, Tuple[(Optional[proton.Receiver], Callable)])] = {}
An implementation of a broker client that is supposed to act as subscriber. It subscribes to endpoints of the broker by creating instances of `proton.Receiver` for each one of them. :param connector: takes care of the connection .i.e TSL, SASL etc
swim_proton/messaging_handlers.py
__init__
eurocontrol-swim/swim-qpid-proton
0
python
def __init__(self, connector: Connector) -> None: '\n An implementation of a broker client that is supposed to act as subscriber.\n It subscribes to endpoints of the broker by creating instances of `proton.Receiver`\n for each one of them.\n\n :param connector: takes care of the connection .i.e TSL, SASL etc\n ' PubSubMessagingHandler.__init__(self, connector) self.endpoints_registry: Dict[(str, Tuple[(Optional[proton.Receiver], Callable)])] = {}
def __init__(self, connector: Connector) -> None: '\n An implementation of a broker client that is supposed to act as subscriber.\n It subscribes to endpoints of the broker by creating instances of `proton.Receiver`\n for each one of them.\n\n :param connector: takes care of the connection .i.e TSL, SASL etc\n ' PubSubMessagingHandler.__init__(self, connector) self.endpoints_registry: Dict[(str, Tuple[(Optional[proton.Receiver], Callable)])] = {}<|docstring|>An implementation of a broker client that is supposed to act as subscriber. It subscribes to endpoints of the broker by creating instances of `proton.Receiver` for each one of them. :param connector: takes care of the connection .i.e TSL, SASL etc<|endoftext|>
394c446400b718acf4b11920f6235c6889f66ab094d84dd9c9d5ab1844fb9112
def _get_endpoint_reg_by_receiver(self, receiver: proton.Receiver) -> Tuple[(str, Callable)]: '\n Find the endpoint and message_consumer that corresponds to the given receiver.\n :param receiver:\n :return:\n ' for (endpoint, (registered_receiver, message_consumer)) in self.endpoints_registry.items(): if (receiver == registered_receiver): return (endpoint, message_consumer)
Find the endpoint and message_consumer that corresponds to the given receiver. :param receiver: :return:
swim_proton/messaging_handlers.py
_get_endpoint_reg_by_receiver
eurocontrol-swim/swim-qpid-proton
0
python
def _get_endpoint_reg_by_receiver(self, receiver: proton.Receiver) -> Tuple[(str, Callable)]: '\n Find the endpoint and message_consumer that corresponds to the given receiver.\n :param receiver:\n :return:\n ' for (endpoint, (registered_receiver, message_consumer)) in self.endpoints_registry.items(): if (receiver == registered_receiver): return (endpoint, message_consumer)
def _get_endpoint_reg_by_receiver(self, receiver: proton.Receiver) -> Tuple[(str, Callable)]: '\n Find the endpoint and message_consumer that corresponds to the given receiver.\n :param receiver:\n :return:\n ' for (endpoint, (registered_receiver, message_consumer)) in self.endpoints_registry.items(): if (receiver == registered_receiver): return (endpoint, message_consumer)<|docstring|>Find the endpoint and message_consumer that corresponds to the given receiver. :param receiver: :return:<|endoftext|>
61e049a571af5cd7e6de3554b694638efea5099ba5d2dd3981c8d17dec4bde58
def on_start(self, event: proton.Event) -> None: '\n Is triggered upon running the `proton.Container` that uses this handler.\n It checks if there are endpoints without a receiver attached to them and creates them\n\n :param event:\n ' super().on_start(event) if (not self.connection): return for (endpoint, (receiver, message_consumer)) in self.endpoints_registry.items(): if (receiver is None): receiver = self._create_receiver_link(endpoint) if receiver: self.endpoints_registry[endpoint] = (receiver, message_consumer)
Is triggered upon running the `proton.Container` that uses this handler. It checks if there are endpoints without a receiver attached to them and creates them :param event:
swim_proton/messaging_handlers.py
on_start
eurocontrol-swim/swim-qpid-proton
0
python
def on_start(self, event: proton.Event) -> None: '\n Is triggered upon running the `proton.Container` that uses this handler.\n It checks if there are endpoints without a receiver attached to them and creates them\n\n :param event:\n ' super().on_start(event) if (not self.connection): return for (endpoint, (receiver, message_consumer)) in self.endpoints_registry.items(): if (receiver is None): receiver = self._create_receiver_link(endpoint) if receiver: self.endpoints_registry[endpoint] = (receiver, message_consumer)
def on_start(self, event: proton.Event) -> None: '\n Is triggered upon running the `proton.Container` that uses this handler.\n It checks if there are endpoints without a receiver attached to them and creates them\n\n :param event:\n ' super().on_start(event) if (not self.connection): return for (endpoint, (receiver, message_consumer)) in self.endpoints_registry.items(): if (receiver is None): receiver = self._create_receiver_link(endpoint) if receiver: self.endpoints_registry[endpoint] = (receiver, message_consumer)<|docstring|>Is triggered upon running the `proton.Container` that uses this handler. It checks if there are endpoints without a receiver attached to them and creates them :param event:<|endoftext|>
7cf5c13f0c62d6bedd719d0f4b7fa1777718cffe9dcdf5ee523771ed4a9cce66
def attach_message_consumer(self, endpoint: str, message_consumer: Callable) -> None: '\n Creates a new `proton.Receiver` and assigns the message consumer to it\n\n :param endpoint:\n :param message_consumer: consumes the messages coming from its assigned endpoint in the broker\n ' self.endpoints_registry[endpoint] = (None, message_consumer) if self.is_connected(): receiver = self._create_receiver_link(endpoint) if receiver: self.endpoints_registry[endpoint] = (receiver, message_consumer)
Creates a new `proton.Receiver` and assigns the message consumer to it :param endpoint: :param message_consumer: consumes the messages coming from its assigned endpoint in the broker
swim_proton/messaging_handlers.py
attach_message_consumer
eurocontrol-swim/swim-qpid-proton
0
python
def attach_message_consumer(self, endpoint: str, message_consumer: Callable) -> None: '\n Creates a new `proton.Receiver` and assigns the message consumer to it\n\n :param endpoint:\n :param message_consumer: consumes the messages coming from its assigned endpoint in the broker\n ' self.endpoints_registry[endpoint] = (None, message_consumer) if self.is_connected(): receiver = self._create_receiver_link(endpoint) if receiver: self.endpoints_registry[endpoint] = (receiver, message_consumer)
def attach_message_consumer(self, endpoint: str, message_consumer: Callable) -> None: '\n Creates a new `proton.Receiver` and assigns the message consumer to it\n\n :param endpoint:\n :param message_consumer: consumes the messages coming from its assigned endpoint in the broker\n ' self.endpoints_registry[endpoint] = (None, message_consumer) if self.is_connected(): receiver = self._create_receiver_link(endpoint) if receiver: self.endpoints_registry[endpoint] = (receiver, message_consumer)<|docstring|>Creates a new `proton.Receiver` and assigns the message consumer to it :param endpoint: :param message_consumer: consumes the messages coming from its assigned endpoint in the broker<|endoftext|>
4a3ccbb05747251a5a8887322e5265996cbd015f4c02c919ebf879dc067cf26e
def detach_message_consumer(self, endpoint: str) -> None: '\n Removes the receiver that corresponds to the given endpoint.\n\n :param endpoint:\n ' (receiver, _) = self.endpoints_registry.pop(endpoint) if (receiver is not None): receiver.close() _logger.debug(f'Closed receiver {receiver} on endpoint {endpoint}')
Removes the receiver that corresponds to the given endpoint. :param endpoint:
swim_proton/messaging_handlers.py
detach_message_consumer
eurocontrol-swim/swim-qpid-proton
0
python
def detach_message_consumer(self, endpoint: str) -> None: '\n Removes the receiver that corresponds to the given endpoint.\n\n :param endpoint:\n ' (receiver, _) = self.endpoints_registry.pop(endpoint) if (receiver is not None): receiver.close() _logger.debug(f'Closed receiver {receiver} on endpoint {endpoint}')
def detach_message_consumer(self, endpoint: str) -> None: '\n Removes the receiver that corresponds to the given endpoint.\n\n :param endpoint:\n ' (receiver, _) = self.endpoints_registry.pop(endpoint) if (receiver is not None): receiver.close() _logger.debug(f'Closed receiver {receiver} on endpoint {endpoint}')<|docstring|>Removes the receiver that corresponds to the given endpoint. :param endpoint:<|endoftext|>
6a114e7e93496aa552de5d705078ca3460f8bf23f19293d55f903ef013167f41
def on_message(self, event: proton.Event) -> None: '\n Is triggered upon reception of messages via the broker.\n\n :param event:\n ' (endpoint, message_consumer) = self._get_endpoint_reg_by_receiver(event.receiver) try: message_consumer(event.message) except Exception as e: _logger.error(f'Error while processing message {event.message} on endpoint {endpoint}: {str(e)}')
Is triggered upon reception of messages via the broker. :param event:
swim_proton/messaging_handlers.py
on_message
eurocontrol-swim/swim-qpid-proton
0
python
def on_message(self, event: proton.Event) -> None: '\n Is triggered upon reception of messages via the broker.\n\n :param event:\n ' (endpoint, message_consumer) = self._get_endpoint_reg_by_receiver(event.receiver) try: message_consumer(event.message) except Exception as e: _logger.error(f'Error while processing message {event.message} on endpoint {endpoint}: {str(e)}')
def on_message(self, event: proton.Event) -> None: '\n Is triggered upon reception of messages via the broker.\n\n :param event:\n ' (endpoint, message_consumer) = self._get_endpoint_reg_by_receiver(event.receiver) try: message_consumer(event.message) except Exception as e: _logger.error(f'Error while processing message {event.message} on endpoint {endpoint}: {str(e)}')<|docstring|>Is triggered upon reception of messages via the broker. :param event:<|endoftext|>
0f0631afb7e8b1dcb0df710607274c4fc750bf33dc4cc1c80b3942e38d1a3e64
@master_only def _save_checkpoint(self, runner): 'Save the current checkpoint and delete unwanted checkpoint.' if (not self.out_dir): self.out_dir = runner.work_dir runner.save_checkpoint(self.out_dir, save_optimizer=self.save_optimizer, **self.args) for (name, best_val) in runner.best_eval_res.items(): if (name not in runner.best_metrics): continue cur_val = runner.log_buffer.output[name][runner.best_type] runner.cur_eval_res[name] = cur_val if ((cur_val >= best_val[0]) and (cur_val < 1)): runner.best_eval_res[name] = [cur_val, (runner.epoch + 1)] runner.save_checkpoint(self.out_dir, save_optimizer=self.save_optimizer, filename_tmpl=f'{name}_best_model.pth.tar', **self.args) runner.logger.info(f'Saving {name}_best checkpoint at {(runner.epoch + 1)} epochs') runner.log_buffer.output[('best_pred_' + name)] = runner.best_eval_res[name] if (runner.meta is not None): if self.by_epoch: cur_ckpt_filename = self.args.get('filename_tmpl', 'epoch_{}.pth').format((runner.epoch + 1)) else: cur_ckpt_filename = self.args.get('filename_tmpl', 'iter_{}.pth').format((runner.iter + 1)) runner.meta.setdefault('hook_msgs', dict()) runner.meta['hook_msgs']['last_ckpt'] = os.path.join(self.out_dir, cur_ckpt_filename) if (self.max_keep_ckpts > 0): if self.by_epoch: name = 'epoch_{}.pth' current_ckpt = (runner.epoch + 1) else: name = 'iter_{}.pth' current_ckpt = (runner.iter + 1) redundant_ckpts = range((current_ckpt - (self.max_keep_ckpts * self.interval)), 0, (- self.interval)) filename_tmpl = self.args.get('filename_tmpl', name) for _step in redundant_ckpts: ckpt_path = os.path.join(self.out_dir, filename_tmpl.format(_step)) if os.path.exists(ckpt_path): os.remove(ckpt_path) else: break
Save the current checkpoint and delete unwanted checkpoint.
mmseg/utils/trainer_hooks.py
_save_checkpoint
shuaizzZ/mmsegmentation
0
python
@master_only def _save_checkpoint(self, runner): if (not self.out_dir): self.out_dir = runner.work_dir runner.save_checkpoint(self.out_dir, save_optimizer=self.save_optimizer, **self.args) for (name, best_val) in runner.best_eval_res.items(): if (name not in runner.best_metrics): continue cur_val = runner.log_buffer.output[name][runner.best_type] runner.cur_eval_res[name] = cur_val if ((cur_val >= best_val[0]) and (cur_val < 1)): runner.best_eval_res[name] = [cur_val, (runner.epoch + 1)] runner.save_checkpoint(self.out_dir, save_optimizer=self.save_optimizer, filename_tmpl=f'{name}_best_model.pth.tar', **self.args) runner.logger.info(f'Saving {name}_best checkpoint at {(runner.epoch + 1)} epochs') runner.log_buffer.output[('best_pred_' + name)] = runner.best_eval_res[name] if (runner.meta is not None): if self.by_epoch: cur_ckpt_filename = self.args.get('filename_tmpl', 'epoch_{}.pth').format((runner.epoch + 1)) else: cur_ckpt_filename = self.args.get('filename_tmpl', 'iter_{}.pth').format((runner.iter + 1)) runner.meta.setdefault('hook_msgs', dict()) runner.meta['hook_msgs']['last_ckpt'] = os.path.join(self.out_dir, cur_ckpt_filename) if (self.max_keep_ckpts > 0): if self.by_epoch: name = 'epoch_{}.pth' current_ckpt = (runner.epoch + 1) else: name = 'iter_{}.pth' current_ckpt = (runner.iter + 1) redundant_ckpts = range((current_ckpt - (self.max_keep_ckpts * self.interval)), 0, (- self.interval)) filename_tmpl = self.args.get('filename_tmpl', name) for _step in redundant_ckpts: ckpt_path = os.path.join(self.out_dir, filename_tmpl.format(_step)) if os.path.exists(ckpt_path): os.remove(ckpt_path) else: break
@master_only def _save_checkpoint(self, runner): if (not self.out_dir): self.out_dir = runner.work_dir runner.save_checkpoint(self.out_dir, save_optimizer=self.save_optimizer, **self.args) for (name, best_val) in runner.best_eval_res.items(): if (name not in runner.best_metrics): continue cur_val = runner.log_buffer.output[name][runner.best_type] runner.cur_eval_res[name] = cur_val if ((cur_val >= best_val[0]) and (cur_val < 1)): runner.best_eval_res[name] = [cur_val, (runner.epoch + 1)] runner.save_checkpoint(self.out_dir, save_optimizer=self.save_optimizer, filename_tmpl=f'{name}_best_model.pth.tar', **self.args) runner.logger.info(f'Saving {name}_best checkpoint at {(runner.epoch + 1)} epochs') runner.log_buffer.output[('best_pred_' + name)] = runner.best_eval_res[name] if (runner.meta is not None): if self.by_epoch: cur_ckpt_filename = self.args.get('filename_tmpl', 'epoch_{}.pth').format((runner.epoch + 1)) else: cur_ckpt_filename = self.args.get('filename_tmpl', 'iter_{}.pth').format((runner.iter + 1)) runner.meta.setdefault('hook_msgs', dict()) runner.meta['hook_msgs']['last_ckpt'] = os.path.join(self.out_dir, cur_ckpt_filename) if (self.max_keep_ckpts > 0): if self.by_epoch: name = 'epoch_{}.pth' current_ckpt = (runner.epoch + 1) else: name = 'iter_{}.pth' current_ckpt = (runner.iter + 1) redundant_ckpts = range((current_ckpt - (self.max_keep_ckpts * self.interval)), 0, (- self.interval)) filename_tmpl = self.args.get('filename_tmpl', name) for _step in redundant_ckpts: ckpt_path = os.path.join(self.out_dir, filename_tmpl.format(_step)) if os.path.exists(ckpt_path): os.remove(ckpt_path) else: break<|docstring|>Save the current checkpoint and delete unwanted checkpoint.<|endoftext|>
42cd0a4b5153121290b81ac87813a35ff6812e9cd9502101779c372ddf358201
def _elem_from_scoperef(self, scoperef): 'A scoperef is (<blob>, <lpath>). Return the actual elem in\n the <blob> ciElementTree being referred to.\n ' elem = scoperef[0] i = 0 for lname in scoperef[1]: i += 1 if (self._alt_elem_from_scoperef is not None): scoperef_names = '.'.join(scoperef[1][:i]) alt_elem = self._alt_elem_from_scoperef.get(scoperef_names) if (alt_elem is not None): elem = alt_elem continue elem = elem.names[lname] return elem
A scoperef is (<blob>, <lpath>). Return the actual elem in the <blob> ciElementTree being referred to.
Data/Packages/SublimeCodeIntel/libs/codeintel2/tree_javascript.py
_elem_from_scoperef
Maxize/Sublime_Text_3_Config
2
python
def _elem_from_scoperef(self, scoperef): 'A scoperef is (<blob>, <lpath>). Return the actual elem in\n the <blob> ciElementTree being referred to.\n ' elem = scoperef[0] i = 0 for lname in scoperef[1]: i += 1 if (self._alt_elem_from_scoperef is not None): scoperef_names = '.'.join(scoperef[1][:i]) alt_elem = self._alt_elem_from_scoperef.get(scoperef_names) if (alt_elem is not None): elem = alt_elem continue elem = elem.names[lname] return elem
def _elem_from_scoperef(self, scoperef): 'A scoperef is (<blob>, <lpath>). Return the actual elem in\n the <blob> ciElementTree being referred to.\n ' elem = scoperef[0] i = 0 for lname in scoperef[1]: i += 1 if (self._alt_elem_from_scoperef is not None): scoperef_names = '.'.join(scoperef[1][:i]) alt_elem = self._alt_elem_from_scoperef.get(scoperef_names) if (alt_elem is not None): elem = alt_elem continue elem = elem.names[lname] return elem<|docstring|>A scoperef is (<blob>, <lpath>). Return the actual elem in the <blob> ciElementTree being referred to.<|endoftext|>
3c1e7989292681c634ff07e51aeb21831bd30bd398d809e28d05465e17d64c92
def parent_scoperef_from_scoperef(self, scoperef, started_in_builtin_window_scope=False): "\n For JavaScript-in-the-browser the top-level scope is the\n Window object instance. For now we are always presuming we\n are running in the browser if the language is JavaScript.\n\n Problem: if we *started* on the Window class then the parent\n scope should be -> built-in-blob. This is what\n 'started_in_builtin_window_scope' is used for.\n " (blob, lpath) = scoperef global_var = self._global_var if ((not started_in_builtin_window_scope) and (lpath == [global_var]) and (blob is self.built_in_blob)): return None elif lpath: return (blob, lpath[:(- 1)]) elif (blob is self.built_in_blob): if started_in_builtin_window_scope: return None elif (global_var is not None): return (self.built_in_blob, [global_var]) else: return (self.built_in_blob, [])
For JavaScript-in-the-browser the top-level scope is the Window object instance. For now we are always presuming we are running in the browser if the language is JavaScript. Problem: if we *started* on the Window class then the parent scope should be -> built-in-blob. This is what 'started_in_builtin_window_scope' is used for.
Data/Packages/SublimeCodeIntel/libs/codeintel2/tree_javascript.py
parent_scoperef_from_scoperef
Maxize/Sublime_Text_3_Config
2
python
def parent_scoperef_from_scoperef(self, scoperef, started_in_builtin_window_scope=False): "\n For JavaScript-in-the-browser the top-level scope is the\n Window object instance. For now we are always presuming we\n are running in the browser if the language is JavaScript.\n\n Problem: if we *started* on the Window class then the parent\n scope should be -> built-in-blob. This is what\n 'started_in_builtin_window_scope' is used for.\n " (blob, lpath) = scoperef global_var = self._global_var if ((not started_in_builtin_window_scope) and (lpath == [global_var]) and (blob is self.built_in_blob)): return None elif lpath: return (blob, lpath[:(- 1)]) elif (blob is self.built_in_blob): if started_in_builtin_window_scope: return None elif (global_var is not None): return (self.built_in_blob, [global_var]) else: return (self.built_in_blob, [])
def parent_scoperef_from_scoperef(self, scoperef, started_in_builtin_window_scope=False): "\n For JavaScript-in-the-browser the top-level scope is the\n Window object instance. For now we are always presuming we\n are running in the browser if the language is JavaScript.\n\n Problem: if we *started* on the Window class then the parent\n scope should be -> built-in-blob. This is what\n 'started_in_builtin_window_scope' is used for.\n " (blob, lpath) = scoperef global_var = self._global_var if ((not started_in_builtin_window_scope) and (lpath == [global_var]) and (blob is self.built_in_blob)): return None elif lpath: return (blob, lpath[:(- 1)]) elif (blob is self.built_in_blob): if started_in_builtin_window_scope: return None elif (global_var is not None): return (self.built_in_blob, [global_var]) else: return (self.built_in_blob, [])<|docstring|>For JavaScript-in-the-browser the top-level scope is the Window object instance. For now we are always presuming we are running in the browser if the language is JavaScript. Problem: if we *started* on the Window class then the parent scope should be -> built-in-blob. This is what 'started_in_builtin_window_scope' is used for.<|endoftext|>
5c89e918ab706025b502f9f1e3bd6cd751efa2185e9f1a4a9cfddb9e48c81db6
@property def _global_var(self): '\n The type of the global variable\n ' if (self.trg.lang == 'Node.js'): return 'global' return 'Window'
The type of the global variable
Data/Packages/SublimeCodeIntel/libs/codeintel2/tree_javascript.py
_global_var
Maxize/Sublime_Text_3_Config
2
python
@property def _global_var(self): '\n \n ' if (self.trg.lang == 'Node.js'): return 'global' return 'Window'
@property def _global_var(self): '\n \n ' if (self.trg.lang == 'Node.js'): return 'global' return 'Window'<|docstring|>The type of the global variable<|endoftext|>
a4ea3d3fee14f71e3025eb4a93b6f040f176c1c421bf9bf181fb88d913504825
def _hit_from_first_token(self, token, scoperef): 'Find the token at the given or a parent scope.\n\n Returns the found elem and the scope at which it was found. If\n not found, this returns (None, None).\n ' self.log("find '%s' starting at %s", token, scoperef) try: elem = self._elem_from_scoperef(scoperef) except KeyError as ex: self.warn('_hit_from_first_token:: no elem for scoperef: %r', scoperef) return (None, None) if (elem.get('ilk') == 'class'): class_name = elem.get('name') try: ctor = elem.names[class_name] except KeyError: pass else: if ('__ctor__' in ctor.get('attributes', '')): scoperef = (scoperef[0], (scoperef[1] + [class_name])) self.log('push scope to class ctor %s', scoperef) started_in_builtin_window_scope = ((scoperef[0] is self.built_in_blob) and scoperef[1] and (scoperef[1][0] == self._global_var)) while 1: try: elem = self._elem_from_scoperef(scoperef) except KeyError as ex: raise EvalError(('could not resolve scoperef %r: %s' % (scoperef, ex))) try: candidate = elem.names[token] if ('__ctor__' in candidate.get('attributes', '')): raise KeyError('skipping JavaScript ctor') self.log("is '%s' accessible on %s? yes", token, scoperef) return (candidate, scoperef) except KeyError: self.log("is '%s' accessible on %s? no", token, scoperef) scoperef = self.parent_scoperef_from_scoperef(scoperef, started_in_builtin_window_scope) if (not scoperef): return (None, None)
Find the token at the given or a parent scope. Returns the found elem and the scope at which it was found. If not found, this returns (None, None).
Data/Packages/SublimeCodeIntel/libs/codeintel2/tree_javascript.py
_hit_from_first_token
Maxize/Sublime_Text_3_Config
2
python
def _hit_from_first_token(self, token, scoperef): 'Find the token at the given or a parent scope.\n\n Returns the found elem and the scope at which it was found. If\n not found, this returns (None, None).\n ' self.log("find '%s' starting at %s", token, scoperef) try: elem = self._elem_from_scoperef(scoperef) except KeyError as ex: self.warn('_hit_from_first_token:: no elem for scoperef: %r', scoperef) return (None, None) if (elem.get('ilk') == 'class'): class_name = elem.get('name') try: ctor = elem.names[class_name] except KeyError: pass else: if ('__ctor__' in ctor.get('attributes', )): scoperef = (scoperef[0], (scoperef[1] + [class_name])) self.log('push scope to class ctor %s', scoperef) started_in_builtin_window_scope = ((scoperef[0] is self.built_in_blob) and scoperef[1] and (scoperef[1][0] == self._global_var)) while 1: try: elem = self._elem_from_scoperef(scoperef) except KeyError as ex: raise EvalError(('could not resolve scoperef %r: %s' % (scoperef, ex))) try: candidate = elem.names[token] if ('__ctor__' in candidate.get('attributes', )): raise KeyError('skipping JavaScript ctor') self.log("is '%s' accessible on %s? yes", token, scoperef) return (candidate, scoperef) except KeyError: self.log("is '%s' accessible on %s? no", token, scoperef) scoperef = self.parent_scoperef_from_scoperef(scoperef, started_in_builtin_window_scope) if (not scoperef): return (None, None)
def _hit_from_first_token(self, token, scoperef): 'Find the token at the given or a parent scope.\n\n Returns the found elem and the scope at which it was found. If\n not found, this returns (None, None).\n ' self.log("find '%s' starting at %s", token, scoperef) try: elem = self._elem_from_scoperef(scoperef) except KeyError as ex: self.warn('_hit_from_first_token:: no elem for scoperef: %r', scoperef) return (None, None) if (elem.get('ilk') == 'class'): class_name = elem.get('name') try: ctor = elem.names[class_name] except KeyError: pass else: if ('__ctor__' in ctor.get('attributes', )): scoperef = (scoperef[0], (scoperef[1] + [class_name])) self.log('push scope to class ctor %s', scoperef) started_in_builtin_window_scope = ((scoperef[0] is self.built_in_blob) and scoperef[1] and (scoperef[1][0] == self._global_var)) while 1: try: elem = self._elem_from_scoperef(scoperef) except KeyError as ex: raise EvalError(('could not resolve scoperef %r: %s' % (scoperef, ex))) try: candidate = elem.names[token] if ('__ctor__' in candidate.get('attributes', )): raise KeyError('skipping JavaScript ctor') self.log("is '%s' accessible on %s? yes", token, scoperef) return (candidate, scoperef) except KeyError: self.log("is '%s' accessible on %s? no", token, scoperef) scoperef = self.parent_scoperef_from_scoperef(scoperef, started_in_builtin_window_scope) if (not scoperef): return (None, None)<|docstring|>Find the token at the given or a parent scope. Returns the found elem and the scope at which it was found. If not found, this returns (None, None).<|endoftext|>
2febdba935718932cc6a3fd14d712b3af7d13b5faa21df89f0b1dd0938863940
def _calltips_from_hits(self, hits): '\n c.f. CitadelEvaluator._getSymbolCallTips()\n ' calltips = [] for (elem, scoperef) in hits: if (elem.tag == 'variable'): self.debug('_calltips_from_hits:: ignoring variable: %r', elem) continue elif (elem.tag == 'scope'): ilk = elem.get('ilk') if (ilk == 'function'): calltips.append(self._calltip_from_func(elem)) elif (ilk == 'class'): calltips.append(self._calltip_from_class(elem)) else: raise NotImplementedError(('unexpected scope ilk for calltip hit: %r' % elem)) else: raise NotImplementedError(('unexpected elem for calltip hit: %r' % elem)) return calltips
c.f. CitadelEvaluator._getSymbolCallTips()
Data/Packages/SublimeCodeIntel/libs/codeintel2/tree_javascript.py
_calltips_from_hits
Maxize/Sublime_Text_3_Config
2
python
def _calltips_from_hits(self, hits): '\n \n ' calltips = [] for (elem, scoperef) in hits: if (elem.tag == 'variable'): self.debug('_calltips_from_hits:: ignoring variable: %r', elem) continue elif (elem.tag == 'scope'): ilk = elem.get('ilk') if (ilk == 'function'): calltips.append(self._calltip_from_func(elem)) elif (ilk == 'class'): calltips.append(self._calltip_from_class(elem)) else: raise NotImplementedError(('unexpected scope ilk for calltip hit: %r' % elem)) else: raise NotImplementedError(('unexpected elem for calltip hit: %r' % elem)) return calltips
def _calltips_from_hits(self, hits): '\n \n ' calltips = [] for (elem, scoperef) in hits: if (elem.tag == 'variable'): self.debug('_calltips_from_hits:: ignoring variable: %r', elem) continue elif (elem.tag == 'scope'): ilk = elem.get('ilk') if (ilk == 'function'): calltips.append(self._calltip_from_func(elem)) elif (ilk == 'class'): calltips.append(self._calltip_from_class(elem)) else: raise NotImplementedError(('unexpected scope ilk for calltip hit: %r' % elem)) else: raise NotImplementedError(('unexpected elem for calltip hit: %r' % elem)) return calltips<|docstring|>c.f. CitadelEvaluator._getSymbolCallTips()<|endoftext|>
e32f62d3e964e2a7fb18225d89b5974f580bdafeee4cf26a81e13bb1c2b4d87f
def _hits_from_argument(self, elem, scoperef): '\n Return hits for an argument of a function based on its caller\n @param elem The argument; must have ilk=argument\n @param scoperef The scope containing the element\n @returns list of hits\n ' assert (elem.get('ilk') == 'argument'), ('_hits_from_argument expects an argument, got a %r' % elem.get('ilk')) hits = [] scope = self._elem_from_scoperef(scoperef) args = [arg for arg in scope.findall('variable') if (arg.get('ilk') == 'argument')] for pos in range(len(args)): if (args[pos].get('name') == elem.get('name')): break else: return [] for caller in scope.getiterator('caller'): citdl = caller.get('citdl') caller_pos = int((caller.get('pos') or 0)) if ((citdl is None) or (caller_pos < 1)): continue for caller_hit in self._hits_from_citdl(citdl, scoperef): caller_func = caller_hit[0] if (caller_func.get('ilk') != 'function'): continue caller_args = [arg for arg in caller_func.getiterator('variable') if (arg.get('ilk') == 'argument')] if (caller_pos > len(caller_args)): continue caller_arg = caller_args[(caller_pos - 1)] citdl = caller_arg.get('citdl') if (not citdl): continue for citdl_hit in self._hits_from_citdl(citdl, caller_hit[1]): func = citdl_hit[0] if (func.get('ilk') != 'function'): continue args = [arg for arg in func.getiterator('variable') if (arg.get('ilk') == 'argument')] if (pos >= len(args)): continue citdl = args[pos].get('citdl') if (not citdl): continue hits += self._hits_from_citdl(citdl, citdl_hit[1]) return hits
Return hits for an argument of a function based on its caller @param elem The argument; must have ilk=argument @param scoperef The scope containing the element @returns list of hits
Data/Packages/SublimeCodeIntel/libs/codeintel2/tree_javascript.py
_hits_from_argument
Maxize/Sublime_Text_3_Config
2
python
def _hits_from_argument(self, elem, scoperef): '\n Return hits for an argument of a function based on its caller\n @param elem The argument; must have ilk=argument\n @param scoperef The scope containing the element\n @returns list of hits\n ' assert (elem.get('ilk') == 'argument'), ('_hits_from_argument expects an argument, got a %r' % elem.get('ilk')) hits = [] scope = self._elem_from_scoperef(scoperef) args = [arg for arg in scope.findall('variable') if (arg.get('ilk') == 'argument')] for pos in range(len(args)): if (args[pos].get('name') == elem.get('name')): break else: return [] for caller in scope.getiterator('caller'): citdl = caller.get('citdl') caller_pos = int((caller.get('pos') or 0)) if ((citdl is None) or (caller_pos < 1)): continue for caller_hit in self._hits_from_citdl(citdl, scoperef): caller_func = caller_hit[0] if (caller_func.get('ilk') != 'function'): continue caller_args = [arg for arg in caller_func.getiterator('variable') if (arg.get('ilk') == 'argument')] if (caller_pos > len(caller_args)): continue caller_arg = caller_args[(caller_pos - 1)] citdl = caller_arg.get('citdl') if (not citdl): continue for citdl_hit in self._hits_from_citdl(citdl, caller_hit[1]): func = citdl_hit[0] if (func.get('ilk') != 'function'): continue args = [arg for arg in func.getiterator('variable') if (arg.get('ilk') == 'argument')] if (pos >= len(args)): continue citdl = args[pos].get('citdl') if (not citdl): continue hits += self._hits_from_citdl(citdl, citdl_hit[1]) return hits
def _hits_from_argument(self, elem, scoperef): '\n Return hits for an argument of a function based on its caller\n @param elem The argument; must have ilk=argument\n @param scoperef The scope containing the element\n @returns list of hits\n ' assert (elem.get('ilk') == 'argument'), ('_hits_from_argument expects an argument, got a %r' % elem.get('ilk')) hits = [] scope = self._elem_from_scoperef(scoperef) args = [arg for arg in scope.findall('variable') if (arg.get('ilk') == 'argument')] for pos in range(len(args)): if (args[pos].get('name') == elem.get('name')): break else: return [] for caller in scope.getiterator('caller'): citdl = caller.get('citdl') caller_pos = int((caller.get('pos') or 0)) if ((citdl is None) or (caller_pos < 1)): continue for caller_hit in self._hits_from_citdl(citdl, scoperef): caller_func = caller_hit[0] if (caller_func.get('ilk') != 'function'): continue caller_args = [arg for arg in caller_func.getiterator('variable') if (arg.get('ilk') == 'argument')] if (caller_pos > len(caller_args)): continue caller_arg = caller_args[(caller_pos - 1)] citdl = caller_arg.get('citdl') if (not citdl): continue for citdl_hit in self._hits_from_citdl(citdl, caller_hit[1]): func = citdl_hit[0] if (func.get('ilk') != 'function'): continue args = [arg for arg in func.getiterator('variable') if (arg.get('ilk') == 'argument')] if (pos >= len(args)): continue citdl = args[pos].get('citdl') if (not citdl): continue hits += self._hits_from_citdl(citdl, citdl_hit[1]) return hits<|docstring|>Return hits for an argument of a function based on its caller @param elem The argument; must have ilk=argument @param scoperef The scope containing the element @returns list of hits<|endoftext|>
b92f0b5fe4e79e726389fc3c8cf293ed26d44d6ec047b21ccadbba2e6ed7435d
def _hits_from_call(self, elem, scoperef): "Resolve the function call inference for 'elem' at 'scoperef'." if (elem.tag == 'variable'): hits = [] var_hits = self._hits_from_variable_type_inference(elem, scoperef) for (var_elem, var_scoperef) in var_hits: if (var_elem != elem): try: hits += self._hits_from_call(var_elem, var_scoperef) except CodeIntelError: pass if (not hits): raise CodeIntelError(('could not resolve call on %r.' % elem)) return hits if (elem.get('ilk') == 'class'): return [(elem, scoperef)] if (elem.get('ilk') != 'function'): raise CodeIntelError(('_hits_from_call:: unexpected element type %r' % elem)) if ((elem.get('name') == 'require') and (scoperef[0] is self.built_in_blob) and (not scoperef[1])): try: requirename = self.trg.extra.get('_params', []).pop(0) except IndexError: requirename = None if (requirename is not None): import codeintel2.lang_javascript requirename = codeintel2.lang_javascript.Utils.unquoteJsString(requirename) self.log('_hits_from_call: resolving CommonJS require(%r)', requirename) hits = self._hits_from_commonjs_require(requirename, scoperef) if (len(hits) > 0): return hits resolver = getattr(elem, 'resolve', None) try: param = self.trg.extra.get('_params', []).pop(0) except IndexError: param = None if (resolver and (param is not None)): try: self.log('Attempting to use extra resolver %r param %r', resolver, param) hits = resolver(evlr=self, action='call', scoperef=scoperef, param=param) if hits: return hits except: self.log('Extra resolver %r: Failed to resolve %s', resolver, scoperef) else: self.log('_hits_from_call: no resolver on %r', elem) citdl = elem.get('returns') if (not citdl): raise CodeIntelError(('no return type info for %r' % elem)) self.log("_hits_from_call: resolve '%s' for %r, scoperef: %r", citdl, elem, scoperef) scoperef = (scoperef[0], (scoperef[1] + [elem.get('name')])) return self._hits_from_citdl(citdl, scoperef)
Resolve the function call inference for 'elem' at 'scoperef'.
Data/Packages/SublimeCodeIntel/libs/codeintel2/tree_javascript.py
_hits_from_call
Maxize/Sublime_Text_3_Config
2
python
def _hits_from_call(self, elem, scoperef): if (elem.tag == 'variable'): hits = [] var_hits = self._hits_from_variable_type_inference(elem, scoperef) for (var_elem, var_scoperef) in var_hits: if (var_elem != elem): try: hits += self._hits_from_call(var_elem, var_scoperef) except CodeIntelError: pass if (not hits): raise CodeIntelError(('could not resolve call on %r.' % elem)) return hits if (elem.get('ilk') == 'class'): return [(elem, scoperef)] if (elem.get('ilk') != 'function'): raise CodeIntelError(('_hits_from_call:: unexpected element type %r' % elem)) if ((elem.get('name') == 'require') and (scoperef[0] is self.built_in_blob) and (not scoperef[1])): try: requirename = self.trg.extra.get('_params', []).pop(0) except IndexError: requirename = None if (requirename is not None): import codeintel2.lang_javascript requirename = codeintel2.lang_javascript.Utils.unquoteJsString(requirename) self.log('_hits_from_call: resolving CommonJS require(%r)', requirename) hits = self._hits_from_commonjs_require(requirename, scoperef) if (len(hits) > 0): return hits resolver = getattr(elem, 'resolve', None) try: param = self.trg.extra.get('_params', []).pop(0) except IndexError: param = None if (resolver and (param is not None)): try: self.log('Attempting to use extra resolver %r param %r', resolver, param) hits = resolver(evlr=self, action='call', scoperef=scoperef, param=param) if hits: return hits except: self.log('Extra resolver %r: Failed to resolve %s', resolver, scoperef) else: self.log('_hits_from_call: no resolver on %r', elem) citdl = elem.get('returns') if (not citdl): raise CodeIntelError(('no return type info for %r' % elem)) self.log("_hits_from_call: resolve '%s' for %r, scoperef: %r", citdl, elem, scoperef) scoperef = (scoperef[0], (scoperef[1] + [elem.get('name')])) return self._hits_from_citdl(citdl, scoperef)
def _hits_from_call(self, elem, scoperef): if (elem.tag == 'variable'): hits = [] var_hits = self._hits_from_variable_type_inference(elem, scoperef) for (var_elem, var_scoperef) in var_hits: if (var_elem != elem): try: hits += self._hits_from_call(var_elem, var_scoperef) except CodeIntelError: pass if (not hits): raise CodeIntelError(('could not resolve call on %r.' % elem)) return hits if (elem.get('ilk') == 'class'): return [(elem, scoperef)] if (elem.get('ilk') != 'function'): raise CodeIntelError(('_hits_from_call:: unexpected element type %r' % elem)) if ((elem.get('name') == 'require') and (scoperef[0] is self.built_in_blob) and (not scoperef[1])): try: requirename = self.trg.extra.get('_params', []).pop(0) except IndexError: requirename = None if (requirename is not None): import codeintel2.lang_javascript requirename = codeintel2.lang_javascript.Utils.unquoteJsString(requirename) self.log('_hits_from_call: resolving CommonJS require(%r)', requirename) hits = self._hits_from_commonjs_require(requirename, scoperef) if (len(hits) > 0): return hits resolver = getattr(elem, 'resolve', None) try: param = self.trg.extra.get('_params', []).pop(0) except IndexError: param = None if (resolver and (param is not None)): try: self.log('Attempting to use extra resolver %r param %r', resolver, param) hits = resolver(evlr=self, action='call', scoperef=scoperef, param=param) if hits: return hits except: self.log('Extra resolver %r: Failed to resolve %s', resolver, scoperef) else: self.log('_hits_from_call: no resolver on %r', elem) citdl = elem.get('returns') if (not citdl): raise CodeIntelError(('no return type info for %r' % elem)) self.log("_hits_from_call: resolve '%s' for %r, scoperef: %r", citdl, elem, scoperef) scoperef = (scoperef[0], (scoperef[1] + [elem.get('name')])) return self._hits_from_citdl(citdl, scoperef)<|docstring|>Resolve the function call inference for 'elem' at 'scoperef'.<|endoftext|>
982f02f2028f03715ffb3096e62ee5873e27a35e9d98acbca75c6a9a36b0efba
def _hit_from_getattr(self, elem, scoperef, token): "Resolve the getattr of 'token' on the given 'elem'.\n\n Raises CodeIntelError if could not resolve it.\n\n Algorithm:\n - Try to resolve it.\n - Call a hook to make an educated guess. Some attribute names\n are strong signals as to the object type -- typically those\n for common built-in classes.\n " self.log("resolve getattr '%s' on %r in %r:", token, elem, scoperef) if (elem.tag == 'variable'): hits = self._hits_from_variable_type_inference(elem, scoperef) elif ((elem.tag == 'scope') and (elem.get('ilk') == 'function')): hits = self._hits_from_type_inference('Function', scoperef) else: assert (elem.tag == 'scope'), ("elem tag is not 'scope': %r" % elem.tag) hits = [(elem, scoperef)] for (hit_elem, hit_scoperef) in hits: self.log('_hit_from_getattr:: hit elem %r, scoperef: %r', hit_elem, hit_scoperef) ilk = hit_elem.get('ilk') if (hit_elem.tag == 'variable'): attr = hit_elem.names.get(token) if (attr is not None): self.log('attr is %r on %r', attr, hit_elem) var_scoperef = (hit_scoperef[0], (hit_scoperef[1] + [hit_elem.get('name')])) return (attr, var_scoperef) elif (ilk == 'function'): return self._hit_from_getattr(hit_elem, hit_scoperef, token) elif (ilk == 'class'): attr = hit_elem.names.get(token) if (attr is not None): self.log('attr is %r on %r', attr, hit_elem) if hit_scoperef: class_scoperef = (hit_scoperef[0], (hit_scoperef[1] + [hit_elem.get('name')])) lineno = int(attr.get('line', '-1')) if ((attr.tag == 'variable') and (lineno > int(hit_elem.get('line', '-1'))) and (lineno <= int(hit_elem.get('lineend', '-1')))): (blob, lpath) = self.buf.scoperef_from_blob_and_line(hit_elem, lineno) if lpath: class_scoperef = (class_scoperef[0], (class_scoperef[1] + lpath)) self.log('Updating scoperef to: %r', class_scoperef) else: class_scoperef = (None, [hit_elem.get('name')]) return (attr, class_scoperef) for classref in hit_elem.get('classrefs', '').split(): try: base_hits = 
self._hits_from_type_inference(classref, hit_scoperef) except CodeIntelError: pass else: for (base_elem, base_scoperef) in base_hits: if (token in base_elem.names): self.log("is '%s' from %s base class? yes", token, base_elem) new_scoperef = (base_scoperef[0], (base_scoperef[1] + [base_elem.get('name')])) return (base_elem.names[token], new_scoperef) self.log("is '%s' from %s base class? no", token, base_elem) else: raise NotImplementedError(('unexpected scope ilk: %r' % ilk)) raise CodeIntelError(("could not resolve '%s' getattr on %r in %r" % (token, elem, scoperef)))
Resolve the getattr of 'token' on the given 'elem'. Raises CodeIntelError if could not resolve it. Algorithm: - Try to resolve it. - Call a hook to make an educated guess. Some attribute names are strong signals as to the object type -- typically those for common built-in classes.
Data/Packages/SublimeCodeIntel/libs/codeintel2/tree_javascript.py
_hit_from_getattr
Maxize/Sublime_Text_3_Config
2
python
def _hit_from_getattr(self, elem, scoperef, token): "Resolve the getattr of 'token' on the given 'elem'.\n\n Raises CodeIntelError if could not resolve it.\n\n Algorithm:\n - Try to resolve it.\n - Call a hook to make an educated guess. Some attribute names\n are strong signals as to the object type -- typically those\n for common built-in classes.\n " self.log("resolve getattr '%s' on %r in %r:", token, elem, scoperef) if (elem.tag == 'variable'): hits = self._hits_from_variable_type_inference(elem, scoperef) elif ((elem.tag == 'scope') and (elem.get('ilk') == 'function')): hits = self._hits_from_type_inference('Function', scoperef) else: assert (elem.tag == 'scope'), ("elem tag is not 'scope': %r" % elem.tag) hits = [(elem, scoperef)] for (hit_elem, hit_scoperef) in hits: self.log('_hit_from_getattr:: hit elem %r, scoperef: %r', hit_elem, hit_scoperef) ilk = hit_elem.get('ilk') if (hit_elem.tag == 'variable'): attr = hit_elem.names.get(token) if (attr is not None): self.log('attr is %r on %r', attr, hit_elem) var_scoperef = (hit_scoperef[0], (hit_scoperef[1] + [hit_elem.get('name')])) return (attr, var_scoperef) elif (ilk == 'function'): return self._hit_from_getattr(hit_elem, hit_scoperef, token) elif (ilk == 'class'): attr = hit_elem.names.get(token) if (attr is not None): self.log('attr is %r on %r', attr, hit_elem) if hit_scoperef: class_scoperef = (hit_scoperef[0], (hit_scoperef[1] + [hit_elem.get('name')])) lineno = int(attr.get('line', '-1')) if ((attr.tag == 'variable') and (lineno > int(hit_elem.get('line', '-1'))) and (lineno <= int(hit_elem.get('lineend', '-1')))): (blob, lpath) = self.buf.scoperef_from_blob_and_line(hit_elem, lineno) if lpath: class_scoperef = (class_scoperef[0], (class_scoperef[1] + lpath)) self.log('Updating scoperef to: %r', class_scoperef) else: class_scoperef = (None, [hit_elem.get('name')]) return (attr, class_scoperef) for classref in hit_elem.get('classrefs', ).split(): try: base_hits = self._hits_from_type_inference(classref, 
hit_scoperef) except CodeIntelError: pass else: for (base_elem, base_scoperef) in base_hits: if (token in base_elem.names): self.log("is '%s' from %s base class? yes", token, base_elem) new_scoperef = (base_scoperef[0], (base_scoperef[1] + [base_elem.get('name')])) return (base_elem.names[token], new_scoperef) self.log("is '%s' from %s base class? no", token, base_elem) else: raise NotImplementedError(('unexpected scope ilk: %r' % ilk)) raise CodeIntelError(("could not resolve '%s' getattr on %r in %r" % (token, elem, scoperef)))
def _hit_from_getattr(self, elem, scoperef, token): "Resolve the getattr of 'token' on the given 'elem'.\n\n Raises CodeIntelError if could not resolve it.\n\n Algorithm:\n - Try to resolve it.\n - Call a hook to make an educated guess. Some attribute names\n are strong signals as to the object type -- typically those\n for common built-in classes.\n " self.log("resolve getattr '%s' on %r in %r:", token, elem, scoperef) if (elem.tag == 'variable'): hits = self._hits_from_variable_type_inference(elem, scoperef) elif ((elem.tag == 'scope') and (elem.get('ilk') == 'function')): hits = self._hits_from_type_inference('Function', scoperef) else: assert (elem.tag == 'scope'), ("elem tag is not 'scope': %r" % elem.tag) hits = [(elem, scoperef)] for (hit_elem, hit_scoperef) in hits: self.log('_hit_from_getattr:: hit elem %r, scoperef: %r', hit_elem, hit_scoperef) ilk = hit_elem.get('ilk') if (hit_elem.tag == 'variable'): attr = hit_elem.names.get(token) if (attr is not None): self.log('attr is %r on %r', attr, hit_elem) var_scoperef = (hit_scoperef[0], (hit_scoperef[1] + [hit_elem.get('name')])) return (attr, var_scoperef) elif (ilk == 'function'): return self._hit_from_getattr(hit_elem, hit_scoperef, token) elif (ilk == 'class'): attr = hit_elem.names.get(token) if (attr is not None): self.log('attr is %r on %r', attr, hit_elem) if hit_scoperef: class_scoperef = (hit_scoperef[0], (hit_scoperef[1] + [hit_elem.get('name')])) lineno = int(attr.get('line', '-1')) if ((attr.tag == 'variable') and (lineno > int(hit_elem.get('line', '-1'))) and (lineno <= int(hit_elem.get('lineend', '-1')))): (blob, lpath) = self.buf.scoperef_from_blob_and_line(hit_elem, lineno) if lpath: class_scoperef = (class_scoperef[0], (class_scoperef[1] + lpath)) self.log('Updating scoperef to: %r', class_scoperef) else: class_scoperef = (None, [hit_elem.get('name')]) return (attr, class_scoperef) for classref in hit_elem.get('classrefs', ).split(): try: base_hits = self._hits_from_type_inference(classref, 
hit_scoperef) except CodeIntelError: pass else: for (base_elem, base_scoperef) in base_hits: if (token in base_elem.names): self.log("is '%s' from %s base class? yes", token, base_elem) new_scoperef = (base_scoperef[0], (base_scoperef[1] + [base_elem.get('name')])) return (base_elem.names[token], new_scoperef) self.log("is '%s' from %s base class? no", token, base_elem) else: raise NotImplementedError(('unexpected scope ilk: %r' % ilk)) raise CodeIntelError(("could not resolve '%s' getattr on %r in %r" % (token, elem, scoperef)))<|docstring|>Resolve the getattr of 'token' on the given 'elem'. Raises CodeIntelError if could not resolve it. Algorithm: - Try to resolve it. - Call a hook to make an educated guess. Some attribute names are strong signals as to the object type -- typically those for common built-in classes.<|endoftext|>
d745afb43b1c025cbf0d9114e6042678e21f21b714fd880b20df38d8f8a86e34
def _hits_from_variable_type_inference(self, elem, scoperef): "Resolve the type inference for 'elem' at 'scoperef'." assert (elem.tag == 'variable') hits = [] citdl = elem.get('citdl') if (citdl == 'require()'): requirename = elem.get('required_library_name') if requirename: self.log('_hits_from_variable_type_inference: resolving require(%r)', requirename) hits += self._hits_from_commonjs_require(requirename, scoperef) if (len(elem) != 0): return (hits + [(elem, scoperef)]) if (not citdl): raise CodeIntelError(('no type-inference info for %r' % elem)) self.log("resolve '%s' type inference for %r:", citdl, elem) if ((citdl == elem.get('name')) and (citdl not in elem.names)): self.log('_hits_from_variable_type_inference:: recursive citdl expression found, trying alternatives.') try: parent_elem = self._elem_from_scoperef(scoperef) except KeyError as ex: raise CodeIntelError(('could not resolve recursive citdl expression %r' % citdl)) else: alt_hits = [] for child in parent_elem: if ((child.tag != 'variable') and (child.get('name') == citdl)): alt_hits.append((child, scoperef)) if (self._alt_elem_from_scoperef is None): self._alt_elem_from_scoperef = {} alt_sref_name = '.'.join((scoperef[1] + [citdl])) self._alt_elem_from_scoperef[alt_sref_name] = child self.log('Alternative hit found: %r, scoperef: %r', child, scoperef) if alt_hits: return alt_hits scoperef = self.parent_scoperef_from_scoperef(scoperef) if (scoperef is None): raise CodeIntelError(('could not resolve recursive citdl expression %r' % citdl)) self.log('Continue search for %r from the parent scope.', citdl) try: hits += self._hits_from_citdl(citdl, scoperef) except EvalError: if (not hits): raise return hits
Resolve the type inference for 'elem' at 'scoperef'.
Data/Packages/SublimeCodeIntel/libs/codeintel2/tree_javascript.py
_hits_from_variable_type_inference
Maxize/Sublime_Text_3_Config
2
python
def _hits_from_variable_type_inference(self, elem, scoperef): assert (elem.tag == 'variable') hits = [] citdl = elem.get('citdl') if (citdl == 'require()'): requirename = elem.get('required_library_name') if requirename: self.log('_hits_from_variable_type_inference: resolving require(%r)', requirename) hits += self._hits_from_commonjs_require(requirename, scoperef) if (len(elem) != 0): return (hits + [(elem, scoperef)]) if (not citdl): raise CodeIntelError(('no type-inference info for %r' % elem)) self.log("resolve '%s' type inference for %r:", citdl, elem) if ((citdl == elem.get('name')) and (citdl not in elem.names)): self.log('_hits_from_variable_type_inference:: recursive citdl expression found, trying alternatives.') try: parent_elem = self._elem_from_scoperef(scoperef) except KeyError as ex: raise CodeIntelError(('could not resolve recursive citdl expression %r' % citdl)) else: alt_hits = [] for child in parent_elem: if ((child.tag != 'variable') and (child.get('name') == citdl)): alt_hits.append((child, scoperef)) if (self._alt_elem_from_scoperef is None): self._alt_elem_from_scoperef = {} alt_sref_name = '.'.join((scoperef[1] + [citdl])) self._alt_elem_from_scoperef[alt_sref_name] = child self.log('Alternative hit found: %r, scoperef: %r', child, scoperef) if alt_hits: return alt_hits scoperef = self.parent_scoperef_from_scoperef(scoperef) if (scoperef is None): raise CodeIntelError(('could not resolve recursive citdl expression %r' % citdl)) self.log('Continue search for %r from the parent scope.', citdl) try: hits += self._hits_from_citdl(citdl, scoperef) except EvalError: if (not hits): raise return hits
def _hits_from_variable_type_inference(self, elem, scoperef): assert (elem.tag == 'variable') hits = [] citdl = elem.get('citdl') if (citdl == 'require()'): requirename = elem.get('required_library_name') if requirename: self.log('_hits_from_variable_type_inference: resolving require(%r)', requirename) hits += self._hits_from_commonjs_require(requirename, scoperef) if (len(elem) != 0): return (hits + [(elem, scoperef)]) if (not citdl): raise CodeIntelError(('no type-inference info for %r' % elem)) self.log("resolve '%s' type inference for %r:", citdl, elem) if ((citdl == elem.get('name')) and (citdl not in elem.names)): self.log('_hits_from_variable_type_inference:: recursive citdl expression found, trying alternatives.') try: parent_elem = self._elem_from_scoperef(scoperef) except KeyError as ex: raise CodeIntelError(('could not resolve recursive citdl expression %r' % citdl)) else: alt_hits = [] for child in parent_elem: if ((child.tag != 'variable') and (child.get('name') == citdl)): alt_hits.append((child, scoperef)) if (self._alt_elem_from_scoperef is None): self._alt_elem_from_scoperef = {} alt_sref_name = '.'.join((scoperef[1] + [citdl])) self._alt_elem_from_scoperef[alt_sref_name] = child self.log('Alternative hit found: %r, scoperef: %r', child, scoperef) if alt_hits: return alt_hits scoperef = self.parent_scoperef_from_scoperef(scoperef) if (scoperef is None): raise CodeIntelError(('could not resolve recursive citdl expression %r' % citdl)) self.log('Continue search for %r from the parent scope.', citdl) try: hits += self._hits_from_citdl(citdl, scoperef) except EvalError: if (not hits): raise return hits<|docstring|>Resolve the type inference for 'elem' at 'scoperef'.<|endoftext|>
6d110a09cad2a5f234a85b434c582ec86bcaff0092ce58b45d2b066d5c403adc
def _hits_from_type_inference(self, citdl, scoperef): "Resolve the 'citdl' type inference at 'scoperef'." self.log("resolve '%s' type inference:", citdl) return self._hits_from_citdl(citdl, scoperef)
Resolve the 'citdl' type inference at 'scoperef'.
Data/Packages/SublimeCodeIntel/libs/codeintel2/tree_javascript.py
_hits_from_type_inference
Maxize/Sublime_Text_3_Config
2
python
def _hits_from_type_inference(self, citdl, scoperef): self.log("resolve '%s' type inference:", citdl) return self._hits_from_citdl(citdl, scoperef)
def _hits_from_type_inference(self, citdl, scoperef): self.log("resolve '%s' type inference:", citdl) return self._hits_from_citdl(citdl, scoperef)<|docstring|>Resolve the 'citdl' type inference at 'scoperef'.<|endoftext|>
e12fbb022eb5019f083ce69e156a524327c828e1aaf95eb4b81649560a6f373d
def _hits_from_first_part(self, tokens, scoperef): 'Resolve the first part of the expression.\n\n If the first token is found at the global or built-in level (or\n not found at all locally) then it may be a shared namespace with\n other files in the execution set. Get that down to a list of\n hits and a remaining list of expression tokens.\n ' (elem, scoperef) = self._hit_from_first_token(tokens[0], scoperef) if (elem is not None): self.log('_hit_from_first_part: found elem: %s %r at %r', (elem.get('ilk') or elem.tag), elem.get('name'), scoperef[1]) if ((elem is None) or (not scoperef[1]) or ((scoperef[1] == ['Window']) and (scoperef[0].get('name') == '*'))): for (first_call_idx, token) in enumerate(tokens): if (token == '()'): break else: first_call_idx = len(tokens) hits = [] for nconsumed in range(first_call_idx, 0, (- 1)): lpath = tuple(tokens[:nconsumed]) if ((elem is not None) and (len(lpath) > 1)): try: self.log('Checking for deeper local match %r from scoperef %r', lpath[1:], scoperef) check_elem = elem for p in lpath[1:]: check_elem = check_elem.names[p] check_scoperef = (scoperef[0], (scoperef[1] + list(lpath[:(- 1)]))) hits.insert(0, (check_elem, check_scoperef)) self.log('_hit_from_first_part: found deeper local elem: %s %r at %r', (check_elem.get('ilk') or check_elem.tag), check_elem.get('name'), check_scoperef[1]) except KeyError: pass for lib in self.libs: self.log("lookup '%s' in %s", '.'.join(lpath), lib) hits_here = lib.hits_from_lpath(lpath, self.ctlr, curr_buf=self.buf) if hits_here: self.log('found %d hits in lib', len(hits_here)) hits += hits_here if hits: break if (elem is not None): if ((not hits) or (nconsumed == 1)): hits.insert(0, (elem, scoperef)) nconsumed = 1 else: new_elem = elem for token in tokens[1:nconsumed]: try: new_elem = new_elem.names[token] except KeyError: break else: if (new_elem not in (e for (e, sr) in hits)): new_scoperef = (scoperef[0], tokens[:(nconsumed - 1)]) hits.insert(0, (new_elem, new_scoperef)) else: hits = 
[(elem, scoperef)] nconsumed = 1 return (hits, nconsumed)
Resolve the first part of the expression. If the first token is found at the global or built-in level (or not found at all locally) then it may be a shared namespace with other files in the execution set. Get that down to a list of hits and a remaining list of expression tokens.
Data/Packages/SublimeCodeIntel/libs/codeintel2/tree_javascript.py
_hits_from_first_part
Maxize/Sublime_Text_3_Config
2
python
def _hits_from_first_part(self, tokens, scoperef): 'Resolve the first part of the expression.\n\n If the first token is found at the global or built-in level (or\n not found at all locally) then it may be a shared namespace with\n other files in the execution set. Get that down to a list of\n hits and a remaining list of expression tokens.\n ' (elem, scoperef) = self._hit_from_first_token(tokens[0], scoperef) if (elem is not None): self.log('_hit_from_first_part: found elem: %s %r at %r', (elem.get('ilk') or elem.tag), elem.get('name'), scoperef[1]) if ((elem is None) or (not scoperef[1]) or ((scoperef[1] == ['Window']) and (scoperef[0].get('name') == '*'))): for (first_call_idx, token) in enumerate(tokens): if (token == '()'): break else: first_call_idx = len(tokens) hits = [] for nconsumed in range(first_call_idx, 0, (- 1)): lpath = tuple(tokens[:nconsumed]) if ((elem is not None) and (len(lpath) > 1)): try: self.log('Checking for deeper local match %r from scoperef %r', lpath[1:], scoperef) check_elem = elem for p in lpath[1:]: check_elem = check_elem.names[p] check_scoperef = (scoperef[0], (scoperef[1] + list(lpath[:(- 1)]))) hits.insert(0, (check_elem, check_scoperef)) self.log('_hit_from_first_part: found deeper local elem: %s %r at %r', (check_elem.get('ilk') or check_elem.tag), check_elem.get('name'), check_scoperef[1]) except KeyError: pass for lib in self.libs: self.log("lookup '%s' in %s", '.'.join(lpath), lib) hits_here = lib.hits_from_lpath(lpath, self.ctlr, curr_buf=self.buf) if hits_here: self.log('found %d hits in lib', len(hits_here)) hits += hits_here if hits: break if (elem is not None): if ((not hits) or (nconsumed == 1)): hits.insert(0, (elem, scoperef)) nconsumed = 1 else: new_elem = elem for token in tokens[1:nconsumed]: try: new_elem = new_elem.names[token] except KeyError: break else: if (new_elem not in (e for (e, sr) in hits)): new_scoperef = (scoperef[0], tokens[:(nconsumed - 1)]) hits.insert(0, (new_elem, new_scoperef)) else: hits = 
[(elem, scoperef)] nconsumed = 1 return (hits, nconsumed)
def _hits_from_first_part(self, tokens, scoperef): 'Resolve the first part of the expression.\n\n If the first token is found at the global or built-in level (or\n not found at all locally) then it may be a shared namespace with\n other files in the execution set. Get that down to a list of\n hits and a remaining list of expression tokens.\n ' (elem, scoperef) = self._hit_from_first_token(tokens[0], scoperef) if (elem is not None): self.log('_hit_from_first_part: found elem: %s %r at %r', (elem.get('ilk') or elem.tag), elem.get('name'), scoperef[1]) if ((elem is None) or (not scoperef[1]) or ((scoperef[1] == ['Window']) and (scoperef[0].get('name') == '*'))): for (first_call_idx, token) in enumerate(tokens): if (token == '()'): break else: first_call_idx = len(tokens) hits = [] for nconsumed in range(first_call_idx, 0, (- 1)): lpath = tuple(tokens[:nconsumed]) if ((elem is not None) and (len(lpath) > 1)): try: self.log('Checking for deeper local match %r from scoperef %r', lpath[1:], scoperef) check_elem = elem for p in lpath[1:]: check_elem = check_elem.names[p] check_scoperef = (scoperef[0], (scoperef[1] + list(lpath[:(- 1)]))) hits.insert(0, (check_elem, check_scoperef)) self.log('_hit_from_first_part: found deeper local elem: %s %r at %r', (check_elem.get('ilk') or check_elem.tag), check_elem.get('name'), check_scoperef[1]) except KeyError: pass for lib in self.libs: self.log("lookup '%s' in %s", '.'.join(lpath), lib) hits_here = lib.hits_from_lpath(lpath, self.ctlr, curr_buf=self.buf) if hits_here: self.log('found %d hits in lib', len(hits_here)) hits += hits_here if hits: break if (elem is not None): if ((not hits) or (nconsumed == 1)): hits.insert(0, (elem, scoperef)) nconsumed = 1 else: new_elem = elem for token in tokens[1:nconsumed]: try: new_elem = new_elem.names[token] except KeyError: break else: if (new_elem not in (e for (e, sr) in hits)): new_scoperef = (scoperef[0], tokens[:(nconsumed - 1)]) hits.insert(0, (new_elem, new_scoperef)) else: hits = 
[(elem, scoperef)] nconsumed = 1 return (hits, nconsumed)<|docstring|>Resolve the first part of the expression. If the first token is found at the global or built-in level (or not found at all locally) then it may be a shared namespace with other files in the execution set. Get that down to a list of hits and a remaining list of expression tokens.<|endoftext|>
b8d9ecdb6dc2cf66b915145e2e3d2e63fc0747583414a38e1955ca149ba588ef
def _hits_from_commonjs_require(self, requirename, scoperef): 'Resolve hits from a CommonJS require() invocation' requirename += '.js' from codeintel2.database.langlib import LangDirsLib from codeintel2.database.multilanglib import MultiLangDirsLib from codeintel2.database.catalog import CatalogLib hits = [] for lib in self.libs: blobs = None if isinstance(lib, (LangDirsLib, MultiLangDirsLib)): blobs = lib.blobs_with_basename(requirename, ctlr=self.ctlr) elif isinstance(lib, CatalogLib): blob = lib.get_blob(requirename) if (blob is not None): blobs = [blob] for blob in (blobs or []): exports = blob.names.get('exports') if ((exports is not None) and (exports.tag == 'variable')): hits += self._hits_from_variable_type_inference(exports, [blob, ['exports']]) else: self.log('Exported exports to be a variable, got %r instead', exports) return hits
Resolve hits from a CommonJS require() invocation
Data/Packages/SublimeCodeIntel/libs/codeintel2/tree_javascript.py
_hits_from_commonjs_require
Maxize/Sublime_Text_3_Config
2
python
def _hits_from_commonjs_require(self, requirename, scoperef): requirename += '.js' from codeintel2.database.langlib import LangDirsLib from codeintel2.database.multilanglib import MultiLangDirsLib from codeintel2.database.catalog import CatalogLib hits = [] for lib in self.libs: blobs = None if isinstance(lib, (LangDirsLib, MultiLangDirsLib)): blobs = lib.blobs_with_basename(requirename, ctlr=self.ctlr) elif isinstance(lib, CatalogLib): blob = lib.get_blob(requirename) if (blob is not None): blobs = [blob] for blob in (blobs or []): exports = blob.names.get('exports') if ((exports is not None) and (exports.tag == 'variable')): hits += self._hits_from_variable_type_inference(exports, [blob, ['exports']]) else: self.log('Exported exports to be a variable, got %r instead', exports) return hits
def _hits_from_commonjs_require(self, requirename, scoperef): requirename += '.js' from codeintel2.database.langlib import LangDirsLib from codeintel2.database.multilanglib import MultiLangDirsLib from codeintel2.database.catalog import CatalogLib hits = [] for lib in self.libs: blobs = None if isinstance(lib, (LangDirsLib, MultiLangDirsLib)): blobs = lib.blobs_with_basename(requirename, ctlr=self.ctlr) elif isinstance(lib, CatalogLib): blob = lib.get_blob(requirename) if (blob is not None): blobs = [blob] for blob in (blobs or []): exports = blob.names.get('exports') if ((exports is not None) and (exports.tag == 'variable')): hits += self._hits_from_variable_type_inference(exports, [blob, ['exports']]) else: self.log('Exported exports to be a variable, got %r instead', exports) return hits<|docstring|>Resolve hits from a CommonJS require() invocation<|endoftext|>
7c594ccd9e4b14cd0163c821071cd688229be1875a5bd3ee4ed0acea9522b45b
def _completion_names_from_scope(self, expr, scoperef): 'Return all available element names beginning with expr' self.log('_completion_names_from_scope:: %r, scoperef: %r', expr, scoperef) all_completions = {} keywords = self.langintel.langinfo.keywords for name in keywords: if ((not expr) or name.startswith(expr)): all_completions[name] = 'keyword' loopcount = (- 1) while (scoperef and (scoperef[0] is not None)): loopcount += 1 self.log('_completion_names_from_scope:: checking scoperef: %r', scoperef) elem = self._elem_from_scoperef(scoperef) if (elem is None): continue for name in elem.names: if (name and name.startswith(expr)): if (name not in all_completions): hit_elem = elem.names[name] if (loopcount and ('__local__' in hit_elem.get('attributes', '').split())): continue all_completions[name] = (hit_elem.get('ilk') or hit_elem.tag) scoperef = self.parent_scoperef_from_scoperef(scoperef) cplns = self.stdlib.toplevel_cplns(prefix=expr) for (ilk, name) in cplns: if (name not in all_completions): all_completions[name] = ilk for lib in self.libs: self.log('_completion_names_from_scope:: include everything from lib: %r', lib) cplns = lib.toplevel_cplns(prefix=expr) for (ilk, name) in cplns: if (name not in all_completions): all_completions[name] = ilk return [(ilk, name) for (name, ilk) in list(all_completions.items())]
Return all available element names beginning with expr
Data/Packages/SublimeCodeIntel/libs/codeintel2/tree_javascript.py
_completion_names_from_scope
Maxize/Sublime_Text_3_Config
2
python
def _completion_names_from_scope(self, expr, scoperef): self.log('_completion_names_from_scope:: %r, scoperef: %r', expr, scoperef) all_completions = {} keywords = self.langintel.langinfo.keywords for name in keywords: if ((not expr) or name.startswith(expr)): all_completions[name] = 'keyword' loopcount = (- 1) while (scoperef and (scoperef[0] is not None)): loopcount += 1 self.log('_completion_names_from_scope:: checking scoperef: %r', scoperef) elem = self._elem_from_scoperef(scoperef) if (elem is None): continue for name in elem.names: if (name and name.startswith(expr)): if (name not in all_completions): hit_elem = elem.names[name] if (loopcount and ('__local__' in hit_elem.get('attributes', ).split())): continue all_completions[name] = (hit_elem.get('ilk') or hit_elem.tag) scoperef = self.parent_scoperef_from_scoperef(scoperef) cplns = self.stdlib.toplevel_cplns(prefix=expr) for (ilk, name) in cplns: if (name not in all_completions): all_completions[name] = ilk for lib in self.libs: self.log('_completion_names_from_scope:: include everything from lib: %r', lib) cplns = lib.toplevel_cplns(prefix=expr) for (ilk, name) in cplns: if (name not in all_completions): all_completions[name] = ilk return [(ilk, name) for (name, ilk) in list(all_completions.items())]
def _completion_names_from_scope(self, expr, scoperef): self.log('_completion_names_from_scope:: %r, scoperef: %r', expr, scoperef) all_completions = {} keywords = self.langintel.langinfo.keywords for name in keywords: if ((not expr) or name.startswith(expr)): all_completions[name] = 'keyword' loopcount = (- 1) while (scoperef and (scoperef[0] is not None)): loopcount += 1 self.log('_completion_names_from_scope:: checking scoperef: %r', scoperef) elem = self._elem_from_scoperef(scoperef) if (elem is None): continue for name in elem.names: if (name and name.startswith(expr)): if (name not in all_completions): hit_elem = elem.names[name] if (loopcount and ('__local__' in hit_elem.get('attributes', ).split())): continue all_completions[name] = (hit_elem.get('ilk') or hit_elem.tag) scoperef = self.parent_scoperef_from_scoperef(scoperef) cplns = self.stdlib.toplevel_cplns(prefix=expr) for (ilk, name) in cplns: if (name not in all_completions): all_completions[name] = ilk for lib in self.libs: self.log('_completion_names_from_scope:: include everything from lib: %r', lib) cplns = lib.toplevel_cplns(prefix=expr) for (ilk, name) in cplns: if (name not in all_completions): all_completions[name] = ilk return [(ilk, name) for (name, ilk) in list(all_completions.items())]<|docstring|>Return all available element names beginning with expr<|endoftext|>
04b10cac92380f4dd358481a773f9d8b8807c32f0afe16c0b3f147e4cae6ac45
def configure_logging(verbosity: int=2) -> None: '\n Configure the root logger and set the verbosity level on a per-module\n basis as desired.\n\n Args:\n verbosity: One of the following options:\n\n * 0: silent (only show CRITICAL)\n * 1: quiet (show proboards_scraper ERROR, imported module ERROR)\n * 2: normal (show proboards_scraper INFO, imported module ERROR)\n * 3: verbose (show proboards_scraper DEBUG, imported module ERROR)\n * 4: vverbose (show proboards_scraper DEBUG, imported module INFO)\n * 5: vvverbose (show proboards_scraper DEBUG, imported module DEBUG)\n\n ' datetime_fmt = '%Y-%m-%d-%H-%M-%S' current_dt = datetime.now().strftime(datetime_fmt) filename = f'{current_dt}.log' scraper_log_level = logging.CRITICAL import_log_level = logging.CRITICAL if (verbosity >= 1): scraper_log_level = logging.ERROR import_log_level = logging.ERROR if (verbosity >= 3): scraper_log_level = logging.DEBUG elif (verbosity >= 2): scraper_log_level = logging.INFO if (verbosity == 5): import_log_level = logging.DEBUG elif (verbosity >= 4): import_log_level = logging.INFO logging.basicConfig(datefmt='%H:%M:%S', format='[%(asctime)s][%(levelname)s][%(name)s] %(message)s', level=scraper_log_level, handlers=[logging.FileHandler(filename), logging.StreamHandler()]) for module in ['asyncio', 'selenium', 'urllib3']: module_logger = logging.getLogger(module) module_logger.setLevel(import_log_level)
Configure the root logger and set the verbosity level on a per-module basis as desired. Args: verbosity: One of the following options: * 0: silent (only show CRITICAL) * 1: quiet (show proboards_scraper ERROR, imported module ERROR) * 2: normal (show proboards_scraper INFO, imported module ERROR) * 3: verbose (show proboards_scraper DEBUG, imported module ERROR) * 4: vverbose (show proboards_scraper DEBUG, imported module INFO) * 5: vvverbose (show proboards_scraper DEBUG, imported module DEBUG)
proboards_scraper/__main__.py
configure_logging
ScottMastro/proboards-scraper
2
python
def configure_logging(verbosity: int=2) -> None: '\n Configure the root logger and set the verbosity level on a per-module\n basis as desired.\n\n Args:\n verbosity: One of the following options:\n\n * 0: silent (only show CRITICAL)\n * 1: quiet (show proboards_scraper ERROR, imported module ERROR)\n * 2: normal (show proboards_scraper INFO, imported module ERROR)\n * 3: verbose (show proboards_scraper DEBUG, imported module ERROR)\n * 4: vverbose (show proboards_scraper DEBUG, imported module INFO)\n * 5: vvverbose (show proboards_scraper DEBUG, imported module DEBUG)\n\n ' datetime_fmt = '%Y-%m-%d-%H-%M-%S' current_dt = datetime.now().strftime(datetime_fmt) filename = f'{current_dt}.log' scraper_log_level = logging.CRITICAL import_log_level = logging.CRITICAL if (verbosity >= 1): scraper_log_level = logging.ERROR import_log_level = logging.ERROR if (verbosity >= 3): scraper_log_level = logging.DEBUG elif (verbosity >= 2): scraper_log_level = logging.INFO if (verbosity == 5): import_log_level = logging.DEBUG elif (verbosity >= 4): import_log_level = logging.INFO logging.basicConfig(datefmt='%H:%M:%S', format='[%(asctime)s][%(levelname)s][%(name)s] %(message)s', level=scraper_log_level, handlers=[logging.FileHandler(filename), logging.StreamHandler()]) for module in ['asyncio', 'selenium', 'urllib3']: module_logger = logging.getLogger(module) module_logger.setLevel(import_log_level)
def configure_logging(verbosity: int=2) -> None: '\n Configure the root logger and set the verbosity level on a per-module\n basis as desired.\n\n Args:\n verbosity: One of the following options:\n\n * 0: silent (only show CRITICAL)\n * 1: quiet (show proboards_scraper ERROR, imported module ERROR)\n * 2: normal (show proboards_scraper INFO, imported module ERROR)\n * 3: verbose (show proboards_scraper DEBUG, imported module ERROR)\n * 4: vverbose (show proboards_scraper DEBUG, imported module INFO)\n * 5: vvverbose (show proboards_scraper DEBUG, imported module DEBUG)\n\n ' datetime_fmt = '%Y-%m-%d-%H-%M-%S' current_dt = datetime.now().strftime(datetime_fmt) filename = f'{current_dt}.log' scraper_log_level = logging.CRITICAL import_log_level = logging.CRITICAL if (verbosity >= 1): scraper_log_level = logging.ERROR import_log_level = logging.ERROR if (verbosity >= 3): scraper_log_level = logging.DEBUG elif (verbosity >= 2): scraper_log_level = logging.INFO if (verbosity == 5): import_log_level = logging.DEBUG elif (verbosity >= 4): import_log_level = logging.INFO logging.basicConfig(datefmt='%H:%M:%S', format='[%(asctime)s][%(levelname)s][%(name)s] %(message)s', level=scraper_log_level, handlers=[logging.FileHandler(filename), logging.StreamHandler()]) for module in ['asyncio', 'selenium', 'urllib3']: module_logger = logging.getLogger(module) module_logger.setLevel(import_log_level)<|docstring|>Configure the root logger and set the verbosity level on a per-module basis as desired. Args: verbosity: One of the following options: * 0: silent (only show CRITICAL) * 1: quiet (show proboards_scraper ERROR, imported module ERROR) * 2: normal (show proboards_scraper INFO, imported module ERROR) * 3: verbose (show proboards_scraper DEBUG, imported module ERROR) * 4: vverbose (show proboards_scraper DEBUG, imported module INFO) * 5: vvverbose (show proboards_scraper DEBUG, imported module DEBUG)<|endoftext|>
03a2b77e6a97736a4bf34d214dbbceb3f815f5e37871cbe592b85d7581603c74
def pbs_cli(): '\n Entrypoint for the main ``pbs`` (proboards scraper) tool.\n ' parser = argparse.ArgumentParser() parser.add_argument('url', type=str, help='URL for either the main page, a board, a thread, or a user') login_group = parser.add_argument_group('Login arguments') login_group.add_argument('-u', '--username', type=str, help='Login username') login_group.add_argument('-p', '--password', type=str, help='Login password') parser.add_argument('-o', '--output', type=pathlib.Path, default='site', metavar='<path>', help='Path to output directory containing database and site files (default ./site)') parser.add_argument('-D', '--no-delay', action='store_true', help='Do not rate limit requests') parser.add_argument('-U', '--no-users', action='store_true', dest='skip_users', help='Do not grab user profiles (only use this option if a database exists and users have already been added to it)') parser.add_argument('-v', '--verbosity', type=int, choices=[0, 1, 2, 3, 4, 5], default=2, help='Verbosity level from 0 (silent) to 5 (full debug); default 2') args = parser.parse_args() if ((args.username and (not args.password)) or (args.password and (not args.username))): print('If providing login credentials, both username and password are required') exit(1) configure_logging(args.verbosity) proboards_scraper.run_scraper(args.url, dst_dir=args.output, username=args.username, password=args.password, skip_users=args.skip_users, no_delay=args.no_delay)
Entrypoint for the main ``pbs`` (proboards scraper) tool.
proboards_scraper/__main__.py
pbs_cli
ScottMastro/proboards-scraper
2
python
def pbs_cli(): '\n \n ' parser = argparse.ArgumentParser() parser.add_argument('url', type=str, help='URL for either the main page, a board, a thread, or a user') login_group = parser.add_argument_group('Login arguments') login_group.add_argument('-u', '--username', type=str, help='Login username') login_group.add_argument('-p', '--password', type=str, help='Login password') parser.add_argument('-o', '--output', type=pathlib.Path, default='site', metavar='<path>', help='Path to output directory containing database and site files (default ./site)') parser.add_argument('-D', '--no-delay', action='store_true', help='Do not rate limit requests') parser.add_argument('-U', '--no-users', action='store_true', dest='skip_users', help='Do not grab user profiles (only use this option if a database exists and users have already been added to it)') parser.add_argument('-v', '--verbosity', type=int, choices=[0, 1, 2, 3, 4, 5], default=2, help='Verbosity level from 0 (silent) to 5 (full debug); default 2') args = parser.parse_args() if ((args.username and (not args.password)) or (args.password and (not args.username))): print('If providing login credentials, both username and password are required') exit(1) configure_logging(args.verbosity) proboards_scraper.run_scraper(args.url, dst_dir=args.output, username=args.username, password=args.password, skip_users=args.skip_users, no_delay=args.no_delay)
def pbs_cli(): '\n \n ' parser = argparse.ArgumentParser() parser.add_argument('url', type=str, help='URL for either the main page, a board, a thread, or a user') login_group = parser.add_argument_group('Login arguments') login_group.add_argument('-u', '--username', type=str, help='Login username') login_group.add_argument('-p', '--password', type=str, help='Login password') parser.add_argument('-o', '--output', type=pathlib.Path, default='site', metavar='<path>', help='Path to output directory containing database and site files (default ./site)') parser.add_argument('-D', '--no-delay', action='store_true', help='Do not rate limit requests') parser.add_argument('-U', '--no-users', action='store_true', dest='skip_users', help='Do not grab user profiles (only use this option if a database exists and users have already been added to it)') parser.add_argument('-v', '--verbosity', type=int, choices=[0, 1, 2, 3, 4, 5], default=2, help='Verbosity level from 0 (silent) to 5 (full debug); default 2') args = parser.parse_args() if ((args.username and (not args.password)) or (args.password and (not args.username))): print('If providing login credentials, both username and password are required') exit(1) configure_logging(args.verbosity) proboards_scraper.run_scraper(args.url, dst_dir=args.output, username=args.username, password=args.password, skip_users=args.skip_users, no_delay=args.no_delay)<|docstring|>Entrypoint for the main ``pbs`` (proboards scraper) tool.<|endoftext|>
d2c0c09f133d67171caadff0eba58be8ea527cbfa14104bb78d04751e18f37cf
def pbd_cli(): '\n Entrypoint for ``pbd`` (proboards scraper database) database query tool.\n ' parser = argparse.ArgumentParser() parser.add_argument('-d', '--database', type=str, default='site/forum.db', metavar='<path>', help='Path to database file; default ./site/forum.db') actions = parser.add_mutually_exclusive_group(required=True) actions.add_argument('--board', '-b', nargs='?', type=int, default=0, const=None, metavar='board_id', help='Board id; if omitted, list all boards') actions.add_argument('--user', '-u', nargs='?', type=int, default=0, const=None, metavar='user_id', help='User id; if omitted, list all users') actions.add_argument('--thread', '-t', nargs='?', type=int, default=0, const=None, metavar='thread_id', help='Thread id; if omitted, list all threads') args = vars(parser.parse_args()) db = proboards_scraper.database.Database(args['database']) action = None value = None for _action in ('board', 'user', 'thread'): if (args[_action] != 0): action = _action value = args[_action] if (action == 'user'): result = db.query_users(user_id=value) if isinstance(result, list): users = [] for user in result: users.append((user['id'], user['name'])) users.sort(key=(lambda tup: tup[0])) for user in users: user_id = user[0] user_name = user[1] print(f'{user_id}: {user_name}') else: user = result pprint(user) elif (action == 'board'): result = db.query_boards(board_id=value) if isinstance(result, list): boards = [] for board in result: boards.append((board['id'], board['name'])) boards.sort(key=(lambda tup: tup[0])) for board in boards: board_id = board[0] board_name = board[1] print(f'{board_id}: {board_name}') else: board = result if ('moderators' in board): mods = [user['name'] for user in board['moderators']] board['moderators'] = mods if ('sub_boards' in board): sub = [sub['id'] for sub in board['sub_boards']] board['sub_boards'] = sub threads = [] for thread in board['threads']: last_post = max((post['date'] for post in thread['posts'])) 
threads.append({'thread_id': thread['id'], 'title': thread['title'], 'num_posts': len(thread['posts']), 'last_post': last_post}) threads.sort(key=(lambda t: t['last_post']), reverse=True) for thread in threads: del thread['last_post'] board['threads'] = threads board['num_threads'] = len(threads) board['posts'] = sum((t['num_posts'] for t in threads)) pprint(board) elif (action == 'thread'): result = db.query_threads(thread_id=value) thread = result if ((thread is not None) and ('poll' in thread)): poll_options = [{'name': opt['name'], 'votes': opt['votes']} for opt in thread['poll']['options']] thread['poll']['options'] = poll_options poll_voters = [user['name'] for user in thread['poll']['voters']] thread['poll']['voters'] = poll_voters pprint(thread) else: raise ValueError('Invalid action')
Entrypoint for ``pbd`` (proboards scraper database) database query tool.
proboards_scraper/__main__.py
pbd_cli
ScottMastro/proboards-scraper
2
python
def pbd_cli(): '\n \n ' parser = argparse.ArgumentParser() parser.add_argument('-d', '--database', type=str, default='site/forum.db', metavar='<path>', help='Path to database file; default ./site/forum.db') actions = parser.add_mutually_exclusive_group(required=True) actions.add_argument('--board', '-b', nargs='?', type=int, default=0, const=None, metavar='board_id', help='Board id; if omitted, list all boards') actions.add_argument('--user', '-u', nargs='?', type=int, default=0, const=None, metavar='user_id', help='User id; if omitted, list all users') actions.add_argument('--thread', '-t', nargs='?', type=int, default=0, const=None, metavar='thread_id', help='Thread id; if omitted, list all threads') args = vars(parser.parse_args()) db = proboards_scraper.database.Database(args['database']) action = None value = None for _action in ('board', 'user', 'thread'): if (args[_action] != 0): action = _action value = args[_action] if (action == 'user'): result = db.query_users(user_id=value) if isinstance(result, list): users = [] for user in result: users.append((user['id'], user['name'])) users.sort(key=(lambda tup: tup[0])) for user in users: user_id = user[0] user_name = user[1] print(f'{user_id}: {user_name}') else: user = result pprint(user) elif (action == 'board'): result = db.query_boards(board_id=value) if isinstance(result, list): boards = [] for board in result: boards.append((board['id'], board['name'])) boards.sort(key=(lambda tup: tup[0])) for board in boards: board_id = board[0] board_name = board[1] print(f'{board_id}: {board_name}') else: board = result if ('moderators' in board): mods = [user['name'] for user in board['moderators']] board['moderators'] = mods if ('sub_boards' in board): sub = [sub['id'] for sub in board['sub_boards']] board['sub_boards'] = sub threads = [] for thread in board['threads']: last_post = max((post['date'] for post in thread['posts'])) threads.append({'thread_id': thread['id'], 'title': thread['title'], 'num_posts': 
len(thread['posts']), 'last_post': last_post}) threads.sort(key=(lambda t: t['last_post']), reverse=True) for thread in threads: del thread['last_post'] board['threads'] = threads board['num_threads'] = len(threads) board['posts'] = sum((t['num_posts'] for t in threads)) pprint(board) elif (action == 'thread'): result = db.query_threads(thread_id=value) thread = result if ((thread is not None) and ('poll' in thread)): poll_options = [{'name': opt['name'], 'votes': opt['votes']} for opt in thread['poll']['options']] thread['poll']['options'] = poll_options poll_voters = [user['name'] for user in thread['poll']['voters']] thread['poll']['voters'] = poll_voters pprint(thread) else: raise ValueError('Invalid action')
def pbd_cli(): '\n \n ' parser = argparse.ArgumentParser() parser.add_argument('-d', '--database', type=str, default='site/forum.db', metavar='<path>', help='Path to database file; default ./site/forum.db') actions = parser.add_mutually_exclusive_group(required=True) actions.add_argument('--board', '-b', nargs='?', type=int, default=0, const=None, metavar='board_id', help='Board id; if omitted, list all boards') actions.add_argument('--user', '-u', nargs='?', type=int, default=0, const=None, metavar='user_id', help='User id; if omitted, list all users') actions.add_argument('--thread', '-t', nargs='?', type=int, default=0, const=None, metavar='thread_id', help='Thread id; if omitted, list all threads') args = vars(parser.parse_args()) db = proboards_scraper.database.Database(args['database']) action = None value = None for _action in ('board', 'user', 'thread'): if (args[_action] != 0): action = _action value = args[_action] if (action == 'user'): result = db.query_users(user_id=value) if isinstance(result, list): users = [] for user in result: users.append((user['id'], user['name'])) users.sort(key=(lambda tup: tup[0])) for user in users: user_id = user[0] user_name = user[1] print(f'{user_id}: {user_name}') else: user = result pprint(user) elif (action == 'board'): result = db.query_boards(board_id=value) if isinstance(result, list): boards = [] for board in result: boards.append((board['id'], board['name'])) boards.sort(key=(lambda tup: tup[0])) for board in boards: board_id = board[0] board_name = board[1] print(f'{board_id}: {board_name}') else: board = result if ('moderators' in board): mods = [user['name'] for user in board['moderators']] board['moderators'] = mods if ('sub_boards' in board): sub = [sub['id'] for sub in board['sub_boards']] board['sub_boards'] = sub threads = [] for thread in board['threads']: last_post = max((post['date'] for post in thread['posts'])) threads.append({'thread_id': thread['id'], 'title': thread['title'], 'num_posts': 
len(thread['posts']), 'last_post': last_post}) threads.sort(key=(lambda t: t['last_post']), reverse=True) for thread in threads: del thread['last_post'] board['threads'] = threads board['num_threads'] = len(threads) board['posts'] = sum((t['num_posts'] for t in threads)) pprint(board) elif (action == 'thread'): result = db.query_threads(thread_id=value) thread = result if ((thread is not None) and ('poll' in thread)): poll_options = [{'name': opt['name'], 'votes': opt['votes']} for opt in thread['poll']['options']] thread['poll']['options'] = poll_options poll_voters = [user['name'] for user in thread['poll']['voters']] thread['poll']['voters'] = poll_voters pprint(thread) else: raise ValueError('Invalid action')<|docstring|>Entrypoint for ``pbd`` (proboards scraper database) database query tool.<|endoftext|>
1bd47d8a07287f59ac67798f99fc8240ba037cd140481e962360f4734b609801
def _assign_picking(self): ' Create picking seperatly for each move ' Picking = self.env['stock.picking'] dynamic_carrier = self.env['delivery.carrier'].search([('is_sol_carrier', '=', True)], limit=1) for move in self: if (move.sale_line_id.order_id.carrier_id.id == dynamic_carrier.id): values = move._get_new_picking_values() values.update({'carrier_id': move.sale_line_id.delivery_carrier_id.id}) picking = Picking.create(values) move.write({'picking_id': picking.id}) else: super(StockMove, move)._assign_picking() return True
Create picking seperatly for each move
addons/shipping_per_product/models/inherit_delivery.py
_assign_picking
marionumza/vocal_v12
0
python
def _assign_picking(self): ' ' Picking = self.env['stock.picking'] dynamic_carrier = self.env['delivery.carrier'].search([('is_sol_carrier', '=', True)], limit=1) for move in self: if (move.sale_line_id.order_id.carrier_id.id == dynamic_carrier.id): values = move._get_new_picking_values() values.update({'carrier_id': move.sale_line_id.delivery_carrier_id.id}) picking = Picking.create(values) move.write({'picking_id': picking.id}) else: super(StockMove, move)._assign_picking() return True
def _assign_picking(self): ' ' Picking = self.env['stock.picking'] dynamic_carrier = self.env['delivery.carrier'].search([('is_sol_carrier', '=', True)], limit=1) for move in self: if (move.sale_line_id.order_id.carrier_id.id == dynamic_carrier.id): values = move._get_new_picking_values() values.update({'carrier_id': move.sale_line_id.delivery_carrier_id.id}) picking = Picking.create(values) move.write({'picking_id': picking.id}) else: super(StockMove, move)._assign_picking() return True<|docstring|>Create picking seperatly for each move<|endoftext|>
6fb0891ee15e48f1658dc2aae56335d1f4263d3993422a2f753b923135399030
def build_url(*args, **kwargs): 'Builds a url just like reverse, but adds get parameters to it' get_params = kwargs.pop('get', {}) get_params = {key: value for (key, value) in get_params.items() if (value or (value == 0))} url = reverse(*args, **kwargs) return ('{url}?{params}'.format(url=url, params=urlencode(get_params)) if get_params else url)
Builds a url just like reverse, but adds get parameters to it
src/tools/build_url.py
build_url
adrienlina/dnd-spellbook
0
python
def build_url(*args, **kwargs): get_params = kwargs.pop('get', {}) get_params = {key: value for (key, value) in get_params.items() if (value or (value == 0))} url = reverse(*args, **kwargs) return ('{url}?{params}'.format(url=url, params=urlencode(get_params)) if get_params else url)
def build_url(*args, **kwargs): get_params = kwargs.pop('get', {}) get_params = {key: value for (key, value) in get_params.items() if (value or (value == 0))} url = reverse(*args, **kwargs) return ('{url}?{params}'.format(url=url, params=urlencode(get_params)) if get_params else url)<|docstring|>Builds a url just like reverse, but adds get parameters to it<|endoftext|>
bd0a558c8e130d71a24d5cf48378a033f953a30736f22883a4b6c0f0e880f232
def ee_initialize(token_name='EARTHENGINE_TOKEN'): 'Authenticates Earth Engine and initialize an Earth Engine session\n\n ' if (ee.data._credentials is None): try: ee_token = os.environ.get(token_name) if (ee_token is not None): credential_file_path = os.path.expanduser('~/.config/earthengine/') if (not os.path.exists(credential_file_path)): credential = ('{"refresh_token":"%s"}' % ee_token) os.makedirs(credential_file_path, exist_ok=True) with open((credential_file_path + 'credentials'), 'w') as file: file.write(credential) elif in_colab_shell(): if (credentials_in_drive() and (not credentials_in_colab())): copy_credentials_to_colab() elif (not credentials_in_colab): ee.Authenticate() if (is_drive_mounted() and (not credentials_in_drive())): copy_credentials_to_drive() elif is_drive_mounted(): copy_credentials_to_drive() ee.Initialize() except: ee.Authenticate() ee.Initialize()
Authenticates Earth Engine and initialize an Earth Engine session
geemap/common.py
ee_initialize
arheem/geemap
1
python
def ee_initialize(token_name='EARTHENGINE_TOKEN'): '\n\n ' if (ee.data._credentials is None): try: ee_token = os.environ.get(token_name) if (ee_token is not None): credential_file_path = os.path.expanduser('~/.config/earthengine/') if (not os.path.exists(credential_file_path)): credential = ('{"refresh_token":"%s"}' % ee_token) os.makedirs(credential_file_path, exist_ok=True) with open((credential_file_path + 'credentials'), 'w') as file: file.write(credential) elif in_colab_shell(): if (credentials_in_drive() and (not credentials_in_colab())): copy_credentials_to_colab() elif (not credentials_in_colab): ee.Authenticate() if (is_drive_mounted() and (not credentials_in_drive())): copy_credentials_to_drive() elif is_drive_mounted(): copy_credentials_to_drive() ee.Initialize() except: ee.Authenticate() ee.Initialize()
def ee_initialize(token_name='EARTHENGINE_TOKEN'): '\n\n ' if (ee.data._credentials is None): try: ee_token = os.environ.get(token_name) if (ee_token is not None): credential_file_path = os.path.expanduser('~/.config/earthengine/') if (not os.path.exists(credential_file_path)): credential = ('{"refresh_token":"%s"}' % ee_token) os.makedirs(credential_file_path, exist_ok=True) with open((credential_file_path + 'credentials'), 'w') as file: file.write(credential) elif in_colab_shell(): if (credentials_in_drive() and (not credentials_in_colab())): copy_credentials_to_colab() elif (not credentials_in_colab): ee.Authenticate() if (is_drive_mounted() and (not credentials_in_drive())): copy_credentials_to_drive() elif is_drive_mounted(): copy_credentials_to_drive() ee.Initialize() except: ee.Authenticate() ee.Initialize()<|docstring|>Authenticates Earth Engine and initialize an Earth Engine session<|endoftext|>
5ae867587e0880a16385d5d4e9400788a60692636cbbf57f8e373f4483fee166
def set_proxy(port=1080, ip='http://127.0.0.1'): "Sets proxy if needed. This is only needed for countries where Google services are not available.\n\n Args:\n port (int, optional): The proxy port number. Defaults to 1080.\n ip (str, optional): The IP address. Defaults to 'http://127.0.0.1'.\n " import os import requests try: if (not ip.startswith('http')): ip = ('http://' + ip) proxy = '{}:{}'.format(ip, port) os.environ['HTTP_PROXY'] = proxy os.environ['HTTPS_PROXY'] = proxy a = requests.get('https://earthengine.google.com/') if (a.status_code != 200): print('Failed to connect to Earth Engine. Please double check the port number and ip address.') except Exception as e: print(e)
Sets proxy if needed. This is only needed for countries where Google services are not available. Args: port (int, optional): The proxy port number. Defaults to 1080. ip (str, optional): The IP address. Defaults to 'http://127.0.0.1'.
geemap/common.py
set_proxy
arheem/geemap
1
python
def set_proxy(port=1080, ip='http://127.0.0.1'): "Sets proxy if needed. This is only needed for countries where Google services are not available.\n\n Args:\n port (int, optional): The proxy port number. Defaults to 1080.\n ip (str, optional): The IP address. Defaults to 'http://127.0.0.1'.\n " import os import requests try: if (not ip.startswith('http')): ip = ('http://' + ip) proxy = '{}:{}'.format(ip, port) os.environ['HTTP_PROXY'] = proxy os.environ['HTTPS_PROXY'] = proxy a = requests.get('https://earthengine.google.com/') if (a.status_code != 200): print('Failed to connect to Earth Engine. Please double check the port number and ip address.') except Exception as e: print(e)
def set_proxy(port=1080, ip='http://127.0.0.1'): "Sets proxy if needed. This is only needed for countries where Google services are not available.\n\n Args:\n port (int, optional): The proxy port number. Defaults to 1080.\n ip (str, optional): The IP address. Defaults to 'http://127.0.0.1'.\n " import os import requests try: if (not ip.startswith('http')): ip = ('http://' + ip) proxy = '{}:{}'.format(ip, port) os.environ['HTTP_PROXY'] = proxy os.environ['HTTPS_PROXY'] = proxy a = requests.get('https://earthengine.google.com/') if (a.status_code != 200): print('Failed to connect to Earth Engine. Please double check the port number and ip address.') except Exception as e: print(e)<|docstring|>Sets proxy if needed. This is only needed for countries where Google services are not available. Args: port (int, optional): The proxy port number. Defaults to 1080. ip (str, optional): The IP address. Defaults to 'http://127.0.0.1'.<|endoftext|>
2f8fb87e71ad373f449ad0c2ab6d33bf553bebd8840cd1cba55cc5a6cf463634
def in_colab_shell(): 'Tests if the code is being executed within Google Colab.' try: import google.colab return True except ImportError: return False
Tests if the code is being executed within Google Colab.
geemap/common.py
in_colab_shell
arheem/geemap
1
python
def in_colab_shell(): try: import google.colab return True except ImportError: return False
def in_colab_shell(): try: import google.colab return True except ImportError: return False<|docstring|>Tests if the code is being executed within Google Colab.<|endoftext|>
df112f45c1d6b8c56d41a10dbc81c8dc99b09fec6c345e6578bee92ea009f41a
def is_drive_mounted(): 'Checks whether Google Drive is mounted in Google Colab.\n\n Returns:\n bool: Returns True if Google Drive is mounted, False otherwise.\n ' drive_path = '/content/drive/My Drive' if os.path.exists(drive_path): return True else: return False
Checks whether Google Drive is mounted in Google Colab. Returns: bool: Returns True if Google Drive is mounted, False otherwise.
geemap/common.py
is_drive_mounted
arheem/geemap
1
python
def is_drive_mounted(): 'Checks whether Google Drive is mounted in Google Colab.\n\n Returns:\n bool: Returns True if Google Drive is mounted, False otherwise.\n ' drive_path = '/content/drive/My Drive' if os.path.exists(drive_path): return True else: return False
def is_drive_mounted(): 'Checks whether Google Drive is mounted in Google Colab.\n\n Returns:\n bool: Returns True if Google Drive is mounted, False otherwise.\n ' drive_path = '/content/drive/My Drive' if os.path.exists(drive_path): return True else: return False<|docstring|>Checks whether Google Drive is mounted in Google Colab. Returns: bool: Returns True if Google Drive is mounted, False otherwise.<|endoftext|>
a50f21134c334693aff0833937016e6eb825daec27a9076ad91444ba94510462
def credentials_in_drive(): 'Checks if the ee credentials file exists in Google Drive.\n\n Returns:\n bool: Returns True if Google Drive is mounted, False otherwise.\n ' credentials_path = '/content/drive/My Drive/.config/earthengine/credentials' if os.path.exists(credentials_path): return True else: return False
Checks if the ee credentials file exists in Google Drive. Returns: bool: Returns True if Google Drive is mounted, False otherwise.
geemap/common.py
credentials_in_drive
arheem/geemap
1
python
def credentials_in_drive(): 'Checks if the ee credentials file exists in Google Drive.\n\n Returns:\n bool: Returns True if Google Drive is mounted, False otherwise.\n ' credentials_path = '/content/drive/My Drive/.config/earthengine/credentials' if os.path.exists(credentials_path): return True else: return False
def credentials_in_drive(): 'Checks if the ee credentials file exists in Google Drive.\n\n Returns:\n bool: Returns True if Google Drive is mounted, False otherwise.\n ' credentials_path = '/content/drive/My Drive/.config/earthengine/credentials' if os.path.exists(credentials_path): return True else: return False<|docstring|>Checks if the ee credentials file exists in Google Drive. Returns: bool: Returns True if Google Drive is mounted, False otherwise.<|endoftext|>
ee88f9e2c2eb12bbc8687080779c32b2ea9f3580113058b722bdd096cd9ad003
def credentials_in_colab(): 'Checks if the ee credentials file exists in Google Colab.\n\n Returns:\n bool: Returns True if Google Drive is mounted, False otherwise.\n ' credentials_path = '/root/.config/earthengine/credentials' if os.path.exists(credentials_path): return True else: return False
Checks if the ee credentials file exists in Google Colab. Returns: bool: Returns True if Google Drive is mounted, False otherwise.
geemap/common.py
credentials_in_colab
arheem/geemap
1
python
def credentials_in_colab(): 'Checks if the ee credentials file exists in Google Colab.\n\n Returns:\n bool: Returns True if Google Drive is mounted, False otherwise.\n ' credentials_path = '/root/.config/earthengine/credentials' if os.path.exists(credentials_path): return True else: return False
def credentials_in_colab(): 'Checks if the ee credentials file exists in Google Colab.\n\n Returns:\n bool: Returns True if Google Drive is mounted, False otherwise.\n ' credentials_path = '/root/.config/earthengine/credentials' if os.path.exists(credentials_path): return True else: return False<|docstring|>Checks if the ee credentials file exists in Google Colab. Returns: bool: Returns True if Google Drive is mounted, False otherwise.<|endoftext|>
b8a71028c53fa7cbf60fb1f705b467c32c030133c1c3592335d749255ca5bc5d
def copy_credentials_to_drive(): 'Copies ee credentials from Google Colab to Google Drive.\n ' src = '/root/.config/earthengine/credentials' dst = '/content/drive/My Drive/.config/earthengine/credentials' wd = os.path.dirname(dst) if (not os.path.exists(wd)): os.makedirs(wd) shutil.copyfile(src, dst)
Copies ee credentials from Google Colab to Google Drive.
geemap/common.py
copy_credentials_to_drive
arheem/geemap
1
python
def copy_credentials_to_drive(): '\n ' src = '/root/.config/earthengine/credentials' dst = '/content/drive/My Drive/.config/earthengine/credentials' wd = os.path.dirname(dst) if (not os.path.exists(wd)): os.makedirs(wd) shutil.copyfile(src, dst)
def copy_credentials_to_drive(): '\n ' src = '/root/.config/earthengine/credentials' dst = '/content/drive/My Drive/.config/earthengine/credentials' wd = os.path.dirname(dst) if (not os.path.exists(wd)): os.makedirs(wd) shutil.copyfile(src, dst)<|docstring|>Copies ee credentials from Google Colab to Google Drive.<|endoftext|>
d5fae9543e390fc0e44df5ed038dd9d9bfe5395ff794836297dd27c88b810564
def copy_credentials_to_colab(): 'Copies ee credentials from Google Drive to Google Colab.\n ' src = '/content/drive/My Drive/.config/earthengine/credentials' dst = '/root/.config/earthengine/credentials' wd = os.path.dirname(dst) if (not os.path.exists(wd)): os.makedirs(wd) shutil.copyfile(src, dst)
Copies ee credentials from Google Drive to Google Colab.
geemap/common.py
copy_credentials_to_colab
arheem/geemap
1
python
def copy_credentials_to_colab(): '\n ' src = '/content/drive/My Drive/.config/earthengine/credentials' dst = '/root/.config/earthengine/credentials' wd = os.path.dirname(dst) if (not os.path.exists(wd)): os.makedirs(wd) shutil.copyfile(src, dst)
def copy_credentials_to_colab(): '\n ' src = '/content/drive/My Drive/.config/earthengine/credentials' dst = '/root/.config/earthengine/credentials' wd = os.path.dirname(dst) if (not os.path.exists(wd)): os.makedirs(wd) shutil.copyfile(src, dst)<|docstring|>Copies ee credentials from Google Drive to Google Colab.<|endoftext|>
50122032d5da42dc9999e5cc7a7efa5bc29cf402cb5f22346ebc9dd55e5b91af
def check_install(package): 'Checks whether a package is installed. If not, it will install the package.\n\n Args:\n package (str): The name of the package to check.\n ' import subprocess try: __import__(package) except ImportError: print('{} is not installed. Installing ...'.format(package)) try: subprocess.check_call(['python', '-m', 'pip', 'install', package]) except Exception as e: print('Failed to install {}'.format(package)) print(e) print('{} has been installed successfully.'.format(package))
Checks whether a package is installed. If not, it will install the package. Args: package (str): The name of the package to check.
geemap/common.py
check_install
arheem/geemap
1
python
def check_install(package): 'Checks whether a package is installed. If not, it will install the package.\n\n Args:\n package (str): The name of the package to check.\n ' import subprocess try: __import__(package) except ImportError: print('{} is not installed. Installing ...'.format(package)) try: subprocess.check_call(['python', '-m', 'pip', 'install', package]) except Exception as e: print('Failed to install {}'.format(package)) print(e) print('{} has been installed successfully.'.format(package))
def check_install(package): 'Checks whether a package is installed. If not, it will install the package.\n\n Args:\n package (str): The name of the package to check.\n ' import subprocess try: __import__(package) except ImportError: print('{} is not installed. Installing ...'.format(package)) try: subprocess.check_call(['python', '-m', 'pip', 'install', package]) except Exception as e: print('Failed to install {}'.format(package)) print(e) print('{} has been installed successfully.'.format(package))<|docstring|>Checks whether a package is installed. If not, it will install the package. Args: package (str): The name of the package to check.<|endoftext|>
6643891683840bfc9c655c20597eddda5b77b91af0f212ca766d8c4d92db2258
def update_package(): "Updates the geemap package from the geemap GitHub repository without the need to use pip or conda.\n In this way, I don't have to keep updating pypi and conda-forge with every minor update of the package.\n\n " import shutil try: download_dir = os.path.join(os.path.expanduser('~'), 'Downloads') if (not os.path.exists(download_dir)): os.makedirs(download_dir) clone_repo(out_dir=download_dir) pkg_dir = os.path.join(download_dir, 'geemap-master') work_dir = os.getcwd() os.chdir(pkg_dir) if (shutil.which('pip') is None): cmd = 'pip3 install .' else: cmd = 'pip install .' os.system(cmd) os.chdir(work_dir) print("\nPlease comment out 'geemap.update_package()' and restart the kernel to take effect:\nJupyter menu -> Kernel -> Restart & Clear Output") except Exception as e: print(e)
Updates the geemap package from the geemap GitHub repository without the need to use pip or conda. In this way, I don't have to keep updating pypi and conda-forge with every minor update of the package.
geemap/common.py
update_package
arheem/geemap
1
python
def update_package(): "Updates the geemap package from the geemap GitHub repository without the need to use pip or conda.\n In this way, I don't have to keep updating pypi and conda-forge with every minor update of the package.\n\n " import shutil try: download_dir = os.path.join(os.path.expanduser('~'), 'Downloads') if (not os.path.exists(download_dir)): os.makedirs(download_dir) clone_repo(out_dir=download_dir) pkg_dir = os.path.join(download_dir, 'geemap-master') work_dir = os.getcwd() os.chdir(pkg_dir) if (shutil.which('pip') is None): cmd = 'pip3 install .' else: cmd = 'pip install .' os.system(cmd) os.chdir(work_dir) print("\nPlease comment out 'geemap.update_package()' and restart the kernel to take effect:\nJupyter menu -> Kernel -> Restart & Clear Output") except Exception as e: print(e)
def update_package(): "Updates the geemap package from the geemap GitHub repository without the need to use pip or conda.\n In this way, I don't have to keep updating pypi and conda-forge with every minor update of the package.\n\n " import shutil try: download_dir = os.path.join(os.path.expanduser('~'), 'Downloads') if (not os.path.exists(download_dir)): os.makedirs(download_dir) clone_repo(out_dir=download_dir) pkg_dir = os.path.join(download_dir, 'geemap-master') work_dir = os.getcwd() os.chdir(pkg_dir) if (shutil.which('pip') is None): cmd = 'pip3 install .' else: cmd = 'pip install .' os.system(cmd) os.chdir(work_dir) print("\nPlease comment out 'geemap.update_package()' and restart the kernel to take effect:\nJupyter menu -> Kernel -> Restart & Clear Output") except Exception as e: print(e)<|docstring|>Updates the geemap package from the geemap GitHub repository without the need to use pip or conda. In this way, I don't have to keep updating pypi and conda-forge with every minor update of the package.<|endoftext|>
8bf8869d68d123aee02d5de62f99bf18ab77d4648e608ab4b226002bb7575742
def clone_repo(out_dir='.', unzip=True):
    """Clones the geemap GitHub repository.

    Args:
        out_dir (str, optional): Output folder for the repo. Defaults to '.'.
        unzip (bool, optional): Whether to unzip the repository. Defaults to True.
    """
    # Download the master-branch archive and (optionally) extract it in place.
    archive_url = 'https://github.com/giswqs/geemap/archive/master.zip'
    local_zip_name = 'geemap-master.zip'
    download_from_url(
        archive_url, out_file_name=local_zip_name, out_dir=out_dir, unzip=unzip
    )
Clones the geemap GitHub repository. Args: out_dir (str, optional): Output folder for the repo. Defaults to '.'. unzip (bool, optional): Whether to unzip the repository. Defaults to True.
geemap/common.py
clone_repo
arheem/geemap
1
python
def clone_repo(out_dir='.', unzip=True): "Clones the geemap GitHub repository.\n\n Args:\n out_dir (str, optional): Output folder for the repo. Defaults to '.'.\n unzip (bool, optional): Whether to unzip the repository. Defaults to True.\n " url = 'https://github.com/giswqs/geemap/archive/master.zip' filename = 'geemap-master.zip' download_from_url(url, out_file_name=filename, out_dir=out_dir, unzip=unzip)
def clone_repo(out_dir='.', unzip=True): "Clones the geemap GitHub repository.\n\n Args:\n out_dir (str, optional): Output folder for the repo. Defaults to '.'.\n unzip (bool, optional): Whether to unzip the repository. Defaults to True.\n " url = 'https://github.com/giswqs/geemap/archive/master.zip' filename = 'geemap-master.zip' download_from_url(url, out_file_name=filename, out_dir=out_dir, unzip=unzip)<|docstring|>Clones the geemap GitHub repository. Args: out_dir (str, optional): Output folder for the repo. Defaults to '.'. unzip (bool, optional): Whether to unzip the repository. Defaults to True.<|endoftext|>
bf709a3ea0bd2d58badb62554675bba7fb0fc648b6d18f58fcee82d43502d3ca
def install_from_github(url):
    """Install a package from a GitHub repository.

    Args:
        url (str): The URL of the GitHub repository.

    Side effects: downloads the repo archive into ~/Downloads, temporarily
    changes the working directory, and runs `pip install .` there. Errors are
    printed rather than raised (best-effort helper).
    """
    try:
        download_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
        if not os.path.exists(download_dir):
            os.makedirs(download_dir)

        repo_name = os.path.basename(url)

        # Build the archive URL with plain string joining, NOT os.path.join:
        # os.path.join uses the OS path separator, producing backslashes on
        # Windows and breaking the URL.
        zip_url = url.rstrip('/') + '/archive/master.zip'
        filename = repo_name + '-master.zip'
        download_from_url(
            url=zip_url, out_file_name=filename, out_dir=download_dir, unzip=True
        )

        pkg_dir = os.path.join(download_dir, repo_name + '-master')
        work_dir = os.getcwd()
        os.chdir(pkg_dir)
        print('Installing {}...'.format(repo_name))
        cmd = 'pip install .'
        os.system(cmd)
        os.chdir(work_dir)
        print('{} has been installed successfully.'.format(repo_name))

    except Exception as e:
        print(e)
Install a package from a GitHub repository. Args: url (str): The URL of the GitHub repository.
geemap/common.py
install_from_github
arheem/geemap
1
python
def install_from_github(url): 'Install a package from a GitHub repository.\n\n Args:\n url (str): The URL of the GitHub repository.\n ' try: download_dir = os.path.join(os.path.expanduser('~'), 'Downloads') if (not os.path.exists(download_dir)): os.makedirs(download_dir) repo_name = os.path.basename(url) zip_url = os.path.join(url, 'archive/master.zip') filename = (repo_name + '-master.zip') download_from_url(url=zip_url, out_file_name=filename, out_dir=download_dir, unzip=True) pkg_dir = os.path.join(download_dir, (repo_name + '-master')) pkg_name = os.path.basename(url) work_dir = os.getcwd() os.chdir(pkg_dir) print('Installing {}...'.format(pkg_name)) cmd = 'pip install .' os.system(cmd) os.chdir(work_dir) print('{} has been installed successfully.'.format(pkg_name)) except Exception as e: print(e)
def install_from_github(url): 'Install a package from a GitHub repository.\n\n Args:\n url (str): The URL of the GitHub repository.\n ' try: download_dir = os.path.join(os.path.expanduser('~'), 'Downloads') if (not os.path.exists(download_dir)): os.makedirs(download_dir) repo_name = os.path.basename(url) zip_url = os.path.join(url, 'archive/master.zip') filename = (repo_name + '-master.zip') download_from_url(url=zip_url, out_file_name=filename, out_dir=download_dir, unzip=True) pkg_dir = os.path.join(download_dir, (repo_name + '-master')) pkg_name = os.path.basename(url) work_dir = os.getcwd() os.chdir(pkg_dir) print('Installing {}...'.format(pkg_name)) cmd = 'pip install .' os.system(cmd) os.chdir(work_dir) print('{} has been installed successfully.'.format(pkg_name)) except Exception as e: print(e)<|docstring|>Install a package from a GitHub repository. Args: url (str): The URL of the GitHub repository.<|endoftext|>
6bb215b1ca2798355d144715e0d14c868d6c9d7a59421d5bb775be5208980c02
def check_git_install():
    """Checks if Git is installed.

    Returns:
        bool: Returns True if Git is installed, otherwise returns False.
    """
    import webbrowser

    # `git --version` prints e.g. "git version 2.39.1" when Git is available.
    version_output = os.popen('git --version').read()
    if 'git version' in version_output:
        return True

    # Git missing: point the user at the official download page.
    url = 'https://git-scm.com/downloads'
    print('Git is not installed. Please download Git from {} and install it.'.format(url))
    webbrowser.open_new_tab(url)
    return False
Checks if Git is installed. Returns: bool: Returns True if Git is installed, otherwise returns False.
geemap/common.py
check_git_install
arheem/geemap
1
python
def check_git_install(): 'Checks if Git is installed.\n\n Returns:\n bool: Returns True if Git is installed, otherwise returns False.\n ' import webbrowser cmd = 'git --version' output = os.popen(cmd).read() if ('git version' in output): return True else: url = 'https://git-scm.com/downloads' print('Git is not installed. Please download Git from {} and install it.'.format(url)) webbrowser.open_new_tab(url) return False
def check_git_install(): 'Checks if Git is installed.\n\n Returns:\n bool: Returns True if Git is installed, otherwise returns False.\n ' import webbrowser cmd = 'git --version' output = os.popen(cmd).read() if ('git version' in output): return True else: url = 'https://git-scm.com/downloads' print('Git is not installed. Please download Git from {} and install it.'.format(url)) webbrowser.open_new_tab(url) return False<|docstring|>Checks if Git is installed. Returns: bool: Returns True if Git is installed, otherwise returns False.<|endoftext|>
260503137acd242cb084614afc0a98c200a4a310177a8f01b4424de48f77c545
def clone_github_repo(url, out_dir):
    """Clones a GitHub repository.

    Args:
        url (str): The link to the GitHub repository
        out_dir (str): The output directory for the cloned repository.
    """
    import zipfile

    repo_name = os.path.basename(url)
    url_zip = url + '/archive/master.zip'

    if os.path.exists(out_dir):
        print('The specified output directory already exists. Please choose a new directory.')
        return

    parent_dir = os.path.dirname(out_dir)
    out_file_path = os.path.join(parent_dir, repo_name + '.zip')

    try:
        urllib.request.urlretrieve(url_zip, out_file_path)
    # Catch Exception rather than using a bare `except:` so KeyboardInterrupt
    # and SystemExit are not silently swallowed.
    except Exception:
        print('The provided URL is invalid. Please double check the URL.')
        return

    with zipfile.ZipFile(out_file_path, 'r') as zip_ref:
        zip_ref.extractall(parent_dir)

    # GitHub archives extract to "<repo>-master"; rename that to out_dir.
    # Strip the ".zip" suffix explicitly instead of str.replace, which could
    # also match a ".zip" substring elsewhere in the path.
    src = out_file_path[:-len('.zip')] + '-master'
    os.rename(src, out_dir)
    os.remove(out_file_path)
Clones a GitHub repository. Args: url (str): The link to the GitHub repository out_dir (str): The output directory for the cloned repository.
geemap/common.py
clone_github_repo
arheem/geemap
1
python
def clone_github_repo(url, out_dir): 'Clones a GitHub repository.\n\n Args:\n url (str): The link to the GitHub repository\n out_dir (str): The output directory for the cloned repository. \n ' import zipfile repo_name = os.path.basename(url) url_zip = (url + '/archive/master.zip') if os.path.exists(out_dir): print('The specified output directory already exists. Please choose a new directory.') return parent_dir = os.path.dirname(out_dir) out_file_path = os.path.join(parent_dir, (repo_name + '.zip')) try: urllib.request.urlretrieve(url_zip, out_file_path) except: print('The provided URL is invalid. Please double check the URL.') return with zipfile.ZipFile(out_file_path, 'r') as zip_ref: zip_ref.extractall(parent_dir) src = out_file_path.replace('.zip', '-master') os.rename(src, out_dir) os.remove(out_file_path)
def clone_github_repo(url, out_dir): 'Clones a GitHub repository.\n\n Args:\n url (str): The link to the GitHub repository\n out_dir (str): The output directory for the cloned repository. \n ' import zipfile repo_name = os.path.basename(url) url_zip = (url + '/archive/master.zip') if os.path.exists(out_dir): print('The specified output directory already exists. Please choose a new directory.') return parent_dir = os.path.dirname(out_dir) out_file_path = os.path.join(parent_dir, (repo_name + '.zip')) try: urllib.request.urlretrieve(url_zip, out_file_path) except: print('The provided URL is invalid. Please double check the URL.') return with zipfile.ZipFile(out_file_path, 'r') as zip_ref: zip_ref.extractall(parent_dir) src = out_file_path.replace('.zip', '-master') os.rename(src, out_dir) os.remove(out_file_path)<|docstring|>Clones a GitHub repository. Args: url (str): The link to the GitHub repository out_dir (str): The output directory for the cloned repository.<|endoftext|>
6f3dacc41c93c0a98e1502f1c2c97c68339c99c6d7902c3f25d1ba557ec342a2
def clone_google_repo(url, out_dir=None):
    """Clones an Earth Engine repository from https://earthengine.googlesource.com, such as https://earthengine.googlesource.com/users/google/datasets

    Args:
        url (str): The link to the Earth Engine repository
        out_dir (str, optional): The output directory for the cloned repository. Defaults to None.
    """
    repo_name = os.path.basename(url)

    # Default destination: a folder named after the repo under the cwd.
    if out_dir is None:
        out_dir = os.path.join(os.getcwd(), repo_name)

    parent = os.path.dirname(out_dir)
    if not os.path.exists(parent):
        os.makedirs(parent)

    if os.path.exists(out_dir):
        print('The specified output directory already exists. Please choose a new directory.')
        return

    # Only attempt the clone when a working git binary is present.
    if check_git_install():
        cmd = 'git clone "{}" "{}"'.format(url, out_dir)
        os.popen(cmd).read()
Clones an Earth Engine repository from https://earthengine.googlesource.com, such as https://earthengine.googlesource.com/users/google/datasets Args: url (str): The link to the Earth Engine repository out_dir (str, optional): The output directory for the cloned repository. Defaults to None.
geemap/common.py
clone_google_repo
arheem/geemap
1
python
def clone_google_repo(url, out_dir=None): 'Clones an Earth Engine repository from https://earthengine.googlesource.com, such as https://earthengine.googlesource.com/users/google/datasets\n\n Args:\n url (str): The link to the Earth Engine repository\n out_dir (str, optional): The output directory for the cloned repository. Defaults to None.\n ' repo_name = os.path.basename(url) if (out_dir is None): out_dir = os.path.join(os.getcwd(), repo_name) if (not os.path.exists(os.path.dirname(out_dir))): os.makedirs(os.path.dirname(out_dir)) if os.path.exists(out_dir): print('The specified output directory already exists. Please choose a new directory.') return if check_git_install(): cmd = 'git clone "{}" "{}"'.format(url, out_dir) os.popen(cmd).read()
def clone_google_repo(url, out_dir=None): 'Clones an Earth Engine repository from https://earthengine.googlesource.com, such as https://earthengine.googlesource.com/users/google/datasets\n\n Args:\n url (str): The link to the Earth Engine repository\n out_dir (str, optional): The output directory for the cloned repository. Defaults to None.\n ' repo_name = os.path.basename(url) if (out_dir is None): out_dir = os.path.join(os.getcwd(), repo_name) if (not os.path.exists(os.path.dirname(out_dir))): os.makedirs(os.path.dirname(out_dir)) if os.path.exists(out_dir): print('The specified output directory already exists. Please choose a new directory.') return if check_git_install(): cmd = 'git clone "{}" "{}"'.format(url, out_dir) os.popen(cmd).read()<|docstring|>Clones an Earth Engine repository from https://earthengine.googlesource.com, such as https://earthengine.googlesource.com/users/google/datasets Args: url (str): The link to the Earth Engine repository out_dir (str, optional): The output directory for the cloned repository. Defaults to None.<|endoftext|>
86ddfd8bb34a9bff585abc5fe26de03f3d448a29cda45cc82817b18bd1c70f79
def open_github(subdir=None):
    """Opens the GitHub repository for this package.

    Args:
        subdir (str, optional): Sub-directory of the repository. Defaults to None.
    """
    import webbrowser

    base_url = 'https://github.com/giswqs/geemap'
    # Map the recognized subdir keywords to their repository paths;
    # anything else (including None) opens the repo root.
    suffixes = {
        'source': '/tree/master/geemap/',
        'examples': '/tree/master/examples',
        'tutorials': '/tree/master/tutorials',
    }
    webbrowser.open_new_tab(base_url + suffixes.get(subdir, ''))
Opens the GitHub repository for this package. Args: subdir (str, optional): Sub-directory of the repository. Defaults to None.
geemap/common.py
open_github
arheem/geemap
1
python
def open_github(subdir=None): 'Opens the GitHub repository for this package.\n\n Args:\n subdir (str, optional): Sub-directory of the repository. Defaults to None.\n ' import webbrowser url = 'https://github.com/giswqs/geemap' if (subdir == 'source'): url += '/tree/master/geemap/' elif (subdir == 'examples'): url += '/tree/master/examples' elif (subdir == 'tutorials'): url += '/tree/master/tutorials' webbrowser.open_new_tab(url)
def open_github(subdir=None): 'Opens the GitHub repository for this package.\n\n Args:\n subdir (str, optional): Sub-directory of the repository. Defaults to None.\n ' import webbrowser url = 'https://github.com/giswqs/geemap' if (subdir == 'source'): url += '/tree/master/geemap/' elif (subdir == 'examples'): url += '/tree/master/examples' elif (subdir == 'tutorials'): url += '/tree/master/tutorials' webbrowser.open_new_tab(url)<|docstring|>Opens the GitHub repository for this package. Args: subdir (str, optional): Sub-directory of the repository. Defaults to None.<|endoftext|>
9306390997f6d1ab399a3c6709474cfbcd8d99c0eed9e2786dd78817e4009b7e
def open_youtube():
    """Opens the YouTube tutorials for geemap."""
    import webbrowser

    playlist_url = 'https://www.youtube.com/playlist?list=PLAxJ4-o7ZoPccOFv1dCwvGI6TYnirRTg3'
    webbrowser.open_new_tab(playlist_url)
Opens the YouTube tutorials for geemap.
geemap/common.py
open_youtube
arheem/geemap
1
python
def open_youtube(): '\n ' import webbrowser url = 'https://www.youtube.com/playlist?list=PLAxJ4-o7ZoPccOFv1dCwvGI6TYnirRTg3' webbrowser.open_new_tab(url)
def open_youtube(): '\n ' import webbrowser url = 'https://www.youtube.com/playlist?list=PLAxJ4-o7ZoPccOFv1dCwvGI6TYnirRTg3' webbrowser.open_new_tab(url)<|docstring|>Opens the YouTube tutorials for geemap.<|endoftext|>
c3e980a4902c897a44b21587b45a2abefcf961679341a09109c7d910014fe8a8
def is_tool(name):
    """Check whether `name` is on PATH and marked as executable."""
    import shutil

    # shutil.which returns the resolved path, or None when not found.
    return shutil.which(name) is not None
Check whether `name` is on PATH and marked as executable.
geemap/common.py
is_tool
arheem/geemap
1
python
def is_tool(name): from shutil import which return (which(name) is not None)
def is_tool(name): from shutil import which return (which(name) is not None)<|docstring|>Check whether `name` is on PATH and marked as executable.<|endoftext|>
5ecb9beef465c8be484dda38949a41921839fc7b8bf930158277ad0020a87472
def random_string(string_length=3):
    """Generates a random string of fixed length.

    Args:
        string_length (int, optional): Fixed length. Defaults to 3.

    Returns:
        str: A random string
    """
    import random
    import string

    alphabet = string.ascii_lowercase
    return ''.join(random.choice(alphabet) for _ in range(string_length))
Generates a random string of fixed length. Args: string_length (int, optional): Fixed length. Defaults to 3. Returns: str: A random string
geemap/common.py
random_string
arheem/geemap
1
python
def random_string(string_length=3): 'Generates a random string of fixed length. \n\n    Args:\n        string_length (int, optional): Fixed length. Defaults to 3.\n\n    Returns:\n        str: A random string\n    ' import random import string letters = string.ascii_lowercase return ''.join((random.choice(letters) for i in range(string_length)))
def random_string(string_length=3): 'Generates a random string of fixed length. \n\n Args:\n string_length (int, optional): Fixed length. Defaults to 3.\n\n Returns:\n str: A random string\n ' import random import string letters = string.ascii_lowercase return .join((random.choice(letters) for i in range(string_length)))<|docstring|>Generates a random string of fixed length. Args: string_length (int, optional): Fixed length. Defaults to 3. Returns: str: A random string<|endoftext|>