| body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring |
|---|---|---|---|---|---|---|---|
def create_scraper_configuration(self, instance=None):
'\n Creates a scraper configuration.\n\n If instance does not specify a value for a configuration option, the value will default to the `init_config`.\n Otherwise, the `default_instance` value will be used.\n\n A default mixin configuration will be returned if there is no instance.\n '
if (instance is None):
instance = {}
if ('openmetrics_endpoint' in instance):
raise CheckException('The setting `openmetrics_endpoint` is only available for Agent version 7 or later')
config = copy.deepcopy(instance)
endpoint = instance.get('prometheus_url')
if (instance and (endpoint is None)):
raise CheckException('You have to define a prometheus_url for each prometheus instance')
config['prometheus_url'] = endpoint
namespace = instance.get('namespace')
if (instance and (namespace is None)):
if (self.default_namespace is None):
raise CheckException('You have to define a namespace for each prometheus check')
namespace = self.default_namespace
config['namespace'] = namespace
default_instance = self.default_instances.get(namespace, {})
metrics_mapper = {}
metrics = (default_instance.get('metrics', []) + instance.get('metrics', []))
for metric in metrics:
if isinstance(metric, string_types):
metrics_mapper[metric] = metric
else:
metrics_mapper.update(metric)
config['metrics_mapper'] = metrics_mapper
config['_wildcards_re'] = None
wildcards = set()
for metric in config['metrics_mapper']:
if ('*' in metric):
wildcards.add(translate(metric))
if wildcards:
config['_wildcards_re'] = compile('|'.join(wildcards))
config['prometheus_metrics_prefix'] = instance.get('prometheus_metrics_prefix', default_instance.get('prometheus_metrics_prefix', ''))
config['label_joins'] = default_instance.get('label_joins', {})
config['label_joins'].update(instance.get('label_joins', {}))
config['_label_mapping'] = {}
config['_active_label_mapping'] = {}
config['_watched_labels'] = {}
config['_dry_run'] = True
config['ignore_metrics'] = instance.get('ignore_metrics', default_instance.get('ignore_metrics', []))
config['_ignored_metrics'] = set()
config['_ignored_re'] = None
ignored_patterns = set()
for metric in config['ignore_metrics']:
if ('*' in metric):
ignored_patterns.add(translate(metric))
else:
config['_ignored_metrics'].add(metric)
if ignored_patterns:
config['_ignored_re'] = compile('|'.join(ignored_patterns))
config['ignore_metrics_by_labels'] = instance.get('ignore_metrics_by_labels', default_instance.get('ignore_metrics_by_labels', {}))
config['send_histograms_buckets'] = is_affirmative(instance.get('send_histograms_buckets', default_instance.get('send_histograms_buckets', True)))
config['non_cumulative_buckets'] = is_affirmative(instance.get('non_cumulative_buckets', default_instance.get('non_cumulative_buckets', False)))
config['send_distribution_buckets'] = is_affirmative(instance.get('send_distribution_buckets', default_instance.get('send_distribution_buckets', False)))
if (config['send_distribution_buckets'] is True):
config['non_cumulative_buckets'] = True
config['send_monotonic_counter'] = is_affirmative(instance.get('send_monotonic_counter', default_instance.get('send_monotonic_counter', True)))
config['send_monotonic_with_gauge'] = is_affirmative(instance.get('send_monotonic_with_gauge', default_instance.get('send_monotonic_with_gauge', False)))
config['send_distribution_counts_as_monotonic'] = is_affirmative(instance.get('send_distribution_counts_as_monotonic', default_instance.get('send_distribution_counts_as_monotonic', False)))
config['send_distribution_sums_as_monotonic'] = is_affirmative(instance.get('send_distribution_sums_as_monotonic', default_instance.get('send_distribution_sums_as_monotonic', False)))
config['labels_mapper'] = default_instance.get('labels_mapper', {})
config['labels_mapper'].update(instance.get('labels_mapper', {}))
config['labels_mapper']['le'] = 'upper_bound'
config['exclude_labels'] = (default_instance.get('exclude_labels', []) + instance.get('exclude_labels', []))
config['type_overrides'] = default_instance.get('type_overrides', {})
config['type_overrides'].update(instance.get('type_overrides', {}))
config['_type_override_patterns'] = {}
with_wildcards = set()
for (metric, type) in iteritems(config['type_overrides']):
if ('*' in metric):
config['_type_override_patterns'][compile(translate(metric))] = type
with_wildcards.add(metric)
for metric in with_wildcards:
del config['type_overrides'][metric]
config['label_to_hostname'] = instance.get('label_to_hostname', default_instance.get('label_to_hostname', None))
config['label_to_hostname_suffix'] = instance.get('label_to_hostname_suffix', default_instance.get('label_to_hostname_suffix', None))
config['health_service_check'] = is_affirmative(instance.get('health_service_check', default_instance.get('health_service_check', True)))
config['ssl_cert'] = instance.get('ssl_cert', default_instance.get('ssl_cert', None))
config['ssl_private_key'] = instance.get('ssl_private_key', default_instance.get('ssl_private_key', None))
config['ssl_ca_cert'] = instance.get('ssl_ca_cert', default_instance.get('ssl_ca_cert', None))
config['ssl_verify'] = is_affirmative(instance.get('ssl_verify', default_instance.get('ssl_verify', True)))
config['extra_headers'] = default_instance.get('extra_headers', {})
config['extra_headers'].update(instance.get('extra_headers', {}))
config['prometheus_timeout'] = instance.get('prometheus_timeout', default_instance.get('prometheus_timeout', 10))
config['username'] = instance.get('username', default_instance.get('username', None))
config['password'] = instance.get('password', default_instance.get('password', None))
config['custom_tags'] = instance.get('tags', [])
config['_metric_tags'] = []
config['_text_filter_blacklist'] = []
config['bearer_token_auth'] = is_affirmative(instance.get('bearer_token_auth', default_instance.get('bearer_token_auth', False)))
config['bearer_token_path'] = instance.get('bearer_token_path', default_instance.get('bearer_token_path', None))
config['_bearer_token'] = self._get_bearer_token(config['bearer_token_auth'], config['bearer_token_path'])
config['telemetry'] = is_affirmative(instance.get('telemetry', default_instance.get('telemetry', False)))
config['metadata_metric_name'] = instance.get('metadata_metric_name', default_instance.get('metadata_metric_name'))
config['metadata_label_map'] = instance.get('metadata_label_map', default_instance.get('metadata_label_map', {}))
config['_default_metric_transformers'] = {}
if (config['metadata_metric_name'] and config['metadata_label_map']):
config['_default_metric_transformers'][config['metadata_metric_name']] = self.transform_metadata
config['_successfully_executed'] = False
return config
| 7,534,735,580,054,286,000
|
Creates a scraper configuration.
If instance does not specify a value for a configuration option, the value will default to the `init_config`.
Otherwise, the `default_instance` value will be used.
A default mixin configuration will be returned if there is no instance.
|
datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
|
create_scraper_configuration
|
DingGGu/integrations-core
|
python
|
def create_scraper_configuration(self, instance=None):
'\n Creates a scraper configuration.\n\n If instance does not specify a value for a configuration option, the value will default to the `init_config`.\n Otherwise, the `default_instance` value will be used.\n\n A default mixin configuration will be returned if there is no instance.\n '
if (instance is None):
instance = {}
if ('openmetrics_endpoint' in instance):
raise CheckException('The setting `openmetrics_endpoint` is only available for Agent version 7 or later')
config = copy.deepcopy(instance)
endpoint = instance.get('prometheus_url')
if (instance and (endpoint is None)):
raise CheckException('You have to define a prometheus_url for each prometheus instance')
config['prometheus_url'] = endpoint
namespace = instance.get('namespace')
if (instance and (namespace is None)):
if (self.default_namespace is None):
raise CheckException('You have to define a namespace for each prometheus check')
namespace = self.default_namespace
config['namespace'] = namespace
default_instance = self.default_instances.get(namespace, {})
metrics_mapper = {}
metrics = (default_instance.get('metrics', []) + instance.get('metrics', []))
for metric in metrics:
if isinstance(metric, string_types):
metrics_mapper[metric] = metric
else:
metrics_mapper.update(metric)
config['metrics_mapper'] = metrics_mapper
config['_wildcards_re'] = None
wildcards = set()
for metric in config['metrics_mapper']:
if ('*' in metric):
wildcards.add(translate(metric))
if wildcards:
config['_wildcards_re'] = compile('|'.join(wildcards))
config['prometheus_metrics_prefix'] = instance.get('prometheus_metrics_prefix', default_instance.get('prometheus_metrics_prefix', ''))
config['label_joins'] = default_instance.get('label_joins', {})
config['label_joins'].update(instance.get('label_joins', {}))
config['_label_mapping'] = {}
config['_active_label_mapping'] = {}
config['_watched_labels'] = {}
config['_dry_run'] = True
config['ignore_metrics'] = instance.get('ignore_metrics', default_instance.get('ignore_metrics', []))
config['_ignored_metrics'] = set()
config['_ignored_re'] = None
ignored_patterns = set()
for metric in config['ignore_metrics']:
if ('*' in metric):
ignored_patterns.add(translate(metric))
else:
config['_ignored_metrics'].add(metric)
if ignored_patterns:
config['_ignored_re'] = compile('|'.join(ignored_patterns))
config['ignore_metrics_by_labels'] = instance.get('ignore_metrics_by_labels', default_instance.get('ignore_metrics_by_labels', {}))
config['send_histograms_buckets'] = is_affirmative(instance.get('send_histograms_buckets', default_instance.get('send_histograms_buckets', True)))
config['non_cumulative_buckets'] = is_affirmative(instance.get('non_cumulative_buckets', default_instance.get('non_cumulative_buckets', False)))
config['send_distribution_buckets'] = is_affirmative(instance.get('send_distribution_buckets', default_instance.get('send_distribution_buckets', False)))
if (config['send_distribution_buckets'] is True):
config['non_cumulative_buckets'] = True
config['send_monotonic_counter'] = is_affirmative(instance.get('send_monotonic_counter', default_instance.get('send_monotonic_counter', True)))
config['send_monotonic_with_gauge'] = is_affirmative(instance.get('send_monotonic_with_gauge', default_instance.get('send_monotonic_with_gauge', False)))
config['send_distribution_counts_as_monotonic'] = is_affirmative(instance.get('send_distribution_counts_as_monotonic', default_instance.get('send_distribution_counts_as_monotonic', False)))
config['send_distribution_sums_as_monotonic'] = is_affirmative(instance.get('send_distribution_sums_as_monotonic', default_instance.get('send_distribution_sums_as_monotonic', False)))
config['labels_mapper'] = default_instance.get('labels_mapper', {})
config['labels_mapper'].update(instance.get('labels_mapper', {}))
config['labels_mapper']['le'] = 'upper_bound'
config['exclude_labels'] = (default_instance.get('exclude_labels', []) + instance.get('exclude_labels', []))
config['type_overrides'] = default_instance.get('type_overrides', {})
config['type_overrides'].update(instance.get('type_overrides', {}))
config['_type_override_patterns'] = {}
with_wildcards = set()
for (metric, type) in iteritems(config['type_overrides']):
if ('*' in metric):
config['_type_override_patterns'][compile(translate(metric))] = type
with_wildcards.add(metric)
for metric in with_wildcards:
del config['type_overrides'][metric]
config['label_to_hostname'] = instance.get('label_to_hostname', default_instance.get('label_to_hostname', None))
config['label_to_hostname_suffix'] = instance.get('label_to_hostname_suffix', default_instance.get('label_to_hostname_suffix', None))
config['health_service_check'] = is_affirmative(instance.get('health_service_check', default_instance.get('health_service_check', True)))
config['ssl_cert'] = instance.get('ssl_cert', default_instance.get('ssl_cert', None))
config['ssl_private_key'] = instance.get('ssl_private_key', default_instance.get('ssl_private_key', None))
config['ssl_ca_cert'] = instance.get('ssl_ca_cert', default_instance.get('ssl_ca_cert', None))
config['ssl_verify'] = is_affirmative(instance.get('ssl_verify', default_instance.get('ssl_verify', True)))
config['extra_headers'] = default_instance.get('extra_headers', {})
config['extra_headers'].update(instance.get('extra_headers', {}))
config['prometheus_timeout'] = instance.get('prometheus_timeout', default_instance.get('prometheus_timeout', 10))
config['username'] = instance.get('username', default_instance.get('username', None))
config['password'] = instance.get('password', default_instance.get('password', None))
config['custom_tags'] = instance.get('tags', [])
config['_metric_tags'] = []
config['_text_filter_blacklist'] = []
config['bearer_token_auth'] = is_affirmative(instance.get('bearer_token_auth', default_instance.get('bearer_token_auth', False)))
config['bearer_token_path'] = instance.get('bearer_token_path', default_instance.get('bearer_token_path', None))
config['_bearer_token'] = self._get_bearer_token(config['bearer_token_auth'], config['bearer_token_path'])
config['telemetry'] = is_affirmative(instance.get('telemetry', default_instance.get('telemetry', False)))
config['metadata_metric_name'] = instance.get('metadata_metric_name', default_instance.get('metadata_metric_name'))
config['metadata_label_map'] = instance.get('metadata_label_map', default_instance.get('metadata_label_map', {}))
config['_default_metric_transformers'] = {}
if (config['metadata_metric_name'] and config['metadata_label_map']):
config['_default_metric_transformers'][config['metadata_metric_name']] = self.transform_metadata
config['_successfully_executed'] = False
return config
|
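Two patterns recur throughout `create_scraper_configuration` above: every option is resolved with instance > default_instance > hard-coded fallback precedence, and metric names containing `*` are compiled into a single alternation regex. A minimal runnable sketch of both, assuming the `translate` and `compile` names in the body refer to `fnmatch.translate` and `re.compile`; the option names and values below are invented for illustration.

```python
from fnmatch import translate
from re import compile

instance = {'prometheus_url': 'http://localhost:9090/metrics'}
default_instance = {'prometheus_timeout': 30}

def resolve(option, fallback):
    # instance value wins, then the namespace default, then the fallback
    return instance.get(option, default_instance.get(option, fallback))

assert resolve('prometheus_timeout', 10) == 30  # default_instance wins over the fallback
assert resolve('ssl_verify', True) is True      # hard-coded fallback

# Wildcard metric names become one compiled alternation, as in the body above.
wildcards = {translate(m) for m in ('go_*', 'process_*') if '*' in m}
wildcards_re = compile('|'.join(wildcards))
assert wildcards_re.search('go_goroutines')
assert not wildcards_re.search('jvm_threads')
```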
def get_http_handler(self, scraper_config):
'\n Get http handler for a specific scraper config.\n The http handler is cached using `prometheus_url` as key.\n '
prometheus_url = scraper_config['prometheus_url']
if (prometheus_url in self._http_handlers):
return self._http_handlers[prometheus_url]
if (scraper_config['ssl_ca_cert'] is False):
scraper_config['ssl_verify'] = False
if (scraper_config['ssl_verify'] is False):
scraper_config.setdefault('tls_ignore_warning', True)
http_handler = self._http_handlers[prometheus_url] = RequestsWrapper(scraper_config, self.init_config, self.HTTP_CONFIG_REMAPPER, self.log)
headers = http_handler.options['headers']
bearer_token = scraper_config['_bearer_token']
if (bearer_token is not None):
headers['Authorization'] = 'Bearer {}'.format(bearer_token)
headers.setdefault('accept-encoding', 'gzip')
headers.setdefault('accept', 'text/plain')
return http_handler
| 1,936,616,197,089,814,800
|
Get http handler for a specific scraper config.
The http handler is cached using `prometheus_url` as key.
|
datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
|
get_http_handler
|
DingGGu/integrations-core
|
python
|
def get_http_handler(self, scraper_config):
'\n Get http handler for a specific scraper config.\n The http handler is cached using `prometheus_url` as key.\n '
prometheus_url = scraper_config['prometheus_url']
if (prometheus_url in self._http_handlers):
return self._http_handlers[prometheus_url]
if (scraper_config['ssl_ca_cert'] is False):
scraper_config['ssl_verify'] = False
if (scraper_config['ssl_verify'] is False):
scraper_config.setdefault('tls_ignore_warning', True)
http_handler = self._http_handlers[prometheus_url] = RequestsWrapper(scraper_config, self.init_config, self.HTTP_CONFIG_REMAPPER, self.log)
headers = http_handler.options['headers']
bearer_token = scraper_config['_bearer_token']
if (bearer_token is not None):
headers['Authorization'] = 'Bearer {}'.format(bearer_token)
headers.setdefault('accept-encoding', 'gzip')
headers.setdefault('accept', 'text/plain')
return http_handler
|
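The caching contract of `get_http_handler` in isolation: one handler per `prometheus_url`, created lazily, with default headers applied once at creation. `RequestsWrapper` is replaced here by a hypothetical stand-in class, so this is a sketch of the pattern rather than the real wrapper.

```python
class _Handler:
    """Hypothetical stand-in for RequestsWrapper: just carries header options."""

    def __init__(self, url):
        self.url = url
        self.options = {'headers': {}}

_http_handlers = {}

def get_handler(prometheus_url):
    # Return the cached handler for this URL, creating and configuring it once.
    if prometheus_url in _http_handlers:
        return _http_handlers[prometheus_url]
    handler = _http_handlers[prometheus_url] = _Handler(prometheus_url)
    handler.options['headers'].setdefault('accept-encoding', 'gzip')
    handler.options['headers'].setdefault('accept', 'text/plain')
    return handler

assert get_handler('http://a:9090/metrics') is get_handler('http://a:9090/metrics')
```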
def reset_http_config(self):
'\n You may need to use this when configuration is determined dynamically during every\n check run, such as when polling an external resource like the Kubelet.\n '
self._http_handlers.clear()
| 9,124,018,060,828,578,000
|
You may need to use this when configuration is determined dynamically during every
check run, such as when polling an external resource like the Kubelet.
|
datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
|
reset_http_config
|
DingGGu/integrations-core
|
python
|
def reset_http_config(self):
'\n You may need to use this when configuration is determined dynamically during every\n check run, such as when polling an external resource like the Kubelet.\n '
self._http_handlers.clear()
|
def parse_metric_family(self, response, scraper_config):
'\n Parse the MetricFamily from a valid `requests.Response` object to provide a MetricFamily object.\n The text format uses iter_lines() generator.\n '
if (response.encoding is None):
response.encoding = 'utf-8'
input_gen = response.iter_lines(chunk_size=self.REQUESTS_CHUNK_SIZE, decode_unicode=True)
if scraper_config['_text_filter_blacklist']:
input_gen = self._text_filter_input(input_gen, scraper_config)
for metric in text_fd_to_metric_families(input_gen):
self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_INPUT_COUNT, len(metric.samples), scraper_config)
type_override = scraper_config['type_overrides'].get(metric.name)
if type_override:
metric.type = type_override
elif scraper_config['_type_override_patterns']:
for (pattern, new_type) in iteritems(scraper_config['_type_override_patterns']):
if pattern.search(metric.name):
metric.type = new_type
break
if (metric.type not in self.METRIC_TYPES):
continue
metric.name = self._remove_metric_prefix(metric.name, scraper_config)
(yield metric)
| 1,450,872,603,286,986,000
|
Parse the MetricFamily from a valid `requests.Response` object to provide a MetricFamily object.
The text format uses iter_lines() generator.
|
datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
|
parse_metric_family
|
DingGGu/integrations-core
|
python
|
def parse_metric_family(self, response, scraper_config):
'\n Parse the MetricFamily from a valid `requests.Response` object to provide a MetricFamily object.\n The text format uses iter_lines() generator.\n '
if (response.encoding is None):
response.encoding = 'utf-8'
input_gen = response.iter_lines(chunk_size=self.REQUESTS_CHUNK_SIZE, decode_unicode=True)
if scraper_config['_text_filter_blacklist']:
input_gen = self._text_filter_input(input_gen, scraper_config)
for metric in text_fd_to_metric_families(input_gen):
self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_INPUT_COUNT, len(metric.samples), scraper_config)
type_override = scraper_config['type_overrides'].get(metric.name)
if type_override:
metric.type = type_override
elif scraper_config['_type_override_patterns']:
for (pattern, new_type) in iteritems(scraper_config['_type_override_patterns']):
if pattern.search(metric.name):
metric.type = new_type
break
if (metric.type not in self.METRIC_TYPES):
continue
metric.name = self._remove_metric_prefix(metric.name, scraper_config)
(yield metric)
|
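`parse_metric_family` streams `response.iter_lines()` into `text_fd_to_metric_families` from `prometheus_client`. A runnable sketch of the same parser family, using the string variant for simplicity; the payload is made up and `prometheus_client` is assumed to be installed.

```python
from prometheus_client.parser import text_string_to_metric_families

payload = '# TYPE process_open_fds gauge\nprocess_open_fds 7\n'
for family in text_string_to_metric_families(payload):
    for sample in family.samples:
        # prints: process_open_fds gauge {} 7.0
        print(family.name, family.type, sample.labels, sample.value)
```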
def _text_filter_input(self, input_gen, scraper_config):
"\n Filters out the text input line by line to avoid parsing and processing\n metrics we know we don't want to process. This only works on `text/plain`\n payloads, and is an INTERNAL FEATURE implemented for the kubelet check\n :param input_get: line generator\n :output: generator of filtered lines\n "
for line in input_gen:
for item in scraper_config['_text_filter_blacklist']:
if (item in line):
self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_BLACKLIST_COUNT, 1, scraper_config)
break
else:
(yield line)
| -6,281,635,910,394,082,000
|
Filters out the text input line by line to avoid parsing and processing
metrics we know we don't want to process. This only works on `text/plain`
payloads, and is an INTERNAL FEATURE implemented for the kubelet check
:param input_gen: line generator
:output: generator of filtered lines
|
datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
|
_text_filter_input
|
DingGGu/integrations-core
|
python
|
def _text_filter_input(self, input_gen, scraper_config):
"\n Filters out the text input line by line to avoid parsing and processing\n metrics we know we don't want to process. This only works on `text/plain`\n payloads, and is an INTERNAL FEATURE implemented for the kubelet check\n :param input_get: line generator\n :output: generator of filtered lines\n "
for line in input_gen:
for item in scraper_config['_text_filter_blacklist']:
if (item in line):
self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_BLACKLIST_COUNT, 1, scraper_config)
break
else:
(yield line)
|
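The filtering relies on Python's `for`/`else`: the `else` branch runs only when the inner loop was not broken, i.e. when no blacklist item matched, so matched lines are silently dropped. A standalone sketch with an invented blacklist:

```python
def filter_lines(lines, blacklist):
    for line in lines:
        for item in blacklist:
            if item in line:
                break  # a blacklisted substring matched: drop the line
        else:
            yield line  # no match: keep the line

lines = ['metric_a 1', 'noisy_metric 2', 'metric_b 3']
assert list(filter_lines(lines, ['noisy_'])) == ['metric_a 1', 'metric_b 3']
```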
def scrape_metrics(self, scraper_config):
'\n Poll the data from Prometheus and return the metrics as a generator.\n '
response = self.poll(scraper_config)
if scraper_config['telemetry']:
if ('content-length' in response.headers):
content_len = int(response.headers['content-length'])
else:
content_len = len(response.content)
self._send_telemetry_gauge(self.TELEMETRY_GAUGE_MESSAGE_SIZE, content_len, scraper_config)
try:
if (not scraper_config['label_joins']):
scraper_config['_dry_run'] = False
elif (not scraper_config['_watched_labels']):
watched = scraper_config['_watched_labels']
watched['sets'] = {}
watched['keys'] = {}
watched['singles'] = set()
for (key, val) in iteritems(scraper_config['label_joins']):
labels = []
if ('labels_to_match' in val):
labels = val['labels_to_match']
elif ('label_to_match' in val):
self.log.warning('`label_to_match` is being deprecated, please use `labels_to_match`')
if isinstance(val['label_to_match'], list):
labels = val['label_to_match']
else:
labels = [val['label_to_match']]
if labels:
s = frozenset(labels)
watched['sets'][key] = s
watched['keys'][key] = ','.join(s)
if (len(labels) == 1):
watched['singles'].add(labels[0])
for metric in self.parse_metric_family(response, scraper_config):
(yield metric)
scraper_config['_dry_run'] = False
for (metric, mapping) in list(iteritems(scraper_config['_label_mapping'])):
for key in list(mapping):
if ((metric in scraper_config['_active_label_mapping']) and (key not in scraper_config['_active_label_mapping'][metric])):
del scraper_config['_label_mapping'][metric][key]
scraper_config['_active_label_mapping'] = {}
finally:
response.close()
| -5,311,989,745,671,305,000
|
Poll the data from Prometheus and return the metrics as a generator.
|
datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
|
scrape_metrics
|
DingGGu/integrations-core
|
python
|
def scrape_metrics(self, scraper_config):
'\n \n '
response = self.poll(scraper_config)
if scraper_config['telemetry']:
if ('content-length' in response.headers):
content_len = int(response.headers['content-length'])
else:
content_len = len(response.content)
self._send_telemetry_gauge(self.TELEMETRY_GAUGE_MESSAGE_SIZE, content_len, scraper_config)
try:
if (not scraper_config['label_joins']):
scraper_config['_dry_run'] = False
elif (not scraper_config['_watched_labels']):
watched = scraper_config['_watched_labels']
watched['sets'] = {}
watched['keys'] = {}
watched['singles'] = set()
for (key, val) in iteritems(scraper_config['label_joins']):
labels = []
if ('labels_to_match' in val):
labels = val['labels_to_match']
elif ('label_to_match' in val):
self.log.warning('`label_to_match` is being deprecated, please use `labels_to_match`')
if isinstance(val['label_to_match'], list):
labels = val['label_to_match']
else:
labels = [val['label_to_match']]
if labels:
s = frozenset(labels)
watched['sets'][key] = s
watched['keys'][key] = ','.join(s)
if (len(labels) == 1):
watched['singles'].add(labels[0])
for metric in self.parse_metric_family(response, scraper_config):
(yield metric)
scraper_config['_dry_run'] = False
for (metric, mapping) in list(iteritems(scraper_config['_label_mapping'])):
for key in list(mapping):
if ((metric in scraper_config['_active_label_mapping']) and (key not in scraper_config['_active_label_mapping'][metric])):
del scraper_config['_label_mapping'][metric][key]
scraper_config['_active_label_mapping'] = {}
finally:
response.close()
|
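On the first run with `label_joins` configured, the method builds a watched-label index with three views: the label set per join key, a joined string key, and the single-label fast path. A standalone sketch of that construction, including the deprecated `label_to_match` form; the join configuration below is hypothetical.

```python
label_joins = {
    'kube_pod_info': {'labels_to_match': ['pod', 'namespace']},
    'kube_node_info': {'label_to_match': 'node'},  # deprecated single-label form
}

watched = {'sets': {}, 'keys': {}, 'singles': set()}
for key, val in label_joins.items():
    if 'labels_to_match' in val:
        labels = val['labels_to_match']
    else:  # legacy `label_to_match`, normalized to a list
        legacy = val['label_to_match']
        labels = legacy if isinstance(legacy, list) else [legacy]
    s = frozenset(labels)
    watched['sets'][key] = s
    watched['keys'][key] = ','.join(s)
    if len(labels) == 1:
        watched['singles'].add(labels[0])

assert watched['sets']['kube_pod_info'] == frozenset({'pod', 'namespace'})
assert watched['singles'] == {'node'}
```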
def process(self, scraper_config, metric_transformers=None):
'\n Polls the data from Prometheus and submits them as Datadog metrics.\n `endpoint` is the metrics endpoint to use to poll metrics from Prometheus\n\n Note that if the instance has a `tags` attribute, it will be pushed\n automatically as additional custom tags and added to the metrics\n '
transformers = scraper_config['_default_metric_transformers'].copy()
if metric_transformers:
transformers.update(metric_transformers)
for metric in self.scrape_metrics(scraper_config):
self.process_metric(metric, scraper_config, metric_transformers=transformers)
scraper_config['_successfully_executed'] = True
| 4,262,099,596,921,157,000
|
Polls the data from Prometheus and submits them as Datadog metrics.
`endpoint` is the metrics endpoint to use to poll metrics from Prometheus
Note that if the instance has a `tags` attribute, it will be pushed
automatically as additional custom tags and added to the metrics
|
datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
|
process
|
DingGGu/integrations-core
|
python
|
def process(self, scraper_config, metric_transformers=None):
'\n Polls the data from Prometheus and submits them as Datadog metrics.\n `endpoint` is the metrics endpoint to use to poll metrics from Prometheus\n\n Note that if the instance has a `tags` attribute, it will be pushed\n automatically as additional custom tags and added to the metrics\n '
transformers = scraper_config['_default_metric_transformers'].copy()
if metric_transformers:
transformers.update(metric_transformers)
for metric in self.scrape_metrics(scraper_config):
self.process_metric(metric, scraper_config, metric_transformers=transformers)
scraper_config['_successfully_executed'] = True
|
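The transformer merge in `process` gives per-call transformers precedence over the config's defaults when names collide. In isolation, with hypothetical handler names standing in for functions:

```python
default_transformers = {'build_info': 'default_handler'}
extra_transformers = {'build_info': 'custom_handler', 'other_metric': 'other_handler'}

transformers = default_transformers.copy()
transformers.update(extra_transformers)

assert transformers['build_info'] == 'custom_handler'  # per-call value wins
assert 'other_metric' in transformers                  # defaults are extended, not replaced
```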
def process_metric(self, metric, scraper_config, metric_transformers=None):
"\n Handle a Prometheus metric according to the following flow:\n - search `scraper_config['metrics_mapper']` for a prometheus.metric to datadog.metric mapping\n - call check method with the same name as the metric\n - log info if none of the above worked\n\n `metric_transformers` is a dict of `<metric name>:<function to run when the metric name is encountered>`\n "
self._store_labels(metric, scraper_config)
if scraper_config['ignore_metrics']:
if (metric.name in scraper_config['_ignored_metrics']):
self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_IGNORE_COUNT, len(metric.samples), scraper_config)
return
if (scraper_config['_ignored_re'] and scraper_config['_ignored_re'].search(metric.name)):
scraper_config['_ignored_metrics'].add(metric.name)
self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_IGNORE_COUNT, len(metric.samples), scraper_config)
return
self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_PROCESS_COUNT, len(metric.samples), scraper_config)
if self._filter_metric(metric, scraper_config):
return
self._join_labels(metric, scraper_config)
if scraper_config['_dry_run']:
return
try:
self.submit_openmetric(scraper_config['metrics_mapper'][metric.name], metric, scraper_config)
except KeyError:
if ((metric_transformers is not None) and (metric.name in metric_transformers)):
try:
transformer = metric_transformers[metric.name]
transformer(metric, scraper_config)
except Exception as err:
self.log.warning('Error handling metric: %s - error: %s', metric.name, err)
return
for (transformer_name, transformer) in iteritems(metric_transformers):
if (transformer_name.endswith('*') and metric.name.startswith(transformer_name[:(- 1)])):
transformer(metric, scraper_config, transformer_name)
if (scraper_config['_wildcards_re'] and scraper_config['_wildcards_re'].search(metric.name)):
self.submit_openmetric(metric.name, metric, scraper_config)
return
self.log.debug('Skipping metric `%s` as it is not defined in the metrics mapper, has no transformer function, nor does it match any wildcards.', metric.name)
| -3,420,036,718,522,466,300
|
Handle a Prometheus metric according to the following flow:
- search `scraper_config['metrics_mapper']` for a prometheus.metric to datadog.metric mapping
- call check method with the same name as the metric
- log info if none of the above worked
`metric_transformers` is a dict of `<metric name>:<function to run when the metric name is encountered>`
|
datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
|
process_metric
|
DingGGu/integrations-core
|
python
|
def process_metric(self, metric, scraper_config, metric_transformers=None):
"\n Handle a Prometheus metric according to the following flow:\n - search `scraper_config['metrics_mapper']` for a prometheus.metric to datadog.metric mapping\n - call check method with the same name as the metric\n - log info if none of the above worked\n\n `metric_transformers` is a dict of `<metric name>:<function to run when the metric name is encountered>`\n "
self._store_labels(metric, scraper_config)
if scraper_config['ignore_metrics']:
if (metric.name in scraper_config['_ignored_metrics']):
self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_IGNORE_COUNT, len(metric.samples), scraper_config)
return
if (scraper_config['_ignored_re'] and scraper_config['_ignored_re'].search(metric.name)):
scraper_config['_ignored_metrics'].add(metric.name)
self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_IGNORE_COUNT, len(metric.samples), scraper_config)
return
self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_PROCESS_COUNT, len(metric.samples), scraper_config)
if self._filter_metric(metric, scraper_config):
return
self._join_labels(metric, scraper_config)
if scraper_config['_dry_run']:
return
try:
self.submit_openmetric(scraper_config['metrics_mapper'][metric.name], metric, scraper_config)
except KeyError:
if ((metric_transformers is not None) and (metric.name in metric_transformers)):
try:
transformer = metric_transformers[metric.name]
transformer(metric, scraper_config)
except Exception as err:
self.log.warning('Error handling metric: %s - error: %s', metric.name, err)
return
for (transformer_name, transformer) in iteritems(metric_transformers):
if (transformer_name.endswith('*') and metric.name.startswith(transformer_name[:(- 1)])):
transformer(metric, scraper_config, transformer_name)
if (scraper_config['_wildcards_re'] and scraper_config['_wildcards_re'].search(metric.name)):
self.submit_openmetric(metric.name, metric, scraper_config)
return
self.log.debug('Skipping metric `%s` as it is not defined in the metrics mapper, has no transformer function, nor does it match any wildcards.', metric.name)
|
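The `KeyError` fallback path in `process_metric` also supports transformers registered under a trailing-`*` prefix. A short sketch of that dispatch, with invented metric names and a recording stand-in for a real transformer function:

```python
seen = []

def record(metric_name):
    # Hypothetical transformer: just records which metric it was called for.
    seen.append(metric_name)

metric_transformers = {'kube_*': record}

for name in ('kube_pod_status', 'node_cpu_seconds'):
    for transformer_name, transformer in metric_transformers.items():
        if transformer_name.endswith('*') and name.startswith(transformer_name[:-1]):
            transformer(name)

assert seen == ['kube_pod_status']
```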
def poll(self, scraper_config, headers=None):
"\n Returns a valid `requests.Response`, otherwise raise requests.HTTPError if the status code of the\n response isn't valid - see `response.raise_for_status()`\n\n The caller needs to close the requests.Response.\n\n Custom headers can be added to the default headers.\n "
endpoint = scraper_config.get('prometheus_url')
health_service_check = scraper_config['health_service_check']
service_check_name = self._metric_name_with_namespace('prometheus.health', scraper_config)
service_check_tags = ['endpoint:{}'.format(endpoint)]
service_check_tags.extend(scraper_config['custom_tags'])
try:
response = self.send_request(endpoint, scraper_config, headers)
except requests.exceptions.SSLError:
self.log.error('Invalid SSL settings for requesting %s endpoint', endpoint)
raise
except IOError:
if health_service_check:
self.service_check(service_check_name, AgentCheck.CRITICAL, tags=service_check_tags)
raise
try:
response.raise_for_status()
if health_service_check:
self.service_check(service_check_name, AgentCheck.OK, tags=service_check_tags)
return response
except requests.HTTPError:
response.close()
if health_service_check:
self.service_check(service_check_name, AgentCheck.CRITICAL, tags=service_check_tags)
raise
| 2,956,356,092,003,690,500
|
Returns a valid `requests.Response`, otherwise raise requests.HTTPError if the status code of the
response isn't valid - see `response.raise_for_status()`
The caller needs to close the requests.Response.
Custom headers can be added to the default headers.
|
datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
|
poll
|
DingGGu/integrations-core
|
python
|
def poll(self, scraper_config, headers=None):
"\n Returns a valid `requests.Response`, otherwise raise requests.HTTPError if the status code of the\n response isn't valid - see `response.raise_for_status()`\n\n The caller needs to close the requests.Response.\n\n Custom headers can be added to the default headers.\n "
endpoint = scraper_config.get('prometheus_url')
health_service_check = scraper_config['health_service_check']
service_check_name = self._metric_name_with_namespace('prometheus.health', scraper_config)
service_check_tags = ['endpoint:{}'.format(endpoint)]
service_check_tags.extend(scraper_config['custom_tags'])
try:
response = self.send_request(endpoint, scraper_config, headers)
except requests.exceptions.SSLError:
self.log.error('Invalid SSL settings for requesting %s endpoint', endpoint)
raise
except IOError:
if health_service_check:
self.service_check(service_check_name, AgentCheck.CRITICAL, tags=service_check_tags)
raise
try:
response.raise_for_status()
if health_service_check:
self.service_check(service_check_name, AgentCheck.OK, tags=service_check_tags)
return response
except requests.HTTPError:
response.close()
if health_service_check:
self.service_check(service_check_name, AgentCheck.CRITICAL, tags=service_check_tags)
raise
|
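The contract of `poll` reduces to: return an open `requests.Response` on success, and close it before re-raising when the status code is bad. A minimal `requests`-only sketch of that contract, with the service checks and Datadog plumbing omitted; the URL would be a real Prometheus endpoint.

```python
import requests

def poll(url, timeout=10):
    response = requests.get(url, stream=True, timeout=timeout)
    try:
        response.raise_for_status()
    except requests.HTTPError:
        response.close()  # never leak a connection on an error status
        raise
    return response  # the caller is responsible for closing this
```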
def get_hostname_for_sample(self, sample, scraper_config):
'\n Expose the label_to_hostname mapping logic to custom handler methods\n '
return self._get_hostname(None, sample, scraper_config)
| -893,631,662,106,031,400
|
Expose the label_to_hostname mapping logic to custom handler methods
|
datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
|
get_hostname_for_sample
|
DingGGu/integrations-core
|
python
|
def get_hostname_for_sample(self, sample, scraper_config):
'\n \n '
return self._get_hostname(None, sample, scraper_config)
|
def submit_openmetric(self, metric_name, metric, scraper_config, hostname=None):
"\n For each sample in the metric, report it as a gauge with all labels as tags\n except if a labels `dict` is passed, in which case keys are label names we'll extract\n and corresponding values are tag names we'll use (eg: {'node': 'node'}).\n\n Histograms generate a set of values instead of a unique metric.\n `send_histograms_buckets` is used to specify if you want to\n send the buckets as tagged values when dealing with histograms.\n\n `custom_tags` is an array of `tag:value` that will be added to the\n metric when sending the gauge to Datadog.\n "
if (metric.type in ['gauge', 'counter', 'rate']):
metric_name_with_namespace = self._metric_name_with_namespace(metric_name, scraper_config)
for sample in metric.samples:
if self._ignore_metrics_by_label(scraper_config, metric_name, sample):
continue
val = sample[self.SAMPLE_VALUE]
if (not self._is_value_valid(val)):
self.log.debug('Metric value is not supported for metric %s', sample[self.SAMPLE_NAME])
continue
custom_hostname = self._get_hostname(hostname, sample, scraper_config)
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
if ((metric.type == 'counter') and scraper_config['send_monotonic_counter']):
self.monotonic_count(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname, flush_first_value=scraper_config['_successfully_executed'])
elif (metric.type == 'rate'):
self.rate(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname)
else:
self.gauge(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname)
if ((metric.type == 'counter') and scraper_config['send_monotonic_with_gauge']):
self.monotonic_count((metric_name_with_namespace + '.total'), val, tags=tags, hostname=custom_hostname, flush_first_value=scraper_config['_successfully_executed'])
elif (metric.type == 'histogram'):
self._submit_gauges_from_histogram(metric_name, metric, scraper_config)
elif (metric.type == 'summary'):
self._submit_gauges_from_summary(metric_name, metric, scraper_config)
else:
self.log.error('Metric type %s unsupported for metric %s.', metric.type, metric_name)
| -6,410,313,078,005,258,000
|
For each sample in the metric, report it as a gauge with all labels as tags
except if a labels `dict` is passed, in which case keys are label names we'll extract
and corresponding values are tag names we'll use (eg: {'node': 'node'}).
Histograms generate a set of values instead of a unique metric.
`send_histograms_buckets` is used to specify if you want to
send the buckets as tagged values when dealing with histograms.
`custom_tags` is an array of `tag:value` that will be added to the
metric when sending the gauge to Datadog.
|
datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
|
submit_openmetric
|
DingGGu/integrations-core
|
python
|
def submit_openmetric(self, metric_name, metric, scraper_config, hostname=None):
"\n For each sample in the metric, report it as a gauge with all labels as tags\n except if a labels `dict` is passed, in which case keys are label names we'll extract\n and corresponding values are tag names we'll use (eg: {'node': 'node'}).\n\n Histograms generate a set of values instead of a unique metric.\n `send_histograms_buckets` is used to specify if you want to\n send the buckets as tagged values when dealing with histograms.\n\n `custom_tags` is an array of `tag:value` that will be added to the\n metric when sending the gauge to Datadog.\n "
if (metric.type in ['gauge', 'counter', 'rate']):
metric_name_with_namespace = self._metric_name_with_namespace(metric_name, scraper_config)
for sample in metric.samples:
if self._ignore_metrics_by_label(scraper_config, metric_name, sample):
continue
val = sample[self.SAMPLE_VALUE]
if (not self._is_value_valid(val)):
self.log.debug('Metric value is not supported for metric %s', sample[self.SAMPLE_NAME])
continue
custom_hostname = self._get_hostname(hostname, sample, scraper_config)
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
if ((metric.type == 'counter') and scraper_config['send_monotonic_counter']):
self.monotonic_count(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname, flush_first_value=scraper_config['_successfully_executed'])
elif (metric.type == 'rate'):
self.rate(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname)
else:
self.gauge(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname)
if ((metric.type == 'counter') and scraper_config['send_monotonic_with_gauge']):
self.monotonic_count((metric_name_with_namespace + '.total'), val, tags=tags, hostname=custom_hostname, flush_first_value=scraper_config['_successfully_executed'])
elif (metric.type == 'histogram'):
self._submit_gauges_from_histogram(metric_name, metric, scraper_config)
elif (metric.type == 'summary'):
self._submit_gauges_from_summary(metric_name, metric, scraper_config)
else:
self.log.error('Metric type %s unsupported for metric %s.', metric.type, metric_name)
|
def _get_hostname(self, hostname, sample, scraper_config):
'\n If hostname is None, look at label_to_hostname setting\n '
if ((hostname is None) and (scraper_config['label_to_hostname'] is not None) and sample[self.SAMPLE_LABELS].get(scraper_config['label_to_hostname'])):
hostname = sample[self.SAMPLE_LABELS][scraper_config['label_to_hostname']]
suffix = scraper_config['label_to_hostname_suffix']
if (suffix is not None):
hostname += suffix
return hostname
| -251,633,848,672,179,940
|
If hostname is None, look at label_to_hostname setting
|
datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
|
_get_hostname
|
DingGGu/integrations-core
|
python
|
def _get_hostname(self, hostname, sample, scraper_config):
'\n \n '
if ((hostname is None) and (scraper_config['label_to_hostname'] is not None) and sample[self.SAMPLE_LABELS].get(scraper_config['label_to_hostname'])):
hostname = sample[self.SAMPLE_LABELS][scraper_config['label_to_hostname']]
suffix = scraper_config['label_to_hostname_suffix']
if (suffix is not None):
hostname += suffix
return hostname
|
def _submit_gauges_from_summary(self, metric_name, metric, scraper_config, hostname=None):
'\n Extracts metrics from a prometheus summary metric and sends them as gauges\n '
for sample in metric.samples:
val = sample[self.SAMPLE_VALUE]
if (not self._is_value_valid(val)):
self.log.debug('Metric value is not supported for metric %s', sample[self.SAMPLE_NAME])
continue
if self._ignore_metrics_by_label(scraper_config, metric_name, sample):
continue
custom_hostname = self._get_hostname(hostname, sample, scraper_config)
if sample[self.SAMPLE_NAME].endswith('_sum'):
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
self._submit_distribution_count(scraper_config['send_distribution_sums_as_monotonic'], scraper_config['send_monotonic_with_gauge'], '{}.sum'.format(self._metric_name_with_namespace(metric_name, scraper_config)), val, tags=tags, hostname=custom_hostname, flush_first_value=scraper_config['_successfully_executed'])
elif sample[self.SAMPLE_NAME].endswith('_count'):
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
self._submit_distribution_count(scraper_config['send_distribution_counts_as_monotonic'], scraper_config['send_monotonic_with_gauge'], '{}.count'.format(self._metric_name_with_namespace(metric_name, scraper_config)), val, tags=tags, hostname=custom_hostname, flush_first_value=scraper_config['_successfully_executed'])
else:
try:
quantile = sample[self.SAMPLE_LABELS]['quantile']
except KeyError:
message = '"quantile" label not present in metric %r. Quantile-less summary metrics are not currently supported. Skipping...'
self.log.debug(message, metric_name)
continue
sample[self.SAMPLE_LABELS]['quantile'] = str(float(quantile))
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
self.gauge('{}.quantile'.format(self._metric_name_with_namespace(metric_name, scraper_config)), val, tags=tags, hostname=custom_hostname)
| -5,473,741,916,677,806,000
|
Extracts metrics from a prometheus summary metric and sends them as gauges
|
datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
|
_submit_gauges_from_summary
|
DingGGu/integrations-core
|
python
|
def _submit_gauges_from_summary(self, metric_name, metric, scraper_config, hostname=None):
'\n \n '
for sample in metric.samples:
val = sample[self.SAMPLE_VALUE]
if (not self._is_value_valid(val)):
self.log.debug('Metric value is not supported for metric %s', sample[self.SAMPLE_NAME])
continue
if self._ignore_metrics_by_label(scraper_config, metric_name, sample):
continue
custom_hostname = self._get_hostname(hostname, sample, scraper_config)
if sample[self.SAMPLE_NAME].endswith('_sum'):
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
self._submit_distribution_count(scraper_config['send_distribution_sums_as_monotonic'], scraper_config['send_monotonic_with_gauge'], '{}.sum'.format(self._metric_name_with_namespace(metric_name, scraper_config)), val, tags=tags, hostname=custom_hostname, flush_first_value=scraper_config['_successfully_executed'])
elif sample[self.SAMPLE_NAME].endswith('_count'):
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
self._submit_distribution_count(scraper_config['send_distribution_counts_as_monotonic'], scraper_config['send_monotonic_with_gauge'], '{}.count'.format(self._metric_name_with_namespace(metric_name, scraper_config)), val, tags=tags, hostname=custom_hostname, flush_first_value=scraper_config['_successfully_executed'])
else:
try:
quantile = sample[self.SAMPLE_LABELS]['quantile']
except KeyError:
message = '"quantile" label not present in metric %r. Quantile-less summary metrics are not currently supported. Skipping...'
self.log.debug(message, metric_name)
continue
sample[self.SAMPLE_LABELS]['quantile'] = str(float(quantile))
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
self.gauge('{}.quantile'.format(self._metric_name_with_namespace(metric_name, scraper_config)), val, tags=tags, hostname=custom_hostname)
|
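The quantile normalization above round-trips the raw label through `float`, so differently formatted exposition values collapse to one consistent tag value:

```python
# '0.990000', '.99' and '0.99' in the exposition text all become the tag value '0.99'.
assert str(float('0.990000')) == '0.99'
assert str(float('.99')) == '0.99'
assert str(float('0.5')) == '0.5'
```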
def _submit_gauges_from_histogram(self, metric_name, metric, scraper_config, hostname=None):
'\n Extracts metrics from a prometheus histogram and sends them as gauges\n '
if scraper_config['non_cumulative_buckets']:
self._decumulate_histogram_buckets(metric)
for sample in metric.samples:
val = sample[self.SAMPLE_VALUE]
if (not self._is_value_valid(val)):
self.log.debug('Metric value is not supported for metric %s', sample[self.SAMPLE_NAME])
continue
if self._ignore_metrics_by_label(scraper_config, metric_name, sample):
continue
custom_hostname = self._get_hostname(hostname, sample, scraper_config)
if (sample[self.SAMPLE_NAME].endswith('_sum') and (not scraper_config['send_distribution_buckets'])):
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
self._submit_distribution_count(scraper_config['send_distribution_sums_as_monotonic'], scraper_config['send_monotonic_with_gauge'], '{}.sum'.format(self._metric_name_with_namespace(metric_name, scraper_config)), val, tags=tags, hostname=custom_hostname, flush_first_value=scraper_config['_successfully_executed'])
elif (sample[self.SAMPLE_NAME].endswith('_count') and (not scraper_config['send_distribution_buckets'])):
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
if scraper_config['send_histograms_buckets']:
tags.append('upper_bound:none')
self._submit_distribution_count(scraper_config['send_distribution_counts_as_monotonic'], scraper_config['send_monotonic_with_gauge'], '{}.count'.format(self._metric_name_with_namespace(metric_name, scraper_config)), val, tags=tags, hostname=custom_hostname, flush_first_value=scraper_config['_successfully_executed'])
elif (scraper_config['send_histograms_buckets'] and sample[self.SAMPLE_NAME].endswith('_bucket')):
if scraper_config['send_distribution_buckets']:
self._submit_sample_histogram_buckets(metric_name, sample, scraper_config, hostname)
elif (('Inf' not in sample[self.SAMPLE_LABELS]['le']) or scraper_config['non_cumulative_buckets']):
sample[self.SAMPLE_LABELS]['le'] = str(float(sample[self.SAMPLE_LABELS]['le']))
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
self._submit_distribution_count(scraper_config['send_distribution_counts_as_monotonic'], scraper_config['send_monotonic_with_gauge'], '{}.count'.format(self._metric_name_with_namespace(metric_name, scraper_config)), val, tags=tags, hostname=custom_hostname, flush_first_value=scraper_config['_successfully_executed'])
| -6,556,666,123,089,748,000
|
Extracts metrics from a prometheus histogram and sends them as gauges
|
datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
|
_submit_gauges_from_histogram
|
DingGGu/integrations-core
|
python
|
def _submit_gauges_from_histogram(self, metric_name, metric, scraper_config, hostname=None):
'\n \n '
if scraper_config['non_cumulative_buckets']:
self._decumulate_histogram_buckets(metric)
for sample in metric.samples:
val = sample[self.SAMPLE_VALUE]
if (not self._is_value_valid(val)):
self.log.debug('Metric value is not supported for metric %s', sample[self.SAMPLE_NAME])
continue
if self._ignore_metrics_by_label(scraper_config, metric_name, sample):
continue
custom_hostname = self._get_hostname(hostname, sample, scraper_config)
if (sample[self.SAMPLE_NAME].endswith('_sum') and (not scraper_config['send_distribution_buckets'])):
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
self._submit_distribution_count(scraper_config['send_distribution_sums_as_monotonic'], scraper_config['send_monotonic_with_gauge'], '{}.sum'.format(self._metric_name_with_namespace(metric_name, scraper_config)), val, tags=tags, hostname=custom_hostname, flush_first_value=scraper_config['_successfully_executed'])
elif (sample[self.SAMPLE_NAME].endswith('_count') and (not scraper_config['send_distribution_buckets'])):
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
if scraper_config['send_histograms_buckets']:
tags.append('upper_bound:none')
self._submit_distribution_count(scraper_config['send_distribution_counts_as_monotonic'], scraper_config['send_monotonic_with_gauge'], '{}.count'.format(self._metric_name_with_namespace(metric_name, scraper_config)), val, tags=tags, hostname=custom_hostname, flush_first_value=scraper_config['_successfully_executed'])
elif (scraper_config['send_histograms_buckets'] and sample[self.SAMPLE_NAME].endswith('_bucket')):
if scraper_config['send_distribution_buckets']:
self._submit_sample_histogram_buckets(metric_name, sample, scraper_config, hostname)
elif (('Inf' not in sample[self.SAMPLE_LABELS]['le']) or scraper_config['non_cumulative_buckets']):
sample[self.SAMPLE_LABELS]['le'] = str(float(sample[self.SAMPLE_LABELS]['le']))
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
self._submit_distribution_count(scraper_config['send_distribution_counts_as_monotonic'], scraper_config['send_monotonic_with_gauge'], '{}.count'.format(self._metric_name_with_namespace(metric_name, scraper_config)), val, tags=tags, hostname=custom_hostname, flush_first_value=scraper_config['_successfully_executed'])
|
def _decumulate_histogram_buckets(self, metric):
'\n Decumulate buckets in a given histogram metric and adds the lower_bound label (le being upper_bound)\n '
bucket_values_by_context_upper_bound = {}
for sample in metric.samples:
if sample[self.SAMPLE_NAME].endswith('_bucket'):
context_key = self._compute_bucket_hash(sample[self.SAMPLE_LABELS])
if (context_key not in bucket_values_by_context_upper_bound):
bucket_values_by_context_upper_bound[context_key] = {}
bucket_values_by_context_upper_bound[context_key][float(sample[self.SAMPLE_LABELS]['le'])] = sample[self.SAMPLE_VALUE]
sorted_buckets_by_context = {}
for context in bucket_values_by_context_upper_bound:
sorted_buckets_by_context[context] = sorted(bucket_values_by_context_upper_bound[context])
bucket_tuples_by_context_upper_bound = {}
for context in sorted_buckets_by_context:
for (i, upper_b) in enumerate(sorted_buckets_by_context[context]):
if (i == 0):
if (context not in bucket_tuples_by_context_upper_bound):
bucket_tuples_by_context_upper_bound[context] = {}
if (upper_b > 0):
bucket_tuples_by_context_upper_bound[context][upper_b] = (0, upper_b, bucket_values_by_context_upper_bound[context][upper_b])
else:
bucket_tuples_by_context_upper_bound[context][upper_b] = (self.MINUS_INF, upper_b, bucket_values_by_context_upper_bound[context][upper_b])
continue
tmp = (bucket_values_by_context_upper_bound[context][upper_b] - bucket_values_by_context_upper_bound[context][sorted_buckets_by_context[context][(i - 1)]])
bucket_tuples_by_context_upper_bound[context][upper_b] = (sorted_buckets_by_context[context][(i - 1)], upper_b, tmp)
for (i, sample) in enumerate(metric.samples):
if (not sample[self.SAMPLE_NAME].endswith('_bucket')):
continue
context_key = self._compute_bucket_hash(sample[self.SAMPLE_LABELS])
matching_bucket_tuple = bucket_tuples_by_context_upper_bound[context_key][float(sample[self.SAMPLE_LABELS]['le'])]
sample[self.SAMPLE_LABELS]['lower_bound'] = str(matching_bucket_tuple[0])
metric.samples[i] = Sample(sample[self.SAMPLE_NAME], sample[self.SAMPLE_LABELS], matching_bucket_tuple[2])
| 4,912,369,996,977,096,000
|
Decumulate buckets in a given histogram metric and adds the lower_bound label (le being upper_bound)
|
datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
|
_decumulate_histogram_buckets
|
DingGGu/integrations-core
|
python
|
def _decumulate_histogram_buckets(self, metric):
'\n \n '
bucket_values_by_context_upper_bound = {}
for sample in metric.samples:
if sample[self.SAMPLE_NAME].endswith('_bucket'):
context_key = self._compute_bucket_hash(sample[self.SAMPLE_LABELS])
if (context_key not in bucket_values_by_context_upper_bound):
bucket_values_by_context_upper_bound[context_key] = {}
bucket_values_by_context_upper_bound[context_key][float(sample[self.SAMPLE_LABELS]['le'])] = sample[self.SAMPLE_VALUE]
sorted_buckets_by_context = {}
for context in bucket_values_by_context_upper_bound:
sorted_buckets_by_context[context] = sorted(bucket_values_by_context_upper_bound[context])
bucket_tuples_by_context_upper_bound = {}
for context in sorted_buckets_by_context:
for (i, upper_b) in enumerate(sorted_buckets_by_context[context]):
if (i == 0):
if (context not in bucket_tuples_by_context_upper_bound):
bucket_tuples_by_context_upper_bound[context] = {}
if (upper_b > 0):
bucket_tuples_by_context_upper_bound[context][upper_b] = (0, upper_b, bucket_values_by_context_upper_bound[context][upper_b])
else:
bucket_tuples_by_context_upper_bound[context][upper_b] = (self.MINUS_INF, upper_b, bucket_values_by_context_upper_bound[context][upper_b])
continue
tmp = (bucket_values_by_context_upper_bound[context][upper_b] - bucket_values_by_context_upper_bound[context][sorted_buckets_by_context[context][(i - 1)]])
bucket_tuples_by_context_upper_bound[context][upper_b] = (sorted_buckets_by_context[context][(i - 1)], upper_b, tmp)
for (i, sample) in enumerate(metric.samples):
if (not sample[self.SAMPLE_NAME].endswith('_bucket')):
continue
context_key = self._compute_bucket_hash(sample[self.SAMPLE_LABELS])
matching_bucket_tuple = bucket_tuples_by_context_upper_bound[context_key][float(sample[self.SAMPLE_LABELS]['le'])]
sample[self.SAMPLE_LABELS]['lower_bound'] = str(matching_bucket_tuple[0])
metric.samples[i] = Sample(sample[self.SAMPLE_NAME], sample[self.SAMPLE_LABELS], matching_bucket_tuple[2])
|
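A worked example of the decumulation step in isolation: cumulative bucket counts keyed by upper bound become per-bucket counts paired with an explicit lower bound, matching the `(lower, upper, value)` tuples built above. The numbers are invented.

```python
cumulative = {0.1: 4, 0.5: 10, float('inf'): 12}
bounds = sorted(cumulative)
decumulated = {}
for i, upper_b in enumerate(bounds):
    if i == 0:
        lower_b = 0 if upper_b > 0 else float('-inf')
        count = cumulative[upper_b]
    else:
        lower_b = bounds[i - 1]
        count = cumulative[upper_b] - cumulative[bounds[i - 1]]
    decumulated[(lower_b, upper_b)] = count

assert decumulated == {(0, 0.1): 4, (0.1, 0.5): 6, (0.5, float('inf')): 2}
```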
def CreateNewSimulator(device_type=None, os_version=None, name_prefix=None):
'Creates a new simulator according to arguments.\n\n If neither device_type nor os_version is given, will use the latest iOS\n version and latest iPhone type.\n If os_version is given but device_type is not, will use latest iPhone type\n according to the OS version limitation. E.g., if the given os_version is 9.3,\n the latest simulator type is iPhone 6s Plus. Because the min OS version of\n iPhone 7 is 10.0.\n If device_type is given but os_version is not, will use the min value\n between max OS version of the simulator type and current latest OS version.\n E.g., if the given device_type is iPhone 5 and latest OS version is 10.3,\n will use 10.2. Because the max OS version of iPhone 5 is 10.2.\n\n Args:\n device_type: string, device type of the new simulator. The value corresponds\n to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad\n Air, etc.\n os_version: string, OS version of the new simulator. The format is\n {major}.{minor}, such as 9.3, 10.2.\n name_prefix: string, name prefix of the new simulator. By default, it is\n "New".\n\n Returns:\n a tuple with four items:\n string, id of the new simulator.\n string, simulator device type of the new simulator.\n string, OS version of the new simulator.\n string, name of the new simulator.\n\n Raises:\n ios_errors.SimError: when failed to create new simulator.\n ios_errors.IllegalArgumentError: when the given argument is invalid.\n '
if (not device_type):
os_type = ios_constants.OS.IOS
else:
_ValidateSimulatorType(device_type)
os_type = GetOsType(device_type)
if (not os_version):
os_version = GetLastSupportedSimOsVersion(os_type, device_type=device_type)
else:
supported_sim_os_versions = GetSupportedSimOsVersions(os_type)
if (os_version not in supported_sim_os_versions):
raise ios_errors.IllegalArgumentError(('The simulator os version %s is not supported. Supported simulator os versions are %s.' % (os_version, supported_sim_os_versions)))
if (not device_type):
device_type = GetLastSupportedIphoneSimType(os_version)
else:
_ValidateSimulatorTypeWithOsVersion(device_type, os_version)
if (not name_prefix):
name_prefix = 'New'
name = ('%s-%s-%s' % (name_prefix, device_type, os_version))
runtime_id = (((_PREFIX_RUNTIME_ID + os_type) + '-') + os_version.replace('.', '-'))
logging.info('Creating a new simulator:\nName: %s\nOS: %s %s\nType: %s', name, os_type, os_version, device_type)
for i in range(0, _SIM_OPERATION_MAX_ATTEMPTS):
try:
new_simulator_id = RunSimctlCommand(['xcrun', 'simctl', 'create', name, device_type, runtime_id])
except ios_errors.SimError as e:
raise ios_errors.SimError(('Failed to create simulator: %s' % str(e)))
new_simulator_obj = Simulator(new_simulator_id)
try:
new_simulator_obj.WaitUntilStateShutdown(_SIMULATOR_CREATING_TO_SHUTDOWN_TIMEOUT_SEC)
logging.info('Created new simulator %s.', new_simulator_id)
return (new_simulator_id, device_type, os_version, name)
except ios_errors.SimError as error:
logging.debug('Failed to create simulator %s: %s.', new_simulator_id, error)
      new_simulator_obj.Delete()
      logging.debug('Deleted half-created simulator %s.', new_simulator_id)
if (i != (_SIM_OPERATION_MAX_ATTEMPTS - 1)):
logging.debug('Will sleep %ss and retry again.', _SIM_ERROR_RETRY_INTERVAL_SEC)
time.sleep(_SIM_ERROR_RETRY_INTERVAL_SEC)
raise ios_errors.SimError(('Failed to create simulator in %d attempts.' % _SIM_OPERATION_MAX_ATTEMPTS))
| -5,960,860,370,713,791,000
|
Creates a new simulator according to arguments.
If neither device_type nor os_version is given, will use the latest iOS
version and latest iPhone type.
If os_version is given but device_type is not, will use latest iPhone type
according to the OS version limitation. E.g., if the given os_version is 9.3,
the latest simulator type is iPhone 6s Plus. Because the min OS version of
iPhone 7 is 10.0.
If device_type is given but os_version is not, will use the min value
between max OS version of the simulator type and current latest OS version.
E.g., if the given device_type is iPhone 5 and latest OS version is 10.3,
will use 10.2. Because the max OS version of iPhone 5 is 10.2.
Args:
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
os_version: string, OS version of the new simulator. The format is
{major}.{minor}, such as 9.3, 10.2.
name_prefix: string, name prefix of the new simulator. By default, it is
"New".
Returns:
a tuple with four items:
string, id of the new simulator.
string, simulator device type of the new simulator.
string, OS version of the new simulator.
string, name of the new simulator.
Raises:
ios_errors.SimError: when failed to create new simulator.
ios_errors.IllegalArgumentError: when the given argument is invalid.
|
simulator_control/simulator_util.py
|
CreateNewSimulator
|
ios-bazel-users/xctestrunner
|
python
|
def CreateNewSimulator(device_type=None, os_version=None, name_prefix=None):
'Creates a new simulator according to arguments.\n\n If neither device_type nor os_version is given, will use the latest iOS\n version and latest iPhone type.\n If os_version is given but device_type is not, will use latest iPhone type\n according to the OS version limitation. E.g., if the given os_version is 9.3,\n the latest simulator type is iPhone 6s Plus. Because the min OS version of\n iPhone 7 is 10.0.\n If device_type is given but os_version is not, will use the min value\n between max OS version of the simulator type and current latest OS version.\n E.g., if the given device_type is iPhone 5 and latest OS version is 10.3,\n will use 10.2. Because the max OS version of iPhone 5 is 10.2.\n\n Args:\n device_type: string, device type of the new simulator. The value corresponds\n to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad\n Air, etc.\n os_version: string, OS version of the new simulator. The format is\n {major}.{minor}, such as 9.3, 10.2.\n name_prefix: string, name prefix of the new simulator. By default, it is\n "New".\n\n Returns:\n a tuple with four items:\n string, id of the new simulator.\n string, simulator device type of the new simulator.\n string, OS version of the new simulator.\n string, name of the new simulator.\n\n Raises:\n ios_errors.SimError: when failed to create new simulator.\n ios_errors.IllegalArgumentError: when the given argument is invalid.\n '
if (not device_type):
os_type = ios_constants.OS.IOS
else:
_ValidateSimulatorType(device_type)
os_type = GetOsType(device_type)
if (not os_version):
os_version = GetLastSupportedSimOsVersion(os_type, device_type=device_type)
else:
supported_sim_os_versions = GetSupportedSimOsVersions(os_type)
if (os_version not in supported_sim_os_versions):
raise ios_errors.IllegalArgumentError(('The simulator os version %s is not supported. Supported simulator os versions are %s.' % (os_version, supported_sim_os_versions)))
if (not device_type):
device_type = GetLastSupportedIphoneSimType(os_version)
else:
_ValidateSimulatorTypeWithOsVersion(device_type, os_version)
if (not name_prefix):
name_prefix = 'New'
name = ('%s-%s-%s' % (name_prefix, device_type, os_version))
runtime_id = (((_PREFIX_RUNTIME_ID + os_type) + '-') + os_version.replace('.', '-'))
logging.info('Creating a new simulator:\nName: %s\nOS: %s %s\nType: %s', name, os_type, os_version, device_type)
for i in range(0, _SIM_OPERATION_MAX_ATTEMPTS):
try:
new_simulator_id = RunSimctlCommand(['xcrun', 'simctl', 'create', name, device_type, runtime_id])
except ios_errors.SimError as e:
raise ios_errors.SimError(('Failed to create simulator: %s' % str(e)))
new_simulator_obj = Simulator(new_simulator_id)
try:
new_simulator_obj.WaitUntilStateShutdown(_SIMULATOR_CREATING_TO_SHUTDOWN_TIMEOUT_SEC)
logging.info('Created new simulator %s.', new_simulator_id)
return (new_simulator_id, device_type, os_version, name)
except ios_errors.SimError as error:
logging.debug('Failed to create simulator %s: %s.', new_simulator_id, error)
      new_simulator_obj.Delete()
      logging.debug('Deleted half-created simulator %s.', new_simulator_id)
if (i != (_SIM_OPERATION_MAX_ATTEMPTS - 1)):
logging.debug('Will sleep %ss and retry again.', _SIM_ERROR_RETRY_INTERVAL_SEC)
time.sleep(_SIM_ERROR_RETRY_INTERVAL_SEC)
raise ios_errors.SimError(('Failed to create simulator in %d attempts.' % _SIM_OPERATION_MAX_ATTEMPTS))
|
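A hypothetical call-site sketch for CreateNewSimulator above. The import paths (simulator_util, shared.ios_errors), the argument values, and the error handling are assumptions for illustration, not part of the record.

import simulator_util          # assumed import path for the file above
from shared import ios_errors  # assumed package layout

try:
    # No arguments: latest iPhone type on the latest installed iOS runtime.
    sim_id, device_type, os_version, name = simulator_util.CreateNewSimulator()
    # Pinned OS version: picks the newest iPhone type whose min OS allows 9.3.
    sim_id, device_type, os_version, name = simulator_util.CreateNewSimulator(
        os_version='9.3', name_prefix='CI')
except ios_errors.IllegalArgumentError as e:
    print('Invalid device type / OS version combination: %s' % e)
except ios_errors.SimError as e:
    print('Creation failed after retries: %s' % e)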
def GetSupportedSimDeviceTypes(os_type=None):
'Gets the name list of supported simulator device types of the given OS type.\n\n If os_type is not provided, it will return all supported simulator device\n types. The names come from the output of `xcrun simctl list devicetypes`.\n So some simulator device types\' names may differ across Xcode versions.\n E.g., the name of iPad Pro (12.9-inch) in Xcode 7.2.1 is "iPad Pro", but it is\n "iPad Pro (12.9-inch)" in Xcode 8+.\n\n Args:\n os_type: shared.ios_constants.OS, OS type of simulator, such as iOS,\n watchOS, tvOS.\n\n Returns:\n a list of string, each item is a simulator device type.\n E.g., ["iPhone 5", "iPhone 6 Plus"]\n '
sim_types_infos_json = json.loads(RunSimctlCommand(('xcrun', 'simctl', 'list', 'devicetypes', '-j')))
sim_types = []
for sim_types_info in sim_types_infos_json['devicetypes']:
sim_type = sim_types_info['name']
if ((os_type is None) or ((os_type == ios_constants.OS.IOS) and sim_type.startswith('i')) or ((os_type == ios_constants.OS.TVOS) and ('TV' in sim_type)) or ((os_type == ios_constants.OS.WATCHOS) and ('Watch' in sim_type))):
sim_types.append(sim_type)
return sim_types
| 1,543,619,953,991,501,300
|
Gets the name list of supported simulator device types of the given OS type.
If os_type is not provided, it will return all supported simulator device
types. The names come from the output of `xcrun simctl list devicetypes`.
So some simulator device types' names may differ across Xcode versions.
E.g., the name of iPad Pro (12.9-inch) in Xcode 7.2.1 is "iPad Pro", but it is
"iPad Pro (12.9-inch)" in Xcode 8+.
Args:
os_type: shared.ios_constants.OS, OS type of simulator, such as iOS,
watchOS, tvOS.
Returns:
a list of string, each item is a simulator device type.
E.g., ["iPhone 5", "iPhone 6 Plus"]
|
simulator_control/simulator_util.py
|
GetSupportedSimDeviceTypes
|
ios-bazel-users/xctestrunner
|
python
|
def GetSupportedSimDeviceTypes(os_type=None):
'Gets the name list of supported simulator device types of the given OS type.\n\n If os_type is not provided, it will return all supported simulator device\n types. The names come from the output of `xcrun simctl list devicetypes`.\n So some simulator device types\' names may differ across Xcode versions.\n E.g., the name of iPad Pro (12.9-inch) in Xcode 7.2.1 is "iPad Pro", but it is\n "iPad Pro (12.9-inch)" in Xcode 8+.\n\n Args:\n os_type: shared.ios_constants.OS, OS type of simulator, such as iOS,\n watchOS, tvOS.\n\n Returns:\n a list of string, each item is a simulator device type.\n E.g., ["iPhone 5", "iPhone 6 Plus"]\n '
sim_types_infos_json = json.loads(RunSimctlCommand(('xcrun', 'simctl', 'list', 'devicetypes', '-j')))
sim_types = []
for sim_types_info in sim_types_infos_json['devicetypes']:
sim_type = sim_types_info['name']
if ((os_type is None) or ((os_type == ios_constants.OS.IOS) and sim_type.startswith('i')) or ((os_type == ios_constants.OS.TVOS) and ('TV' in sim_type)) or ((os_type == ios_constants.OS.WATCHOS) and ('Watch' in sim_type))):
sim_types.append(sim_type)
return sim_types
|
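The name-prefix filtering in GetSupportedSimDeviceTypes can be exercised without a macOS host; a self-contained sketch over an invented sample of the `xcrun simctl list devicetypes -j` JSON:

import json

# Invented sample mimicking `xcrun simctl list devicetypes -j` output.
sample = json.loads('''
{"devicetypes": [
  {"name": "iPhone 8"},
  {"name": "iPad Air"},
  {"name": "Apple TV 4K"},
  {"name": "Apple Watch Series 4 - 44mm"}
]}''')

def filter_names(os_type):
    # Same heuristic as the record above: iOS names start with "i",
    # tvOS names contain "TV", watchOS names contain "Watch".
    names = [d['name'] for d in sample['devicetypes']]
    if os_type == 'iOS':
        return [n for n in names if n.startswith('i')]
    if os_type == 'tvOS':
        return [n for n in names if 'TV' in n]
    if os_type == 'watchOS':
        return [n for n in names if 'Watch' in n]
    return names

print(filter_names('iOS'))   # ['iPhone 8', 'iPad Air']
print(filter_names('tvOS'))  # ['Apple TV 4K']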
def GetLastSupportedIphoneSimType(os_version):
'"Gets the last supported iPhone simulator type of the given OS version.\n\n Currently, the last supported iPhone simulator type is the last iPhone from\n the output of `xcrun simctl list devicetypes`.\n\n Args:\n os_version: string, OS version of the new simulator. The format is\n {major}.{minor}, such as 9.3, 10.2.\n\n Returns:\n a string, the last supported iPhone simulator type.\n\n Raises:\n ios_errors.SimError: when there is no supported iPhone simulator type.\n '
supported_sim_types = GetSupportedSimDeviceTypes(ios_constants.OS.IOS)
supported_sim_types.reverse()
os_version_float = float(os_version)
for sim_type in supported_sim_types:
if sim_type.startswith('iPhone'):
min_os_version_float = float(simtype_profile.SimTypeProfile(sim_type).min_os_version)
if (os_version_float >= min_os_version_float):
return sim_type
raise ios_errors.SimError('Can not find supported iPhone simulator type.')
| 6,094,569,838,771,017,000
|
"Gets the last supported iPhone simulator type of the given OS version.
Currently, the last supported iPhone simulator type is the last iPhone from
the output of `xcrun simctl list devicetypes`.
Args:
os_version: string, OS version of the new simulator. The format is
{major}.{minor}, such as 9.3, 10.2.
Returns:
a string, the last supported iPhone simulator type.
Raises:
ios_errors.SimError: when there is no supported iPhone simulator type.
|
simulator_control/simulator_util.py
|
GetLastSupportedIphoneSimType
|
ios-bazel-users/xctestrunner
|
python
|
def GetLastSupportedIphoneSimType(os_version):
'"Gets the last supported iPhone simulator type of the given OS version.\n\n Currently, the last supported iPhone simulator type is the last iPhone from\n the output of `xcrun simctl list devicetypes`.\n\n Args:\n os_version: string, OS version of the new simulator. The format is\n {major}.{minor}, such as 9.3, 10.2.\n\n Returns:\n a string, the last supported iPhone simulator type.\n\n Raises:\n ios_errors.SimError: when there is no supported iPhone simulator type.\n '
supported_sim_types = GetSupportedSimDeviceTypes(ios_constants.OS.IOS)
supported_sim_types.reverse()
os_version_float = float(os_version)
for sim_type in supported_sim_types:
if sim_type.startswith('iPhone'):
min_os_version_float = float(simtype_profile.SimTypeProfile(sim_type).min_os_version)
if (os_version_float >= min_os_version_float):
return sim_type
raise ios_errors.SimError('Can not find supported iPhone simulator type.')
|
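A standalone sketch of the reverse scan in GetLastSupportedIphoneSimType; the min-OS numbers below are invented stand-ins for simtype_profile.SimTypeProfile data:

# Invented min OS versions standing in for simtype_profile data.
MIN_OS = {'iPhone 5': 6.0, 'iPhone 6s Plus': 9.0, 'iPhone 7': 10.0}
# `xcrun simctl list devicetypes` lists oldest types first, so reverse
# to try the newest type first, as the record above does.
ordered_types = ['iPhone 5', 'iPhone 6s Plus', 'iPhone 7']

def last_supported_iphone(os_version):
    for sim_type in reversed(ordered_types):
        if float(os_version) >= MIN_OS[sim_type]:
            return sim_type
    raise ValueError('No supported iPhone simulator type.')

print(last_supported_iphone('9.3'))   # iPhone 6s Plus (iPhone 7 needs 10.0)
print(last_supported_iphone('10.2'))  # iPhone 7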
def GetSupportedSimOsVersions(os_type=ios_constants.OS.IOS):
'Gets the supported versions of the given simulator OS type.\n\n Args:\n os_type: shared.ios_constants.OS, OS type of simulator, such as iOS,\n watchOS, tvOS.\n\n Returns:\n a list of string, each item is an OS version number. E.g., ["10.1", "11.0"]\n '
if (os_type is None):
os_type = ios_constants.OS.IOS
xcode_version_num = xcode_info_util.GetXcodeVersionNumber()
sim_runtime_infos_json = json.loads(RunSimctlCommand(('xcrun', 'simctl', 'list', 'runtimes', '-j')))
sim_versions = []
for sim_runtime_info in sim_runtime_infos_json['runtimes']:
if (('availability' in sim_runtime_info) and (sim_runtime_info['availability'].find('unavailable') >= 0)):
continue
elif (('isAvailable' in sim_runtime_info) and (not sim_runtime_info['isAvailable'])):
continue
(listed_os_type, listed_os_version) = sim_runtime_info['name'].split(' ', 1)
if (listed_os_type == os_type):
if ('bundlePath' in sim_runtime_info):
runtime_path = sim_runtime_info['bundlePath']
info_plist_object = plist_util.Plist(os.path.join(runtime_path, 'Contents/Info.plist'))
min_xcode_version_num = int(info_plist_object.GetPlistField('DTXcode'))
if (xcode_version_num >= min_xcode_version_num):
sim_versions.append(listed_os_version)
else:
if (os_type == ios_constants.OS.IOS):
(ios_major_version, ios_minor_version) = listed_os_version.split('.', 1)
ios_minor_version = ios_minor_version[0]
ios_version_num = ((int(ios_major_version) * 100) + (int(ios_minor_version) * 10))
if (ios_version_num > (xcode_version_num + 200)):
continue
sim_versions.append(listed_os_version)
return sim_versions
| 2,632,829,651,962,133,500
|
Gets the supported versions of the given simulator OS type.
Args:
os_type: shared.ios_constants.OS, OS type of simulator, such as iOS,
watchOS, tvOS.
Returns:
a list of string, each item is an OS version number. E.g., ["10.1", "11.0"]
|
simulator_control/simulator_util.py
|
GetSupportedSimOsVersions
|
ios-bazel-users/xctestrunner
|
python
|
def GetSupportedSimOsVersions(os_type=ios_constants.OS.IOS):
'Gets the supported versions of the given simulator OS type.\n\n Args:\n os_type: shared.ios_constants.OS, OS type of simulator, such as iOS,\n watchOS, tvOS.\n\n Returns:\n a list of string, each item is an OS version number. E.g., ["10.1", "11.0"]\n '
if (os_type is None):
os_type = ios_constants.OS.IOS
xcode_version_num = xcode_info_util.GetXcodeVersionNumber()
sim_runtime_infos_json = json.loads(RunSimctlCommand(('xcrun', 'simctl', 'list', 'runtimes', '-j')))
sim_versions = []
for sim_runtime_info in sim_runtime_infos_json['runtimes']:
if (('availability' in sim_runtime_info) and (sim_runtime_info['availability'].find('unavailable') >= 0)):
continue
elif (('isAvailable' in sim_runtime_info) and (not sim_runtime_info['isAvailable'])):
continue
(listed_os_type, listed_os_version) = sim_runtime_info['name'].split(' ', 1)
if (listed_os_type == os_type):
if ('bundlePath' in sim_runtime_info):
runtime_path = sim_runtime_info['bundlePath']
info_plist_object = plist_util.Plist(os.path.join(runtime_path, 'Contents/Info.plist'))
min_xcode_version_num = int(info_plist_object.GetPlistField('DTXcode'))
if (xcode_version_num >= min_xcode_version_num):
sim_versions.append(listed_os_version)
else:
if (os_type == ios_constants.OS.IOS):
(ios_major_version, ios_minor_version) = listed_os_version.split('.', 1)
ios_minor_version = ios_minor_version[0]
ios_version_num = ((int(ios_major_version) * 100) + (int(ios_minor_version) * 10))
if (ios_version_num > (xcode_version_num + 200)):
continue
sim_versions.append(listed_os_version)
return sim_versions
|
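The fallback branch of GetSupportedSimOsVersions (taken when a runtime has no bundlePath) encodes versions as major*100 + minor*10 and skips iOS runtimes whose number exceeds the Xcode number by more than 200, i.e., more than two major versions. A worked example, assuming GetXcodeVersionNumber uses the same encoding:

def encode(major, minor):
    # Same encoding the record above applies to the first minor digit.
    return major * 100 + minor * 10

xcode_version_num = encode(9, 4)   # Xcode 9.4 -> 940 (assumed encoding)
for major, minor in [(10, 2), (11, 0), (12, 1)]:
    ios_version_num = encode(major, minor)
    skipped = ios_version_num > xcode_version_num + 200
    print('iOS %d.%d -> %d, skipped=%s' % (major, minor, ios_version_num, skipped))
# iOS 10.2 -> 1020, skipped=False
# iOS 11.0 -> 1100, skipped=False
# iOS 12.1 -> 1210, skipped=True  (more than two majors ahead of Xcode 9.4)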
def GetLastSupportedSimOsVersion(os_type=ios_constants.OS.IOS, device_type=None):
'Gets the last supported version of given arguments.\n\n If device_type is given, will return the last supported OS version of the\n device type. Otherwise, will return the last supported OS version of the\n OS type.\n\n Args:\n os_type: shared.ios_constants.OS, OS type of simulator, such as iOS,\n watchOS, tvOS.\n device_type: string, device type of the new simulator. The value corresponds\n to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad\n Air, etc.\n\n Returns:\n a string, the last supported version.\n\n Raises:\n ios_errors.SimError: when there is no supported OS version of the given OS.\n ios_errors.IllegalArgumentError: when the supported OS version can not match\n the given simulator type.\n '
supported_os_versions = GetSupportedSimOsVersions(os_type)
if (not supported_os_versions):
raise ios_errors.SimError(('Can not find supported OS version of %s.' % os_type))
if (not device_type):
return supported_os_versions[(- 1)]
simtype_max_os_version_float = float(simtype_profile.SimTypeProfile(device_type).max_os_version)
supported_os_versions.reverse()
for os_version in supported_os_versions:
if (float(os_version) <= simtype_max_os_version_float):
return os_version
  raise ios_errors.IllegalArgumentError(('The supported OS versions %s can not match simulator type %s, because its max OS version is %s.' % (supported_os_versions, device_type, simtype_max_os_version_float)))
| 6,682,635,599,485,531,000
|
Gets the last supported version of given arguments.
If device_type is given, will return the last supported OS version of the
device type. Otherwise, will return the last supported OS version of the
OS type.
Args:
os_type: shared.ios_constants.OS, OS type of simulator, such as iOS,
watchOS, tvOS.
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
Returns:
a string, the last supported version.
Raises:
ios_errors.SimError: when there is no supported OS version of the given OS.
ios_errors.IllegalArgumentError: when the supported OS version can not match
the given simulator type.
|
simulator_control/simulator_util.py
|
GetLastSupportedSimOsVersion
|
ios-bazel-users/xctestrunner
|
python
|
def GetLastSupportedSimOsVersion(os_type=ios_constants.OS.IOS, device_type=None):
'Gets the last supported version of given arguments.\n\n If device_type is given, will return the last supported OS version of the\n device type. Otherwise, will return the last supported OS version of the\n OS type.\n\n Args:\n os_type: shared.ios_constants.OS, OS type of simulator, such as iOS,\n watchOS, tvOS.\n device_type: string, device type of the new simulator. The value corresponds\n to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad\n Air, etc.\n\n Returns:\n a string, the last supported version.\n\n Raises:\n ios_errors.SimError: when there is no supported OS version of the given OS.\n ios_errors.IllegalArgumentError: when the supported OS version can not match\n the given simulator type.\n '
supported_os_versions = GetSupportedSimOsVersions(os_type)
if (not supported_os_versions):
raise ios_errors.SimError(('Can not find supported OS version of %s.' % os_type))
if (not device_type):
return supported_os_versions[(- 1)]
simtype_max_os_version_float = float(simtype_profile.SimTypeProfile(device_type).max_os_version)
supported_os_versions.reverse()
for os_version in supported_os_versions:
if (float(os_version) <= simtype_max_os_version_float):
return os_version
  raise ios_errors.IllegalArgumentError(('The supported OS versions %s can not match simulator type %s, because its max OS version is %s.' % (supported_os_versions, device_type, simtype_max_os_version_float)))
|
def GetOsType(device_type):
'Gets the OS type of the given simulator.\n\n This method may not work correctly if the device_type is invalid. Please call\n simulator_util.ValidateSimulatorType(device_type, os_version) to validate\n it first.\n\n Args:\n device_type: string, device type of the new simulator. The value corresponds\n to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad\n Air, etc.\n\n Returns:\n shared.ios_constants.OS.\n\n Raises:\n ios_errors.IllegalArgumentError: when the OS type of the given simulator\n device type can not be recognized.\n '
if device_type.startswith('i'):
return ios_constants.OS.IOS
if ('TV' in device_type):
return ios_constants.OS.TVOS
if ('Watch' in device_type):
return ios_constants.OS.WATCHOS
raise ios_errors.IllegalArgumentError(('Failed to recognize the os type for simulator device type %s.' % device_type))
| 6,833,521,821,800,253,000
|
Gets the OS type of the given simulator.
This method may not work correctly if the device_type is invalid. Please call
simulator_util.ValidateSimulatorType(device_type, os_version) to validate
it first.
Args:
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
Returns:
shared.ios_constants.OS.
Raises:
ios_errors.IllegalArgumentError: when the OS type of the given simulator
device type can not be recognized.
|
simulator_control/simulator_util.py
|
GetOsType
|
ios-bazel-users/xctestrunner
|
python
|
def GetOsType(device_type):
'Gets the OS type of the given simulator.\n\n This method may not work correctly if the device_type is invalid. Please call\n simulator_util.ValidateSimulatorType(device_type, os_version) to validate\n it first.\n\n Args:\n device_type: string, device type of the new simulator. The value corresponds\n to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad\n Air, etc.\n\n Returns:\n shared.ios_constants.OS.\n\n Raises:\n ios_errors.IllegalArgumentError: when the OS type of the given simulator\n device type can not be recognized.\n '
if device_type.startswith('i'):
return ios_constants.OS.IOS
if ('TV' in device_type):
return ios_constants.OS.TVOS
if ('Watch' in device_type):
return ios_constants.OS.WATCHOS
raise ios_errors.IllegalArgumentError(('Failed to recognize the os type for simulator device type %s.' % device_type))
|
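The prefix heuristic in GetOsType is easy to sanity-check standalone; a minimal restatement with string constants in place of ios_constants:

def os_type_of(device_type):
    # Mirror of the heuristic in the record above.
    if device_type.startswith('i'):
        return 'iOS'
    if 'TV' in device_type:
        return 'tvOS'
    if 'Watch' in device_type:
        return 'watchOS'
    raise ValueError('Unrecognized device type: %s' % device_type)

assert os_type_of('iPhone 6') == 'iOS'
assert os_type_of('iPad Air') == 'iOS'          # the "i" prefix covers iPads too
assert os_type_of('Apple TV 4K') == 'tvOS'
assert os_type_of('Apple Watch Series 4 - 44mm') == 'watchOS'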
def _ValidateSimulatorType(device_type):
'Checks if the simulator type is valid.\n\n Args:\n device_type: string, device type of the new simulator. The value corresponds\n to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad\n Air, etc.\n\n Raises:\n ios_errors.IllegalArgumentError: when the given simulator device type is\n invalid.\n '
supported_sim_device_types = GetSupportedSimDeviceTypes()
if (device_type not in supported_sim_device_types):
raise ios_errors.IllegalArgumentError(('The simulator device type %s is not supported. Supported simulator device types are %s.' % (device_type, supported_sim_device_types)))
| -6,484,406,973,542,943,000
|
Checks if the simulator type is valid.
Args:
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
Raises:
ios_errors.IllegalArgumentError: when the given simulator device type is
invalid.
|
simulator_control/simulator_util.py
|
_ValidateSimulatorType
|
ios-bazel-users/xctestrunner
|
python
|
def _ValidateSimulatorType(device_type):
'Checks if the simulator type is valid.\n\n Args:\n device_type: string, device type of the new simulator. The value corresponds\n to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad\n Air, etc.\n\n Raises:\n ios_errors.IllegalArgumentError: when the given simulator device type is\n invalid.\n '
supported_sim_device_types = GetSupportedSimDeviceTypes()
if (device_type not in supported_sim_device_types):
raise ios_errors.IllegalArgumentError(('The simulator device type %s is not supported. Supported simulator device types are %s.' % (device_type, supported_sim_device_types)))
|
def _ValidateSimulatorTypeWithOsVersion(device_type, os_version):
'Checks if the simulator type with the given os version is valid.\n\n Args:\n device_type: string, device type of the new simulator. The value corresponds\n to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad\n Air, etc.\n os_version: string, OS version of the new simulator. The format is\n {major}.{minor}, such as 9.3, 10.2.\n\n Raises:\n ios_errors.IllegalArgumentError: when the given simulator device type can\n not match the given OS version.\n '
os_version_float = float(os_version)
sim_profile = simtype_profile.SimTypeProfile(device_type)
min_os_version_float = float(sim_profile.min_os_version)
if (min_os_version_float > os_version_float):
raise ios_errors.IllegalArgumentError(('The min OS version of %s is %s. But current OS version is %s' % (device_type, min_os_version_float, os_version)))
max_os_version_float = float(sim_profile.max_os_version)
if (max_os_version_float < os_version_float):
raise ios_errors.IllegalArgumentError(('The max OS version of %s is %s. But current OS version is %s' % (device_type, max_os_version_float, os_version)))
| -4,512,402,697,419,481,600
|
Checks if the simulator type with the given os version is valid.
Args:
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
os_version: string, OS version of the new simulator. The format is
{major}.{minor}, such as 9.3, 10.2.
Raises:
ios_errors.IllegalArgumentError: when the given simulator device type can
not match the given OS version.
|
simulator_control/simulator_util.py
|
_ValidateSimulatorTypeWithOsVersion
|
ios-bazel-users/xctestrunner
|
python
|
def _ValidateSimulatorTypeWithOsVersion(device_type, os_version):
'Checks if the simulator type with the given os version is valid.\n\n Args:\n device_type: string, device type of the new simulator. The value corresponds\n to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad\n Air, etc.\n os_version: string, OS version of the new simulator. The format is\n {major}.{minor}, such as 9.3, 10.2.\n\n Raises:\n ios_errors.IllegalArgumentError: when the given simulator device type can\n not match the given OS version.\n '
os_version_float = float(os_version)
sim_profile = simtype_profile.SimTypeProfile(device_type)
min_os_version_float = float(sim_profile.min_os_version)
if (min_os_version_float > os_version_float):
raise ios_errors.IllegalArgumentError(('The min OS version of %s is %s. But current OS version is %s' % (device_type, min_os_version_float, os_version)))
max_os_version_float = float(sim_profile.max_os_version)
if (max_os_version_float < os_version_float):
raise ios_errors.IllegalArgumentError(('The max OS version of %s is %s. But current OS version is %s' % (device_type, max_os_version_float, os_version)))
|
def QuitSimulatorApp():
'Quits the Simulator.app.'
if (xcode_info_util.GetXcodeVersionNumber() >= 700):
simulator_name = 'Simulator'
else:
simulator_name = 'iOS Simulator'
subprocess.Popen(['killall', simulator_name], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
| 1,200,886,455,530,469,000
|
Quits the Simulator.app.
|
simulator_control/simulator_util.py
|
QuitSimulatorApp
|
ios-bazel-users/xctestrunner
|
python
|
def QuitSimulatorApp():
if (xcode_info_util.GetXcodeVersionNumber() >= 700):
simulator_name = 'Simulator'
else:
simulator_name = 'iOS Simulator'
subprocess.Popen(['killall', simulator_name], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
def IsAppFailedToLaunchOnSim(sim_sys_log, app_bundle_id=''):
"Checks if the app failed to launch on simulator.\n\n If app_bundle_id is not provided, will check if any UIKitApplication failed\n to launch on simulator.\n\n Args:\n sim_sys_log: string, the content of the simulator's system.log.\n app_bundle_id: string, the bundle id of the app.\n\n Returns:\n True if the app failed to launch on simulator.\n "
pattern = re.compile((_PATTERN_APP_CRASH_ON_SIM % app_bundle_id))
return (pattern.search(sim_sys_log) is not None)
| -2,801,166,854,162,089,000
|
Checks if the app failed to launch on simulator.
If app_bundle_id is not provided, will check if any UIKitApplication failed
to launch on simulator.
Args:
sim_sys_log: string, the content of the simulator's system.log.
app_bundle_id: string, the bundle id of the app.
Returns:
True if the app failed to launch on simulator.
|
simulator_control/simulator_util.py
|
IsAppFailedToLaunchOnSim
|
ios-bazel-users/xctestrunner
|
python
|
def IsAppFailedToLaunchOnSim(sim_sys_log, app_bundle_id=''):
"Checks if the app failed to launch on simulator.\n\n If app_bundle_id is not provided, will check if any UIKitApplication failed\n to launch on simulator.\n\n Args:\n sim_sys_log: string, the content of the simulator's system.log.\n app_bundle_id: string, the bundle id of the app.\n\n Returns:\n True if the app failed to launch on simulator.\n "
pattern = re.compile((_PATTERN_APP_CRASH_ON_SIM % app_bundle_id))
return (pattern.search(sim_sys_log) is not None)
|
def IsXctestFailedToLaunchOnSim(sim_sys_log):
"Checks if the xctest process failed to launch on simulator.\n\n Args:\n sim_sys_log: string, the content of the simulator's system.log.\n\n Returns:\n True if the xctest process failed to launch on simulator.\n "
pattern = re.compile(_PATTERN_XCTEST_PROCESS_CRASH_ON_SIM)
return (pattern.search(sim_sys_log) is not None)
| -1,712,033,317,035,671,000
|
Checks if the xctest process failed to launch on simulator.
Args:
sim_sys_log: string, the content of the simulator's system.log.
Returns:
True if the xctest process failed to launch on simulator.
|
simulator_control/simulator_util.py
|
IsXctestFailedToLaunchOnSim
|
ios-bazel-users/xctestrunner
|
python
|
def IsXctestFailedToLaunchOnSim(sim_sys_log):
"Checks if the xctest process failed to launch on simulator.\n\n Args:\n sim_sys_log: string, the content of the simulator's system.log.\n\n Returns:\n True if the xctest process failed to launch on simulator.\n "
pattern = re.compile(_PATTERN_XCTEST_PROCESS_CRASH_ON_SIM)
return (pattern.search(sim_sys_log) is not None)
|
def IsCoreSimulatorCrash(sim_sys_log):
"Checks if CoreSimulator crashes.\n\n Args:\n sim_sys_log: string, the content of the simulator's system.log.\n\n Returns:\n True if the CoreSimulator crashes.\n "
pattern = re.compile(_PATTERN_CORESIMULATOR_CRASH)
return (pattern.search(sim_sys_log) is not None)
| 7,590,567,797,334,453,000
|
Checks if CoreSimulator crashes.
Args:
sim_sys_log: string, the content of the simulator's system.log.
Returns:
True if the CoreSimulator crashes.
|
simulator_control/simulator_util.py
|
IsCoreSimulatorCrash
|
ios-bazel-users/xctestrunner
|
python
|
def IsCoreSimulatorCrash(sim_sys_log):
"Checks if CoreSimulator crashes.\n\n Args:\n sim_sys_log: string, the content of the simulator's system.log.\n\n Returns:\n True if the CoreSimulator crashes.\n "
pattern = re.compile(_PATTERN_CORESIMULATOR_CRASH)
return (pattern.search(sim_sys_log) is not None)
|
def RunSimctlCommand(command):
'Runs simctl command.'
for i in range(_SIMCTL_MAX_ATTEMPTS):
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
if (ios_constants.CORESIMULATOR_CHANGE_ERROR in stderr):
output = stdout
else:
output = '\n'.join([stdout, stderr])
output = output.strip()
if (process.poll() != 0):
if ((i < (_SIMCTL_MAX_ATTEMPTS - 1)) and (ios_constants.CORESIMULATOR_INTERRUPTED_ERROR in output)):
continue
raise ios_errors.SimError(output)
return output
| -4,057,334,053,992,785,400
|
Runs simctl command.
|
simulator_control/simulator_util.py
|
RunSimctlCommand
|
ios-bazel-users/xctestrunner
|
python
|
def RunSimctlCommand(command):
for i in range(_SIMCTL_MAX_ATTEMPTS):
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
if (ios_constants.CORESIMULATOR_CHANGE_ERROR in stderr):
output = stdout
else:
output = '\n'.join([stdout, stderr])
output = output.strip()
if (process.poll() != 0):
if ((i < (_SIMCTL_MAX_ATTEMPTS - 1)) and (ios_constants.CORESIMULATOR_INTERRUPTED_ERROR in output)):
continue
raise ios_errors.SimError(output)
return output
|
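RunSimctlCommand retries only the transient CoreSimulator interruption. A minimal standalone sketch of the same pattern; the marker string is an invented stand-in for ios_constants.CORESIMULATOR_INTERRUPTED_ERROR:

import subprocess

_MAX_ATTEMPTS = 3
# Invented stand-in for ios_constants.CORESIMULATOR_INTERRUPTED_ERROR.
_INTERRUPTED_MARKER = 'Interrupted system call'

def run_with_retry(command):
    for attempt in range(_MAX_ATTEMPTS):
        proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        output = b'\n'.join([stdout, stderr]).strip()
        if proc.returncode == 0:
            return output
        # Retry only on the transient CoreSimulator interruption.
        if attempt < _MAX_ATTEMPTS - 1 and _INTERRUPTED_MARKER.encode() in output:
            continue
        raise RuntimeError(output)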
def __init__(self, simulator_id):
'Constructor of Simulator object.\n\n Args:\n simulator_id: string, the identity of the simulator.\n '
self._simulator_id = simulator_id
self._simulator_root_dir = None
self._simulator_log_root_dir = None
self._device_plist_object = None
| -7,640,569,958,608,737,000
|
Constructor of Simulator object.
Args:
simulator_id: string, the identity of the simulator.
|
simulator_control/simulator_util.py
|
__init__
|
ios-bazel-users/xctestrunner
|
python
|
def __init__(self, simulator_id):
'Constructor of Simulator object.\n\n Args:\n simulator_id: string, the identity of the simulator.\n '
self._simulator_id = simulator_id
self._simulator_root_dir = None
self._simulator_log_root_dir = None
self._device_plist_object = None
|
@property
def simulator_root_dir(self):
"Gets the simulator's root directory."
if (not self._simulator_root_dir):
home_dir = pwd.getpwuid(os.geteuid()).pw_dir
    self._simulator_root_dir = os.path.join(home_dir, 'Library/Developer/CoreSimulator/Devices', self.simulator_id)
return self._simulator_root_dir
| -8,339,548,879,086,290,000
|
Gets the simulator's root directory.
|
simulator_control/simulator_util.py
|
simulator_root_dir
|
ios-bazel-users/xctestrunner
|
python
|
@property
def simulator_root_dir(self):
if (not self._simulator_root_dir):
home_dir = pwd.getpwuid(os.geteuid()).pw_dir
    self._simulator_root_dir = os.path.join(home_dir, 'Library/Developer/CoreSimulator/Devices', self.simulator_id)
return self._simulator_root_dir
|
@property
def simulator_log_root_dir(self):
"Gets the root directory of the simulator's logs."
if (not self._simulator_log_root_dir):
home_dir = pwd.getpwuid(os.geteuid()).pw_dir
    self._simulator_log_root_dir = os.path.join(home_dir, 'Library/Logs/CoreSimulator', self.simulator_id)
return self._simulator_log_root_dir
| -2,731,196,311,688,810,500
|
Gets the root directory of the simulator's logs.
|
simulator_control/simulator_util.py
|
simulator_log_root_dir
|
ios-bazel-users/xctestrunner
|
python
|
@property
def simulator_log_root_dir(self):
if (not self._simulator_log_root_dir):
home_dir = pwd.getpwuid(os.geteuid()).pw_dir
    self._simulator_log_root_dir = os.path.join(home_dir, 'Library/Logs/CoreSimulator', self.simulator_id)
return self._simulator_log_root_dir
|
@property
def device_plist_object(self):
'Gets the plist_util.Plist object of device.plist of the simulator.\n\n Returns:\n a plist_util.Plist object of device.plist of the simulator or None when\n the simulator does not exist or is being created.\n '
if (not self._device_plist_object):
device_plist_path = os.path.join(self.simulator_root_dir, 'device.plist')
if (not os.path.exists(device_plist_path)):
return None
self._device_plist_object = plist_util.Plist(device_plist_path)
return self._device_plist_object
| -7,964,115,949,617,574,000
|
Gets the plist_util.Plist object of device.plist of the simulator.
Returns:
a plist_util.Plist object of device.plist of the simulator or None when
the simulator does not exist or is being created.
|
simulator_control/simulator_util.py
|
device_plist_object
|
ios-bazel-users/xctestrunner
|
python
|
@property
def device_plist_object(self):
'Gets the plist_util.Plist object of device.plist of the simulator.\n\n Returns:\n a plist_util.Plist object of device.plist of the simulator or None when\n the simulator does not exist or is being created.\n '
if (not self._device_plist_object):
device_plist_path = os.path.join(self.simulator_root_dir, 'device.plist')
if (not os.path.exists(device_plist_path)):
return None
self._device_plist_object = plist_util.Plist(device_plist_path)
return self._device_plist_object
|
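plist_util.Plist is not included in this dump; a rough standard-library equivalent for reading the simulator's state out of device.plist, using the path layout from the simulator_root_dir record above, might look like:

import os
import plistlib

def read_device_state(simulator_id):
    # Same path layout as simulator_root_dir above; returns None while
    # the simulator is still being created and device.plist is absent.
    # plistlib.load requires Python 3.4+.
    plist_path = os.path.expanduser(
        '~/Library/Developer/CoreSimulator/Devices/%s/device.plist' % simulator_id)
    if not os.path.exists(plist_path):
        return None
    with open(plist_path, 'rb') as f:
        return plistlib.load(f)['state']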
def Shutdown(self):
'Shuts down the simulator.'
sim_state = self.GetSimulatorState()
if (sim_state == ios_constants.SimState.SHUTDOWN):
logging.info('Simulator %s has already shut down.', self.simulator_id)
return
if (sim_state == ios_constants.SimState.CREATING):
raise ios_errors.SimError('Can not shut down the simulator in state CREATING.')
logging.info('Shutting down simulator %s.', self.simulator_id)
try:
RunSimctlCommand(['xcrun', 'simctl', 'shutdown', self.simulator_id])
except ios_errors.SimError as e:
if ('Unable to shutdown device in current state: Shutdown' in str(e)):
logging.info('Simulator %s has already shut down.', self.simulator_id)
return
raise ios_errors.SimError(('Failed to shutdown simulator %s: %s' % (self.simulator_id, str(e))))
self.WaitUntilStateShutdown()
logging.info('Shut down simulator %s.', self.simulator_id)
| -2,268,752,979,446,568,400
|
Shuts down the simulator.
|
simulator_control/simulator_util.py
|
Shutdown
|
ios-bazel-users/xctestrunner
|
python
|
def Shutdown(self):
sim_state = self.GetSimulatorState()
if (sim_state == ios_constants.SimState.SHUTDOWN):
logging.info('Simulator %s has already shut down.', self.simulator_id)
return
if (sim_state == ios_constants.SimState.CREATING):
raise ios_errors.SimError('Can not shut down the simulator in state CREATING.')
logging.info('Shutting down simulator %s.', self.simulator_id)
try:
RunSimctlCommand(['xcrun', 'simctl', 'shutdown', self.simulator_id])
except ios_errors.SimError as e:
if ('Unable to shutdown device in current state: Shutdown' in str(e)):
logging.info('Simulator %s has already shut down.', self.simulator_id)
return
raise ios_errors.SimError(('Failed to shutdown simulator %s: %s' % (self.simulator_id, str(e))))
self.WaitUntilStateShutdown()
logging.info('Shut down simulator %s.', self.simulator_id)
|
def Delete(self):
"Deletes the simulator asynchronously.\n\n The simulator state should be SHUTDOWN when deleting it. Otherwise, it will\n raise exception.\n\n Raises:\n ios_errors.SimError: The simulator's state is not SHUTDOWN.\n "
if (xcode_info_util.GetXcodeVersionNumber() < 900):
sim_state = self.GetSimulatorState()
if (sim_state != ios_constants.SimState.SHUTDOWN):
raise ios_errors.SimError(('Can only delete the simulator with state SHUTDOWN. The current state of simulator %s is %s.' % (self._simulator_id, sim_state)))
logging.info('Deleting simulator %s asynchronously.', self.simulator_id)
subprocess.Popen(['xcrun', 'simctl', 'delete', self.simulator_id], stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setpgrp)
if os.path.exists(self.simulator_log_root_dir):
shutil.rmtree(self.simulator_log_root_dir, ignore_errors=True)
self._simulator_id = None
| 4,910,870,316,368,934,000
|
Deletes the simulator asynchronously.
The simulator state should be SHUTDOWN when deleting it. Otherwise, it will
raise exception.
Raises:
ios_errors.SimError: The simulator's state is not SHUTDOWN.
|
simulator_control/simulator_util.py
|
Delete
|
ios-bazel-users/xctestrunner
|
python
|
def Delete(self):
"Deletes the simulator asynchronously.\n\n The simulator state should be SHUTDOWN when deleting it. Otherwise, it will\n raise exception.\n\n Raises:\n ios_errors.SimError: The simulator's state is not SHUTDOWN.\n "
if (xcode_info_util.GetXcodeVersionNumber() < 900):
sim_state = self.GetSimulatorState()
if (sim_state != ios_constants.SimState.SHUTDOWN):
raise ios_errors.SimError(('Can only delete the simulator with state SHUTDOWN. The current state of simulator %s is %s.' % (self._simulator_id, sim_state)))
logging.info('Deleting simulator %s asynchronously.', self.simulator_id)
subprocess.Popen(['xcrun', 'simctl', 'delete', self.simulator_id], stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setpgrp)
if os.path.exists(self.simulator_log_root_dir):
shutil.rmtree(self.simulator_log_root_dir, ignore_errors=True)
self._simulator_id = None
|
def FetchLogToFile(self, output_file_path, start_time=None, end_time=None):
'Gets simulator log via running `log` tool on simulator.\n\n Args:\n output_file_path: string, the path of the stdout file.\n start_time: datetime, the start time of the simulator log.\n end_time: datetime, the end time of the simulator log.\n '
command = ['xcrun', 'simctl', 'spawn', self._simulator_id, 'log', 'show', '--style', 'syslog']
if start_time:
command.extend(('--start', start_time.strftime('%Y-%m-%d %H:%M:%S')))
if end_time:
command.extend(('--end', end_time.strftime('%Y-%m-%d %H:%M:%S')))
with open(output_file_path, 'w') as stdout_file:
try:
subprocess.Popen(command, stdout=stdout_file, stderr=subprocess.STDOUT)
except ios_errors.SimError as e:
raise ios_errors.SimError(('Failed to get log on simulator %s: %s' % (self.simulator_id, str(e))))
| -3,275,341,676,593,996,000
|
Gets simulator log via running `log` tool on simulator.
Args:
output_file_path: string, the path of the stdout file.
start_time: datetime, the start time of the simulator log.
end_time: datetime, the end time of the simulator log.
|
simulator_control/simulator_util.py
|
FetchLogToFile
|
ios-bazel-users/xctestrunner
|
python
|
def FetchLogToFile(self, output_file_path, start_time=None, end_time=None):
'Gets simulator log via running `log` tool on simulator.\n\n Args:\n output_file_path: string, the path of the stdout file.\n start_time: datetime, the start time of the simulator log.\n end_time: datetime, the end time of the simulator log.\n '
command = ['xcrun', 'simctl', 'spawn', self._simulator_id, 'log', 'show', '--style', 'syslog']
if start_time:
command.extend(('--start', start_time.strftime('%Y-%m-%d %H:%M:%S')))
if end_time:
command.extend(('--end', end_time.strftime('%Y-%m-%d %H:%M:%S')))
with open(output_file_path, 'w') as stdout_file:
try:
subprocess.Popen(command, stdout=stdout_file, stderr=subprocess.STDOUT)
except ios_errors.SimError as e:
raise ios_errors.SimError(('Failed to get log on simulator %s: %s' % (self.simulator_id, str(e))))
|
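A hypothetical call site for FetchLogToFile, assuming the module imports as simulator_util; the simulator id and time window are illustrative:

import datetime
import simulator_util  # assumed import path

sim = simulator_util.Simulator('6E48C96B-0000-0000-0000-EXAMPLE')  # illustrative id
start = datetime.datetime.now() - datetime.timedelta(minutes=5)
sim.FetchLogToFile('/tmp/sim.log', start_time=start,
                   end_time=datetime.datetime.now())
# FetchLogToFile spawns `log show` without waiting on it, so /tmp/sim.log
# may still be filling in when this call returns.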
def GetAppDocumentsPath(self, app_bundle_id):
"Gets the path of the app's Documents directory."
if (xcode_info_util.GetXcodeVersionNumber() >= 830):
try:
app_data_container = RunSimctlCommand(['xcrun', 'simctl', 'get_app_container', self._simulator_id, app_bundle_id, 'data'])
return os.path.join(app_data_container, 'Documents')
except ios_errors.SimError as e:
raise ios_errors.SimError(('Failed to get data container of the app %s in simulator %s: %s' % (app_bundle_id, self._simulator_id, str(e))))
apps_dir = os.path.join(self.simulator_root_dir, 'data/Containers/Data/Application')
for sub_dir_name in os.listdir(apps_dir):
container_manager_plist = plist_util.Plist(os.path.join(apps_dir, sub_dir_name, '.com.apple.mobile_container_manager.metadata.plist'))
current_app_bundle_id = container_manager_plist.GetPlistField('MCMMetadataIdentifier')
if (current_app_bundle_id == app_bundle_id):
return os.path.join(apps_dir, sub_dir_name, 'Documents')
raise ios_errors.SimError(('Failed to get Documents directory of the app %s in simulator %s' % (app_bundle_id, self._simulator_id)))
| -5,155,790,081,601,089,000
|
Gets the path of the app's Documents directory.
|
simulator_control/simulator_util.py
|
GetAppDocumentsPath
|
ios-bazel-users/xctestrunner
|
python
|
def GetAppDocumentsPath(self, app_bundle_id):
if (xcode_info_util.GetXcodeVersionNumber() >= 830):
try:
app_data_container = RunSimctlCommand(['xcrun', 'simctl', 'get_app_container', self._simulator_id, app_bundle_id, 'data'])
return os.path.join(app_data_container, 'Documents')
except ios_errors.SimError as e:
raise ios_errors.SimError(('Failed to get data container of the app %s in simulator %s: %s' % (app_bundle_id, self._simulator_id, str(e))))
apps_dir = os.path.join(self.simulator_root_dir, 'data/Containers/Data/Application')
for sub_dir_name in os.listdir(apps_dir):
container_manager_plist = plist_util.Plist(os.path.join(apps_dir, sub_dir_name, '.com.apple.mobile_container_manager.metadata.plist'))
current_app_bundle_id = container_manager_plist.GetPlistField('MCMMetadataIdentifier')
if (current_app_bundle_id == app_bundle_id):
return os.path.join(apps_dir, sub_dir_name, 'Documents')
raise ios_errors.SimError(('Failed to get Documents directory of the app %s in simulator %s' % (app_bundle_id, self._simulator_id)))
|
def IsAppInstalled(self, app_bundle_id):
'Checks if the simulator has installed the app with given bundle id.'
try:
RunSimctlCommand(['xcrun', 'simctl', 'get_app_container', self._simulator_id, app_bundle_id])
return True
except ios_errors.SimError:
return False
| 3,805,973,177,728,198,700
|
Checks if the simulator has installed the app with given bundle id.
|
simulator_control/simulator_util.py
|
IsAppInstalled
|
ios-bazel-users/xctestrunner
|
python
|
def IsAppInstalled(self, app_bundle_id):
try:
RunSimctlCommand(['xcrun', 'simctl', 'get_app_container', self._simulator_id, app_bundle_id])
return True
except ios_errors.SimError:
return False
|
def WaitUntilStateShutdown(self, timeout_sec=_SIMULATOR_SHUTDOWN_TIMEOUT_SEC):
'Waits until the simulator state becomes SHUTDOWN.\n\n Args:\n timeout_sec: int, timeout in seconds to wait for the simulator state to\n become SHUTDOWN.\n\n Raises:\n ios_errors.SimError: when waiting for the simulator state to become\n SHUTDOWN times out.\n '
start_time = time.time()
while ((start_time + timeout_sec) >= time.time()):
if (self.GetSimulatorState() == ios_constants.SimState.SHUTDOWN):
return
time.sleep(_SIM_CHECK_STATE_INTERVAL_SEC)
  raise ios_errors.SimError(('Timed out waiting for simulator shutdown in %ss.' % timeout_sec))
| -6,328,015,680,993,274,000
|
Waits until the simulator state becomes SHUTDOWN.
Args:
timeout_sec: int, timeout in seconds to wait for the simulator state to
become SHUTDOWN.
Raises:
ios_errors.SimError: when waiting for the simulator state to become
SHUTDOWN times out.
|
simulator_control/simulator_util.py
|
WaitUntilStateShutdown
|
ios-bazel-users/xctestrunner
|
python
|
def WaitUntilStateShutdown(self, timeout_sec=_SIMULATOR_SHUTDOWN_TIMEOUT_SEC):
'Waits until the simulator state becomes SHUTDOWN.\n\n Args:\n timeout_sec: int, timeout in seconds to wait for the simulator state to\n become SHUTDOWN.\n\n Raises:\n ios_errors.SimError: when waiting for the simulator state to become\n SHUTDOWN times out.\n '
start_time = time.time()
while ((start_time + timeout_sec) >= time.time()):
if (self.GetSimulatorState() == ios_constants.SimState.SHUTDOWN):
return
time.sleep(_SIM_CHECK_STATE_INTERVAL_SEC)
  raise ios_errors.SimError(('Timed out waiting for simulator shutdown in %ss.' % timeout_sec))
|
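WaitUntilStateShutdown is a standard poll-until-deadline loop; reduced to its skeleton for clarity:

import time

def wait_until(predicate, timeout_sec, interval_sec=0.5):
    # Returns once predicate() is True; raises on timeout, like the
    # record above (which polls the simulator state instead).
    deadline = time.time() + timeout_sec
    while time.time() <= deadline:
        if predicate():
            return
        time.sleep(interval_sec)
    raise TimeoutError('Condition not met within %ss.' % timeout_sec)

wait_until(lambda: True, timeout_sec=1)  # trivially satisfied example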
def GetSimulatorState(self):
'Gets the state of the simulator in real time.\n\n Returns:\n shared.ios_constants.SimState, the state of the simulator. Returns\n SimState.UNKNOWN when the state can not be recognized.\n '
if (self.device_plist_object is None):
return ios_constants.SimState.CREATING
state_num = self.device_plist_object.GetPlistField('state')
if (state_num not in _SIMULATOR_STATES_MAPPING.keys()):
logging.warning('The state %s of simulator %s can not be recognized.', state_num, self.simulator_id)
return ios_constants.SimState.UNKNOWN
return _SIMULATOR_STATES_MAPPING[state_num]
| 3,246,234,434,032,654,000
|
Gets the state of the simulator in real time.
Returns:
shared.ios_constants.SimState, the state of the simulator. Returns
SimState.UNKNOWN when the state can not be recognized.
|
simulator_control/simulator_util.py
|
GetSimulatorState
|
ios-bazel-users/xctestrunner
|
python
|
def GetSimulatorState(self):
'Gets the state of the simulator in real time.\n\n Returns:\n shared.ios_constants.SimState, the state of the simulator. Returns\n SimState.UNKNOWN when the state can not be recognized.\n '
if (self.device_plist_object is None):
return ios_constants.SimState.CREATING
state_num = self.device_plist_object.GetPlistField('state')
if (state_num not in _SIMULATOR_STATES_MAPPING.keys()):
logging.warning('The state %s of simulator %s can not be recognized.', state_num, self.simulator_id)
return ios_constants.SimState.UNKNOWN
return _SIMULATOR_STATES_MAPPING[state_num]
|
def __init__(self, i2c, device_address):
'\n Try to read a byte from an address,\n if you get an OSError it means the device is not there\n '
while (not i2c.try_lock()):
pass
try:
i2c.writeto(device_address, b'')
except OSError:
try:
result = bytearray(1)
i2c.readfrom_into(device_address, result)
except OSError:
raise ValueError(('No I2C device at address: %x' % device_address))
finally:
i2c.unlock()
self.i2c = i2c
self.device_address = device_address
| -5,941,914,171,103,282,000
|
Try to read a byte from an address,
if you get an OSError it means the device is not there
|
adafruit_bus_device/i2c_device.py
|
__init__
|
rhthomas/Adafruit_CircuitPython_NRF24L01
|
python
|
def __init__(self, i2c, device_address):
'\n Try to read a byte from an address,\n if you get an OSError it means the device is not there\n '
while (not i2c.try_lock()):
pass
try:
        i2c.writeto(device_address, b'')
except OSError:
try:
result = bytearray(1)
i2c.readfrom_into(device_address, result)
except OSError:
raise ValueError(('No I2C device at address: %x' % device_address))
finally:
i2c.unlock()
self.i2c = i2c
self.device_address = device_address
|
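The probe in this constructor (a zero-length write, then a one-byte read as a fallback) generalizes to a bus scan; a sketch assuming `i2c` is a busio.I2C-style object with the same try_lock/writeto/readfrom_into API used above:

def scan_bus(i2c, addresses=range(0x08, 0x78)):
    # Probe each address the way the constructor above does: a
    # zero-length write first, then a one-byte read as a fallback.
    found = []
    while not i2c.try_lock():
        pass
    try:
        for addr in addresses:
            try:
                i2c.writeto(addr, b'')
                found.append(addr)
            except OSError:
                try:
                    buf = bytearray(1)
                    i2c.readfrom_into(addr, buf)
                    found.append(addr)
                except OSError:
                    pass
    finally:
        i2c.unlock()
    return found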
def readinto(self, buf, **kwargs):
'\n Read into ``buf`` from the device. The number of bytes read will be the\n length of ``buf``.\n\n If ``start`` or ``end`` is provided, then the buffer will be sliced\n as if ``buf[start:end]``. This will not cause an allocation like\n ``buf[start:end]`` will so it saves memory.\n\n :param bytearray buffer: buffer to write into\n :param int start: Index to start writing at\n :param int end: Index to write up to but not include\n '
self.i2c.readfrom_into(self.device_address, buf, **kwargs)
| -1,874,825,497,476,106,500
|
Read into ``buf`` from the device. The number of bytes read will be the
length of ``buf``.
If ``start`` or ``end`` is provided, then the buffer will be sliced
as if ``buf[start:end]``. This will not cause an allocation like
``buf[start:end]`` will so it saves memory.
:param bytearray buffer: buffer to write into
:param int start: Index to start writing at
:param int end: Index to write up to but not include
|
adafruit_bus_device/i2c_device.py
|
readinto
|
rhthomas/Adafruit_CircuitPython_NRF24L01
|
python
|
def readinto(self, buf, **kwargs):
'\n Read into ``buf`` from the device. The number of bytes read will be the\n length of ``buf``.\n\n If ``start`` or ``end`` is provided, then the buffer will be sliced\n as if ``buf[start:end]``. This will not cause an allocation like\n ``buf[start:end]`` will so it saves memory.\n\n :param bytearray buffer: buffer to write into\n :param int start: Index to start writing at\n :param int end: Index to write up to but not include\n '
self.i2c.readfrom_into(self.device_address, buf, **kwargs)
|
def write(self, buf, **kwargs):
'\n Write the bytes from ``buffer`` to the device. Transmits a stop bit if\n ``stop`` is set.\n\n If ``start`` or ``end`` is provided, then the buffer will be sliced\n as if ``buffer[start:end]``. This will not cause an allocation like\n ``buffer[start:end]`` will so it saves memory.\n\n :param bytearray buffer: buffer containing the bytes to write\n :param int start: Index to start writing from\n :param int end: Index to read up to but not include\n :param bool stop: If true, output an I2C stop condition after the buffer is written\n '
self.i2c.writeto(self.device_address, buf, **kwargs)
| 8,320,486,125,263,830,000
|
Write the bytes from ``buffer`` to the device. Transmits a stop bit if
``stop`` is set.
If ``start`` or ``end`` is provided, then the buffer will be sliced
as if ``buffer[start:end]``. This will not cause an allocation like
``buffer[start:end]`` will so it saves memory.
:param bytearray buffer: buffer containing the bytes to write
:param int start: Index to start writing from
:param int end: Index to read up to but not include
:param bool stop: If true, output an I2C stop condition after the buffer is written
|
adafruit_bus_device/i2c_device.py
|
write
|
rhthomas/Adafruit_CircuitPython_NRF24L01
|
python
|
def write(self, buf, **kwargs):
'\n Write the bytes from ``buffer`` to the device. Transmits a stop bit if\n ``stop`` is set.\n\n If ``start`` or ``end`` is provided, then the buffer will be sliced\n as if ``buffer[start:end]``. This will not cause an allocation like\n ``buffer[start:end]`` will so it saves memory.\n\n :param bytearray buffer: buffer containing the bytes to write\n :param int start: Index to start writing from\n :param int end: Index to read up to but not include\n :param bool stop: If true, output an I2C stop condition after the buffer is written\n '
self.i2c.writeto(self.device_address, buf, **kwargs)
|
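A typical call site for write() is a register write: the register address byte followed by the payload. The register number is invented and `device` is assumed to be an I2CDevice constructed as above:

CONFIG_REG = 0x0F  # invented register address for illustration
# One transaction: register address byte, then one data byte.
device.write(bytes([CONFIG_REG, 0x0A]))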
def write_then_readinto(self, out_buffer, in_buffer, *, out_start=0, out_end=None, in_start=0, in_end=None, stop=True):
'\n Write the bytes from ``out_buffer`` to the device, then immediately\n reads into ``in_buffer`` from the device. The number of bytes read\n will be the length of ``in_buffer``.\n Transmits a stop bit after the write, if ``stop`` is set.\n\n If ``out_start`` or ``out_end`` is provided, then the output buffer\n will be sliced as if ``out_buffer[out_start:out_end]``. This will\n not cause an allocation like ``buffer[out_start:out_end]`` will so\n it saves memory.\n\n If ``in_start`` or ``in_end`` is provided, then the input buffer\n will be sliced as if ``in_buffer[in_start:in_end]``. This will not\n cause an allocation like ``in_buffer[in_start:in_end]`` will so\n it saves memory.\n\n :param bytearray out_buffer: buffer containing the bytes to write\n :param bytearray in_buffer: buffer containing the bytes to read into\n :param int out_start: Index to start writing from\n :param int out_end: Index to read up to but not include\n :param int in_start: Index to start writing at\n :param int in_end: Index to write up to but not include\n :param bool stop: If true, output an I2C stop condition after the buffer is written\n '
if (out_end is None):
out_end = len(out_buffer)
if (in_end is None):
in_end = len(in_buffer)
if hasattr(self.i2c, 'writeto_then_readfrom'):
self.i2c.writeto_then_readfrom(self.device_address, out_buffer, in_buffer, out_start=out_start, out_end=out_end, in_start=in_start, in_end=in_end, stop=stop)
else:
self.write(out_buffer, start=out_start, end=out_end, stop=stop)
self.readinto(in_buffer, start=in_start, end=in_end)
| 3,956,235,842,555,875,300
|
Write the bytes from ``out_buffer`` to the device, then immediately
reads into ``in_buffer`` from the device. The number of bytes read
will be the length of ``in_buffer``.
Transmits a stop bit after the write, if ``stop`` is set.
If ``out_start`` or ``out_end`` is provided, then the output buffer
will be sliced as if ``out_buffer[out_start:out_end]``. This will
not cause an allocation like ``buffer[out_start:out_end]`` will so
it saves memory.
If ``in_start`` or ``in_end`` is provided, then the input buffer
will be sliced as if ``in_buffer[in_start:in_end]``. This will not
cause an allocation like ``in_buffer[in_start:in_end]`` will so
it saves memory.
:param bytearray out_buffer: buffer containing the bytes to write
:param bytearray in_buffer: buffer containing the bytes to read into
:param int out_start: Index to start writing from
:param int out_end: Index to read up to but not include
:param int in_start: Index to start writing at
:param int in_end: Index to write up to but not include
:param bool stop: If true, output an I2C stop condition after the buffer is written
|
adafruit_bus_device/i2c_device.py
|
write_then_readinto
|
rhthomas/Adafruit_CircuitPython_NRF24L01
|
python
|
def write_then_readinto(self, out_buffer, in_buffer, *, out_start=0, out_end=None, in_start=0, in_end=None, stop=True):
'\n Write the bytes from ``out_buffer`` to the device, then immediately\n reads into ``in_buffer`` from the device. The number of bytes read\n will be the length of ``in_buffer``.\n Transmits a stop bit after the write, if ``stop`` is set.\n\n If ``out_start`` or ``out_end`` is provided, then the output buffer\n will be sliced as if ``out_buffer[out_start:out_end]``. This will\n not cause an allocation like ``buffer[out_start:out_end]`` will so\n it saves memory.\n\n If ``in_start`` or ``in_end`` is provided, then the input buffer\n will be sliced as if ``in_buffer[in_start:in_end]``. This will not\n cause an allocation like ``in_buffer[in_start:in_end]`` will so\n it saves memory.\n\n :param bytearray out_buffer: buffer containing the bytes to write\n :param bytearray in_buffer: buffer containing the bytes to read into\n :param int out_start: Index to start writing from\n :param int out_end: Index to read up to but not include\n :param int in_start: Index to start writing at\n :param int in_end: Index to write up to but not include\n :param bool stop: If true, output an I2C stop condition after the buffer is written\n '
if (out_end is None):
out_end = len(out_buffer)
if (in_end is None):
in_end = len(in_buffer)
if hasattr(self.i2c, 'writeto_then_readfrom'):
self.i2c.writeto_then_readfrom(self.device_address, out_buffer, in_buffer, out_start=out_start, out_end=out_end, in_start=in_start, in_end=in_end, stop=stop)
else:
self.write(out_buffer, start=out_start, end=out_end, stop=stop)
self.readinto(in_buffer, start=in_start, end=in_end)
|
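write_then_readinto() exists mainly for the register-read idiom: send the register address, then read back in one combined transaction (stop=False keeps a repeated start between the phases). The register constant is invented and `device` is assumed to be an I2CDevice as above:

WHO_AM_I = bytes([0x75])  # invented register address for illustration
result = bytearray(1)     # pre-allocated once, so the read allocates nothing
device.write_then_readinto(WHO_AM_I, result, stop=False)
print('chip id: 0x%02x' % result[0])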
@property
def exists(self):
'\n Does this key correspond to an object in S3?\n '
return (self._size is not None)
| -5,765,735,506,469,143,000
|
Does this key correspond to an object in S3?
|
metaflow/datatools/s3.py
|
exists
|
anthonypreza/metaflow
|
python
|
@property
def exists(self):
'\n \n '
return (self._size is not None)
|
@property
def downloaded(self):
'\n Has this object been downloaded?\n '
return bool(self._path)
| 4,760,805,548,842,401,000
|
Has this object been downloaded?
|
metaflow/datatools/s3.py
|
downloaded
|
anthonypreza/metaflow
|
python
|
@property
def downloaded(self):
'\n \n '
return bool(self._path)
|
@property
def url(self):
'\n S3 location of the object\n '
return self._url
| 5,366,792,614,045,977,000
|
S3 location of the object
|
metaflow/datatools/s3.py
|
url
|
anthonypreza/metaflow
|
python
|
@property
def url(self):
'\n \n '
return self._url
|
@property
def prefix(self):
'\n Prefix requested that matches the object.\n '
return self._prefix
| 8,917,826,216,431,051,000
|
Prefix requested that matches the object.
|
metaflow/datatools/s3.py
|
prefix
|
anthonypreza/metaflow
|
python
|
@property
def prefix(self):
'\n \n '
return self._prefix
|
@property
def key(self):
'\n Key corresponds to the key given to the get call that produced\n this object. This may be a full S3 URL or a suffix based on what\n was requested.\n '
return self._key
| -3,277,946,264,028,065,000
|
Key corresponds to the key given to the get call that produced
this object. This may be a full S3 URL or a suffix based on what
was requested.
|
metaflow/datatools/s3.py
|
key
|
anthonypreza/metaflow
|
python
|
@property
def key(self):
'\n Key corresponds to the key given to the get call that produced\n this object. This may be a full S3 URL or a suffix based on what\n was requested.\n '
return self._key
|
@property
def path(self):
'\n Path to the local file corresponding to the object downloaded.\n This file gets deleted automatically when a S3 scope exits.\n\n Returns None if this S3Object has not been downloaded.\n '
return self._path
| -1,224,402,731,662,786,000
|
Path to the local file corresponding to the object downloaded.
This file gets deleted automatically when a S3 scope exits.
Returns None if this S3Object has not been downloaded.
|
metaflow/datatools/s3.py
|
path
|
anthonypreza/metaflow
|
python
|
@property
def path(self):
'\n Path to the local file corresponding to the object downloaded.\n This file gets deleted automatically when a S3 scope exits.\n\n Returns None if this S3Object has not been downloaded.\n '
return self._path
|
@property
def blob(self):
'\n Contents of the object as a byte string.\n\n Returns None if this S3Object has not been downloaded.\n '
if self._path:
with open(self._path, 'rb') as f:
return f.read()
| 4,473,451,728,646,713,300
|
Contents of the object as a byte string.
Returns None if this S3Object has not been downloaded.
|
metaflow/datatools/s3.py
|
blob
|
anthonypreza/metaflow
|
python
|
@property
def blob(self):
'\n Contents of the object as a byte string.\n\n Returns None if this S3Object has not been downloaded.\n '
if self._path:
with open(self._path, 'rb') as f:
return f.read()
|
@property
def text(self):
'\n Contents of the object as a Unicode string.\n\n Returns None if this S3Object has not been downloaded.\n '
if self._path:
return self.blob.decode('utf-8', errors='replace')
| -540,721,315,459,532,160
|
Contents of the object as a Unicode string.
Returns None if this S3Object has not been downloaded.
|
metaflow/datatools/s3.py
|
text
|
anthonypreza/metaflow
|
python
|
@property
def text(self):
'\n Contents of the object as a Unicode string.\n\n Returns None if this S3Object has not been downloaded.\n '
if self._path:
return self.blob.decode('utf-8', errors='replace')
|
@property
def size(self):
'\n Size of the object in bytes.\n\n Returns None if the key does not correspond to an object in S3.\n '
return self._size
| -8,567,336,051,561,795,000
|
Size of the object in bytes.
Returns None if the key does not correspond to an object in S3.
|
metaflow/datatools/s3.py
|
size
|
anthonypreza/metaflow
|
python
|
@property
def size(self):
'\n Size of the object in bytes.\n\n Returns None if the key does not correspond to an object in S3.\n '
return self._size
|
def __init__(self, tmproot='.', bucket=None, prefix=None, run=None, s3root=None):
"\n Initialize a new context for S3 operations. This object is based used as\n a context manager for a with statement.\n\n There are two ways to initialize this object depending whether you want\n to bind paths to a Metaflow run or not.\n\n 1. With a run object:\n\n run: (required) Either a FlowSpec object (typically 'self') or a\n Run object corresponding to an existing Metaflow run. These\n are used to add a version suffix in the S3 path.\n bucket: (optional) S3 bucket.\n prefix: (optional) S3 prefix.\n\n 2. Without a run object:\n\n s3root: (optional) An S3 root URL for all operations. If this is\n not specified, all operations require a full S3 URL.\n\n These options are supported in both the modes:\n\n tmproot: (optional) Root path for temporary files (default: '.')\n "
if run:
parsed = urlparse(DATATOOLS_S3ROOT)
if (not bucket):
bucket = parsed.netloc
if (not prefix):
prefix = parsed.path
if isinstance(run, FlowSpec):
if current.is_running_flow:
prefix = os.path.join(prefix, current.flow_name, current.run_id)
else:
raise MetaflowS3URLException('Initializing S3 with a FlowSpec outside of a running flow is not supported.')
else:
prefix = os.path.join(prefix, run.parent.id, run.id)
self._s3root = (u's3://%s' % os.path.join(bucket, prefix.strip('/')))
elif s3root:
parsed = urlparse(to_unicode(s3root))
if (parsed.scheme != 's3'):
raise MetaflowS3URLException('s3root needs to be an S3 URL prefixed with s3://.')
self._s3root = s3root.rstrip('/')
else:
self._s3root = None
self._tmpdir = mkdtemp(dir=tmproot, prefix='metaflow.s3.')
| -1,208,082,141,560,803,000
|
Initialize a new context for S3 operations. This object is meant to be used as
a context manager with a with statement.
There are two ways to initialize this object depending on whether you want
to bind paths to a Metaflow run or not.
1. With a run object:
run: (required) Either a FlowSpec object (typically 'self') or a
Run object corresponding to an existing Metaflow run. These
are used to add a version suffix in the S3 path.
bucket: (optional) S3 bucket.
prefix: (optional) S3 prefix.
2. Without a run object:
s3root: (optional) An S3 root URL for all operations. If this is
not specified, all operations require a full S3 URL.
These options are supported in both the modes:
tmproot: (optional) Root path for temporary files (default: '.')
|
metaflow/datatools/s3.py
|
__init__
|
anthonypreza/metaflow
|
python
|
def __init__(self, tmproot='.', bucket=None, prefix=None, run=None, s3root=None):
"\n Initialize a new context for S3 operations. This object is based used as\n a context manager for a with statement.\n\n There are two ways to initialize this object depending whether you want\n to bind paths to a Metaflow run or not.\n\n 1. With a run object:\n\n run: (required) Either a FlowSpec object (typically 'self') or a\n Run object corresponding to an existing Metaflow run. These\n are used to add a version suffix in the S3 path.\n bucket: (optional) S3 bucket.\n prefix: (optional) S3 prefix.\n\n 2. Without a run object:\n\n s3root: (optional) An S3 root URL for all operations. If this is\n not specified, all operations require a full S3 URL.\n\n These options are supported in both the modes:\n\n tmproot: (optional) Root path for temporary files (default: '.')\n "
if run:
parsed = urlparse(DATATOOLS_S3ROOT)
if (not bucket):
bucket = parsed.netloc
if (not prefix):
prefix = parsed.path
if isinstance(run, FlowSpec):
if current.is_running_flow:
prefix = os.path.join(prefix, current.flow_name, current.run_id)
else:
raise MetaflowS3URLException('Initializing S3 with a FlowSpec outside of a running flow is not supported.')
else:
prefix = os.path.join(prefix, run.parent.id, run.id)
self._s3root = (u's3://%s' % os.path.join(bucket, prefix.strip('/')))
elif s3root:
parsed = urlparse(to_unicode(s3root))
if (parsed.scheme != 's3'):
raise MetaflowS3URLException('s3root needs to be an S3 URL prefixed with s3://.')
self._s3root = s3root.rstrip('/')
else:
self._s3root = None
self._tmpdir = mkdtemp(dir=tmproot, prefix='metaflow.s3.')
|
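As a hedged illustration of the second initialization mode described above (no run object, explicit s3root), the bucket and keys below are placeholders.
from metaflow import S3

with S3(s3root='s3://my-bucket/my-prefix') as s3:
    url = s3.put('greeting', 'hello world')   # returns the full S3 URL
    obj = s3.get('greeting')                  # downloads into the temp dir
    print(url, obj.text)
# leaving the with-block removes the temporary files via close()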
def close(self):
'\n Delete all temporary files downloaded in this context.\n '
try:
if (not debug.s3client):
shutil.rmtree(self._tmpdir)
except:
pass
| -6,482,489,124,144,749,000
|
Delete all temporary files downloaded in this context.
|
metaflow/datatools/s3.py
|
close
|
anthonypreza/metaflow
|
python
|
def close(self):
'\n \n '
try:
if (not debug.s3client):
shutil.rmtree(self._tmpdir)
except:
pass
|
def list_paths(self, keys=None):
"\n List the next level of paths in S3. If multiple keys are\n specified, listings are done in parallel. The returned\n S3Objects have .exists == False if the url refers to a\n prefix, not an existing S3 object.\n\n Args:\n keys: (required) a list of suffixes for paths to list.\n\n Returns:\n a list of S3Objects (not downloaded)\n\n Example:\n\n Consider the following paths in S3:\n\n A/B/C\n D/E\n\n In this case, list_paths(['A', 'D']), returns ['A/B', 'D/E']. The\n first S3Object has .exists == False, since it does not refer to an\n object in S3. It is just a prefix.\n "
def _list(keys):
if (keys is None):
keys = [None]
urls = ((self._url(key).rstrip('/') + '/') for key in keys)
res = self._read_many_files('list', urls)
for (s3prefix, s3url, size) in res:
if size:
(yield (s3prefix, s3url, None, int(size)))
else:
(yield (s3prefix, s3url, None, None))
return list(starmap(S3Object, _list(keys)))
| -2,845,874,272,869,705,700
|
List the next level of paths in S3. If multiple keys are
specified, listings are done in parallel. The returned
S3Objects have .exists == False if the url refers to a
prefix, not an existing S3 object.
Args:
keys: (required) a list of suffixes for paths to list.
Returns:
a list of S3Objects (not downloaded)
Example:
Consider the following paths in S3:
A/B/C
D/E
In this case, list_paths(['A', 'D']), returns ['A/B', 'D/E']. The
first S3Object has .exists == False, since it does not refer to an
object in S3. It is just a prefix.
|
metaflow/datatools/s3.py
|
list_paths
|
anthonypreza/metaflow
|
python
|
def list_paths(self, keys=None):
"\n List the next level of paths in S3. If multiple keys are\n specified, listings are done in parallel. The returned\n S3Objects have .exists == False if the url refers to a\n prefix, not an existing S3 object.\n\n Args:\n keys: (required) a list of suffixes for paths to list.\n\n Returns:\n a list of S3Objects (not downloaded)\n\n Example:\n\n Consider the following paths in S3:\n\n A/B/C\n D/E\n\n In this case, list_paths(['A', 'D']), returns ['A/B', 'D/E']. The\n first S3Object has .exists == False, since it does not refer to an\n object in S3. It is just a prefix.\n "
def _list(keys):
if (keys is None):
keys = [None]
urls = ((self._url(key).rstrip('/') + '/') for key in keys)
res = self._read_many_files('list', urls)
for (s3prefix, s3url, size) in res:
if size:
(yield (s3prefix, s3url, None, int(size)))
else:
(yield (s3prefix, s3url, None, None))
return list(starmap(S3Object, _list(keys)))
|
def list_recursive(self, keys=None):
"\n List objects in S3 recursively. If multiple keys are\n specified, listings are done in parallel. The returned\n S3Objects have always .exists == True, since they refer\n to existing objects in S3.\n\n Args:\n keys: (required) a list of suffixes for paths to list.\n\n Returns:\n a list of S3Objects (not downloaded)\n\n Example:\n\n Consider the following paths in S3:\n\n A/B/C\n D/E\n\n In this case, list_recursive(['A', 'D']), returns ['A/B/C', 'D/E'].\n "
def _list(keys):
if (keys is None):
keys = [None]
res = self._read_many_files('list', map(self._url, keys), recursive=True)
for (s3prefix, s3url, size) in res:
(yield (s3prefix, s3url, None, int(size)))
return list(starmap(S3Object, _list(keys)))
| -8,079,390,429,491,253,000
|
List objects in S3 recursively. If multiple keys are
specified, listings are done in parallel. The returned
S3Objects have always .exists == True, since they refer
to existing objects in S3.
Args:
keys: (required) a list of suffixes for paths to list.
Returns:
a list of S3Objects (not downloaded)
Example:
Consider the following paths in S3:
A/B/C
D/E
In this case, list_recursive(['A', 'D']), returns ['A/B/C', 'D/E'].
|
metaflow/datatools/s3.py
|
list_recursive
|
anthonypreza/metaflow
|
python
|
def list_recursive(self, keys=None):
"\n List objects in S3 recursively. If multiple keys are\n specified, listings are done in parallel. The returned\n S3Objects have always .exists == True, since they refer\n to existing objects in S3.\n\n Args:\n keys: (required) a list of suffixes for paths to list.\n\n Returns:\n a list of S3Objects (not downloaded)\n\n Example:\n\n Consider the following paths in S3:\n\n A/B/C\n D/E\n\n In this case, list_recursive(['A', 'D']), returns ['A/B/C', 'D/E'].\n "
def _list(keys):
if (keys is None):
keys = [None]
res = self._read_many_files('list', map(self._url, keys), recursive=True)
for (s3prefix, s3url, size) in res:
(yield (s3prefix, s3url, None, int(size)))
return list(starmap(S3Object, _list(keys)))
|
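A small sketch contrasting the two listing calls above; the prefixes mirror the A/B/C and D/E example from the docstrings and are placeholders.
from metaflow import S3

with S3(s3root='s3://my-bucket/data') as s3:
    for obj in s3.list_paths(['A', 'D']):      # one level: e.g. A/B (a prefix), D/E (an object)
        print(obj.url, obj.exists)
    for obj in s3.list_recursive(['A', 'D']):  # all objects: e.g. A/B/C, D/E
        print(obj.url, obj.size)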
def get(self, key=None, return_missing=False):
'\n Get a single object from S3.\n\n Args:\n key: (optional) a suffix identifying the object.\n return_missing: (optional, default False) if set to True, do\n not raise an exception for a missing key but\n return it as an S3Object with .exists == False.\n\n Returns:\n an S3Object corresponding to the object requested.\n '
url = self._url(key)
src = urlparse(url)
def _download(s3, tmp):
s3.download_file(src.netloc, src.path.lstrip('/'), tmp)
return url
try:
path = self._one_boto_op(_download, url)
except MetaflowS3NotFound:
if return_missing:
path = None
else:
raise
return S3Object(self._s3root, url, path)
| -1,205,389,529,836,959,500
|
Get a single object from S3.
Args:
key: (optional) a suffix identifying the object.
return_missing: (optional, default False) if set to True, do
not raise an exception for a missing key but
return it as an S3Object with .exists == False.
Returns:
an S3Object corresponding to the object requested.
|
metaflow/datatools/s3.py
|
get
|
anthonypreza/metaflow
|
python
|
def get(self, key=None, return_missing=False):
'\n Get a single object from S3.\n\n Args:\n key: (optional) a suffix identifying the object.\n return_missing: (optional, default False) if set to True, do\n not raise an exception for a missing key but\n return it as an S3Object with .exists == False.\n\n Returns:\n an S3Object corresponding to the object requested.\n '
url = self._url(key)
src = urlparse(url)
def _download(s3, tmp):
s3.download_file(src.netloc, src.path.lstrip('/'), tmp)
return url
try:
path = self._one_boto_op(_download, url)
except MetaflowS3NotFound:
if return_missing:
path = None
else:
raise
return S3Object(self._s3root, url, path)
|
def get_many(self, keys, return_missing=False):
'\n Get many objects from S3 in parallel.\n\n Args:\n keys: (required) a list of suffixes identifying the objects.\n return_missing: (optional, default False) if set to True, do\n not raise an exception for a missing key but\n return it as an S3Object with .exists == False.\n\n Returns:\n a list of S3Objects corresponding to the objects requested.\n '
def _get():
res = self._read_many_files('get', map(self._url, keys), allow_missing=return_missing, verify=True, verbose=False, listing=True)
for (s3prefix, s3url, fname) in res:
if fname:
(yield (self._s3root, s3url, os.path.join(self._tmpdir, fname)))
else:
(yield (self._s3root, s3prefix, None, None))
return list(starmap(S3Object, _get()))
| -445,562,500,342,118,500
|
Get many objects from S3 in parallel.
Args:
keys: (required) a list of suffixes identifying the objects.
return_missing: (optional, default False) if set to True, do
not raise an exception for a missing key but
return it as an S3Object with .exists == False.
Returns:
a list of S3Objects corresponding to the objects requested.
|
metaflow/datatools/s3.py
|
get_many
|
anthonypreza/metaflow
|
python
|
def get_many(self, keys, return_missing=False):
'\n Get many objects from S3 in parallel.\n\n Args:\n keys: (required) a list of suffixes identifying the objects.\n return_missing: (optional, default False) if set to True, do\n not raise an exception for a missing key but\n return it as an S3Object with .exists == False.\n\n Returns:\n a list of S3Objects corresponding to the objects requested.\n '
def _get():
res = self._read_many_files('get', map(self._url, keys), allow_missing=return_missing, verify=True, verbose=False, listing=True)
for (s3prefix, s3url, fname) in res:
if fname:
(yield (self._s3root, s3url, os.path.join(self._tmpdir, fname)))
else:
(yield (self._s3root, s3prefix, None, None))
return list(starmap(S3Object, _get()))
|
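A hedged sketch of single and parallel downloads with missing keys tolerated; the key names are placeholders.
from metaflow import S3

with S3(s3root='s3://my-bucket/data') as s3:
    one = s3.get('config.json', return_missing=True)
    if one.exists:
        print(one.path, one.size)
    for obj in s3.get_many(['part-0', 'part-1'], return_missing=True):
        print(obj.key, obj.downloaded)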
def get_recursive(self, keys):
'\n Get many objects from S3 recursively in parallel.\n\n Args:\n keys: (required) a list of suffixes for paths to download\n recursively.\n\n Returns:\n a list of S3Objects corresponding to the objects requested.\n '
def _get():
res = self._read_many_files('get', map(self._url, keys), recursive=True, verify=True, verbose=False, listing=True)
for (s3prefix, s3url, fname) in res:
(yield (s3prefix, s3url, os.path.join(self._tmpdir, fname)))
return list(starmap(S3Object, _get()))
| 346,994,368,772,277,500
|
Get many objects from S3 recursively in parallel.
Args:
keys: (required) a list of suffixes for paths to download
recursively.
Returns:
a list of S3Objects corresponding to the objects requested.
|
metaflow/datatools/s3.py
|
get_recursive
|
anthonypreza/metaflow
|
python
|
def get_recursive(self, keys):
'\n Get many objects from S3 recursively in parallel.\n\n Args:\n keys: (required) a list of suffixes for paths to download\n recursively.\n\n Returns:\n a list of S3Objects corresponding to the objects requested.\n '
def _get():
res = self._read_many_files('get', map(self._url, keys), recursive=True, verify=True, verbose=False, listing=True)
for (s3prefix, s3url, fname) in res:
(yield (s3prefix, s3url, os.path.join(self._tmpdir, fname)))
return list(starmap(S3Object, _get()))
|
def get_all(self):
'\n Get all objects from S3 recursively (in parallel). This request\n only works if S3 is initialized with a run or a s3root prefix.\n\n Returns:\n a list of S3Objects corresponding to the objects requested.\n '
if (self._s3root is None):
raise MetaflowS3URLException("Can't get_all() when S3 is initialized without a prefix")
else:
return self.get_recursive([None])
| 995,553,267,817,929,200
|
Get all objects from S3 recursively (in parallel). This request
only works if S3 is initialized with a run or a s3root prefix.
Returns:
a list of S3Objects corresponding to the objects requested.
|
metaflow/datatools/s3.py
|
get_all
|
anthonypreza/metaflow
|
python
|
def get_all(self):
'\n Get all objects from S3 recursively (in parallel). This request\n only works if S3 is initialized with a run or a s3root prefix.\n\n Returns:\n a list of S3Objects corresponding to the objects requested.\n '
if (self._s3root is None):
raise MetaflowS3URLException("Can't get_all() when S3 is initialized without a prefix")
else:
return self.get_recursive([None])
|
def put(self, key, obj, overwrite=True):
'\n Put an object to S3.\n\n Args:\n key: (required) suffix for the object.\n obj: (required) a bytes, string, or a unicode object to \n be stored in S3.\n overwrite: (optional) overwrites the key with obj, if it exists\n\n Returns:\n an S3 URL corresponding to the object stored.\n '
if (not is_stringish(obj)):
raise MetaflowS3InvalidObject(("Object corresponding to the key '%s' is not a string or a bytes object." % key))
url = self._url(key)
src = urlparse(url)
def _upload(s3, tmp):
blob = to_fileobj(obj)
s3.upload_fileobj(blob, src.netloc, src.path.lstrip('/'))
if overwrite:
self._one_boto_op(_upload, url)
return url
else:
def _head(s3, tmp):
s3.head_object(Bucket=src.netloc, Key=src.path.lstrip('/'))
try:
self._one_boto_op(_head, url)
except MetaflowS3NotFound as err:
self._one_boto_op(_upload, url)
return url
| 6,599,148,957,707,137,000
|
Put an object to S3.
Args:
key: (required) suffix for the object.
obj: (required) a bytes, string, or a unicode object to
be stored in S3.
overwrite: (optional) overwrites the key with obj, if it exists
Returns:
an S3 URL corresponding to the object stored.
|
metaflow/datatools/s3.py
|
put
|
anthonypreza/metaflow
|
python
|
def put(self, key, obj, overwrite=True):
'\n Put an object to S3.\n\n Args:\n key: (required) suffix for the object.\n obj: (required) a bytes, string, or a unicode object to \n be stored in S3.\n overwrite: (optional) overwrites the key with obj, if it exists\n\n Returns:\n an S3 URL corresponding to the object stored.\n '
if (not is_stringish(obj)):
raise MetaflowS3InvalidObject(("Object corresponding to the key '%s' is not a string or a bytes object." % key))
url = self._url(key)
src = urlparse(url)
def _upload(s3, tmp):
blob = to_fileobj(obj)
s3.upload_fileobj(blob, src.netloc, src.path.lstrip('/'))
if overwrite:
self._one_boto_op(_upload, url)
return url
else:
def _head(s3, tmp):
s3.head_object(Bucket=src.netloc, Key=src.path.lstrip('/'))
try:
self._one_boto_op(_head, url)
except MetaflowS3NotFound as err:
self._one_boto_op(_upload, url)
return url
|
def put_many(self, key_objs, overwrite=True):
'\n Put objects to S3 in parallel.\n\n Args:\n key_objs: (required) an iterator of (key, value) tuples. Value must\n be a string, bytes, or a unicode object.\n overwrite: (optional) overwrites the key with obj, if it exists\n\n Returns:\n a list of (key, S3 URL) tuples corresponding to the files sent.\n '
def _store():
for (key, obj) in key_objs:
if is_stringish(obj):
with NamedTemporaryFile(dir=self._tmpdir, delete=False, mode='wb', prefix='metaflow.s3.put_many.') as tmp:
tmp.write(to_bytes(obj))
tmp.close()
(yield (tmp.name, self._url(key), key))
else:
raise MetaflowS3InvalidObject(("Object corresponding to the key '%s' is not a string or a bytes object." % key))
return self._put_many_files(_store(), overwrite)
| -338,204,776,939,149,250
|
Put objects to S3 in parallel.
Args:
key_objs: (required) an iterator of (key, value) tuples. Value must
be a string, bytes, or a unicode object.
overwrite: (optional) overwrites the key with obj, if it exists
Returns:
a list of (key, S3 URL) tuples corresponding to the files sent.
|
metaflow/datatools/s3.py
|
put_many
|
anthonypreza/metaflow
|
python
|
def put_many(self, key_objs, overwrite=True):
'\n Put objects to S3 in parallel.\n\n Args:\n key_objs: (required) an iterator of (key, value) tuples. Value must\n be a string, bytes, or a unicode object.\n overwrite: (optional) overwrites the key with obj, if it exists\n\n Returns:\n a list of (key, S3 URL) tuples corresponding to the files sent.\n '
def _store():
for (key, obj) in key_objs:
if is_stringish(obj):
with NamedTemporaryFile(dir=self._tmpdir, delete=False, mode='wb', prefix='metaflow.s3.put_many.') as tmp:
tmp.write(to_bytes(obj))
tmp.close()
(yield (tmp.name, self._url(key), key))
else:
raise MetaflowS3InvalidObject(("Object corresponding to the key '%s' is not a string or a bytes object." % key))
return self._put_many_files(_store(), overwrite)
|
def put_files(self, key_paths, overwrite=True):
'\n Put files to S3 in parallel.\n\n Args:\n key_paths: (required) an iterator of (key, path) tuples.\n overwrite: (optional) overwrites the key with obj, if it exists\n\n Returns:\n a list of (key, S3 URL) tuples corresponding to the files sent.\n '
def _check():
for (key, path) in key_paths:
if (not os.path.exists(path)):
raise MetaflowS3NotFound(('Local file not found: %s' % path))
(yield (path, self._url(key), key))
return self._put_many_files(_check(), overwrite)
| 7,870,756,319,350,997,000
|
Put files to S3 in parallel.
Args:
key_paths: (required) an iterator of (key, path) tuples.
overwrite: (optional) overwrites the key with obj, if it exists
Returns:
a list of (key, S3 URL) tuples corresponding to the files sent.
|
metaflow/datatools/s3.py
|
put_files
|
anthonypreza/metaflow
|
python
|
def put_files(self, key_paths, overwrite=True):
'\n Put files to S3 in parallel.\n\n Args:\n key_paths: (required) an iterator of (key, path) tuples.\n overwrite: (optional) overwrites the key with obj, if it exists\n\n Returns:\n a list of (key, S3 URL) tuples corresponding to the files sent.\n '
def _check():
for (key, path) in key_paths:
if (not os.path.exists(path)):
raise MetaflowS3NotFound(('Local file not found: %s' % path))
(yield (path, self._url(key), key))
return self._put_many_files(_check(), overwrite)
|
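The three upload helpers above differ only in what they accept; a hedged sketch with placeholder keys and local paths.
from metaflow import S3

with S3(s3root='s3://my-bucket/output') as s3:
    s3.put('single.txt', 'one in-memory object')             # a single string/bytes value
    s3.put_many([('a.txt', 'first'), ('b.txt', b'second')])  # many (key, value) pairs in parallel
    s3.put_files([('report.csv', '/tmp/report.csv')])        # many (key, local path) pairs in parallel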
def _mount_config_map_op(config_map_name: Text) -> OpFunc:
'Mounts all key-value pairs found in the named Kubernetes ConfigMap.\n\n All key-value pairs in the ConfigMap are mounted as environment variables.\n\n Args:\n config_map_name: The name of the ConfigMap resource.\n\n Returns:\n An OpFunc for mounting the ConfigMap.\n '
def mount_config_map(container_op: dsl.ContainerOp):
config_map_ref = k8s_client.V1ConfigMapEnvSource(name=config_map_name, optional=True)
container_op.container.add_env_from(k8s_client.V1EnvFromSource(config_map_ref=config_map_ref))
return mount_config_map
| 411,682,323,971,103,200
|
Mounts all key-value pairs found in the named Kubernetes ConfigMap.
All key-value pairs in the ConfigMap are mounted as environment variables.
Args:
config_map_name: The name of the ConfigMap resource.
Returns:
An OpFunc for mounting the ConfigMap.
|
tfx/orchestration/kubeflow/kubeflow_dag_runner.py
|
_mount_config_map_op
|
TimoKerr/tfx
|
python
|
def _mount_config_map_op(config_map_name: Text) -> OpFunc:
'Mounts all key-value pairs found in the named Kubernetes ConfigMap.\n\n All key-value pairs in the ConfigMap are mounted as environment variables.\n\n Args:\n config_map_name: The name of the ConfigMap resource.\n\n Returns:\n An OpFunc for mounting the ConfigMap.\n '
def mount_config_map(container_op: dsl.ContainerOp):
config_map_ref = k8s_client.V1ConfigMapEnvSource(name=config_map_name, optional=True)
container_op.container.add_env_from(k8s_client.V1EnvFromSource(config_map_ref=config_map_ref))
return mount_config_map
|
def _mount_secret_op(secret_name: Text) -> OpFunc:
'Mounts all key-value pairs found in the named Kubernetes Secret.\n\n All key-value pairs in the Secret are mounted as environment variables.\n\n Args:\n secret_name: The name of the Secret resource.\n\n Returns:\n An OpFunc for mounting the Secret.\n '
def mount_secret(container_op: dsl.ContainerOp):
secret_ref = k8s_client.V1ConfigMapEnvSource(name=secret_name, optional=True)
container_op.container.add_env_from(k8s_client.V1EnvFromSource(secret_ref=secret_ref))
return mount_secret
| 2,268,967,825,270,047,500
|
Mounts all key-value pairs found in the named Kubernetes Secret.
All key-value pairs in the Secret are mounted as environment variables.
Args:
secret_name: The name of the Secret resource.
Returns:
An OpFunc for mounting the Secret.
|
tfx/orchestration/kubeflow/kubeflow_dag_runner.py
|
_mount_secret_op
|
TimoKerr/tfx
|
python
|
def _mount_secret_op(secret_name: Text) -> OpFunc:
'Mounts all key-value pairs found in the named Kubernetes Secret.\n\n All key-value pairs in the Secret are mounted as environment variables.\n\n Args:\n secret_name: The name of the Secret resource.\n\n Returns:\n An OpFunc for mounting the Secret.\n '
def mount_secret(container_op: dsl.ContainerOp):
secret_ref = k8s_client.V1ConfigMapEnvSource(name=secret_name, optional=True)
container_op.container.add_env_from(k8s_client.V1EnvFromSource(secret_ref=secret_ref))
return mount_secret
|
def get_default_pipeline_operator_funcs(use_gcp_sa: bool=False) -> List[OpFunc]:
'Returns a default list of pipeline operator functions.\n\n Args:\n use_gcp_sa: If true, mount a GCP service account secret to each pod, with\n the name _KUBEFLOW_GCP_SECRET_NAME.\n\n Returns:\n A list of functions with type OpFunc.\n '
gcp_secret_op = gcp.use_gcp_secret(_KUBEFLOW_GCP_SECRET_NAME)
mount_config_map_op = _mount_config_map_op('metadata-grpc-configmap')
if use_gcp_sa:
return [gcp_secret_op, mount_config_map_op]
else:
return [mount_config_map_op]
| -5,693,614,598,524,444,000
|
Returns a default list of pipeline operator functions.
Args:
use_gcp_sa: If true, mount a GCP service account secret to each pod, with
the name _KUBEFLOW_GCP_SECRET_NAME.
Returns:
A list of functions with type OpFunc.
|
tfx/orchestration/kubeflow/kubeflow_dag_runner.py
|
get_default_pipeline_operator_funcs
|
TimoKerr/tfx
|
python
|
def get_default_pipeline_operator_funcs(use_gcp_sa: bool=False) -> List[OpFunc]:
'Returns a default list of pipeline operator functions.\n\n Args:\n use_gcp_sa: If true, mount a GCP service account secret to each pod, with\n the name _KUBEFLOW_GCP_SECRET_NAME.\n\n Returns:\n A list of functions with type OpFunc.\n '
gcp_secret_op = gcp.use_gcp_secret(_KUBEFLOW_GCP_SECRET_NAME)
mount_config_map_op = _mount_config_map_op('metadata-grpc-configmap')
if use_gcp_sa:
return [gcp_secret_op, mount_config_map_op]
else:
return [mount_config_map_op]
|
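A hedged sketch of extending the default operator functions before building a runner config; the PVC mount is a placeholder, and any callable that takes a dsl.ContainerOp would work as an extra OpFunc.
from kfp import onprem
from tfx.orchestration.kubeflow import kubeflow_dag_runner

op_funcs = kubeflow_dag_runner.get_default_pipeline_operator_funcs(use_gcp_sa=False)
op_funcs.append(onprem.mount_pvc('my-pvc', 'my-volume', '/mnt/data'))  # placeholder PVC mount
config = kubeflow_dag_runner.KubeflowDagRunnerConfig(pipeline_operator_funcs=op_funcs)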
def get_default_kubeflow_metadata_config() -> kubeflow_pb2.KubeflowMetadataConfig:
'Returns the default metadata connection config for Kubeflow.\n\n Returns:\n A config proto that will be serialized as JSON and passed to the running\n container so the TFX component driver is able to communicate with MLMD in\n a Kubeflow cluster.\n '
config = kubeflow_pb2.KubeflowMetadataConfig()
config.grpc_config.grpc_service_host.environment_variable = 'METADATA_GRPC_SERVICE_HOST'
config.grpc_config.grpc_service_port.environment_variable = 'METADATA_GRPC_SERVICE_PORT'
return config
| -3,431,900,069,264,442,000
|
Returns the default metadata connection config for Kubeflow.
Returns:
A config proto that will be serialized as JSON and passed to the running
container so the TFX component driver is able to communicate with MLMD in
a Kubeflow cluster.
|
tfx/orchestration/kubeflow/kubeflow_dag_runner.py
|
get_default_kubeflow_metadata_config
|
TimoKerr/tfx
|
python
|
def get_default_kubeflow_metadata_config() -> kubeflow_pb2.KubeflowMetadataConfig:
'Returns the default metadata connection config for Kubeflow.\n\n Returns:\n A config proto that will be serialized as JSON and passed to the running\n container so the TFX component driver is able to communicate with MLMD in\n a Kubeflow cluster.\n '
config = kubeflow_pb2.KubeflowMetadataConfig()
config.grpc_config.grpc_service_host.environment_variable = 'METADATA_GRPC_SERVICE_HOST'
config.grpc_config.grpc_service_port.environment_variable = 'METADATA_GRPC_SERVICE_PORT'
return config
|
def get_default_pod_labels() -> Dict[(Text, Text)]:
'Returns the default pod label dict for Kubeflow.'
result = {'add-pod-env': 'true', telemetry_utils.LABEL_KFP_SDK_ENV: 'tfx'}
return result
| -1,403,425,279,285,681,400
|
Returns the default pod label dict for Kubeflow.
|
tfx/orchestration/kubeflow/kubeflow_dag_runner.py
|
get_default_pod_labels
|
TimoKerr/tfx
|
python
|
def get_default_pod_labels() -> Dict[(Text, Text)]:
result = {'add-pod-env': 'true', telemetry_utils.LABEL_KFP_SDK_ENV: 'tfx'}
return result
|
def __init__(self, pipeline_operator_funcs: Optional[List[OpFunc]]=None, tfx_image: Optional[Text]=None, kubeflow_metadata_config: Optional[kubeflow_pb2.KubeflowMetadataConfig]=None, supported_launcher_classes: List[Type[base_component_launcher.BaseComponentLauncher]]=None, **kwargs):
'Creates a KubeflowDagRunnerConfig object.\n\n The user can use pipeline_operator_funcs to apply modifications to\n ContainerOps used in the pipeline. For example, to ensure the pipeline\n steps mount a GCP secret, and a Persistent Volume, one can create config\n object like so:\n\n from kfp import gcp, onprem\n mount_secret_op = gcp.use_secret(\'my-secret-name)\n mount_volume_op = onprem.mount_pvc(\n "my-persistent-volume-claim",\n "my-volume-name",\n "/mnt/volume-mount-path")\n\n config = KubeflowDagRunnerConfig(\n pipeline_operator_funcs=[mount_secret_op, mount_volume_op]\n )\n\n Args:\n pipeline_operator_funcs: A list of ContainerOp modifying functions that\n will be applied to every container step in the pipeline.\n tfx_image: The TFX container image to use in the pipeline.\n kubeflow_metadata_config: Runtime configuration to use to connect to\n Kubeflow metadata.\n supported_launcher_classes: A list of component launcher classes that are\n supported by the current pipeline. List sequence determines the order in\n which launchers are chosen for each component being run.\n **kwargs: keyword args for PipelineConfig.\n '
supported_launcher_classes = (supported_launcher_classes or [in_process_component_launcher.InProcessComponentLauncher, kubernetes_component_launcher.KubernetesComponentLauncher])
super(KubeflowDagRunnerConfig, self).__init__(supported_launcher_classes=supported_launcher_classes, **kwargs)
self.pipeline_operator_funcs = (pipeline_operator_funcs or get_default_pipeline_operator_funcs())
self.tfx_image = (tfx_image or DEFAULT_KUBEFLOW_TFX_IMAGE)
self.kubeflow_metadata_config = (kubeflow_metadata_config or get_default_kubeflow_metadata_config())
| -5,085,960,655,409,913,000
|
Creates a KubeflowDagRunnerConfig object.
The user can use pipeline_operator_funcs to apply modifications to
ContainerOps used in the pipeline. For example, to ensure the pipeline
steps mount a GCP secret and a Persistent Volume, one can create a config
object like so:
from kfp import gcp, onprem
mount_secret_op = gcp.use_secret('my-secret-name')
mount_volume_op = onprem.mount_pvc(
"my-persistent-volume-claim",
"my-volume-name",
"/mnt/volume-mount-path")
config = KubeflowDagRunnerConfig(
pipeline_operator_funcs=[mount_secret_op, mount_volume_op]
)
Args:
pipeline_operator_funcs: A list of ContainerOp modifying functions that
will be applied to every container step in the pipeline.
tfx_image: The TFX container image to use in the pipeline.
kubeflow_metadata_config: Runtime configuration to use to connect to
Kubeflow metadata.
supported_launcher_classes: A list of component launcher classes that are
supported by the current pipeline. List sequence determines the order in
which launchers are chosen for each component being run.
**kwargs: keyword args for PipelineConfig.
|
tfx/orchestration/kubeflow/kubeflow_dag_runner.py
|
__init__
|
TimoKerr/tfx
|
python
|
def __init__(self, pipeline_operator_funcs: Optional[List[OpFunc]]=None, tfx_image: Optional[Text]=None, kubeflow_metadata_config: Optional[kubeflow_pb2.KubeflowMetadataConfig]=None, supported_launcher_classes: List[Type[base_component_launcher.BaseComponentLauncher]]=None, **kwargs):
'Creates a KubeflowDagRunnerConfig object.\n\n The user can use pipeline_operator_funcs to apply modifications to\n ContainerOps used in the pipeline. For example, to ensure the pipeline\n steps mount a GCP secret, and a Persistent Volume, one can create config\n object like so:\n\n from kfp import gcp, onprem\n mount_secret_op = gcp.use_secret(\'my-secret-name)\n mount_volume_op = onprem.mount_pvc(\n "my-persistent-volume-claim",\n "my-volume-name",\n "/mnt/volume-mount-path")\n\n config = KubeflowDagRunnerConfig(\n pipeline_operator_funcs=[mount_secret_op, mount_volume_op]\n )\n\n Args:\n pipeline_operator_funcs: A list of ContainerOp modifying functions that\n will be applied to every container step in the pipeline.\n tfx_image: The TFX container image to use in the pipeline.\n kubeflow_metadata_config: Runtime configuration to use to connect to\n Kubeflow metadata.\n supported_launcher_classes: A list of component launcher classes that are\n supported by the current pipeline. List sequence determines the order in\n which launchers are chosen for each component being run.\n **kwargs: keyword args for PipelineConfig.\n '
supported_launcher_classes = (supported_launcher_classes or [in_process_component_launcher.InProcessComponentLauncher, kubernetes_component_launcher.KubernetesComponentLauncher])
super(KubeflowDagRunnerConfig, self).__init__(supported_launcher_classes=supported_launcher_classes, **kwargs)
self.pipeline_operator_funcs = (pipeline_operator_funcs or get_default_pipeline_operator_funcs())
self.tfx_image = (tfx_image or DEFAULT_KUBEFLOW_TFX_IMAGE)
self.kubeflow_metadata_config = (kubeflow_metadata_config or get_default_kubeflow_metadata_config())
|
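Beyond operator functions, the config also carries the container image and the metadata connection settings; a hedged sketch with a placeholder image name.
from tfx.orchestration.kubeflow import kubeflow_dag_runner

metadata_config = kubeflow_dag_runner.get_default_kubeflow_metadata_config()
config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
    tfx_image='gcr.io/my-project/my-tfx-image',   # placeholder image
    kubeflow_metadata_config=metadata_config)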
def __init__(self, output_dir: Optional[Text]=None, output_filename: Optional[Text]=None, config: Optional[KubeflowDagRunnerConfig]=None, pod_labels_to_attach: Optional[Dict[(Text, Text)]]=None):
'Initializes KubeflowDagRunner for compiling a Kubeflow Pipeline.\n\n Args:\n output_dir: An optional output directory into which to output the pipeline\n definition files. Defaults to the current working directory.\n output_filename: An optional output file name for the pipeline definition\n file. Defaults to pipeline_name.tar.gz when compiling a TFX pipeline.\n Currently supports .tar.gz, .tgz, .zip, .yaml, .yml formats. See\n https://github.com/kubeflow/pipelines/blob/181de66cf9fa87bcd0fe9291926790c400140783/sdk/python/kfp/compiler/compiler.py#L851\n for format restriction.\n config: An optional KubeflowDagRunnerConfig object to specify runtime\n configuration when running the pipeline under Kubeflow.\n pod_labels_to_attach: Optional set of pod labels to attach to GKE pod\n spinned up for this pipeline. Default to the 3 labels:\n 1. add-pod-env: true,\n 2. pipeline SDK type,\n 3. pipeline unique ID,\n where 2 and 3 are instrumentation of usage tracking.\n '
if (config and (not isinstance(config, KubeflowDagRunnerConfig))):
raise TypeError('config must be type of KubeflowDagRunnerConfig.')
super(KubeflowDagRunner, self).__init__((config or KubeflowDagRunnerConfig()))
self._config = cast(KubeflowDagRunnerConfig, self._config)
self._output_dir = (output_dir or os.getcwd())
self._output_filename = output_filename
self._compiler = compiler.Compiler()
self._tfx_compiler = tfx_compiler.Compiler()
self._params = []
self._deduped_parameter_names = set()
if (pod_labels_to_attach is None):
self._pod_labels_to_attach = get_default_pod_labels()
else:
self._pod_labels_to_attach = pod_labels_to_attach
| 3,617,570,464,397,079,000
|
Initializes KubeflowDagRunner for compiling a Kubeflow Pipeline.
Args:
output_dir: An optional output directory into which to output the pipeline
definition files. Defaults to the current working directory.
output_filename: An optional output file name for the pipeline definition
file. Defaults to pipeline_name.tar.gz when compiling a TFX pipeline.
Currently supports .tar.gz, .tgz, .zip, .yaml, .yml formats. See
https://github.com/kubeflow/pipelines/blob/181de66cf9fa87bcd0fe9291926790c400140783/sdk/python/kfp/compiler/compiler.py#L851
for format restriction.
config: An optional KubeflowDagRunnerConfig object to specify runtime
configuration when running the pipeline under Kubeflow.
pod_labels_to_attach: Optional set of pod labels to attach to GKE pods
spun up for this pipeline. Defaults to the 3 labels:
1. add-pod-env: true,
2. pipeline SDK type,
3. pipeline unique ID,
where 2 and 3 are instrumentation for usage tracking.
|
tfx/orchestration/kubeflow/kubeflow_dag_runner.py
|
__init__
|
TimoKerr/tfx
|
python
|
def __init__(self, output_dir: Optional[Text]=None, output_filename: Optional[Text]=None, config: Optional[KubeflowDagRunnerConfig]=None, pod_labels_to_attach: Optional[Dict[(Text, Text)]]=None):
'Initializes KubeflowDagRunner for compiling a Kubeflow Pipeline.\n\n Args:\n output_dir: An optional output directory into which to output the pipeline\n definition files. Defaults to the current working directory.\n output_filename: An optional output file name for the pipeline definition\n file. Defaults to pipeline_name.tar.gz when compiling a TFX pipeline.\n Currently supports .tar.gz, .tgz, .zip, .yaml, .yml formats. See\n https://github.com/kubeflow/pipelines/blob/181de66cf9fa87bcd0fe9291926790c400140783/sdk/python/kfp/compiler/compiler.py#L851\n for format restriction.\n config: An optional KubeflowDagRunnerConfig object to specify runtime\n configuration when running the pipeline under Kubeflow.\n pod_labels_to_attach: Optional set of pod labels to attach to GKE pod\n spinned up for this pipeline. Default to the 3 labels:\n 1. add-pod-env: true,\n 2. pipeline SDK type,\n 3. pipeline unique ID,\n where 2 and 3 are instrumentation of usage tracking.\n '
if (config and (not isinstance(config, KubeflowDagRunnerConfig))):
raise TypeError('config must be type of KubeflowDagRunnerConfig.')
super(KubeflowDagRunner, self).__init__((config or KubeflowDagRunnerConfig()))
self._config = cast(KubeflowDagRunnerConfig, self._config)
self._output_dir = (output_dir or os.getcwd())
self._output_filename = output_filename
self._compiler = compiler.Compiler()
self._tfx_compiler = tfx_compiler.Compiler()
self._params = []
self._deduped_parameter_names = set()
if (pod_labels_to_attach is None):
self._pod_labels_to_attach = get_default_pod_labels()
else:
self._pod_labels_to_attach = pod_labels_to_attach
|
def _parse_parameter_from_component(self, component: base_component.BaseComponent) -> None:
'Extract embedded RuntimeParameter placeholders from a component.\n\n Extract embedded RuntimeParameter placeholders from a component, then append\n the corresponding dsl.PipelineParam to KubeflowDagRunner.\n\n Args:\n component: a TFX component.\n '
serialized_component = json_utils.dumps(component)
placeholders = re.findall(data_types.RUNTIME_PARAMETER_PATTERN, serialized_component)
for placeholder in placeholders:
placeholder = placeholder.replace('\\', '')
placeholder = utils.fix_brackets(placeholder)
parameter = json_utils.loads(placeholder)
if (parameter.name == tfx_pipeline.ROOT_PARAMETER.name):
continue
if (parameter.name not in self._deduped_parameter_names):
self._deduped_parameter_names.add(parameter.name)
dsl_parameter = dsl.PipelineParam(name=parameter.name, value=str(parameter.default))
self._params.append(dsl_parameter)
| 5,823,248,919,543,096,000
|
Extract embedded RuntimeParameter placeholders from a component.
Extract embedded RuntimeParameter placeholders from a component, then append
the corresponding dsl.PipelineParam to KubeflowDagRunner.
Args:
component: a TFX component.
|
tfx/orchestration/kubeflow/kubeflow_dag_runner.py
|
_parse_parameter_from_component
|
TimoKerr/tfx
|
python
|
def _parse_parameter_from_component(self, component: base_component.BaseComponent) -> None:
'Extract embedded RuntimeParameter placeholders from a component.\n\n Extract embedded RuntimeParameter placeholders from a component, then append\n the corresponding dsl.PipelineParam to KubeflowDagRunner.\n\n Args:\n component: a TFX component.\n '
serialized_component = json_utils.dumps(component)
placeholders = re.findall(data_types.RUNTIME_PARAMETER_PATTERN, serialized_component)
for placeholder in placeholders:
placeholder = placeholder.replace('\\', '')
placeholder = utils.fix_brackets(placeholder)
parameter = json_utils.loads(placeholder)
if (parameter.name == tfx_pipeline.ROOT_PARAMETER.name):
continue
if (parameter.name not in self._deduped_parameter_names):
self._deduped_parameter_names.add(parameter.name)
dsl_parameter = dsl.PipelineParam(name=parameter.name, value=str(parameter.default))
self._params.append(dsl_parameter)
|
def _parse_parameter_from_pipeline(self, pipeline: tfx_pipeline.Pipeline) -> None:
'Extract all the RuntimeParameter placeholders from the pipeline.'
for component in pipeline.components:
self._parse_parameter_from_component(component)
| -1,081,928,389,239,006,700
|
Extract all the RuntimeParameter placeholders from the pipeline.
|
tfx/orchestration/kubeflow/kubeflow_dag_runner.py
|
_parse_parameter_from_pipeline
|
TimoKerr/tfx
|
python
|
def _parse_parameter_from_pipeline(self, pipeline: tfx_pipeline.Pipeline) -> None:
for component in pipeline.components:
self._parse_parameter_from_component(component)
|
def _construct_pipeline_graph(self, pipeline: tfx_pipeline.Pipeline, pipeline_root: dsl.PipelineParam):
'Constructs a Kubeflow Pipeline graph.\n\n Args:\n pipeline: The logical TFX pipeline to base the construction on.\n pipeline_root: dsl.PipelineParam representing the pipeline root.\n '
component_to_kfp_op = {}
tfx_ir = self._generate_tfx_ir(pipeline)
for component in pipeline.components:
depends_on = set()
for upstream_component in component.upstream_nodes:
depends_on.add(component_to_kfp_op[upstream_component])
kfp_component = base_component.BaseComponent(component=component, depends_on=depends_on, pipeline=pipeline, pipeline_root=pipeline_root, tfx_image=self._config.tfx_image, kubeflow_metadata_config=self._config.kubeflow_metadata_config, pod_labels_to_attach=self._pod_labels_to_attach, tfx_ir=tfx_ir)
for operator in self._config.pipeline_operator_funcs:
kfp_component.container_op.apply(operator)
component_to_kfp_op[component] = kfp_component.container_op
| -9,222,476,127,377,449,000
|
Constructs a Kubeflow Pipeline graph.
Args:
pipeline: The logical TFX pipeline to base the construction on.
pipeline_root: dsl.PipelineParam representing the pipeline root.
|
tfx/orchestration/kubeflow/kubeflow_dag_runner.py
|
_construct_pipeline_graph
|
TimoKerr/tfx
|
python
|
def _construct_pipeline_graph(self, pipeline: tfx_pipeline.Pipeline, pipeline_root: dsl.PipelineParam):
'Constructs a Kubeflow Pipeline graph.\n\n Args:\n pipeline: The logical TFX pipeline to base the construction on.\n pipeline_root: dsl.PipelineParam representing the pipeline root.\n '
component_to_kfp_op = {}
tfx_ir = self._generate_tfx_ir(pipeline)
for component in pipeline.components:
depends_on = set()
for upstream_component in component.upstream_nodes:
depends_on.add(component_to_kfp_op[upstream_component])
kfp_component = base_component.BaseComponent(component=component, depends_on=depends_on, pipeline=pipeline, pipeline_root=pipeline_root, tfx_image=self._config.tfx_image, kubeflow_metadata_config=self._config.kubeflow_metadata_config, pod_labels_to_attach=self._pod_labels_to_attach, tfx_ir=tfx_ir)
for operator in self._config.pipeline_operator_funcs:
kfp_component.container_op.apply(operator)
component_to_kfp_op[component] = kfp_component.container_op
|
def run(self, pipeline: tfx_pipeline.Pipeline):
'Compiles and outputs a Kubeflow Pipeline YAML definition file.\n\n Args:\n pipeline: The logical TFX pipeline to use when building the Kubeflow\n pipeline.\n '
for component in pipeline.components:
if isinstance(component, tfx_base_component.BaseComponent):
component._resolve_pip_dependencies(pipeline.pipeline_info.pipeline_root)
dsl_pipeline_root = dsl.PipelineParam(name=tfx_pipeline.ROOT_PARAMETER.name, value=pipeline.pipeline_info.pipeline_root)
self._params.append(dsl_pipeline_root)
def _construct_pipeline():
'Constructs a Kubeflow pipeline.\n\n Creates Kubeflow ContainerOps for each TFX component encountered in the\n logical pipeline definition.\n '
self._construct_pipeline_graph(pipeline, dsl_pipeline_root)
self._parse_parameter_from_pipeline(pipeline)
file_name = (self._output_filename or get_default_output_filename(pipeline.pipeline_info.pipeline_name))
self._compiler._create_and_write_workflow(pipeline_func=_construct_pipeline, pipeline_name=pipeline.pipeline_info.pipeline_name, params_list=self._params, package_path=os.path.join(self._output_dir, file_name))
| 8,106,389,141,127,631,000
|
Compiles and outputs a Kubeflow Pipeline YAML definition file.
Args:
pipeline: The logical TFX pipeline to use when building the Kubeflow
pipeline.
|
tfx/orchestration/kubeflow/kubeflow_dag_runner.py
|
run
|
TimoKerr/tfx
|
python
|
def run(self, pipeline: tfx_pipeline.Pipeline):
'Compiles and outputs a Kubeflow Pipeline YAML definition file.\n\n Args:\n pipeline: The logical TFX pipeline to use when building the Kubeflow\n pipeline.\n '
for component in pipeline.components:
if isinstance(component, tfx_base_component.BaseComponent):
component._resolve_pip_dependencies(pipeline.pipeline_info.pipeline_root)
dsl_pipeline_root = dsl.PipelineParam(name=tfx_pipeline.ROOT_PARAMETER.name, value=pipeline.pipeline_info.pipeline_root)
self._params.append(dsl_pipeline_root)
def _construct_pipeline():
'Constructs a Kubeflow pipeline.\n\n Creates Kubeflow ContainerOps for each TFX component encountered in the\n logical pipeline definition.\n '
self._construct_pipeline_graph(pipeline, dsl_pipeline_root)
self._parse_parameter_from_pipeline(pipeline)
file_name = (self._output_filename or get_default_output_filename(pipeline.pipeline_info.pipeline_name))
self._compiler._create_and_write_workflow(pipeline_func=_construct_pipeline, pipeline_name=pipeline.pipeline_info.pipeline_name, params_list=self._params, package_path=os.path.join(self._output_dir, file_name))
|
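Putting the pieces together, compiling a pipeline definition looks roughly like the following; create_pipeline() is a placeholder for a function returning a TFX Pipeline object, and the output file name is arbitrary.
from tfx.orchestration.kubeflow import kubeflow_dag_runner

runner = kubeflow_dag_runner.KubeflowDagRunner(
    output_dir='.',
    output_filename='my_pipeline.tar.gz',                   # placeholder file name
    config=kubeflow_dag_runner.KubeflowDagRunnerConfig())
runner.run(create_pipeline())                               # writes the Kubeflow package to disk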
def _construct_pipeline():
'Constructs a Kubeflow pipeline.\n\n Creates Kubeflow ContainerOps for each TFX component encountered in the\n logical pipeline definition.\n '
self._construct_pipeline_graph(pipeline, dsl_pipeline_root)
| 7,665,502,271,515,047,000
|
Constructs a Kubeflow pipeline.
Creates Kubeflow ContainerOps for each TFX component encountered in the
logical pipeline definition.
|
tfx/orchestration/kubeflow/kubeflow_dag_runner.py
|
_construct_pipeline
|
TimoKerr/tfx
|
python
|
def _construct_pipeline():
'Constructs a Kubeflow pipeline.\n\n Creates Kubeflow ContainerOps for each TFX component encountered in the\n logical pipeline definition.\n '
self._construct_pipeline_graph(pipeline, dsl_pipeline_root)
|
def is_zh(in_str):
'\n If converting the string to SJIS reduces the number of characters, simplified Chinese characters are present, so the text is Chinese.\n '
return ((set(in_str) - set(in_str.encode('sjis', 'ignore').decode('sjis'))) != set([]))
| -7,672,488,910,888,925,000
|
If converting the string to SJIS reduces the number of characters, simplified Chinese characters are present, so the text is Chinese.
|
code/exp/v18.py
|
is_zh
|
okotaku/pet_finder
|
python
|
def is_zh(in_str):
'\n \n '
return ((set(in_str) - set(in_str.encode('sjis', 'ignore').decode('sjis'))) != set([]))
|
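The heuristic relies on Shift-JIS lacking simplified Chinese characters: if any character disappears in an SJIS round trip, the text is flagged as Chinese. A quick check, assuming is_zh from the snippet above is in scope:
print(is_zh('日本語のテキスト'))  # False: every character survives the SJIS round trip
print(is_zh('简体字'))            # True: '简' is dropped by SJIS, so the set difference is non-empty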
def fit(self, X):
'\n Parameters\n ----------\n X : sparse matrix, [n_samples, n_features] document-term matrix\n '
if (not sp.sparse.issparse(X)):
X = sp.sparse.csc_matrix(X)
if self.use_idf:
(n_samples, n_features) = X.shape
df = _document_frequency(X)
idf = np.log((((n_samples - df) + 0.5) / (df + 0.5)))
self._idf_diag = sp.sparse.spdiags(idf, diags=0, m=n_features, n=n_features)
doc_len = X.sum(axis=1)
self._average_document_len = np.average(doc_len)
return self
| 1,019,888,090,101,431,300
|
Parameters
----------
X : sparse matrix, [n_samples, n_features] document-term matrix
|
code/exp/v18.py
|
fit
|
okotaku/pet_finder
|
python
|
def fit(self, X):
'\n Parameters\n ----------\n X : sparse matrix, [n_samples, n_features] document-term matrix\n '
if (not sp.sparse.issparse(X)):
X = sp.sparse.csc_matrix(X)
if self.use_idf:
(n_samples, n_features) = X.shape
df = _document_frequency(X)
idf = np.log((((n_samples - df) + 0.5) / (df + 0.5)))
self._idf_diag = sp.sparse.spdiags(idf, diags=0, m=n_features, n=n_features)
doc_len = X.sum(axis=1)
self._average_document_len = np.average(doc_len)
return self
|
def transform(self, X, copy=True):
'\n Parameters\n ----------\n X : sparse matrix, [n_samples, n_features] document-term matrix\n copy : boolean, optional (default=True)\n '
if (hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float)):
X = sp.sparse.csr_matrix(X, copy=copy)
else:
X = sp.sparse.csr_matrix(X, dtype=np.float, copy=copy)
(n_samples, n_features) = X.shape
doc_len = X.sum(axis=1)
sz = (X.indptr[1:] - X.indptr[0:(- 1)])
rep = np.repeat(np.asarray(doc_len), sz)
nom = (self.k1 + 1)
denom = (X.data + (self.k1 * ((1 - self.b) + ((self.b * rep) / self._average_document_len))))
data = ((X.data * nom) / denom)
X = sp.sparse.csr_matrix((data, X.indices, X.indptr), shape=X.shape)
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if (n_features != expected_n_features):
raise ValueError(('Input has n_features=%d while the model has been trained with n_features=%d' % (n_features, expected_n_features)))
X = (X * self._idf_diag)
return X
| -7,544,926,766,733,184,000
|
Parameters
----------
X : sparse matrix, [n_samples, n_features] document-term matrix
copy : boolean, optional (default=True)
|
code/exp/v18.py
|
transform
|
okotaku/pet_finder
|
python
|
def transform(self, X, copy=True):
'\n Parameters\n ----------\n X : sparse matrix, [n_samples, n_features] document-term matrix\n copy : boolean, optional (default=True)\n '
if (hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float)):
X = sp.sparse.csr_matrix(X, copy=copy)
else:
X = sp.sparse.csr_matrix(X, dtype=np.float, copy=copy)
(n_samples, n_features) = X.shape
doc_len = X.sum(axis=1)
sz = (X.indptr[1:] - X.indptr[0:(- 1)])
rep = np.repeat(np.asarray(doc_len), sz)
nom = (self.k1 + 1)
denom = (X.data + (self.k1 * ((1 - self.b) + ((self.b * rep) / self._average_document_len))))
data = ((X.data * nom) / denom)
X = sp.sparse.csr_matrix((data, X.indices, X.indptr), shape=X.shape)
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if (n_features != expected_n_features):
raise ValueError(('Input has n_features=%d while the model has been trained with n_features=%d' % (n_features, expected_n_features)))
X = (X * self._idf_diag)
return X
|
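The fit/transform pair above applies BM25 weighting (idf = log((N - df + 0.5) / (df + 0.5)), with document-length normalization controlled by k1 and b) to a document-term matrix. A hedged usage sketch follows; the class name BM25Transformer and its constructor arguments are assumptions, since only the methods are visible in this excerpt.
from sklearn.feature_extraction.text import CountVectorizer

docs = ['the cat sat on the mat', 'the dog chased the cat']
counts = CountVectorizer().fit_transform(docs)          # sparse document-term matrix

bm25 = BM25Transformer(use_idf=True, k1=2.0, b=0.75)    # hypothetical wrapper around fit/transform
weighted = bm25.fit(counts).transform(counts)           # BM25-weighted matrix, same shape as counts
print(weighted.shape)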
def train(self, examples):
'\n This function trains the neural network with examples obtained from\n self-play.\n Input:\n examples: a list of training examples, where each example is of form\n (board, pi, v). pi is the MCTS informed policy vector for\n the given board, and v is its value. The examples has\n board in its canonical form.\n '
pass
| -4,471,661,638,472,599,000
|
This function trains the neural network with examples obtained from
self-play.
Input:
examples: a list of training examples, where each example is of form
(board, pi, v). pi is the MCTS informed policy vector for
the given board, and v is its value. The examples has
board in its canonical form.
|
pommerman/NN/neural_net.py
|
train
|
MaxU11/playground
|
python
|
def train(self, examples):
'\n This function trains the neural network with examples obtained from\n self-play.\n Input:\n examples: a list of training examples, where each example is of form\n (board, pi, v). pi is the MCTS informed policy vector for\n the given board, and v is its value. The examples has\n board in its canonical form.\n '
pass
|
def predict(self, board):
'\n Input:\n board: current board in its canonical form.\n Returns:\n pi: a policy vector for the current board- a numpy array of length\n game.getActionSize\n v: a float in [-1,1] that gives the value of the current board\n '
pass
| -8,479,434,058,017,637,000
|
Input:
board: current board in its canonical form.
Returns:
pi: a policy vector for the current board- a numpy array of length
game.getActionSize
v: a float in [-1,1] that gives the value of the current board
|
pommerman/NN/neural_net.py
|
predict
|
MaxU11/playground
|
python
|
def predict(self, board):
'\n Input:\n board: current board in its canonical form.\n Returns:\n pi: a policy vector for the current board- a numpy array of length\n game.getActionSize\n v: a float in [-1,1] that gives the value of the current board\n '
pass
|
def save_checkpoint(self, folder, filename):
'\n Saves the current neural network (with its parameters) in\n folder/filename\n '
pass
| -7,472,453,376,441,475,000
|
Saves the current neural network (with its parameters) in
folder/filename
|
pommerman/NN/neural_net.py
|
save_checkpoint
|
MaxU11/playground
|
python
|
def save_checkpoint(self, folder, filename):
'\n Saves the current neural network (with its parameters) in\n folder/filename\n '
pass
|
def load_checkpoint(self, folder, filename):
'\n Loads parameters of the neural network from folder/filename\n '
pass
| -7,363,140,946,181,195,000
|
Loads parameters of the neural network from folder/filename
|
pommerman/NN/neural_net.py
|
load_checkpoint
|
MaxU11/playground
|
python
|
def load_checkpoint(self, folder, filename):
'\n \n '
pass
|
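The four methods above define an abstract interface for AlphaZero-style self-play training. A minimal concrete sketch follows; the class and the game API (game.getActionSize()) are assumptions based on the docstrings.
import numpy as np

class UniformNet:
    """Baseline that ignores training and always predicts a uniform policy."""

    def __init__(self, game):
        self.action_size = game.getActionSize()

    def train(self, examples):
        pass  # a real network would fit on the (board, pi, v) examples here

    def predict(self, board):
        pi = np.full(self.action_size, 1.0 / self.action_size)  # uniform policy vector
        return pi, 0.0                                           # neutral value estimate

    def save_checkpoint(self, folder, filename):
        pass  # nothing to persist for this baseline

    def load_checkpoint(self, folder, filename):
        pass  # nothing to restore for this baseline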
def load_image(filename, flags=None):
'\n This will call cv2.imread() with the given arguments and convert\n the resulting numpy array to a darknet image\n\n :param filename: Image file name\n :param flags: imread flags\n :return: Given image file as a darknet image\n :rtype: IMAGE\n '
image = cv2.imread(filename, flags)
return array_to_image(image)
| -8,928,047,387,716,222,000
|
This will call cv2.imread() with the given arguments and convert
the resulting numpy array to a darknet image
:param filename: Image file name
:param flags: imread flags
:return: Given image file as a darknet image
:rtype: IMAGE
|
pyyolo/utils.py
|
load_image
|
isarandi/pyyolo
|
python
|
def load_image(filename, flags=None):
'\n This will call cv2.imread() with the given arguments and convert\n the resulting numpy array to a darknet image\n\n :param filename: Image file name\n :param flags: imread flags\n :return: Given image file as a darknet image\n :rtype: IMAGE\n '
image = cv2.imread(filename, flags)
return array_to_image(image)
|
def array_to_image(arr):
'\n Given image with numpy array will be converted to\n darkent image\n Remember to call free_image(im) function after using this image\n\n :rtype: IMAGE\n :param arr: numpy array\n :return: darknet image\n '
data = arr.ctypes.data_as(POINTER(c_ubyte))
im = ndarray_image(data, arr.ctypes.shape, arr.ctypes.strides)
return im
| 4,073,591,681,017,779,000
|
Given image with numpy array will be converted to
darkent image
Remember to call free_image(im) function after using this image
:rtype: IMAGE
:param arr: numpy array
:return: darknet image
|
pyyolo/utils.py
|
array_to_image
|
isarandi/pyyolo
|
python
|
def array_to_image(arr):
'\n Given image with numpy array will be converted to\n darkent image\n Remember to call free_image(im) function after using this image\n\n :rtype: IMAGE\n :param arr: numpy array\n :return: darknet image\n '
data = arr.ctypes.data_as(POINTER(c_ubyte))
im = ndarray_image(data, arr.ctypes.shape, arr.ctypes.strides)
return im
|
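The array_to_image docstring above insists that free_image(im) be called once the darknet image is no longer needed. A hedged usage sketch, assuming pyyolo is installed and that array_to_image is importable from pyyolo.utils as the record path suggests; the exact home of free_image is an assumption based on the detect() body below.

import numpy as np

from pyyolo.utils import array_to_image
from pyyolo.darknet import free_image  # assumed location, as used by detect()

frame = np.zeros((416, 416, 3), dtype=np.uint8)  # stand-in for a decoded video frame
im = array_to_image(frame)                       # wrap the array as a darknet IMAGE
# ... run inference that does not free the image itself ...
free_image(im)                                   # required, per the docstring above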
def detect(net, meta, im, thresh=0.2, hier_thresh=0, nms=0.4):
'\n    Detect the objects in the given image. free_image function is called inside this function.\n    Therefore the input darknet image is not usable after calling this function.\n    :param net:\n    :param meta:\n    :param im:\n    :param thresh:\n    :param hier_thresh:\n    :param nms:\n    :return:\n    '
num = c_int(0)
pnum = pointer(num)
predict_image(net, im)
dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
num = pnum[0]
if nms:
do_nms_sort(dets, num, meta.classes, nms)
res = []
for j in range(num):
for i in range(meta.classes):
if (dets[j].prob[i] > 0):
b = dets[j].bbox
res.append(YoloData(id=i, name=meta.names[i], bbox=BBox((b.x - (b.w / 2.0)), (b.y - (b.h / 2.0)), b.w, b.h, dets[j].prob[i])))
res = sorted(res, key=(lambda x: (- x.bbox.c)))
free_image(im)
free_detections(dets, num)
return res
| -3,912,271,757,855,231,000
|
Detect the objects in the given image. The free_image function is called inside this function,
so the input darknet image is not usable after calling this function.
:param net:
:param meta:
:param im:
:param thresh:
:param hier_thresh:
:param nms:
:return:
|
pyyolo/utils.py
|
detect
|
isarandi/pyyolo
|
python
|
def detect(net, meta, im, thresh=0.2, hier_thresh=0, nms=0.4):
'\n    Detect the objects in the given image. free_image function is called inside this function.\n    Therefore the input darknet image is not usable after calling this function.\n    :param net:\n    :param meta:\n    :param im:\n    :param thresh:\n    :param hier_thresh:\n    :param nms:\n    :return:\n    '
num = c_int(0)
pnum = pointer(num)
predict_image(net, im)
dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
num = pnum[0]
if nms:
do_nms_sort(dets, num, meta.classes, nms)
res = []
for j in range(num):
for i in range(meta.classes):
if (dets[j].prob[i] > 0):
b = dets[j].bbox
res.append(YoloData(id=i, name=meta.names[i], bbox=BBox((b.x - (b.w / 2.0)), (b.y - (b.h / 2.0)), b.w, b.h, dets[j].prob[i])))
res = sorted(res, key=(lambda x: (- x.bbox.c)))
free_image(im)
free_detections(dets, num)
return res
|
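An end-to-end sketch combining the helpers in these records: load a network, load class names, read an image, and detect. The file paths are placeholders, and whether this fork expects str or bytes arguments for load_net is an assumption worth verifying against pyyolo.darknet.

from pyyolo.utils import detect, load_image, load_names, load_net

net = load_net('yolov3.cfg', 'yolov3.weights', clear=False)  # placeholder files
meta = load_names('coco.names')
im = load_image('dog.jpg')          # darknet IMAGE; detect() frees it internally
for obj in detect(net, meta, im, thresh=0.25):
    # results arrive sorted by confidence (bbox.c), highest first
    print(obj.id, obj.name, obj.bbox.x, obj.bbox.y, obj.bbox.w, obj.bbox.h, obj.bbox.c)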
def load_net(cfg_filepath, weights_filepath, clear):
'\n\n :param cfg_filepath: cfg file name\n :param weights_filepath: weights file name\n :param clear: True if you want to clear the weights otherwise False\n :return: darknet network object\n '
return pyyolo.darknet.load_net(cfg_filepath, weights_filepath, clear)
| 8,169,648,081,730,823,000
|
:param cfg_filepath: cfg file name
:param weights_filepath: weights file name
:param clear: True if you want to clear the weights, otherwise False
:return: darknet network object
|
pyyolo/utils.py
|
load_net
|
isarandi/pyyolo
|
python
|
def load_net(cfg_filepath, weights_filepath, clear):
'\n\n :param cfg_filepath: cfg file name\n :param weights_filepath: weights file name\n :param clear: True if you want to clear the weights otherwise False\n :return: darknet network object\n '
return pyyolo.darknet.load_net(cfg_filepath, weights_filepath, clear)
|
def load_meta(meta_filepath):
'\n Recommend using load_names(str) function instead.\n :param meta_filepath: metadata file path\n :return: darknet metadata object\n '
return pyyolo.darknet.load_meta(meta_filepath)
| 725,308,637,335,651,100
|
It is recommended to use the load_names(str) function instead.
:param meta_filepath: metadata file path
:return: darknet metadata object
|
pyyolo/utils.py
|
load_meta
|
isarandi/pyyolo
|
python
|
def load_meta(meta_filepath):
'\n Recommend using load_names(str) function instead.\n :param meta_filepath: metadata file path\n :return: darknet metadata object\n '
return pyyolo.darknet.load_meta(meta_filepath)
|
def load_names(names_filepath):
'\n Loading metadata from data file (eg: coco.data) is a mess as you need to edit that file also by pointing it to the names file.\n Using this function you can directly load the names file as METADATA object.\n\n Older function is still available if you need.\n\n :param names_filepath: Filepath of the names file. Eg: coco.names\n :return: darknet metadata object\n '
data = None
with open(names_filepath) as f:
data = f.readlines()
if (data is None):
raise ValueError(('Names file not found.. %s' % names_filepath))
n_cls = len(data)
p_names = (c_char_p * n_cls)()
for cls in range(n_cls):
name = data[cls].encode('utf-8')
c_name = c_char_p()
c_name.value = name[:(- 1)]
p_names[cls] = c_name
return METADATA(n_cls, cast(p_names, POINTER(c_char_p)))
| 339,022,950,776,337,660
|
Loading metadata from a data file (e.g. coco.data) is a mess, as you also need to edit that file to point it to the names file.
Using this function you can load the names file directly as a METADATA object.
The older function is still available if you need it.
:param names_filepath: Filepath of the names file, e.g. coco.names
:return: darknet metadata object
|
pyyolo/utils.py
|
load_names
|
isarandi/pyyolo
|
python
|
def load_names(names_filepath):
'\n Loading metadata from data file (eg: coco.data) is a mess as you need to edit that file also by pointing it to the names file.\n Using this function you can directly load the names file as METADATA object.\n\n Older function is still available if you need.\n\n :param names_filepath: Filepath of the names file. Eg: coco.names\n :return: darknet metadata object\n '
data = None
with open(names_filepath) as f:
data = f.readlines()
if (data is None):
raise ValueError(('Names file not found.. %s' % names_filepath))
n_cls = len(data)
p_names = (c_char_p * n_cls)()
for cls in range(n_cls):
name = data[cls].encode('utf-8')
c_name = c_char_p()
c_name.value = name[:(- 1)]
p_names[cls] = c_name
return METADATA(n_cls, cast(p_names, POINTER(c_char_p)))
|
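One subtlety in load_names above: readlines() keeps trailing newlines, and name[:(- 1)] drops the last character unconditionally, so a final line without a trailing newline would lose its last letter. A defensive variant of just the parsing step, as a sketch (the ctypes packing would stay as in the original; 'coco.names' is a placeholder path):

with open('coco.names') as f:
    names = [line.rstrip('\n') for line in f if line.strip()]
print(len(names), names[:3])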
def test_rates_limits_list(self):
'\n Test case for rates_limits_list\n\n Endpoint to check rate limits for current user.\n '
pass
| -983,380,903,900,645,100
|
Test case for rates_limits_list
Endpoint to check rate limits for current user.
|
bindings/python/src/test/test_rates_api.py
|
test_rates_limits_list
|
cloudsmith-io/cloudsmith-api
|
python
|
def test_rates_limits_list(self):
'\n Test case for rates_limits_list\n\n Endpoint to check rate limits for current user.\n '
pass
|
def setUp(self):
'\n Initialises common tests attributes.\n '
self._cmfs = reshape_msds(MSDS_CMFS['CIE 1931 2 Degree Standard Observer'], SpectralShape(360, 780, 10))
self._sd_D65 = reshape_sd(SDS_ILLUMINANTS['D65'], self._cmfs.shape)
| 4,722,955,684,539,319
|
Initialises common test attributes.
|
colour/recovery/tests/test__init__.py
|
setUp
|
JGoldstone/colour
|
python
|
def setUp(self):
'\n \n '
self._cmfs = reshape_msds(MSDS_CMFS['CIE 1931 2 Degree Standard Observer'], SpectralShape(360, 780, 10))
self._sd_D65 = reshape_sd(SDS_ILLUMINANTS['D65'], self._cmfs.shape)
|
def test_domain_range_scale_XYZ_to_sd(self):
'\n Tests :func:`colour.recovery.XYZ_to_sd` definition domain\n and range scale support.\n '
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
m = ('Jakob 2019', 'Mallett 2019', 'Meng 2015', 'Otsu 2018', 'Smits 1999')
v = [sd_to_XYZ_integration(XYZ_to_sd(XYZ, method, cmfs=self._cmfs, illuminant=self._sd_D65), self._cmfs, self._sd_D65) for method in m]
d_r = (('reference', 1, 1), (1, 1, 0.01), (100, 100, 1))
for (method, value) in zip(m, v):
for (scale, factor_a, factor_b) in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(sd_to_XYZ_integration(XYZ_to_sd((XYZ * factor_a), method, cmfs=self._cmfs, illuminant=self._sd_D65), self._cmfs, self._sd_D65), (value * factor_b), decimal=7)
| 1,553,417,427,829,978,600
|
Tests :func:`colour.recovery.XYZ_to_sd` definition domain
and range scale support.
|
colour/recovery/tests/test__init__.py
|
test_domain_range_scale_XYZ_to_sd
|
JGoldstone/colour
|
python
|
def test_domain_range_scale_XYZ_to_sd(self):
'\n Tests :func:`colour.recovery.XYZ_to_sd` definition domain\n and range scale support.\n '
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
m = ('Jakob 2019', 'Mallett 2019', 'Meng 2015', 'Otsu 2018', 'Smits 1999')
v = [sd_to_XYZ_integration(XYZ_to_sd(XYZ, method, cmfs=self._cmfs, illuminant=self._sd_D65), self._cmfs, self._sd_D65) for method in m]
d_r = (('reference', 1, 1), (1, 1, 0.01), (100, 100, 1))
for (method, value) in zip(m, v):
for (scale, factor_a, factor_b) in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(sd_to_XYZ_integration(XYZ_to_sd((XYZ * factor_a), method, cmfs=self._cmfs, illuminant=self._sd_D65), self._cmfs, self._sd_D65), (value * factor_b), decimal=7)
|
async def test_flow_works(hass, aioclient_mock, mock_discovery):
'Test config flow.'
mock_discovery.return_value = '1'
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': 'user'}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'user')
assert (result['data_schema']({CONF_USERNAME: '', CONF_PASSWORD: ''}) == {CONF_HOST: 'unifi', CONF_USERNAME: '', CONF_PASSWORD: '', CONF_PORT: 443, CONF_VERIFY_SSL: False})
aioclient_mock.get('https://1.2.3.4:1234', status=302)
aioclient_mock.post('https://1.2.3.4:1234/api/login', json={'data': 'login successful', 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
aioclient_mock.get('https://1.2.3.4:1234/api/self/sites', json={'data': [{'desc': 'Site name', 'name': 'site_id', 'role': 'admin'}], 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
result = (await hass.config_entries.flow.async_configure(result['flow_id'], user_input={CONF_HOST: '1.2.3.4', CONF_USERNAME: 'username', CONF_PASSWORD: 'password', CONF_PORT: 1234, CONF_VERIFY_SSL: True}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY)
assert (result['title'] == 'Site name')
assert (result['data'] == {CONF_CONTROLLER: {CONF_HOST: '1.2.3.4', CONF_USERNAME: 'username', CONF_PASSWORD: 'password', CONF_PORT: 1234, CONF_SITE_ID: 'site_id', CONF_VERIFY_SSL: True}})
| 1,996,485,359,439,664,400
|
Test config flow.
|
tests/components/unifi/test_config_flow.py
|
test_flow_works
|
Nixon506E/home-assistant
|
python
|
async def test_flow_works(hass, aioclient_mock, mock_discovery):
mock_discovery.return_value = '1'
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': 'user'}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'user')
assert (result['data_schema']({CONF_USERNAME: '', CONF_PASSWORD: ''}) == {CONF_HOST: 'unifi', CONF_USERNAME: '', CONF_PASSWORD: '', CONF_PORT: 443, CONF_VERIFY_SSL: False})
aioclient_mock.get('https://1.2.3.4:1234', status=302)
aioclient_mock.post('https://1.2.3.4:1234/api/login', json={'data': 'login successful', 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
aioclient_mock.get('https://1.2.3.4:1234/api/self/sites', json={'data': [{'desc': 'Site name', 'name': 'site_id', 'role': 'admin'}], 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
result = (await hass.config_entries.flow.async_configure(result['flow_id'], user_input={CONF_HOST: '1.2.3.4', CONF_USERNAME: 'username', CONF_PASSWORD: 'password', CONF_PORT: 1234, CONF_VERIFY_SSL: True}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY)
assert (result['title'] == 'Site name')
assert (result['data'] == {CONF_CONTROLLER: {CONF_HOST: '1.2.3.4', CONF_USERNAME: 'username', CONF_PASSWORD: 'password', CONF_PORT: 1234, CONF_SITE_ID: 'site_id', CONF_VERIFY_SSL: True}})
|
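The UniFi config flow tests in these records repeat the same three aioclient_mock registrations: a 302 redirect check, a login POST, and a sites GET. A hypothetical helper condensing that scaffold is sketched below; it is not part of the original test module, and the CONTENT_TYPE_JSON import mirrors what the tests appear to use from homeassistant.const.

from homeassistant.const import CONTENT_TYPE_JSON

def mock_unifi_controller(aioclient_mock, host='1.2.3.4', port=1234, sites=None):
    # Register the three controller endpoints these flow tests rely on.
    base = 'https://{}:{}'.format(host, port)
    aioclient_mock.get(base, status=302)
    aioclient_mock.post(
        base + '/api/login',
        json={'data': 'login successful', 'meta': {'rc': 'ok'}},
        headers={'content-type': CONTENT_TYPE_JSON},
    )
    aioclient_mock.get(
        base + '/api/self/sites',
        json={
            'data': sites or [{'desc': 'Site name', 'name': 'site_id', 'role': 'admin'}],
            'meta': {'rc': 'ok'},
        },
        headers={'content-type': CONTENT_TYPE_JSON},
    )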
async def test_flow_works_multiple_sites(hass, aioclient_mock):
'Test config flow works when finding multiple sites.'
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': 'user'}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'user')
aioclient_mock.get('https://1.2.3.4:1234', status=302)
aioclient_mock.post('https://1.2.3.4:1234/api/login', json={'data': 'login successful', 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
aioclient_mock.get('https://1.2.3.4:1234/api/self/sites', json={'data': [{'name': 'default', 'role': 'admin', 'desc': 'site name'}, {'name': 'site2', 'role': 'admin', 'desc': 'site2 name'}], 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
result = (await hass.config_entries.flow.async_configure(result['flow_id'], user_input={CONF_HOST: '1.2.3.4', CONF_USERNAME: 'username', CONF_PASSWORD: 'password', CONF_PORT: 1234, CONF_VERIFY_SSL: True}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'site')
assert result['data_schema']({'site': 'default'})
assert result['data_schema']({'site': 'site2'})
| -1,844,292,938,368,761,900
|
Test config flow works when finding multiple sites.
|
tests/components/unifi/test_config_flow.py
|
test_flow_works_multiple_sites
|
Nixon506E/home-assistant
|
python
|
async def test_flow_works_multiple_sites(hass, aioclient_mock):
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': 'user'}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'user')
aioclient_mock.get('https://1.2.3.4:1234', status=302)
aioclient_mock.post('https://1.2.3.4:1234/api/login', json={'data': 'login successful', 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
aioclient_mock.get('https://1.2.3.4:1234/api/self/sites', json={'data': [{'name': 'default', 'role': 'admin', 'desc': 'site name'}, {'name': 'site2', 'role': 'admin', 'desc': 'site2 name'}], 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
result = (await hass.config_entries.flow.async_configure(result['flow_id'], user_input={CONF_HOST: '1.2.3.4', CONF_USERNAME: 'username', CONF_PASSWORD: 'password', CONF_PORT: 1234, CONF_VERIFY_SSL: True}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'site')
assert result['data_schema']({'site': 'default'})
assert result['data_schema']({'site': 'site2'})
|
async def test_flow_raise_already_configured(hass, aioclient_mock):
'Test config flow aborts since a connected config entry already exists.'
(await setup_unifi_integration(hass))
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': 'user'}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'user')
aioclient_mock.get('https://1.2.3.4:1234', status=302)
aioclient_mock.post('https://1.2.3.4:1234/api/login', json={'data': 'login successful', 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
aioclient_mock.get('https://1.2.3.4:1234/api/self/sites', json={'data': [{'desc': 'Site name', 'name': 'site_id', 'role': 'admin'}], 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
result = (await hass.config_entries.flow.async_configure(result['flow_id'], user_input={CONF_HOST: '1.2.3.4', CONF_USERNAME: 'username', CONF_PASSWORD: 'password', CONF_PORT: 1234, CONF_VERIFY_SSL: True}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_ABORT)
assert (result['reason'] == 'already_configured')
| 7,033,402,708,946,387,000
|
Test config flow aborts since a connected config entry already exists.
|
tests/components/unifi/test_config_flow.py
|
test_flow_raise_already_configured
|
Nixon506E/home-assistant
|
python
|
async def test_flow_raise_already_configured(hass, aioclient_mock):
(await setup_unifi_integration(hass))
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': 'user'}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'user')
aioclient_mock.get('https://1.2.3.4:1234', status=302)
aioclient_mock.post('https://1.2.3.4:1234/api/login', json={'data': 'login successful', 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
aioclient_mock.get('https://1.2.3.4:1234/api/self/sites', json={'data': [{'desc': 'Site name', 'name': 'site_id', 'role': 'admin'}], 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
result = (await hass.config_entries.flow.async_configure(result['flow_id'], user_input={CONF_HOST: '1.2.3.4', CONF_USERNAME: 'username', CONF_PASSWORD: 'password', CONF_PORT: 1234, CONF_VERIFY_SSL: True}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_ABORT)
assert (result['reason'] == 'already_configured')
|
async def test_flow_aborts_configuration_updated(hass, aioclient_mock):
'Test config flow aborts since a connected config entry already exists.'
entry = MockConfigEntry(domain=UNIFI_DOMAIN, data={'controller': {'host': '1.2.3.4', 'site': 'office'}})
entry.add_to_hass(hass)
entry = MockConfigEntry(domain=UNIFI_DOMAIN, data={'controller': {'host': '1.2.3.4', 'site': 'site_id'}})
entry.add_to_hass(hass)
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': 'user'}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'user')
aioclient_mock.get('https://1.2.3.4:1234', status=302)
aioclient_mock.post('https://1.2.3.4:1234/api/login', json={'data': 'login successful', 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
aioclient_mock.get('https://1.2.3.4:1234/api/self/sites', json={'data': [{'desc': 'Site name', 'name': 'site_id', 'role': 'admin'}], 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
with patch('homeassistant.components.unifi.async_setup_entry'):
result = (await hass.config_entries.flow.async_configure(result['flow_id'], user_input={CONF_HOST: '1.2.3.4', CONF_USERNAME: 'username', CONF_PASSWORD: 'password', CONF_PORT: 1234, CONF_VERIFY_SSL: True}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_ABORT)
assert (result['reason'] == 'configuration_updated')
| 3,056,866,012,301,531,000
|
Test config flow aborts since a connected config entry already exists.
|
tests/components/unifi/test_config_flow.py
|
test_flow_aborts_configuration_updated
|
Nixon506E/home-assistant
|
python
|
async def test_flow_aborts_configuration_updated(hass, aioclient_mock):
entry = MockConfigEntry(domain=UNIFI_DOMAIN, data={'controller': {'host': '1.2.3.4', 'site': 'office'}})
entry.add_to_hass(hass)
entry = MockConfigEntry(domain=UNIFI_DOMAIN, data={'controller': {'host': '1.2.3.4', 'site': 'site_id'}})
entry.add_to_hass(hass)
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': 'user'}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'user')
aioclient_mock.get('https://1.2.3.4:1234', status=302)
aioclient_mock.post('https://1.2.3.4:1234/api/login', json={'data': 'login successful', 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
aioclient_mock.get('https://1.2.3.4:1234/api/self/sites', json={'data': [{'desc': 'Site name', 'name': 'site_id', 'role': 'admin'}], 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
with patch('homeassistant.components.unifi.async_setup_entry'):
result = (await hass.config_entries.flow.async_configure(result['flow_id'], user_input={CONF_HOST: '1.2.3.4', CONF_USERNAME: 'username', CONF_PASSWORD: 'password', CONF_PORT: 1234, CONF_VERIFY_SSL: True}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_ABORT)
assert (result['reason'] == 'configuration_updated')
|
async def test_flow_fails_user_credentials_faulty(hass, aioclient_mock):
'Test config flow.'
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': 'user'}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'user')
aioclient_mock.get('https://1.2.3.4:1234', status=302)
with patch('aiounifi.Controller.login', side_effect=aiounifi.errors.Unauthorized):
result = (await hass.config_entries.flow.async_configure(result['flow_id'], user_input={CONF_HOST: '1.2.3.4', CONF_USERNAME: 'username', CONF_PASSWORD: 'password', CONF_PORT: 1234, CONF_VERIFY_SSL: True}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['errors'] == {'base': 'faulty_credentials'})
| 5,485,508,945,404,993,000
|
Test config flow.
|
tests/components/unifi/test_config_flow.py
|
test_flow_fails_user_credentials_faulty
|
Nixon506E/home-assistant
|
python
|
async def test_flow_fails_user_credentials_faulty(hass, aioclient_mock):
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': 'user'}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'user')
aioclient_mock.get('https://1.2.3.4:1234', status=302)
with patch('aiounifi.Controller.login', side_effect=aiounifi.errors.Unauthorized):
result = (await hass.config_entries.flow.async_configure(result['flow_id'], user_input={CONF_HOST: '1.2.3.4', CONF_USERNAME: 'username', CONF_PASSWORD: 'password', CONF_PORT: 1234, CONF_VERIFY_SSL: True}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['errors'] == {'base': 'faulty_credentials'})
|
async def test_flow_fails_controller_unavailable(hass, aioclient_mock):
'Test config flow.'
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': 'user'}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'user')
aioclient_mock.get('https://1.2.3.4:1234', status=302)
with patch('aiounifi.Controller.login', side_effect=aiounifi.errors.RequestError):
result = (await hass.config_entries.flow.async_configure(result['flow_id'], user_input={CONF_HOST: '1.2.3.4', CONF_USERNAME: 'username', CONF_PASSWORD: 'password', CONF_PORT: 1234, CONF_VERIFY_SSL: True}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['errors'] == {'base': 'service_unavailable'})
| -5,741,835,985,947,900,000
|
Test config flow.
|
tests/components/unifi/test_config_flow.py
|
test_flow_fails_controller_unavailable
|
Nixon506E/home-assistant
|
python
|
async def test_flow_fails_controller_unavailable(hass, aioclient_mock):
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': 'user'}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'user')
aioclient_mock.get('https://1.2.3.4:1234', status=302)
with patch('aiounifi.Controller.login', side_effect=aiounifi.errors.RequestError):
result = (await hass.config_entries.flow.async_configure(result['flow_id'], user_input={CONF_HOST: '1.2.3.4', CONF_USERNAME: 'username', CONF_PASSWORD: 'password', CONF_PORT: 1234, CONF_VERIFY_SSL: True}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['errors'] == {'base': 'service_unavailable'})
|
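The two failure tests above pin down how login exceptions surface as form errors. A hypothetical summary of that mapping is sketched below; the real config flow implements it with try/except inside its user step, not with a dict.

import aiounifi

LOGIN_ERRORS = {
    aiounifi.errors.Unauthorized: 'faulty_credentials',   # bad username/password
    aiounifi.errors.RequestError: 'service_unavailable',  # controller unreachable
}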
async def test_reauth_flow_update_configuration(hass, aioclient_mock):
'Verify reauth flow can update controller configuration.'
controller = (await setup_unifi_integration(hass))
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': SOURCE_REAUTH}, data=controller.config_entry))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == SOURCE_USER)
aioclient_mock.get('https://1.2.3.4:1234', status=302)
aioclient_mock.post('https://1.2.3.4:1234/api/login', json={'data': 'login successful', 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
aioclient_mock.get('https://1.2.3.4:1234/api/self/sites', json={'data': [{'desc': 'Site name', 'name': 'site_id', 'role': 'admin'}], 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
result = (await hass.config_entries.flow.async_configure(result['flow_id'], user_input={CONF_HOST: '1.2.3.4', CONF_USERNAME: 'new_name', CONF_PASSWORD: 'new_pass', CONF_PORT: 1234, CONF_VERIFY_SSL: True}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_ABORT)
assert (result['reason'] == 'reauth_successful')
assert (controller.host == '1.2.3.4')
assert (controller.config_entry.data[CONF_CONTROLLER][CONF_USERNAME] == 'new_name')
assert (controller.config_entry.data[CONF_CONTROLLER][CONF_PASSWORD] == 'new_pass')
| -7,351,216,011,015,675,000
|
Verify reauth flow can update controller configuration.
|
tests/components/unifi/test_config_flow.py
|
test_reauth_flow_update_configuration
|
Nixon506E/home-assistant
|
python
|
async def test_reauth_flow_update_configuration(hass, aioclient_mock):
controller = (await setup_unifi_integration(hass))
result = (await hass.config_entries.flow.async_init(UNIFI_DOMAIN, context={'source': SOURCE_REAUTH}, data=controller.config_entry))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == SOURCE_USER)
aioclient_mock.get('https://1.2.3.4:1234', status=302)
aioclient_mock.post('https://1.2.3.4:1234/api/login', json={'data': 'login successful', 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
aioclient_mock.get('https://1.2.3.4:1234/api/self/sites', json={'data': [{'desc': 'Site name', 'name': 'site_id', 'role': 'admin'}], 'meta': {'rc': 'ok'}}, headers={'content-type': CONTENT_TYPE_JSON})
result = (await hass.config_entries.flow.async_configure(result['flow_id'], user_input={CONF_HOST: '1.2.3.4', CONF_USERNAME: 'new_name', CONF_PASSWORD: 'new_pass', CONF_PORT: 1234, CONF_VERIFY_SSL: True}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_ABORT)
assert (result['reason'] == 'reauth_successful')
assert (controller.host == '1.2.3.4')
assert (controller.config_entry.data[CONF_CONTROLLER][CONF_USERNAME] == 'new_name')
assert (controller.config_entry.data[CONF_CONTROLLER][CONF_PASSWORD] == 'new_pass')
|
async def test_advanced_option_flow(hass):
'Test advanced config flow options.'
controller = (await setup_unifi_integration(hass, clients_response=CLIENTS, devices_response=DEVICES, wlans_response=WLANS, dpigroup_response=DPI_GROUPS, dpiapp_response=[]))
result = (await hass.config_entries.options.async_init(controller.config_entry.entry_id, context={'show_advanced_options': True}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'device_tracker')
assert set(result['data_schema'].schema[CONF_SSID_FILTER].options.keys()).intersection(('SSID 1', 'SSID 2', 'SSID 2_IOT', 'SSID 3'))
result = (await hass.config_entries.options.async_configure(result['flow_id'], user_input={CONF_TRACK_CLIENTS: False, CONF_TRACK_WIRED_CLIENTS: False, CONF_TRACK_DEVICES: False, CONF_SSID_FILTER: ['SSID 1', 'SSID 2_IOT', 'SSID 3'], CONF_DETECTION_TIME: 100}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'client_control')
result = (await hass.config_entries.options.async_configure(result['flow_id'], user_input={CONF_BLOCK_CLIENT: [CLIENTS[0]['mac']], CONF_POE_CLIENTS: False, CONF_DPI_RESTRICTIONS: False}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'statistics_sensors')
result = (await hass.config_entries.options.async_configure(result['flow_id'], user_input={CONF_ALLOW_BANDWIDTH_SENSORS: True, CONF_ALLOW_UPTIME_SENSORS: True}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY)
assert (result['data'] == {CONF_TRACK_CLIENTS: False, CONF_TRACK_WIRED_CLIENTS: False, CONF_TRACK_DEVICES: False, CONF_SSID_FILTER: ['SSID 1', 'SSID 2_IOT', 'SSID 3'], CONF_DETECTION_TIME: 100, CONF_IGNORE_WIRED_BUG: False, CONF_POE_CLIENTS: False, CONF_DPI_RESTRICTIONS: False, CONF_BLOCK_CLIENT: [CLIENTS[0]['mac']], CONF_ALLOW_BANDWIDTH_SENSORS: True, CONF_ALLOW_UPTIME_SENSORS: True})
| -4,160,935,200,820,935,700
|
Test advanced config flow options.
|
tests/components/unifi/test_config_flow.py
|
test_advanced_option_flow
|
Nixon506E/home-assistant
|
python
|
async def test_advanced_option_flow(hass):
controller = (await setup_unifi_integration(hass, clients_response=CLIENTS, devices_response=DEVICES, wlans_response=WLANS, dpigroup_response=DPI_GROUPS, dpiapp_response=[]))
result = (await hass.config_entries.options.async_init(controller.config_entry.entry_id, context={'show_advanced_options': True}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'device_tracker')
assert set(result['data_schema'].schema[CONF_SSID_FILTER].options.keys()).intersection(('SSID 1', 'SSID 2', 'SSID 2_IOT', 'SSID 3'))
result = (await hass.config_entries.options.async_configure(result['flow_id'], user_input={CONF_TRACK_CLIENTS: False, CONF_TRACK_WIRED_CLIENTS: False, CONF_TRACK_DEVICES: False, CONF_SSID_FILTER: ['SSID 1', 'SSID 2_IOT', 'SSID 3'], CONF_DETECTION_TIME: 100}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'client_control')
result = (await hass.config_entries.options.async_configure(result['flow_id'], user_input={CONF_BLOCK_CLIENT: [CLIENTS[0]['mac']], CONF_POE_CLIENTS: False, CONF_DPI_RESTRICTIONS: False}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'statistics_sensors')
result = (await hass.config_entries.options.async_configure(result['flow_id'], user_input={CONF_ALLOW_BANDWIDTH_SENSORS: True, CONF_ALLOW_UPTIME_SENSORS: True}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY)
assert (result['data'] == {CONF_TRACK_CLIENTS: False, CONF_TRACK_WIRED_CLIENTS: False, CONF_TRACK_DEVICES: False, CONF_SSID_FILTER: ['SSID 1', 'SSID 2_IOT', 'SSID 3'], CONF_DETECTION_TIME: 100, CONF_IGNORE_WIRED_BUG: False, CONF_POE_CLIENTS: False, CONF_DPI_RESTRICTIONS: False, CONF_BLOCK_CLIENT: [CLIENTS[0]['mac']], CONF_ALLOW_BANDWIDTH_SENSORS: True, CONF_ALLOW_UPTIME_SENSORS: True})
|
async def test_simple_option_flow(hass):
'Test simple config flow options.'
controller = (await setup_unifi_integration(hass, clients_response=CLIENTS, wlans_response=WLANS, dpigroup_response=DPI_GROUPS, dpiapp_response=[]))
result = (await hass.config_entries.options.async_init(controller.config_entry.entry_id, context={'show_advanced_options': False}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'simple_options')
result = (await hass.config_entries.options.async_configure(result['flow_id'], user_input={CONF_TRACK_CLIENTS: False, CONF_TRACK_DEVICES: False, CONF_BLOCK_CLIENT: [CLIENTS[0]['mac']]}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY)
assert (result['data'] == {CONF_TRACK_CLIENTS: False, CONF_TRACK_DEVICES: False, CONF_BLOCK_CLIENT: [CLIENTS[0]['mac']]})
| -7,815,050,608,609,491,000
|
Test simple config flow options.
|
tests/components/unifi/test_config_flow.py
|
test_simple_option_flow
|
Nixon506E/home-assistant
|
python
|
async def test_simple_option_flow(hass):
controller = (await setup_unifi_integration(hass, clients_response=CLIENTS, wlans_response=WLANS, dpigroup_response=DPI_GROUPS, dpiapp_response=[]))
result = (await hass.config_entries.options.async_init(controller.config_entry.entry_id, context={'show_advanced_options': False}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_FORM)
assert (result['step_id'] == 'simple_options')
result = (await hass.config_entries.options.async_configure(result['flow_id'], user_input={CONF_TRACK_CLIENTS: False, CONF_TRACK_DEVICES: False, CONF_BLOCK_CLIENT: [CLIENTS[0]['mac']]}))
assert (result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY)
assert (result['data'] == {CONF_TRACK_CLIENTS: False, CONF_TRACK_DEVICES: False, CONF_BLOCK_CLIENT: [CLIENTS[0]['mac']]})
|