hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f734528846dbb613a1f70d11b9f44b205a45b9f1 | 4,976 | py | Python | tests/ssg_test_suite/combined.py | rhmdnd/content | 478c60314b7a1692920a4031b51f4b6b3a6f25a0 | [
"BSD-3-Clause"
] | null | null | null | tests/ssg_test_suite/combined.py | rhmdnd/content | 478c60314b7a1692920a4031b51f4b6b3a6f25a0 | [
"BSD-3-Clause"
] | null | null | null | tests/ssg_test_suite/combined.py | rhmdnd/content | 478c60314b7a1692920a4031b51f4b6b3a6f25a0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python3
from __future__ import print_function
import logging
import re
from ssg.constants import OSCAP_PROFILE
from ssg_test_suite import common
from ssg_test_suite import rule
from ssg_test_suite import xml_operations
from ssg_test_suite import test_env
class CombinedChecker(rule.RuleChecker):
    """
    Combined mode works like pretty much like the Rule mode -
    for every rule selected in a profile:
    - Alter the system.
    - Run the scan, check that the result meets expectations.
    If the test scenario passed as requested, return True,
    if it failed or passed unexpectedly, return False.
    The following sequence applies if the initial scan
    has failed as expected:
    - If there are no remediations, return True.
    - Run remediation, return False if it failed.
    - Return result of the final scan of remediated system.
    If a rule doesn't have any test scenario, it is skipped.
    Skipped rules are reported at the end.
    """

    def __init__(self, test_env):
        """Initialize checker state on top of the base RuleChecker."""
        super(CombinedChecker, self).__init__(test_env)
        # Rules selected by the profile that have not been exercised yet;
        # test_rule() discards entries, the remainder is reported at the end.
        self.rules_not_tested_yet = set()
        self.results = list()
        self._current_result = None
        # Set to True on KeyboardInterrupt so callers can stop iterating
        # over any remaining target profiles.
        self.run_aborted = False

    def _rule_matches_rule_spec(self, rule_short_id):
        # A rule is in scope iff its short id was selected by the profile.
        return (rule_short_id in self.rule_spec)

    def _modify_parameters(self, script, params):
        # If there is no profiles metadata in a script we will use
        # the ALL profile - this will prevent failures which might
        # be caused by the tested profile selecting different values
        # in tested variables compared to defaults. The ALL profile
        # is always selecting default values.
        # If there is profiles metadata we check the metadata and set
        # it to self.profile (the tested profile) only if the metadata
        # contains self.profile - otherwise scenario is not supposed to
        # be tested using the self.profile and we return empty profiles
        # metadata.
        if not params["profiles"]:
            params["profiles"].append(rule.OSCAP_PROFILE_ALL_ID)
            logging.debug(
                "Added the {0} profile to the list of available profiles for {1}"
                .format(rule.OSCAP_PROFILE_ALL_ID, script))
        else:
            # Keep only the profile entries matching the profile under test.
            params['profiles'] = [item for item in params['profiles'] if re.search(self.profile, item)]
        return params

    def _generate_target_rules(self, profile):
        """Expand a profile id into the list of rule ids it selects."""
        # check if target is a complete profile ID, if not prepend profile prefix
        if not profile.startswith(OSCAP_PROFILE):
            profile = OSCAP_PROFILE + profile
        logging.info("Performing combined test using profile: {0}".format(profile))
        # Fetch target list from rules selected in profile
        target_rules = xml_operations.get_all_rule_ids_in_profile(
            self.datastream, self.benchmark_id,
            profile, logging)
        logging.debug("Profile {0} expanded to following list of "
                      "rules: {1}".format(profile, target_rules))
        return target_rules

    def _test_target(self):
        # Start from the full rule selection; whatever test_rule() has not
        # discarded by the end was never matched to a test scenario.
        self.rules_not_tested_yet = set(self.rule_spec)
        try:
            super(CombinedChecker, self)._test_target()
        except KeyboardInterrupt as exec_interrupt:
            # Remember the abort so perform_combined_check() can stop
            # processing further target profiles, then re-raise.
            self.run_aborted = True
            raise exec_interrupt
        if len(self.rules_not_tested_yet) != 0:
            not_tested = sorted(list(self.rules_not_tested_yet))
            logging.info("The following rule(s) were not tested:")
            for rule in not_tested:
                logging.info("{0}".format(rule))

    def test_rule(self, state, rule, scenarios):
        super(CombinedChecker, self).test_rule(state, rule, scenarios)
        # In combined mode there is no expectations of matching substrings,
        # every entry in the target is expected to be unique.
        # Let's remove matched targets, so we can track rules not tested
        self.rules_not_tested_yet.discard(rule.short_id)
def perform_combined_check(options):
    """Run the combined check against every profile listed in options.target."""
    checker = CombinedChecker(options.test_env)

    # Mirror the relevant command-line options onto the checker.
    for option_name in ('datastream', 'benchmark_id', 'remediate_using',
                        'dont_clean', 'no_reports', 'benchmark_cpes',
                        'scenarios_regex', 'slice_current', 'slice_total'):
        setattr(checker, option_name, getattr(options, option_name))
    # No debug option is provided for combined mode
    checker.manual_debug = False

    for profile in options.target:
        # Let's keep track of originally targeted profile
        checker.profile = profile
        checker.rule_spec = checker._generate_target_rules(profile)
        checker.template_spec = None
        checker.test_target()
        if checker.run_aborted:
            return
| 39.808 | 103 | 0.687902 |
from __future__ import print_function
import logging
import re
from ssg.constants import OSCAP_PROFILE
from ssg_test_suite import common
from ssg_test_suite import rule
from ssg_test_suite import xml_operations
from ssg_test_suite import test_env
class CombinedChecker(rule.RuleChecker):
def __init__(self, test_env):
super(CombinedChecker, self).__init__(test_env)
self.rules_not_tested_yet = set()
self.results = list()
self._current_result = None
self.run_aborted = False
def _rule_matches_rule_spec(self, rule_short_id):
return (rule_short_id in self.rule_spec)
def _modify_parameters(self, script, params):
if not params["profiles"]:
params["profiles"].append(rule.OSCAP_PROFILE_ALL_ID)
logging.debug(
"Added the {0} profile to the list of available profiles for {1}"
.format(rule.OSCAP_PROFILE_ALL_ID, script))
else:
params['profiles'] = [item for item in params['profiles'] if re.search(self.profile, item)]
return params
def _generate_target_rules(self, profile):
if not profile.startswith(OSCAP_PROFILE):
profile = OSCAP_PROFILE + profile
logging.info("Performing combined test using profile: {0}".format(profile))
target_rules = xml_operations.get_all_rule_ids_in_profile(
self.datastream, self.benchmark_id,
profile, logging)
logging.debug("Profile {0} expanded to following list of "
"rules: {1}".format(profile, target_rules))
return target_rules
def _test_target(self):
self.rules_not_tested_yet = set(self.rule_spec)
try:
super(CombinedChecker, self)._test_target()
except KeyboardInterrupt as exec_interrupt:
self.run_aborted = True
raise exec_interrupt
if len(self.rules_not_tested_yet) != 0:
not_tested = sorted(list(self.rules_not_tested_yet))
logging.info("The following rule(s) were not tested:")
for rule in not_tested:
logging.info("{0}".format(rule))
def test_rule(self, state, rule, scenarios):
super(CombinedChecker, self).test_rule(state, rule, scenarios)
self.rules_not_tested_yet.discard(rule.short_id)
def perform_combined_check(options):
checker = CombinedChecker(options.test_env)
checker.datastream = options.datastream
checker.benchmark_id = options.benchmark_id
checker.remediate_using = options.remediate_using
checker.dont_clean = options.dont_clean
checker.no_reports = options.no_reports
# No debug option is provided for combined mode
checker.manual_debug = False
checker.benchmark_cpes = options.benchmark_cpes
checker.scenarios_regex = options.scenarios_regex
checker.slice_current = options.slice_current
checker.slice_total = options.slice_total
for profile in options.target:
# Let's keep track of originally targeted profile
checker.profile = profile
target_rules = checker._generate_target_rules(profile)
checker.rule_spec = target_rules
checker.template_spec = None
checker.test_target()
if checker.run_aborted:
return
| true | true |
f73454591c4d3431543089df0c42b9280414f4e9 | 2,991 | py | Python | shopify_csv/tests/test_end_to_end_tests.py | d-e-h-i-o/shopify_csv | 0c49666bca38802a756502f72f835abb63115025 | [
"MIT"
] | 1 | 2021-02-28T11:36:50.000Z | 2021-02-28T11:36:50.000Z | shopify_csv/tests/test_end_to_end_tests.py | d-e-h-i-o/shopify_csv | 0c49666bca38802a756502f72f835abb63115025 | [
"MIT"
] | null | null | null | shopify_csv/tests/test_end_to_end_tests.py | d-e-h-i-o/shopify_csv | 0c49666bca38802a756502f72f835abb63115025 | [
"MIT"
] | null | null | null | import os
import csv
from shopify_csv import ShopifyRow
def get_template_rows():
    """Load the expected rows from the product-template CSV fixture."""
    fixture_path = os.path.join(
        os.getcwd(), "shopify_csv", "tests", "fixtures", "product_template.csv"
    )
    with open(fixture_path, "r") as template_file:
        return list(csv.reader(template_file, delimiter=";"))
def _build_row(fields, is_variant=False):
    """Create a ShopifyRow, set the given attributes, validate, and return
    its writable form.

    Args:
        fields: mapping of ShopifyRow attribute name to value.
        is_variant: forwarded to ShopifyRow.validate_required_fields().
    """
    row = ShopifyRow()
    for name, value in fields.items():
        setattr(row, name, value)
    row.validate_required_fields(is_variant=is_variant)
    return row.writable


def get_shopify_rows():
    """Build the header row plus one product and two variant rows that
    mirror the product_template.csv fixture."""
    product = {
        'handle': "example-product",
        'title': "Some product",
        'vendor': "Vendor",
        'type': "product",
        'tags': "tag1",
        'published': True,
        'option1_name': "Title",
        'option1_value': "Some option value",
        'variant_grams': 3629,
        'variant_inventory_policy': "deny",
        'variant_fulfillment_service': "manual",
        'variant_price': 25,
        'variant_requires_shipping': True,
        'variant_taxable': True,
        'image_src': "https://test.com/product.jpg",
        'image_position': 1,
        'gift_card': False,
        'seo_title': "Seo title.",
        'seo_description': "Description",
        'google_shopping_google_product_category': "Products > Products",
        'google_shopping_gender': "Unisex",
        'google_shopping_age_group': "Adult",
        'google_shopping_mpn': "man",
        'google_shopping_adwords_grouping': "products",
        'google_shopping_adwords_labels': "labels",
        'google_shopping_condition': "used",
        'google_shopping_custom_product': "FALSE",
        'variant_weight_unit': "g",
        'status': "active",
    }
    # The two variants differ only in size name, SKU and inventory tracker.
    small_variant = {
        'handle': "example-t-shirt",
        'option1_value': "Small",
        'variant_sku': "example-product-s",
        'variant_grams': 200,
        'variant_inventory_policy': "deny",
        'variant_fulfillment_service': "manual",
        'variant_price': 29.99,
        'variant_compare_at_price': 34.99,
        'variant_requires_shipping': True,
        'variant_taxable': True,
        'variant_weight_unit': "g",
    }
    medium_variant = dict(
        small_variant,
        option1_value="Medium",
        variant_sku="example-product-m",
        variant_inventory_tracker="shopify",
    )
    return [
        ShopifyRow.FIELDS,
        _build_row(product),
        _build_row(small_variant, is_variant=True),
        _build_row(medium_variant, is_variant=True),
    ]
def test_should_produce_template_csv():
    """The generated rows must match the template fixture cell by cell."""
    expected_rows = get_template_rows()
    actual_rows = get_shopify_rows()
    for expected_row, actual_row in zip(expected_rows, actual_rows):
        for expected_cell, actual_cell in zip(expected_row, actual_row):
            assert expected_cell == actual_cell
| 30.520408 | 83 | 0.698094 | import os
import csv
from shopify_csv import ShopifyRow
def get_template_rows():
with open(
os.path.join(
os.getcwd(), "shopify_csv", "tests", "fixtures", "product_template.csv"
),
"r",
) as file:
reader = csv.reader(file, delimiter=";")
return [row for row in reader]
def get_shopify_rows():
return_rows = []
return_rows.append(ShopifyRow.FIELDS)
row = ShopifyRow()
row.handle = "example-product"
row.title = "Some product"
row.vendor = "Vendor"
row.type = "product"
row.tags = "tag1"
row.published = True
row.option1_name = "Title"
row.option1_value = "Some option value"
row.variant_grams = 3629
row.variant_inventory_policy = "deny"
row.variant_fulfillment_service = "manual"
row.variant_price = 25
row.variant_requires_shipping = True
row.variant_taxable = True
row.image_src = "https://test.com/product.jpg"
row.image_position = 1
row.gift_card = False
row.seo_title = "Seo title."
row.seo_description = "Description"
row.google_shopping_google_product_category = "Products > Products"
row.google_shopping_gender = "Unisex"
row.google_shopping_age_group = "Adult"
row.google_shopping_mpn = "man"
row.google_shopping_adwords_grouping = "products"
row.google_shopping_adwords_labels = "labels"
row.google_shopping_condition = "used"
row.google_shopping_custom_product = "FALSE"
row.variant_weight_unit = "g"
row.status = "active"
row.validate_required_fields()
return_rows.append(row.writable)
row = ShopifyRow()
row.handle = "example-t-shirt"
row.option1_value = "Small"
row.variant_sku = "example-product-s"
row.variant_grams = 200
row.variant_inventory_policy = "deny"
row.variant_fulfillment_service = "manual"
row.variant_price = 29.99
row.variant_compare_at_price = 34.99
row.variant_requires_shipping = True
row.variant_taxable = True
row.variant_weight_unit = "g"
row.validate_required_fields(is_variant=True)
return_rows.append(row.writable)
row = ShopifyRow()
row.handle = "example-t-shirt"
row.option1_value = "Medium"
row.variant_sku = "example-product-m"
row.variant_grams = 200
row.variant_inventory_tracker = "shopify"
row.variant_inventory_policy = "deny"
row.variant_fulfillment_service = "manual"
row.variant_price = 29.99
row.variant_compare_at_price = 34.99
row.variant_requires_shipping = True
row.variant_taxable = True
row.variant_weight_unit = "g"
row.validate_required_fields(is_variant=True)
return_rows.append(row.writable)
return return_rows
def test_should_produce_template_csv():
template_rows = get_template_rows()
shopify_rows = get_shopify_rows()
for template_row, shopify_row in zip(template_rows, shopify_rows):
for field1, field2 in zip(template_row, shopify_row):
assert field1 == field2
| true | true |
f734548f2fc07d83f4ec18ec59e58bd11bd031aa | 128 | py | Python | pub_site/src/ops/config/deploy/__init__.py | webee/pay | b48c6892686bf3f9014bb67ed119506e41050d45 | [
"W3C"
] | 1 | 2019-10-14T11:51:49.000Z | 2019-10-14T11:51:49.000Z | pub_site/src/ops/config/deploy/__init__.py | webee/pay | b48c6892686bf3f9014bb67ed119506e41050d45 | [
"W3C"
] | null | null | null | pub_site/src/ops/config/deploy/__init__.py | webee/pay | b48c6892686bf3f9014bb67ed119506e41050d45 | [
"W3C"
] | null | null | null | # coding=utf-8
# Deployment configuration constants (presumably consumed by the deploy
# scripts in this package -- confirm against callers).
HOST_STRING = "lvye_pay@192.168.0.165"  # user@host of the deploy target
CODE_DIR = "/home/lvye_pay/projects/pay2/pub_site"  # remote project directory
VENV_NAME = "pub_venv"  # name of the remote virtualenv
| 25.6 | 50 | 0.75 |
HOST_STRING = "lvye_pay@192.168.0.165"
CODE_DIR = "/home/lvye_pay/projects/pay2/pub_site"
VENV_NAME = "pub_venv"
| true | true |
f73454af719705d177bc737cb4a5f6855b349fd9 | 6,608 | py | Python | flask/testing.py | sabikm9876/Dockers9876 | 5909e26fba86351063bd622cedf6a4c25eba2e79 | [
"BSD-3-Clause"
] | 1 | 2018-04-07T12:15:45.000Z | 2018-04-07T12:15:45.000Z | flask/testing.py | sabikm9876/Dockers9876 | 5909e26fba86351063bd622cedf6a4c25eba2e79 | [
"BSD-3-Clause"
] | null | null | null | flask/testing.py | sabikm9876/Dockers9876 | 5909e26fba86351063bd622cedf6a4c25eba2e79 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
flask.testing
~~~~~~~~~~~~~
Implements test support helpers. This module is lazily imported
and usually not used in production environments.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import werkzeug
from contextlib import contextmanager
from werkzeug.test import Client, EnvironBuilder
from flask import _request_ctx_stack
from flask.json import dumps as json_dumps
try:
from werkzeug.urls import url_parse
except ImportError:
from urlparse import urlsplit as url_parse
def make_test_environ_builder(
    app, path='/', base_url=None, subdomain=None, url_scheme=None,
    *args, **kwargs
):
    """Creates a new test builder with some application defaults thrown in.

    ``base_url`` is mutually exclusive with ``subdomain``/``url_scheme``;
    when omitted, the base URL is assembled from the application's
    ``SERVER_NAME``, ``APPLICATION_ROOT`` and ``PREFERRED_URL_SCHEME``
    config values.  A ``json`` keyword argument is serialized into
    ``data`` with an ``application/json`` content type.  Remaining
    arguments are forwarded to :class:`werkzeug.test.EnvironBuilder`.
    """
    assert (
        not (base_url or subdomain or url_scheme)
        or (base_url is not None) != bool(subdomain or url_scheme)
    ), 'Cannot pass "subdomain" or "url_scheme" with "base_url".'
    if base_url is None:
        http_host = app.config.get('SERVER_NAME') or 'localhost'
        app_root = app.config['APPLICATION_ROOT']
        if subdomain:
            http_host = '{0}.{1}'.format(subdomain, http_host)
        if url_scheme is None:
            url_scheme = app.config['PREFERRED_URL_SCHEME']
        url = url_parse(path)
        base_url = '{0}://{1}/{2}'.format(
            url_scheme, url.netloc or http_host, app_root.lstrip('/')
        )
        # Any query string stays attached to ``path``, not the base URL.
        path = url.path
        if url.query:
            # ``url.query`` may be bytes or str depending on the input path.
            sep = b'?' if isinstance(url.query, bytes) else '?'
            path += sep + url.query
    if 'json' in kwargs:
        assert 'data' not in kwargs, (
            "Client cannot provide both 'json' and 'data'."
        )
        # push a context so flask.json can use app's json attributes
        with app.app_context():
            kwargs['data'] = json_dumps(kwargs.pop('json'))
        if 'content_type' not in kwargs:
            kwargs['content_type'] = 'application/json'
    return EnvironBuilder(path, base_url, *args, **kwargs)
class FlaskClient(Client):
    """Works like a regular Werkzeug test client but has some knowledge about
    how Flask works to defer the cleanup of the request context stack to the
    end of a ``with`` body when used in a ``with`` statement. For general
    information about how to use this class refer to
    :class:`werkzeug.test.Client`.

    .. versionchanged:: 0.12
        `app.test_client()` includes preset default environment, which can be
        set after instantiation of the `app.test_client()` object in
        `client.environ_base`.

    Basic usage is outlined in the :ref:`testing` chapter.
    """

    # Flipped to True while inside a ``with client:`` block; forwarded to
    # the request context machinery via ``flask._preserve_context``.
    preserve_context = False

    def __init__(self, *args, **kwargs):
        super(FlaskClient, self).__init__(*args, **kwargs)
        # Default WSGI environ values merged into every request made by
        # this client; tests may override entries after construction.
        self.environ_base = {
            "REMOTE_ADDR": "127.0.0.1",
            "HTTP_USER_AGENT": "werkzeug/" + werkzeug.__version__
        }

    @contextmanager
    def session_transaction(self, *args, **kwargs):
        """When used in combination with a ``with`` statement this opens a
        session transaction. This can be used to modify the session that
        the test client uses. Once the ``with`` block is left the session is
        stored back.

        ::

            with client.session_transaction() as session:
                session['value'] = 42

        Internally this is implemented by going through a temporary test
        request context and since session handling could depend on
        request variables this function accepts the same arguments as
        :meth:`~flask.Flask.test_request_context` which are directly
        passed through.
        """
        if self.cookie_jar is None:
            raise RuntimeError('Session transactions only make sense '
                               'with cookies enabled.')
        app = self.application
        environ_overrides = kwargs.setdefault('environ_overrides', {})
        # Inject the client's cookies so the opened session matches what
        # the server would see for this client.
        self.cookie_jar.inject_wsgi(environ_overrides)
        outer_reqctx = _request_ctx_stack.top
        with app.test_request_context(*args, **kwargs) as c:
            session_interface = app.session_interface
            sess = session_interface.open_session(app, c.request)
            if sess is None:
                raise RuntimeError('Session backend did not open a session. '
                                   'Check the configuration')
            # Since we have to open a new request context for the session
            # handling we want to make sure that we hide out own context
            # from the caller. By pushing the original request context
            # (or None) on top of this and popping it we get exactly that
            # behavior. It's important to not use the push and pop
            # methods of the actual request context object since that would
            # mean that cleanup handlers are called
            _request_ctx_stack.push(outer_reqctx)
            try:
                yield sess
            finally:
                _request_ctx_stack.pop()
            # Persist any changes the caller made and copy the resulting
            # Set-Cookie headers back into this client's cookie jar.
            resp = app.response_class()
            if not session_interface.is_null_session(sess):
                session_interface.save_session(app, sess, resp)
            headers = resp.get_wsgi_headers(c.request.environ)
            self.cookie_jar.extract_wsgi(c.request.environ, headers)

    def open(self, *args, **kwargs):
        # Tell the request context machinery whether to preserve the
        # context after the request (True while inside ``with client:``).
        kwargs.setdefault('environ_overrides', {}) \
            ['flask._preserve_context'] = self.preserve_context

        kwargs.setdefault('environ_base', self.environ_base)

        as_tuple = kwargs.pop('as_tuple', False)
        buffered = kwargs.pop('buffered', False)
        follow_redirects = kwargs.pop('follow_redirects', False)
        builder = make_test_environ_builder(self.application, *args, **kwargs)

        return Client.open(self, builder,
                           as_tuple=as_tuple,
                           buffered=buffered,
                           follow_redirects=follow_redirects)

    def __enter__(self):
        if self.preserve_context:
            raise RuntimeError('Cannot nest client invocations')
        self.preserve_context = True
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.preserve_context = False

        # on exit we want to clean up earlier. Normally the request context
        # stays preserved until the next request in the same thread comes
        # in. See RequestGlobals.push() for the general behavior.
        top = _request_ctx_stack.top
        if top is not None and top.preserved:
            top.pop()
| 37.545455 | 78 | 0.631205 |
import werkzeug
from contextlib import contextmanager
from werkzeug.test import Client, EnvironBuilder
from flask import _request_ctx_stack
from flask.json import dumps as json_dumps
try:
from werkzeug.urls import url_parse
except ImportError:
from urlparse import urlsplit as url_parse
def make_test_environ_builder(
app, path='/', base_url=None, subdomain=None, url_scheme=None,
*args, **kwargs
):
assert (
not (base_url or subdomain or url_scheme)
or (base_url is not None) != bool(subdomain or url_scheme)
), 'Cannot pass "subdomain" or "url_scheme" with "base_url".'
if base_url is None:
http_host = app.config.get('SERVER_NAME') or 'localhost'
app_root = app.config['APPLICATION_ROOT']
if subdomain:
http_host = '{0}.{1}'.format(subdomain, http_host)
if url_scheme is None:
url_scheme = app.config['PREFERRED_URL_SCHEME']
url = url_parse(path)
base_url = '{0}://{1}/{2}'.format(
url_scheme, url.netloc or http_host, app_root.lstrip('/')
)
path = url.path
if url.query:
sep = b'?' if isinstance(url.query, bytes) else '?'
path += sep + url.query
if 'json' in kwargs:
assert 'data' not in kwargs, (
"Client cannot provide both 'json' and 'data'."
)
with app.app_context():
kwargs['data'] = json_dumps(kwargs.pop('json'))
if 'content_type' not in kwargs:
kwargs['content_type'] = 'application/json'
return EnvironBuilder(path, base_url, *args, **kwargs)
class FlaskClient(Client):
preserve_context = False
def __init__(self, *args, **kwargs):
super(FlaskClient, self).__init__(*args, **kwargs)
self.environ_base = {
"REMOTE_ADDR": "127.0.0.1",
"HTTP_USER_AGENT": "werkzeug/" + werkzeug.__version__
}
@contextmanager
def session_transaction(self, *args, **kwargs):
if self.cookie_jar is None:
raise RuntimeError('Session transactions only make sense '
'with cookies enabled.')
app = self.application
environ_overrides = kwargs.setdefault('environ_overrides', {})
self.cookie_jar.inject_wsgi(environ_overrides)
outer_reqctx = _request_ctx_stack.top
with app.test_request_context(*args, **kwargs) as c:
session_interface = app.session_interface
sess = session_interface.open_session(app, c.request)
if sess is None:
raise RuntimeError('Session backend did not open a session. '
'Check the configuration')
# Since we have to open a new request context for the session
# handling we want to make sure that we hide out own context
# from the caller. By pushing the original request context
# (or None) on top of this and popping it we get exactly that
# behavior. It's important to not use the push and pop
_request_ctx_stack.push(outer_reqctx)
try:
yield sess
finally:
_request_ctx_stack.pop()
resp = app.response_class()
if not session_interface.is_null_session(sess):
session_interface.save_session(app, sess, resp)
headers = resp.get_wsgi_headers(c.request.environ)
self.cookie_jar.extract_wsgi(c.request.environ, headers)
def open(self, *args, **kwargs):
kwargs.setdefault('environ_overrides', {}) \
['flask._preserve_context'] = self.preserve_context
kwargs.setdefault('environ_base', self.environ_base)
as_tuple = kwargs.pop('as_tuple', False)
buffered = kwargs.pop('buffered', False)
follow_redirects = kwargs.pop('follow_redirects', False)
builder = make_test_environ_builder(self.application, *args, **kwargs)
return Client.open(self, builder,
as_tuple=as_tuple,
buffered=buffered,
follow_redirects=follow_redirects)
def __enter__(self):
if self.preserve_context:
raise RuntimeError('Cannot nest client invocations')
self.preserve_context = True
return self
def __exit__(self, exc_type, exc_value, tb):
self.preserve_context = False
top = _request_ctx_stack.top
if top is not None and top.preserved:
top.pop()
| true | true |
f73456535b7365629f81b36cbac61da3a24af926 | 583 | py | Python | var/spack/repos/builtin/packages/perl-module-runtime/package.py | nkianggiss/spack | 3477d3375142a30f5714bb5966a6d8bb22c33c06 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 3 | 2019-06-27T13:26:50.000Z | 2019-07-01T16:24:54.000Z | var/spack/repos/builtin/packages/perl-module-runtime/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 75 | 2016-07-27T11:43:00.000Z | 2020-12-08T15:56:53.000Z | var/spack/repos/builtin/packages/perl-module-runtime/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 8 | 2015-10-16T13:51:49.000Z | 2021-10-18T13:58:03.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlModuleRuntime(PerlPackage):
    """Runtime module handling"""

    homepage = "http://search.cpan.org/~zefram/Module-Runtime/lib/Module/Runtime.pm"
    url = "http://search.cpan.org/CPAN/authors/id/Z/ZE/ZEFRAM/Module-Runtime-0.016.tar.gz"

    # Second argument is the md5 checksum of the release tarball.
    version('0.016', 'd3d47222fa2e3dfcb4526f6cc8437b20')

    # Module::Build is only needed to build the distribution, not at runtime.
    depends_on('perl-module-build', type='build')
| 32.388889 | 95 | 0.730703 |
from spack import *
class PerlModuleRuntime(PerlPackage):
homepage = "http://search.cpan.org/~zefram/Module-Runtime/lib/Module/Runtime.pm"
url = "http://search.cpan.org/CPAN/authors/id/Z/ZE/ZEFRAM/Module-Runtime-0.016.tar.gz"
version('0.016', 'd3d47222fa2e3dfcb4526f6cc8437b20')
depends_on('perl-module-build', type='build')
| true | true |
f734576213f7b4d421a0c2f2003e333c8a2ea478 | 13,946 | py | Python | integration/sawtooth_integration/tests/test_transactor_permissioning.py | ltavag/sawtooth-core | 50659f23437b27ecd666d4cf129f812e6adaedc4 | [
"Apache-2.0"
] | 1 | 2018-06-28T07:39:38.000Z | 2018-06-28T07:39:38.000Z | integration/sawtooth_integration/tests/test_transactor_permissioning.py | ltavag/sawtooth-core | 50659f23437b27ecd666d4cf129f812e6adaedc4 | [
"Apache-2.0"
] | 5 | 2018-05-17T21:56:07.000Z | 2018-06-11T19:52:08.000Z | integration/sawtooth_integration/tests/test_transactor_permissioning.py | ltavag/sawtooth-core | 50659f23437b27ecd666d4cf129f812e6adaedc4 | [
"Apache-2.0"
] | 4 | 2018-06-13T16:28:26.000Z | 2018-06-13T16:47:22.000Z | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import enum
import subprocess
import unittest
from uuid import uuid4
import cbor
from sawtooth_signing import create_context
from sawtooth_signing import CryptoFactory
from sawtooth_signing.secp256k1 import Secp256k1PrivateKey
from sawtooth_processor_test.message_factory import MessageFactory
from sawtooth_integration.tests.integration_tools import RestClient
from sawtooth_integration.tests.integration_tools import wait_for_rest_apis
# Host:port of the validator REST API the tests talk to (presumably the
# service name on the integration-test network -- confirm in the compose file).
REST_API = "rest-api:8008"
class TestTransactorPermissioning(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        # Block until the REST API is answering requests before any test runs.
        wait_for_rest_apis([REST_API])
        cls.REST_ENDPOINT = "http://" + REST_API

    def setUp(self):
        # One Transactor per named identity. Walter acts as the identity
        # administrator who sets the permissioning policies below.
        self.alice = Transactor('alice', self.REST_ENDPOINT)
        self.bob = Transactor('bob', self.REST_ENDPOINT)
        self.carol = Transactor('carol', self.REST_ENDPOINT)
        self.dave = Transactor('dave', self.REST_ENDPOINT)
        self.chuck = Transactor('chuck', self.REST_ENDPOINT)
        self.mallory = Transactor('mallory', self.REST_ENDPOINT)
        self.walter = Transactor('walter', self.REST_ENDPOINT)

    def test_transactor_permissioning(self):
        """Test the transactor permissioning system using the Identity
        transaction family.

        Notes:
            The test works from general to specific on the transactor
            permissioning settings, starting with 'transactor', then
            'transactor.batch_signer', then 'transactor.transaction_signer',
            and then finally 'transactor.transaction_signer.intkey' and
            'transactor.transaction_signer.xo'. For each subsection,
            the test allows some transactors and denies others and asserts
            that each either is or is not able to send transactions.

            From local configuration, Chuck and Mallory are denied from
            being transactors, Dave is denied from being a batch_signer,
            Carol is denied from sending XO transactions, while Walter, Bob,
            and Alice have all transactor permissions and no explicit denials
            from more specific permissions.
        """
        #
        # transactor subsection
        #
        # From local configuration Dave is denied from being a batch_signer,
        # and Chuck and Mallory are denied from all transactor permissions.

        self.walter.set_public_key_for_role(
            'deny_bob_allow_alice_walter',
            'transactor',
            permit_keys=[self.alice.public_key, self.walter.public_key],
            deny_keys=[self.bob.public_key])

        self.assert_able_to_send(
            (self.alice, Families.INTKEY),
            (self.walter, Families.INTKEY))

        self.assert_not_able_to_send(
            (self.bob, Families.INTKEY),
            (self.dave, Families.INTKEY),
            (self.chuck, Families.INTKEY),
            (self.mallory, Families.INTKEY))

        # Flip the on-chain policy: now Alice is denied, Bob allowed.
        self.walter.set_public_key_for_role(
            'deny_alice_allow_bob_walter',
            'transactor',
            permit_keys=[self.bob.public_key, self.walter.public_key],
            deny_keys=[self.alice.public_key])

        self.assert_able_to_send(
            (self.bob, Families.INTKEY),
            (self.walter, Families.INTKEY))

        self.assert_not_able_to_send(
            (self.alice, Families.INTKEY),
            (self.chuck, Families.INTKEY),
            (self.dave, Families.INTKEY),
            (self.mallory, Families.INTKEY))

        # Reset 'transactor' to allow-all before testing subsections.
        self.walter.set_public_key_for_role(
            "allow_all_transactors",
            "transactor",
            permit_keys=["*"],
            deny_keys=[])

        #
        # transactor.batch_signer subsection
        #
        # From local configuration both Alice and Bob are allowed
        # batch_signers, while Dave is denied.

        self.walter.set_public_key_for_role(
            "deny_alice_as_batcher_allow_bob",
            "transactor.batch_signer",
            permit_keys=[self.bob.public_key, self.walter.public_key],
            deny_keys=[self.alice.public_key])

        # Alice signs the transaction but names Bob as the batch signer.
        txns = [self.alice.create_txn(
            Families.INTKEY,
            batcher=self.bob.public_key)]

        self.assert_able_to_send_batch(
            txns,
            (self.bob, Families.INTKEY))

        self.assert_not_able_to_send_batch(
            txns,
            (self.alice, Families.INTKEY))

        daves_txns = [self.alice.create_txn(
            Families.INTKEY,
            batcher=self.dave.public_key)]

        self.assert_not_able_to_send_batch(
            daves_txns,
            (self.dave, Families.INTKEY))

        # Reset batch_signer to allow-all.
        self.walter.set_public_key_for_role(
            "allow_all_batchers",
            "transactor.batch_signer",
            permit_keys=["*"],
            deny_keys=[])

        #
        # transactor.transaction_signer
        #
        # From local configuration Carol is denied from XO, but is allowed all
        # other transactor permissions, Mallory and Chuck are denied from all
        # transactor permissions.

        self.walter.set_public_key_for_role(
            "allow_carol_and_no_others",
            "transactor.transaction_signer",
            permit_keys=[self.carol.public_key, self.walter.public_key],
            deny_keys=[self.alice.public_key, self.bob.public_key,
                       self.dave.public_key])

        self.assert_able_to_send((self.carol, Families.INTKEY))

        # Carol's local XO denial still applies despite the on-chain permit.
        self.assert_not_able_to_send((self.carol, Families.XO))

        self.assert_not_able_to_send(
            (self.alice, Families.INTKEY),
            (self.alice, Families.XO),
            (self.bob, Families.INTKEY),
            (self.bob, Families.XO),
            (self.dave, Families.INTKEY),
            (self.dave, Families.XO),
            (self.chuck, Families.INTKEY),
            (self.chuck, Families.XO),
            (self.mallory, Families.INTKEY),
            (self.mallory, Families.XO))

        # Reset transaction_signer to allow-all.
        self.walter.set_public_key_for_role(
            "allow_all_transaction_signers",
            "transactor.transaction_signer",
            permit_keys=["*"],
            deny_keys=[])

        #
        # transactor.transaction_signer.< tp_name > subsection
        #
        # From local configuration Dave is denied from being a batch_signer,
        # Mallory and Chuck are denied being transactors.

        self.walter.set_public_key_for_role(
            "deny_alice_from_xo_allow_bob",
            "transactor.transaction_signer.xo",
            permit_keys=[self.bob.public_key, self.dave.public_key],
            deny_keys=[self.alice.public_key])

        self.assert_able_to_send((self.bob, Families.XO))

        self.assert_not_able_to_send(
            (self.alice, Families.XO),
            (self.chuck, Families.XO),
            (self.mallory, Families.XO),
            (self.dave, Families.XO))

        self.walter.set_public_key_for_role(
            "deny_bob_from_intkey_allow_dave_alice",
            "transactor.transaction_signer.intkey",
            permit_keys=[self.alice.public_key, self.dave.public_key],
            deny_keys=[self.bob.public_key])

        self.assert_able_to_send((self.alice, Families.INTKEY))

        self.assert_not_able_to_send(
            (self.bob, Families.INTKEY),
            (self.chuck, Families.INTKEY),
            (self.mallory, Families.INTKEY),
            (self.dave, Families.INTKEY))

    def assert_able_to_send(self, *transactor_family_pairs):
        # Succeeds iff every send goes through without raising.
        for transactor, family in transactor_family_pairs:
            transactor.send(family)

    def assert_able_to_send_batch(self, txns, *transactor_family_pairs):
        # Each transactor batches and sends the pre-built transactions.
        for transactor, family in transactor_family_pairs:
            transactor.send(family_name=family, transactions=txns)

    def assert_not_able_to_send(self, *transactor_family_pairs):
        # Every send must raise; a successful send fails the test.
        for transactor, family in transactor_family_pairs:
            with self.assertRaises(Exception):
                transactor.send(family)

    def assert_not_able_to_send_batch(self, txns, *transactor_family_pairs):
        for transactor, family in transactor_family_pairs:
            with self.assertRaises(Exception):
                transactor.send(family, txns)
# First six characters of MessageFactory.sha512(...) of each family name;
# used as the state-address prefix by the make_*_address helpers below.
INTKEY_NAMESPACE = MessageFactory.sha512('intkey'.encode())[:6]
XO_NAMESPACE = MessageFactory.sha512('xo'.encode())[:6]
# pylint: disable=invalid-name
class Families(enum.Enum):
    """The transaction families exercised by these tests."""
    INTKEY = enum.auto()  # == 1, as before
    XO = enum.auto()      # == 2, as before
# Per-family constructor arguments for MessageFactory
# (consumed by Transactor._add_transaction_family_factory).
FAMILY_CONFIG = {
    Families.INTKEY: {
        'family_name': 'intkey',
        'family_version': '1.0',
        # NOTE(review): derived with sha256 here, while INTKEY_NAMESPACE and
        # make_intkey_address use sha512 — confirm the mismatch is intentional.
        'namespace': MessageFactory.sha256('intkey'.encode())[:6]
    },
    Families.XO: {
        'family_name': 'xo',
        'family_version': '1.0',
        # NOTE(review): sha256 here vs sha512 in XO_NAMESPACE above — confirm.
        'namespace': MessageFactory.sha256('xo'.encode())[:6]
    },
}
def make_intkey_payload(unique_value):
    """Build an intkey 'set' payload storing 1000 under *unique_value*."""
    return dict(Verb='set', Name=unique_value, Value=1000)
def make_intkey_address(unique_value):
    """Intkey state address: namespace prefix + last 64 chars of the SHA-512."""
    digest = MessageFactory.sha512(unique_value.encode())
    return INTKEY_NAMESPACE + digest[-64:]
def make_xo_payload(unique_value):
    """Encode an XO 'create' action in the CSV wire format: b'<name>,create,'."""
    fields = (unique_value, 'create', '')
    return ','.join(fields).encode('utf-8')
def xo_encode(contents):
    """Identity encoder: XO payloads are already bytes, so pass them through."""
    return contents
def make_xo_address(unique_value):
    """XO state address: namespace prefix + first 64 chars of the SHA-512."""
    digest = MessageFactory.sha512(unique_value.encode())
    return XO_NAMESPACE + digest[:64]
# Per-family payload pipeline used by Transactor.create_txn:
# 'payload_func' builds a payload from a random value, 'encoder' turns it
# into bytes, and 'address_func' derives the state address it touches.
TRANSACTION_ENCODER = {
    Families.INTKEY: {
        # intkey payloads are CBOR-encoded dicts.
        'encoder': cbor.dumps,
        'payload_func': make_intkey_payload,
        'address_func': make_intkey_address
    },
    Families.XO: {
        # xo payloads are already UTF-8 bytes; xo_encode is a pass-through.
        'encoder': xo_encode,
        'payload_func': make_xo_payload,
        'address_func': make_xo_address
    }
}
class Transactor:
    """A named identity that signs and submits batches to a REST API.

    Wraps a secp256k1 signing key (read from /root/.sawtooth/keys),
    one MessageFactory per transaction family, and a RestClient used
    for batch submission.
    """

    def __init__(self, name, rest_endpoint):
        """
        Args:
            name (str): An identifier for this Transactor; also selects
                the private key file read from /root/.sawtooth/keys.
            rest_endpoint (str): The rest api that this Transactor will
                communicate with.
        """
        self.name = name
        # Normalize the endpoint so a bare "host:port" also works.
        self._rest_endpoint = rest_endpoint \
            if rest_endpoint.startswith("http://") \
            else "http://{}".format(rest_endpoint)
        # Key files hold a hex-encoded key, so UTF-8 is always safe here.
        with open(self._key_file(), encoding='utf-8') as priv_file:
            private_key = Secp256k1PrivateKey.from_hex(
                priv_file.read().strip('\n'))
        self._signer = CryptoFactory(create_context('secp256k1')) \
            .new_signer(private_key)
        self._factories = {}
        self._client = RestClient(url=self._rest_endpoint)
        self._add_transaction_family_factory(Families.INTKEY)
        self._add_transaction_family_factory(Families.XO)

    def _key_file(self):
        """Single source of truth for this identity's private key path."""
        return '/root/.sawtooth/keys/{}.priv'.format(self.name)

    @property
    def public_key(self):
        """str: This transactor's public key as a hex string."""
        return self._signer.get_public_key().as_hex()

    def _add_transaction_family_factory(self, family_name):
        """Add a MessageFactory for the specified family.

        Args:
            family_name (Families): One of the Enum values representing
                transaction families.
        """
        family_config = FAMILY_CONFIG[family_name]
        self._factories[family_name] = MessageFactory(
            family_name=family_config['family_name'],
            family_version=family_config['family_version'],
            namespace=family_config['namespace'],
            signer=self._signer)

    def create_txn(self, family_name, batcher=None):
        """Build one signed transaction with a random payload.

        Args:
            family_name (Families): Family to build the transaction for.
            batcher (str, optional): Public key of the expected batch
                signer; passed through to the MessageFactory.

        Returns:
            A signed transaction whose inputs and outputs are the single
            address derived from the random payload value.
        """
        unique_value = uuid4().hex[:20]
        encoder = TRANSACTION_ENCODER[family_name]['encoder']
        payload = encoder(
            TRANSACTION_ENCODER[family_name]['payload_func'](unique_value))
        address = TRANSACTION_ENCODER[family_name]['address_func'](
            unique_value)
        return self._factories[family_name].create_transaction(
            payload=payload,
            inputs=[address],
            outputs=[address],
            deps=[],
            batcher=batcher)

    def create_batch(self, family_name, count=1):
        """Create *count* random transactions and wrap them in a batch."""
        transactions = [self.create_txn(family_name) for _ in range(count)]
        return self.batch_transactions(family_name, transactions=transactions)

    def batch_transactions(self, family_name, transactions):
        """Sign *transactions* into a batch using the family's factory."""
        return self._factories[family_name].create_batch(
            transactions=transactions)

    def send(self, family_name, transactions=None):
        """Submit a batch to the REST API.

        Args:
            family_name (Families): Family used to build/sign the batch.
            transactions (list, optional): Pre-built transactions to
                batch; when omitted (or empty), a fresh single-transaction
                batch is created instead.
        """
        if not transactions:
            batch_list = self.create_batch(family_name)
        else:
            batch_list = self.batch_transactions(
                family_name=family_name,
                transactions=transactions)
        self._client.send_batches(batch_list=batch_list)

    def set_public_key_for_role(self, policy, role, permit_keys, deny_keys):
        """Create *policy* from key rules and bind *role* to it.

        DENY_KEY rules are emitted before PERMIT_KEY rules; rule order is
        significant to the identity policy (first match presumably wins —
        TODO confirm against the identity transaction processor).
        """
        permits = ["PERMIT_KEY {}".format(key) for key in permit_keys]
        denies = ["DENY_KEY {}".format(key) for key in deny_keys]
        self._run_identity_commands(policy, role, denies + permits)

    def _run_identity_commands(self, policy, role, rules):
        """Run the sawtooth CLI to create the policy, then bind the role.

        Raises:
            subprocess.CalledProcessError: If either CLI call fails
                (check=True).
        """
        subprocess.run(
            ['sawtooth', 'identity', 'policy', 'create',
             '-k', self._key_file(),
             '--wait', '15',
             '--url', self._rest_endpoint, policy, *rules],
            check=True)
        subprocess.run(
            ['sawtooth', 'identity', 'role', 'create',
             '-k', self._key_file(),
             '--wait', '15',
             '--url', self._rest_endpoint, role, policy],
            check=True)
| 34.952381 | 78 | 0.636455 |
import enum
import subprocess
import unittest
from uuid import uuid4
import cbor
from sawtooth_signing import create_context
from sawtooth_signing import CryptoFactory
from sawtooth_signing.secp256k1 import Secp256k1PrivateKey
from sawtooth_processor_test.message_factory import MessageFactory
from sawtooth_integration.tests.integration_tools import RestClient
from sawtooth_integration.tests.integration_tools import wait_for_rest_apis
REST_API = "rest-api:8008"
class TestTransactorPermissioning(unittest.TestCase):
@classmethod
def setUpClass(cls):
wait_for_rest_apis([REST_API])
cls.REST_ENDPOINT = "http://" + REST_API
def setUp(self):
self.alice = Transactor('alice', self.REST_ENDPOINT)
self.bob = Transactor('bob', self.REST_ENDPOINT)
self.carol = Transactor('carol', self.REST_ENDPOINT)
self.dave = Transactor('dave', self.REST_ENDPOINT)
self.chuck = Transactor('chuck', self.REST_ENDPOINT)
self.mallory = Transactor('mallory', self.REST_ENDPOINT)
self.walter = Transactor('walter', self.REST_ENDPOINT)
def test_transactor_permissioning(self):
self.walter.set_public_key_for_role(
'deny_bob_allow_alice_walter',
'transactor',
permit_keys=[self.alice.public_key, self.walter.public_key],
deny_keys=[self.bob.public_key])
self.assert_able_to_send(
(self.alice, Families.INTKEY),
(self.walter, Families.INTKEY))
self.assert_not_able_to_send(
(self.bob, Families.INTKEY),
(self.dave, Families.INTKEY),
(self.chuck, Families.INTKEY),
(self.mallory, Families.INTKEY))
self.walter.set_public_key_for_role(
'deny_alice_allow_bob_walter',
'transactor',
permit_keys=[self.bob.public_key, self.walter.public_key],
deny_keys=[self.alice.public_key])
self.assert_able_to_send(
(self.bob, Families.INTKEY),
(self.walter, Families.INTKEY))
self.assert_not_able_to_send(
(self.alice, Families.INTKEY),
(self.chuck, Families.INTKEY),
(self.dave, Families.INTKEY),
(self.mallory, Families.INTKEY))
self.walter.set_public_key_for_role(
"allow_all_transactors",
"transactor",
permit_keys=["*"],
deny_keys=[])
self.walter.set_public_key_for_role(
"deny_alice_as_batcher_allow_bob",
"transactor.batch_signer",
permit_keys=[self.bob.public_key, self.walter.public_key],
deny_keys=[self.alice.public_key])
txns = [self.alice.create_txn(
Families.INTKEY,
batcher=self.bob.public_key)]
self.assert_able_to_send_batch(
txns,
(self.bob, Families.INTKEY))
self.assert_not_able_to_send_batch(
txns,
(self.alice, Families.INTKEY))
daves_txns = [self.alice.create_txn(
Families.INTKEY,
batcher=self.dave.public_key)]
self.assert_not_able_to_send_batch(
daves_txns,
(self.dave, Families.INTKEY))
self.walter.set_public_key_for_role(
"allow_all_batchers",
"transactor.batch_signer",
permit_keys=["*"],
deny_keys=[])
self.walter.set_public_key_for_role(
"allow_carol_and_no_others",
"transactor.transaction_signer",
permit_keys=[self.carol.public_key, self.walter.public_key],
deny_keys=[self.alice.public_key, self.bob.public_key,
self.dave.public_key])
self.assert_able_to_send((self.carol, Families.INTKEY))
self.assert_not_able_to_send((self.carol, Families.XO))
self.assert_not_able_to_send(
(self.alice, Families.INTKEY),
(self.alice, Families.XO),
(self.bob, Families.INTKEY),
(self.bob, Families.XO),
(self.dave, Families.INTKEY),
(self.dave, Families.XO),
(self.chuck, Families.INTKEY),
(self.chuck, Families.XO),
(self.mallory, Families.INTKEY),
(self.mallory, Families.XO))
self.walter.set_public_key_for_role(
"allow_all_transaction_signers",
"transactor.transaction_signer",
permit_keys=["*"],
deny_keys=[])
self.walter.set_public_key_for_role(
"deny_alice_from_xo_allow_bob",
"transactor.transaction_signer.xo",
permit_keys=[self.bob.public_key, self.dave.public_key],
deny_keys=[self.alice.public_key])
self.assert_able_to_send((self.bob, Families.XO))
self.assert_not_able_to_send(
(self.alice, Families.XO),
(self.chuck, Families.XO),
(self.mallory, Families.XO),
(self.dave, Families.XO))
self.walter.set_public_key_for_role(
"deny_bob_from_intkey_allow_dave_alice",
"transactor.transaction_signer.intkey",
permit_keys=[self.alice.public_key, self.dave.public_key],
deny_keys=[self.bob.public_key])
self.assert_able_to_send((self.alice, Families.INTKEY))
self.assert_not_able_to_send(
(self.bob, Families.INTKEY),
(self.chuck, Families.INTKEY),
(self.mallory, Families.INTKEY),
(self.dave, Families.INTKEY))
def assert_able_to_send(self, *transactor_family_pairs):
for transactor, family in transactor_family_pairs:
transactor.send(family)
def assert_able_to_send_batch(self, txns, *transactor_family_pairs):
for transactor, family in transactor_family_pairs:
transactor.send(family_name=family, transactions=txns)
def assert_not_able_to_send(self, *transactor_family_pairs):
for transactor, family in transactor_family_pairs:
with self.assertRaises(Exception):
transactor.send(family)
def assert_not_able_to_send_batch(self, txns, *transactor_family_pairs):
for transactor, family in transactor_family_pairs:
with self.assertRaises(Exception):
transactor.send(family, txns)
INTKEY_NAMESPACE = MessageFactory.sha512('intkey'.encode())[:6]
XO_NAMESPACE = MessageFactory.sha512('xo'.encode())[:6]
class Families(enum.Enum):
INTKEY = 1
XO = 2
FAMILY_CONFIG = {
Families.INTKEY: {
'family_name': 'intkey',
'family_version': '1.0',
'namespace': MessageFactory.sha256('intkey'.encode())[:6]
},
Families.XO: {
'family_name': 'xo',
'family_version': '1.0',
'namespace': MessageFactory.sha256('xo'.encode())[:6]
},
}
def make_intkey_payload(unique_value):
return {'Verb': 'set', 'Name': unique_value, 'Value': 1000}
def make_intkey_address(unique_value):
return INTKEY_NAMESPACE + MessageFactory.sha512(
unique_value.encode())[-64:]
def make_xo_payload(unique_value):
return "{},{},{}".format(unique_value, 'create', '').encode('utf-8')
def xo_encode(contents):
return contents
def make_xo_address(unique_value):
return XO_NAMESPACE + MessageFactory.sha512(unique_value.encode())[:64]
TRANSACTION_ENCODER = {
Families.INTKEY: {
'encoder': cbor.dumps,
'payload_func': make_intkey_payload,
'address_func': make_intkey_address
},
Families.XO: {
'encoder': xo_encode,
'payload_func': make_xo_payload,
'address_func': make_xo_address
}
}
class Transactor(object):
def __init__(self, name, rest_endpoint):
self.name = name
self._rest_endpoint = rest_endpoint \
if rest_endpoint.startswith("http://") \
else "http://{}".format(rest_endpoint)
with open('/root/.sawtooth/keys/{}.priv'.format(name)) as priv_file:
private_key = Secp256k1PrivateKey.from_hex(
priv_file.read().strip('\n'))
self._signer = CryptoFactory(create_context('secp256k1')) \
.new_signer(private_key)
self._factories = {}
self._client = RestClient(url=self._rest_endpoint)
self._add_transaction_family_factory(Families.INTKEY)
self._add_transaction_family_factory(Families.XO)
@property
def public_key(self):
return self._signer.get_public_key().as_hex()
def _add_transaction_family_factory(self, family_name):
family_config = FAMILY_CONFIG[family_name]
self._factories[family_name] = MessageFactory(
family_name=family_config['family_name'],
family_version=family_config['family_version'],
namespace=family_config['namespace'],
signer=self._signer)
def create_txn(self, family_name, batcher=None):
unique_value = uuid4().hex[:20]
encoder = TRANSACTION_ENCODER[family_name]['encoder']
payload = encoder(
TRANSACTION_ENCODER[family_name]['payload_func'](unique_value))
address = TRANSACTION_ENCODER[family_name]['address_func'](
unique_value)
return self._factories[family_name].create_transaction(
payload=payload,
inputs=[address],
outputs=[address],
deps=[],
batcher=batcher)
def create_batch(self, family_name, count=1):
transactions = [self.create_txn(family_name) for _ in range(count)]
return self.batch_transactions(family_name, transactions=transactions)
def batch_transactions(self, family_name, transactions):
return self._factories[family_name].create_batch(
transactions=transactions)
def send(self, family_name, transactions=None):
if not transactions:
batch_list = self.create_batch(family_name)
else:
batch_list = self.batch_transactions(
family_name=family_name,
transactions=transactions)
self._client.send_batches(batch_list=batch_list)
def set_public_key_for_role(self, policy, role, permit_keys, deny_keys):
permits = ["PERMIT_KEY {}".format(key) for key in permit_keys]
denies = ["DENY_KEY {}".format(key) for key in deny_keys]
self._run_identity_commands(policy, role, denies + permits)
def _run_identity_commands(self, policy, role, rules):
subprocess.run(
['sawtooth', 'identity', 'policy', 'create',
'-k', '/root/.sawtooth/keys/{}.priv'.format(self.name),
'--wait', '15',
'--url', self._rest_endpoint, policy, *rules],
check=True)
subprocess.run(
['sawtooth', 'identity', 'role', 'create',
'-k', '/root/.sawtooth/keys/{}.priv'.format(self.name),
'--wait', '15',
'--url', self._rest_endpoint, role, policy],
check=True)
| true | true |
f734590f7846f821cd91f1a763773a979f4c0a23 | 1,928 | py | Python | LearnyMcLearnface/Layers/AffineLayer.py | alexweav/Deep-Learning | f245708e40f36c4734ea0d4a7e6587624e4b116f | [
"MIT"
] | null | null | null | LearnyMcLearnface/Layers/AffineLayer.py | alexweav/Deep-Learning | f245708e40f36c4734ea0d4a7e6587624e4b116f | [
"MIT"
] | null | null | null | LearnyMcLearnface/Layers/AffineLayer.py | alexweav/Deep-Learning | f245708e40f36c4734ea0d4a7e6587624e4b116f | [
"MIT"
] | 1 | 2018-06-23T14:47:03.000Z | 2018-06-23T14:47:03.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri May 06 14:54:11 2016
@author: Alexander Weaver
"""
"""
Performs an affine (fully connected) operation on its input
An affine layer with out_dim neurons takes a data array of size Nx(in_dim), x
and returns a linearly transformed Nx(out_dim) data array
The transformation result, z, is determined by a (in_dim)x(out_dim) weight matrix, W, and
a (out_dim) bias vector, b. The transformation of any one data point (one row in x) is given by:
z = Wx + b
Constructing this object initializes the parameters following a gaussian random distribution with
standard deviation given by weight_scale.
Forward propagating this object performs the affine transformation on the given array, X.
Backpropagating this object returns the derivatives of x, W, and b with respect to the final output of
the network.
"""
import numpy as np
class AffineLayer(object):
def __init__(self, in_dim, out_dim, weight_scale, data_type=np.float32):
self.in_dim = in_dim
self.out_dim = out_dim
self.weight_scale = weight_scale
self.data_type = data_type
self.W = np.random.randn(in_dim, out_dim) * weight_scale
self.W = self.W.astype(self.data_type)
self.b = np.zeros(out_dim)
self.b = self.b.astype(self.data_type)
def forward(self, x, W=None, b=None):
if W is None:
W = self.W
if b is None:
b = self.b
N = x.shape[0]
reshaped_x = x.reshape(N, np.prod(x.shape[1:]))
out = reshaped_x.dot(W) + b
self.cache_x = x
return out
def backward(self, dout):
x = self.cache_x
N = x.shape[0]
reshaped_x = x.reshape(N, np.prod(x.shape[1:]))
dx = dout.dot(np.transpose(self.W)).reshape(x.shape)
self.dW = np.transpose(reshaped_x).dot(dout)
self.db = np.sum(dout, axis=0)
return dx | 35.703704 | 102 | 0.650415 |
import numpy as np
class AffineLayer(object):
def __init__(self, in_dim, out_dim, weight_scale, data_type=np.float32):
self.in_dim = in_dim
self.out_dim = out_dim
self.weight_scale = weight_scale
self.data_type = data_type
self.W = np.random.randn(in_dim, out_dim) * weight_scale
self.W = self.W.astype(self.data_type)
self.b = np.zeros(out_dim)
self.b = self.b.astype(self.data_type)
def forward(self, x, W=None, b=None):
if W is None:
W = self.W
if b is None:
b = self.b
N = x.shape[0]
reshaped_x = x.reshape(N, np.prod(x.shape[1:]))
out = reshaped_x.dot(W) + b
self.cache_x = x
return out
def backward(self, dout):
x = self.cache_x
N = x.shape[0]
reshaped_x = x.reshape(N, np.prod(x.shape[1:]))
dx = dout.dot(np.transpose(self.W)).reshape(x.shape)
self.dW = np.transpose(reshaped_x).dot(dout)
self.db = np.sum(dout, axis=0)
return dx | true | true |
f73459a7469cfb0da671d5fc04aa4938378e823f | 883 | py | Python | wakepy/_linux.py | Mohammad-Mohsen/wakepy | baa09412924fb296e7b4e4e82eb8e45f592c032c | [
"MIT"
] | null | null | null | wakepy/_linux.py | Mohammad-Mohsen/wakepy | baa09412924fb296e7b4e4e82eb8e45f592c032c | [
"MIT"
] | null | null | null | wakepy/_linux.py | Mohammad-Mohsen/wakepy | baa09412924fb296e7b4e4e82eb8e45f592c032c | [
"MIT"
] | null | null | null | import subprocess
COMMAND = u'systemctl'
ARGS = [u'sleep.target', u'suspend.target', u'hibernate.target', u'hybrid-sleep.target']
# https://www.man7.org/linux/man-pages/man1/systemctl.1.html
if not subprocess.check_output('pidof systemd'):
raise NotImplementedError(
"wakepy has not yet support for init processes other than systemd. Pull requests welcome: https://github.com/np-8/wakepy"
)
def set_keepawake(keep_screen_awake=False):
"""
Set the keep-awake. During keep-awake, the CPU is not allowed to go to sleep
automatically until the `unset_keepawake` is called.
Parameters
-----------
keep_screen_awake: bool
Currently unused as the screen will remain active as a byproduct of preventing sleep.
"""
subprocess.run([COMMAND, u'mask', *ARGS])
def unset_keepawake():
subprocess.run([COMMAND, u'unmask', *ARGS])
| 30.448276 | 129 | 0.705549 | import subprocess
COMMAND = u'systemctl'
ARGS = [u'sleep.target', u'suspend.target', u'hibernate.target', u'hybrid-sleep.target']
if not subprocess.check_output('pidof systemd'):
raise NotImplementedError(
"wakepy has not yet support for init processes other than systemd. Pull requests welcome: https://github.com/np-8/wakepy"
)
def set_keepawake(keep_screen_awake=False):
subprocess.run([COMMAND, u'mask', *ARGS])
def unset_keepawake():
subprocess.run([COMMAND, u'unmask', *ARGS])
| true | true |
f73459f80917cdc270cb33a60e836b82656751da | 30 | py | Python | pushbots/__init__.py | damoon-cmpt376w/pushbots | de7c50da2cffd8527c51a78ab71afd1110e4376d | [
"MIT"
] | 11 | 2015-08-21T20:23:03.000Z | 2017-07-02T18:26:15.000Z | pushbots/__init__.py | damoon-cmpt376w/pushbots | de7c50da2cffd8527c51a78ab71afd1110e4376d | [
"MIT"
] | 3 | 2015-08-17T22:01:52.000Z | 2019-11-15T04:26:27.000Z | pushbots/__init__.py | damoon-cmpt376w/pushbots | de7c50da2cffd8527c51a78ab71afd1110e4376d | [
"MIT"
] | 6 | 2015-07-27T12:48:43.000Z | 2019-11-12T02:06:06.000Z | from .pushbots import Pushbots | 30 | 30 | 0.866667 | from .pushbots import Pushbots | true | true |
f7345c5c40688503298c593d4b121e16e47e7984 | 124,110 | py | Python | tools/sourcecode/Python-3.10.0/Lib/test/test_asyncio/test_tasks.py | gagominecraft12/Blueity-Client-Retrace | d42a927a85226d73da66123922d9ea11cc20ac3d | [
"MIT"
] | 32 | 2021-05-03T09:03:57.000Z | 2022-03-17T09:18:59.000Z | tools/sourcecode/Python-3.10.0/Lib/test/test_asyncio/test_tasks.py | gagominecraft12/Blueity-Client-Retrace | d42a927a85226d73da66123922d9ea11cc20ac3d | [
"MIT"
] | 4 | 2021-05-29T20:42:52.000Z | 2022-03-16T03:01:12.000Z | tools/sourcecode/Python-3.10.0/Lib/test/test_asyncio/test_tasks.py | gagominecraft12/Blueity-Client-Retrace | d42a927a85226d73da66123922d9ea11cc20ac3d | [
"MIT"
] | 3 | 2021-10-05T20:56:09.000Z | 2022-02-23T13:00:54.000Z | """Tests for tasks.py."""
import collections
import contextlib
import contextvars
import functools
import gc
import io
import random
import re
import sys
import textwrap
import traceback
import types
import unittest
import weakref
from unittest import mock
import asyncio
from asyncio import coroutines
from asyncio import futures
from asyncio import tasks
from test.test_asyncio import utils as test_utils
from test import support
from test.support.script_helper import assert_python_ok
def tearDownModule():
asyncio.set_event_loop_policy(None)
async def coroutine_function():
pass
@contextlib.contextmanager
def set_coroutine_debug(enabled):
coroutines = asyncio.coroutines
old_debug = coroutines._DEBUG
try:
coroutines._DEBUG = enabled
yield
finally:
coroutines._DEBUG = old_debug
def format_coroutine(qualname, state, src, source_traceback, generator=False):
if generator:
state = '%s' % state
else:
state = '%s, defined' % state
if source_traceback is not None:
frame = source_traceback[-1]
return ('coro=<%s() %s at %s> created at %s:%s'
% (qualname, state, src, frame[0], frame[1]))
else:
return 'coro=<%s() %s at %s>' % (qualname, state, src)
def get_innermost_context(exc):
"""
Return information about the innermost exception context in the chain.
"""
depth = 0
while True:
context = exc.__context__
if context is None:
break
exc = context
depth += 1
return (type(exc), exc.args, depth)
class Dummy:
def __repr__(self):
return '<Dummy>'
def __call__(self, *args):
pass
class CoroLikeObject:
def send(self, v):
raise StopIteration(42)
def throw(self, *exc):
pass
def close(self):
pass
def __await__(self):
return self
# The following value can be used as a very small timeout:
# it passes check "timeout > 0", but has almost
# no effect on the test performance
_EPSILON = 0.0001
class BaseTaskTests:
Task = None
Future = None
def new_task(self, loop, coro, name='TestTask'):
return self.__class__.Task(coro, loop=loop, name=name)
def new_future(self, loop):
return self.__class__.Future(loop=loop)
def setUp(self):
super().setUp()
self.loop = self.new_test_loop()
self.loop.set_task_factory(self.new_task)
self.loop.create_future = lambda: self.new_future(self.loop)
def test_task_cancel_message_getter(self):
async def coro():
pass
t = self.new_task(self.loop, coro())
self.assertTrue(hasattr(t, '_cancel_message'))
self.assertEqual(t._cancel_message, None)
t.cancel('my message')
self.assertEqual(t._cancel_message, 'my message')
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(t)
def test_task_cancel_message_setter(self):
async def coro():
pass
t = self.new_task(self.loop, coro())
t.cancel('my message')
t._cancel_message = 'my new message'
self.assertEqual(t._cancel_message, 'my new message')
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(t)
def test_task_del_collect(self):
class Evil:
def __del__(self):
gc.collect()
async def run():
return Evil()
self.loop.run_until_complete(
asyncio.gather(*[
self.new_task(self.loop, run()) for _ in range(100)
]))
def test_other_loop_future(self):
other_loop = asyncio.new_event_loop()
fut = self.new_future(other_loop)
async def run(fut):
await fut
try:
with self.assertRaisesRegex(RuntimeError,
r'Task .* got Future .* attached'):
self.loop.run_until_complete(run(fut))
finally:
other_loop.close()
def test_task_awaits_on_itself(self):
async def test():
await task
task = asyncio.ensure_future(test(), loop=self.loop)
with self.assertRaisesRegex(RuntimeError,
'Task cannot await on itself'):
self.loop.run_until_complete(task)
def test_task_class(self):
async def notmuch():
return 'ok'
t = self.new_task(self.loop, notmuch())
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
self.assertIs(t._loop, self.loop)
self.assertIs(t.get_loop(), self.loop)
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
t = self.new_task(loop, notmuch())
self.assertIs(t._loop, loop)
loop.run_until_complete(t)
loop.close()
def test_ensure_future_coroutine(self):
async def notmuch():
return 'ok'
t = asyncio.ensure_future(notmuch(), loop=self.loop)
self.assertIs(t._loop, self.loop)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
a = notmuch()
self.addCleanup(a.close)
with self.assertWarns(DeprecationWarning) as cm:
with self.assertRaisesRegex(RuntimeError, 'There is no current event loop'):
asyncio.ensure_future(a)
self.assertEqual(cm.warnings[0].filename, __file__)
async def test():
return asyncio.ensure_future(notmuch())
t = self.loop.run_until_complete(test())
self.assertIs(t._loop, self.loop)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
# Deprecated in 3.10
asyncio.set_event_loop(self.loop)
self.addCleanup(asyncio.set_event_loop, None)
with self.assertWarns(DeprecationWarning) as cm:
t = asyncio.ensure_future(notmuch())
self.assertEqual(cm.warnings[0].filename, __file__)
self.assertIs(t._loop, self.loop)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
def test_ensure_future_coroutine_2(self):
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def notmuch():
return 'ok'
t = asyncio.ensure_future(notmuch(), loop=self.loop)
self.assertIs(t._loop, self.loop)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
a = notmuch()
self.addCleanup(a.close)
with self.assertWarns(DeprecationWarning) as cm:
with self.assertRaisesRegex(RuntimeError, 'There is no current event loop'):
asyncio.ensure_future(a)
self.assertEqual(cm.warnings[0].filename, __file__)
async def test():
return asyncio.ensure_future(notmuch())
t = self.loop.run_until_complete(test())
self.assertIs(t._loop, self.loop)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
# Deprecated in 3.10
asyncio.set_event_loop(self.loop)
self.addCleanup(asyncio.set_event_loop, None)
with self.assertWarns(DeprecationWarning) as cm:
t = asyncio.ensure_future(notmuch())
self.assertEqual(cm.warnings[0].filename, __file__)
self.assertIs(t._loop, self.loop)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
def test_ensure_future_future(self):
f_orig = self.new_future(self.loop)
f_orig.set_result('ko')
f = asyncio.ensure_future(f_orig)
self.loop.run_until_complete(f)
self.assertTrue(f.done())
self.assertEqual(f.result(), 'ko')
self.assertIs(f, f_orig)
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
with self.assertRaises(ValueError):
f = asyncio.ensure_future(f_orig, loop=loop)
loop.close()
f = asyncio.ensure_future(f_orig, loop=self.loop)
self.assertIs(f, f_orig)
def test_ensure_future_task(self):
async def notmuch():
return 'ok'
t_orig = self.new_task(self.loop, notmuch())
t = asyncio.ensure_future(t_orig)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
self.assertIs(t, t_orig)
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
with self.assertRaises(ValueError):
t = asyncio.ensure_future(t_orig, loop=loop)
loop.close()
t = asyncio.ensure_future(t_orig, loop=self.loop)
self.assertIs(t, t_orig)
def test_ensure_future_awaitable(self):
class Aw:
def __init__(self, coro):
self.coro = coro
def __await__(self):
return (yield from self.coro)
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def coro():
return 'ok'
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
fut = asyncio.ensure_future(Aw(coro()), loop=loop)
loop.run_until_complete(fut)
assert fut.result() == 'ok'
def test_ensure_future_neither(self):
with self.assertRaises(TypeError):
asyncio.ensure_future('ok')
def test_ensure_future_error_msg(self):
loop = asyncio.new_event_loop()
f = self.new_future(self.loop)
with self.assertRaisesRegex(ValueError, 'The future belongs to a '
'different loop than the one specified as '
'the loop argument'):
asyncio.ensure_future(f, loop=loop)
loop.close()
def test_get_stack(self):
T = None
async def foo():
await bar()
async def bar():
# test get_stack()
f = T.get_stack(limit=1)
try:
self.assertEqual(f[0].f_code.co_name, 'foo')
finally:
f = None
# test print_stack()
file = io.StringIO()
T.print_stack(limit=1, file=file)
file.seek(0)
tb = file.read()
self.assertRegex(tb, r'foo\(\) running')
async def runner():
nonlocal T
T = asyncio.ensure_future(foo(), loop=self.loop)
await T
self.loop.run_until_complete(runner())
def test_task_repr(self):
self.loop.set_debug(False)
async def notmuch():
return 'abc'
# test coroutine function
self.assertEqual(notmuch.__name__, 'notmuch')
self.assertRegex(notmuch.__qualname__,
r'\w+.test_task_repr.<locals>.notmuch')
self.assertEqual(notmuch.__module__, __name__)
filename, lineno = test_utils.get_function_source(notmuch)
src = "%s:%s" % (filename, lineno)
# test coroutine object
gen = notmuch()
coro_qualname = 'BaseTaskTests.test_task_repr.<locals>.notmuch'
self.assertEqual(gen.__name__, 'notmuch')
self.assertEqual(gen.__qualname__, coro_qualname)
# test pending Task
t = self.new_task(self.loop, gen)
t.add_done_callback(Dummy())
coro = format_coroutine(coro_qualname, 'running', src,
t._source_traceback, generator=True)
self.assertEqual(repr(t),
"<Task pending name='TestTask' %s cb=[<Dummy>()]>" % coro)
# test cancelling Task
t.cancel() # Does not take immediate effect!
self.assertEqual(repr(t),
"<Task cancelling name='TestTask' %s cb=[<Dummy>()]>" % coro)
# test cancelled Task
self.assertRaises(asyncio.CancelledError,
self.loop.run_until_complete, t)
coro = format_coroutine(coro_qualname, 'done', src,
t._source_traceback)
self.assertEqual(repr(t),
"<Task cancelled name='TestTask' %s>" % coro)
# test finished Task
t = self.new_task(self.loop, notmuch())
self.loop.run_until_complete(t)
coro = format_coroutine(coro_qualname, 'done', src,
t._source_traceback)
self.assertEqual(repr(t),
"<Task finished name='TestTask' %s result='abc'>" % coro)
def test_task_repr_autogenerated(self):
async def notmuch():
return 123
t1 = self.new_task(self.loop, notmuch(), None)
t2 = self.new_task(self.loop, notmuch(), None)
self.assertNotEqual(repr(t1), repr(t2))
match1 = re.match(r"^<Task pending name='Task-(\d+)'", repr(t1))
self.assertIsNotNone(match1)
match2 = re.match(r"^<Task pending name='Task-(\d+)'", repr(t2))
self.assertIsNotNone(match2)
# Autogenerated task names should have monotonically increasing numbers
self.assertLess(int(match1.group(1)), int(match2.group(1)))
self.loop.run_until_complete(t1)
self.loop.run_until_complete(t2)
def test_task_repr_name_not_str(self):
async def notmuch():
return 123
t = self.new_task(self.loop, notmuch())
t.set_name({6})
self.assertEqual(t.get_name(), '{6}')
self.loop.run_until_complete(t)
def test_task_repr_coro_decorator(self):
self.loop.set_debug(False)
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def notmuch():
# notmuch() function doesn't use yield from: it will be wrapped by
# @coroutine decorator
return 123
# test coroutine function
self.assertEqual(notmuch.__name__, 'notmuch')
self.assertRegex(notmuch.__qualname__,
r'\w+.test_task_repr_coro_decorator'
r'\.<locals>\.notmuch')
self.assertEqual(notmuch.__module__, __name__)
# test coroutine object
gen = notmuch()
# On Python >= 3.5, generators now inherit the name of the
# function, as expected, and have a qualified name (__qualname__
# attribute).
coro_name = 'notmuch'
coro_qualname = ('BaseTaskTests.test_task_repr_coro_decorator'
'.<locals>.notmuch')
self.assertEqual(gen.__name__, coro_name)
self.assertEqual(gen.__qualname__, coro_qualname)
# test repr(CoroWrapper)
if coroutines._DEBUG:
# format the coroutine object
if coroutines._DEBUG:
filename, lineno = test_utils.get_function_source(notmuch)
frame = gen._source_traceback[-1]
coro = ('%s() running, defined at %s:%s, created at %s:%s'
% (coro_qualname, filename, lineno,
frame[0], frame[1]))
else:
code = gen.gi_code
coro = ('%s() running at %s:%s'
% (coro_qualname, code.co_filename,
code.co_firstlineno))
self.assertEqual(repr(gen), '<CoroWrapper %s>' % coro)
# test pending Task
t = self.new_task(self.loop, gen)
t.add_done_callback(Dummy())
# format the coroutine object
if coroutines._DEBUG:
src = '%s:%s' % test_utils.get_function_source(notmuch)
else:
code = gen.gi_code
src = '%s:%s' % (code.co_filename, code.co_firstlineno)
coro = format_coroutine(coro_qualname, 'running', src,
t._source_traceback,
generator=not coroutines._DEBUG)
self.assertEqual(repr(t),
"<Task pending name='TestTask' %s cb=[<Dummy>()]>" % coro)
self.loop.run_until_complete(t)
def test_task_repr_wait_for(self):
        """repr() of a task blocked on a future includes 'wait_for=<future repr>'."""
        self.loop.set_debug(False)
        async def wait_for(fut):
            return await fut
        fut = self.new_future(self.loop)
        task = self.new_task(self.loop, wait_for(fut))
        test_utils.run_briefly(self.loop)
        self.assertRegex(repr(task),
                         '<Task .* wait_for=%s>' % re.escape(repr(fut)))
        fut.set_result(None)
        self.loop.run_until_complete(task)
def test_task_repr_partial_corowrapper(self):
        # Issue #222: repr(CoroWrapper) must not fail in debug mode if the
        # coroutine is a partial function
        """repr() of a debug-mode CoroWrapper around functools.partial must not raise."""
        with set_coroutine_debug(True):
            self.loop.set_debug(True)
            async def func(x, y):
                await asyncio.sleep(0)
            with self.assertWarns(DeprecationWarning):
                partial_func = asyncio.coroutine(functools.partial(func, 1))
            task = self.loop.create_task(partial_func(2))
            # make warnings quiet
            task._log_destroy_pending = False
            self.addCleanup(task._coro.close)
        coro_repr = repr(task._coro)
        expected = (
            r'<coroutine object \w+\.test_task_repr_partial_corowrapper'
            r'\.<locals>\.func at'
        )
        self.assertRegex(coro_repr, expected)
def test_task_basics(self):
        """A coroutine that awaits two sub-coroutines returns their combined result."""
        async def inner1():
            return 42
        async def inner2():
            return 1000
        async def outer():
            first = await inner1()
            second = await inner2()
            return first + second
        coro = outer()
        self.assertEqual(self.loop.run_until_complete(coro), 1042)
def test_exception_chaining_after_await(self):
        # Test that when awaiting on a task when an exception is already
        # active, if the task raises an exception it will be chained
        # with the original.
        loop = asyncio.new_event_loop()
        self.set_event_loop(loop)
        async def raise_error():
            raise ValueError
        async def run():
            try:
                raise KeyError(3)
            except Exception as exc:
                task = self.new_task(loop, raise_error())
                try:
                    await task
                except Exception as exc:
                    # The ValueError from the task must carry the active
                    # KeyError(3) as its implicit __context__.
                    self.assertEqual(type(exc), ValueError)
                    chained = exc.__context__
                    self.assertEqual((type(chained), chained.args),
                                     (KeyError, (3,)))
        try:
            task = self.new_task(loop, run())
            loop.run_until_complete(task)
        finally:
            loop.close()
def test_exception_chaining_after_await_with_context_cycle(self):
        # Check trying to create an exception context cycle:
        # https://bugs.python.org/issue40696
        has_cycle = None
        loop = asyncio.new_event_loop()
        self.set_event_loop(loop)
        async def process_exc(exc):
            raise exc
        async def run():
            nonlocal has_cycle
            try:
                raise KeyError('a')
            except Exception as exc:
                # Re-raise the *same* exception object from a task while it is
                # still the active exception here; chaining must not produce
                # exc.__context__ is exc.
                task = self.new_task(loop, process_exc(exc))
                try:
                    await task
                except BaseException as exc:
                    has_cycle = (exc is exc.__context__)
                    # Prevent a hang if has_cycle is True.
                    exc.__context__ = None
        try:
            task = self.new_task(loop, run())
            loop.run_until_complete(task)
        finally:
            loop.close()
        # This also distinguishes from the initial has_cycle=None.
        self.assertEqual(has_cycle, False)
def test_cancel(self):
        """Cancelling a sleeping task raises CancelledError and marks it done/cancelled."""
        def gen():
            # Mock clock: the task schedules a single 10-second sleep.
            when = yield
            self.assertAlmostEqual(10.0, when)
            yield 0
        loop = self.new_test_loop(gen)
        async def task():
            await asyncio.sleep(10.0)
            return 12
        t = self.new_task(loop, task())
        loop.call_soon(t.cancel)
        with self.assertRaises(asyncio.CancelledError):
            loop.run_until_complete(t)
        self.assertTrue(t.done())
        self.assertTrue(t.cancelled())
        # cancel() on an already-finished task is a no-op returning False.
        self.assertFalse(t.cancel())
def test_cancel_with_message_then_future_result(self):
        # Test Future.result() after calling cancel() with a message.
        cases = [
            ((), ()),
            ((None,), ()),
            (('my message',), ('my message',)),
            # Non-string values should roundtrip.
            ((5,), (5,)),
        ]
        for cancel_args, expected_args in cases:
            with self.subTest(cancel_args=cancel_args):
                loop = asyncio.new_event_loop()
                self.set_event_loop(loop)
                async def sleep():
                    await asyncio.sleep(10)
                async def coro():
                    task = self.new_task(loop, sleep())
                    await asyncio.sleep(0)
                    task.cancel(*cancel_args)
                    done, pending = await asyncio.wait([task])
                    # Accessing result() of the cancelled task re-raises
                    # CancelledError carrying the cancel message.
                    task.result()
                task = self.new_task(loop, coro())
                with self.assertRaises(asyncio.CancelledError) as cm:
                    loop.run_until_complete(task)
                exc = cm.exception
                self.assertEqual(exc.args, ())
                # The message must survive at the innermost chained exception,
                # two levels deep (outer task -> inner task).
                actual = get_innermost_context(exc)
                self.assertEqual(actual,
                                 (asyncio.CancelledError, expected_args, 2))
def test_cancel_with_message_then_future_exception(self):
        # Test Future.exception() after calling cancel() with a message.
        cases = [
            ((), ()),
            ((None,), ()),
            (('my message',), ('my message',)),
            # Non-string values should roundtrip.
            ((5,), (5,)),
        ]
        for cancel_args, expected_args in cases:
            with self.subTest(cancel_args=cancel_args):
                loop = asyncio.new_event_loop()
                self.set_event_loop(loop)
                async def sleep():
                    await asyncio.sleep(10)
                async def coro():
                    task = self.new_task(loop, sleep())
                    await asyncio.sleep(0)
                    task.cancel(*cancel_args)
                    done, pending = await asyncio.wait([task])
                    # Unlike result(), exception() also re-raises
                    # CancelledError for a cancelled task.
                    task.exception()
                task = self.new_task(loop, coro())
                with self.assertRaises(asyncio.CancelledError) as cm:
                    loop.run_until_complete(task)
                exc = cm.exception
                self.assertEqual(exc.args, ())
                actual = get_innermost_context(exc)
                self.assertEqual(actual,
                                 (asyncio.CancelledError, expected_args, 2))
def test_cancel_with_message_before_starting_task(self):
        """A cancel message is preserved even when the task never got to run."""
        loop = asyncio.new_event_loop()
        self.set_event_loop(loop)
        async def sleep():
            await asyncio.sleep(10)
        async def coro():
            task = self.new_task(loop, sleep())
            # We deliberately leave out the sleep here.
            task.cancel('my message')
            done, pending = await asyncio.wait([task])
            task.exception()
        task = self.new_task(loop, coro())
        with self.assertRaises(asyncio.CancelledError) as cm:
            loop.run_until_complete(task)
        exc = cm.exception
        self.assertEqual(exc.args, ())
        actual = get_innermost_context(exc)
        self.assertEqual(actual,
                         (asyncio.CancelledError, ('my message',), 2))
def test_cancel_yield(self):
        """Cancelling a started legacy @coroutine (generator-based) task works."""
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def task():
                yield
                yield
                return 12
        t = self.new_task(self.loop, task())
        test_utils.run_briefly(self.loop)  # start coro
        t.cancel()
        self.assertRaises(
            asyncio.CancelledError, self.loop.run_until_complete, t)
        self.assertTrue(t.done())
        self.assertTrue(t.cancelled())
        # Re-cancelling a finished task returns False.
        self.assertFalse(t.cancel())
def test_cancel_inner_future(self):
        """Cancelling the future a task awaits cancels the task as well."""
        f = self.new_future(self.loop)
        async def task():
            await f
            return 12
        t = self.new_task(self.loop, task())
        test_utils.run_briefly(self.loop)  # start task
        f.cancel()
        with self.assertRaises(asyncio.CancelledError):
            self.loop.run_until_complete(t)
        self.assertTrue(f.cancelled())
        self.assertTrue(t.cancelled())
def test_cancel_both_task_and_inner_future(self):
        """Cancelling both the awaited future and the task ends in cancellation."""
        f = self.new_future(self.loop)
        async def task():
            await f
            return 12
        t = self.new_task(self.loop, task())
        test_utils.run_briefly(self.loop)
        f.cancel()
        t.cancel()
        with self.assertRaises(asyncio.CancelledError):
            self.loop.run_until_complete(t)
        self.assertTrue(t.done())
        self.assertTrue(f.cancelled())
        self.assertTrue(t.cancelled())
def test_cancel_task_catching(self):
        """A task that catches CancelledError can complete normally; it is not cancelled()."""
        fut1 = self.new_future(self.loop)
        fut2 = self.new_future(self.loop)
        async def task():
            await fut1
            try:
                await fut2
            except asyncio.CancelledError:
                return 42
        t = self.new_task(self.loop, task())
        test_utils.run_briefly(self.loop)
        self.assertIs(t._fut_waiter, fut1)  # White-box test.
        fut1.set_result(None)
        test_utils.run_briefly(self.loop)
        self.assertIs(t._fut_waiter, fut2)  # White-box test.
        t.cancel()
        self.assertTrue(fut2.cancelled())
        res = self.loop.run_until_complete(t)
        self.assertEqual(res, 42)
        self.assertFalse(t.cancelled())
def test_cancel_task_ignoring(self):
        """A task that swallows CancelledError can keep awaiting further futures."""
        fut1 = self.new_future(self.loop)
        fut2 = self.new_future(self.loop)
        fut3 = self.new_future(self.loop)
        async def task():
            await fut1
            try:
                await fut2
            except asyncio.CancelledError:
                pass
            res = await fut3
            return res
        t = self.new_task(self.loop, task())
        test_utils.run_briefly(self.loop)
        self.assertIs(t._fut_waiter, fut1)  # White-box test.
        fut1.set_result(None)
        test_utils.run_briefly(self.loop)
        self.assertIs(t._fut_waiter, fut2)  # White-box test.
        t.cancel()
        self.assertTrue(fut2.cancelled())
        test_utils.run_briefly(self.loop)
        # After ignoring the cancellation, the task is waiting on fut3;
        # fut3 itself must not be cancelled.
        self.assertIs(t._fut_waiter, fut3)  # White-box test.
        fut3.set_result(42)
        res = self.loop.run_until_complete(t)
        self.assertEqual(res, 42)
        self.assertFalse(fut3.cancelled())
        self.assertFalse(t.cancelled())
def test_cancel_current_task(self):
        """A running task may cancel itself; the pending sleep is cut short."""
        loop = asyncio.new_event_loop()
        self.set_event_loop(loop)
        async def task():
            t.cancel()
            self.assertTrue(t._must_cancel)  # White-box test.
            # The sleep should be cancelled immediately.
            await asyncio.sleep(100)
            return 12
        t = self.new_task(loop, task())
        self.assertFalse(t.cancelled())
        self.assertRaises(
            asyncio.CancelledError, loop.run_until_complete, t)
        self.assertTrue(t.done())
        self.assertTrue(t.cancelled())
        self.assertFalse(t._must_cancel)  # White-box test.
        self.assertFalse(t.cancel())
def test_cancel_at_end(self):
        """A coroutine that returns right after cancelling its own task still
        ends up cancelled: the pending self-cancellation wins over the return
        value."""
        loop = asyncio.new_event_loop()
        self.set_event_loop(loop)
        async def task():
            t.cancel()
            self.assertTrue(t._must_cancel)  # White-box test.
            return 12
        t = self.new_task(loop, task())
        self.assertFalse(t.cancelled())
        self.assertRaises(
            asyncio.CancelledError, loop.run_until_complete, t)
        self.assertTrue(t.done())
        self.assertTrue(t.cancelled())
        self.assertFalse(t._must_cancel)  # White-box test.
        self.assertFalse(t.cancel())
def test_cancel_awaited_task(self):
        # This tests for a relatively rare condition when
        # a task cancellation is requested for a task which is not
        # currently blocked, such as a task cancelling itself.
        # In this situation we must ensure that whatever next future
        # or task the cancelled task blocks on is cancelled correctly
        # as well. See also bpo-34872.
        loop = asyncio.new_event_loop()
        self.addCleanup(lambda: loop.close())
        task = nested_task = None
        fut = self.new_future(loop)
        async def nested():
            await fut
        async def coro():
            nonlocal nested_task
            # Create a sub-task and wait for it to run.
            nested_task = self.new_task(loop, nested())
            await asyncio.sleep(0)
            # Request the current task to be cancelled.
            task.cancel()
            # Block on the nested task, which should be immediately
            # cancelled.
            await nested_task
        task = self.new_task(loop, coro())
        with self.assertRaises(asyncio.CancelledError):
            loop.run_until_complete(task)
        # Cancellation must propagate through the whole await chain.
        self.assertTrue(task.cancelled())
        self.assertTrue(nested_task.cancelled())
        self.assertTrue(fut.cancelled())
def assert_text_contains(self, text, substr):
        """Raise RuntimeError unless *substr* occurs inside *text*."""
        if substr in text:
            return
        raise RuntimeError(f'text {substr!r} not found in:\n>>>{text}<<<')
def test_cancel_traceback_for_future_result(self):
        # When calling Future.result() on a cancelled task, check that the
        # line of code that was interrupted is included in the traceback.
        loop = asyncio.new_event_loop()
        self.set_event_loop(loop)
        async def nested():
            # This will get cancelled immediately.
            await asyncio.sleep(10)
        async def coro():
            task = self.new_task(loop, nested())
            await asyncio.sleep(0)
            task.cancel()
            await task  # search target
        task = self.new_task(loop, coro())
        try:
            loop.run_until_complete(task)
        except asyncio.CancelledError:
            tb = traceback.format_exc()
            # Both the interrupted line and the awaiting line must appear.
            self.assert_text_contains(tb, "await asyncio.sleep(10)")
            # The intermediate await should also be included.
            self.assert_text_contains(tb, "await task  # search target")
        else:
            self.fail('CancelledError did not occur')
def test_cancel_traceback_for_future_exception(self):
        # When calling Future.exception() on a cancelled task, check that the
        # line of code that was interrupted is included in the traceback.
        loop = asyncio.new_event_loop()
        self.set_event_loop(loop)
        async def nested():
            # This will get cancelled immediately.
            await asyncio.sleep(10)
        async def coro():
            task = self.new_task(loop, nested())
            await asyncio.sleep(0)
            task.cancel()
            done, pending = await asyncio.wait([task])
            task.exception()  # search target
        task = self.new_task(loop, coro())
        try:
            loop.run_until_complete(task)
        except asyncio.CancelledError:
            tb = traceback.format_exc()
            # Both the interrupted line and the exception() call must appear.
            self.assert_text_contains(tb, "await asyncio.sleep(10)")
            # The intermediate await should also be included.
            self.assert_text_contains(tb,
                "task.exception()  # search target")
        else:
            self.fail('CancelledError did not occur')
def test_stop_while_run_in_complete(self):
        """loop.stop() during run_until_complete() raises RuntimeError and
        leaves the task pending."""
        def gen():
            # Mock clock: three 0.1s sleeps are scheduled before stop().
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0.1
            self.assertAlmostEqual(0.2, when)
            when = yield 0.1
            self.assertAlmostEqual(0.3, when)
            yield 0.1
        loop = self.new_test_loop(gen)
        x = 0
        async def task():
            nonlocal x
            while x < 10:
                await asyncio.sleep(0.1)
                x += 1
                if x == 2:
                    loop.stop()
        t = self.new_task(loop, task())
        with self.assertRaises(RuntimeError) as cm:
            loop.run_until_complete(t)
        self.assertEqual(str(cm.exception),
                         'Event loop stopped before Future completed.')
        self.assertFalse(t.done())
        self.assertEqual(x, 2)
        self.assertAlmostEqual(0.3, loop.time())
        t.cancel()
        self.assertRaises(asyncio.CancelledError, loop.run_until_complete, t)
def test_log_traceback(self):
        """Task._log_traceback may only ever be set to False by user code."""
        async def coro():
            pass
        task = self.new_task(self.loop, coro())
        with self.assertRaisesRegex(ValueError, 'can only be set to False'):
            task._log_traceback = True
        self.loop.run_until_complete(task)
def test_wait_for_timeout_less_then_0_or_0_future_done(self):
        """wait_for(fut, 0) returns immediately when the future is already done."""
        def gen():
            when = yield
            self.assertAlmostEqual(0, when)
        loop = self.new_test_loop(gen)
        fut = self.new_future(loop)
        fut.set_result('done')
        ret = loop.run_until_complete(asyncio.wait_for(fut, 0))
        self.assertEqual(ret, 'done')
        self.assertTrue(fut.done())
        # No time may have been consumed on the mock clock.
        self.assertAlmostEqual(0, loop.time())
def test_wait_for_timeout_less_then_0_or_0_coroutine_do_not_started(self):
        """wait_for(coro, 0) times out without ever starting the coroutine."""
        def gen():
            when = yield
            self.assertAlmostEqual(0, when)
        loop = self.new_test_loop(gen)
        foo_started = False
        async def foo():
            nonlocal foo_started
            foo_started = True
        with self.assertRaises(asyncio.TimeoutError):
            loop.run_until_complete(asyncio.wait_for(foo(), 0))
        self.assertAlmostEqual(0, loop.time())
        # The coroutine body must not have run at all.
        self.assertEqual(foo_started, False)
def test_wait_for_timeout_less_then_0_or_0(self):
        """wait_for on an already-running task with timeout <= 0 cancels it
        immediately; the task's finally block still runs."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.2, when)
            when = yield 0
            self.assertAlmostEqual(0, when)
        for timeout in [0, -1]:
            with self.subTest(timeout=timeout):
                loop = self.new_test_loop(gen)
                foo_running = None
                async def foo():
                    nonlocal foo_running
                    foo_running = True
                    try:
                        await asyncio.sleep(0.2)
                    finally:
                        foo_running = False
                    return 'done'
                fut = self.new_task(loop, foo())
                with self.assertRaises(asyncio.TimeoutError):
                    loop.run_until_complete(asyncio.wait_for(fut, timeout))
                self.assertTrue(fut.done())
                # it should have been cancelled due to the timeout
                self.assertTrue(fut.cancelled())
                self.assertAlmostEqual(0, loop.time())
                self.assertEqual(foo_running, False)
def test_wait_for(self):
        """wait_for with a positive timeout cancels the task when it expires."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.2, when)
            when = yield 0
            self.assertAlmostEqual(0.1, when)
            when = yield 0.1
        loop = self.new_test_loop(gen)
        foo_running = None
        async def foo():
            nonlocal foo_running
            foo_running = True
            try:
                await asyncio.sleep(0.2)
            finally:
                foo_running = False
            return 'done'
        fut = self.new_task(loop, foo())
        with self.assertRaises(asyncio.TimeoutError):
            loop.run_until_complete(asyncio.wait_for(fut, 0.1))
        self.assertTrue(fut.done())
        # it should have been cancelled due to the timeout
        self.assertTrue(fut.cancelled())
        self.assertAlmostEqual(0.1, loop.time())
        self.assertEqual(foo_running, False)
def test_wait_for_blocking(self):
        """wait_for(..., timeout=None) simply awaits the coroutine to completion."""
        loop = self.new_test_loop()
        async def coro():
            return 'done'
        result = loop.run_until_complete(
            asyncio.wait_for(coro(), timeout=None))
        self.assertEqual(result, 'done')
def test_wait_for_race_condition(self):
        """A result set just before the wait_for timeout fires is delivered, not lost."""
        def gen():
            yield 0.1
            yield 0.1
            yield 0.1
        loop = self.new_test_loop(gen)
        fut = self.new_future(loop)
        task = asyncio.wait_for(fut, timeout=0.2)
        # Result arrives at t=0.1, before the 0.2s timeout.
        loop.call_later(0.1, fut.set_result, "ok")
        res = loop.run_until_complete(task)
        self.assertEqual(res, "ok")
def test_wait_for_cancellation_race_condition(self):
        """Cancelling wait_for at the same tick the future completes still
        returns the completed result rather than raising."""
        def gen():
            yield 0.1
            yield 0.1
            yield 0.1
            yield 0.1
        loop = self.new_test_loop(gen)
        fut = self.new_future(loop)
        loop.call_later(0.1, fut.set_result, "ok")
        task = loop.create_task(asyncio.wait_for(fut, timeout=1))
        loop.call_later(0.1, task.cancel)
        res = loop.run_until_complete(task)
        self.assertEqual(res, "ok")
def test_wait_for_waits_for_task_cancellation(self):
        """On timeout, wait_for waits until the inner task finishes handling
        its cancellation (even if that handling itself awaits)."""
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)
        task_done = False
        async def foo():
            async def inner():
                nonlocal task_done
                try:
                    await asyncio.sleep(0.2)
                except asyncio.CancelledError:
                    # Delay the cancellation; wait_for must still wait for us.
                    await asyncio.sleep(_EPSILON)
                    raise
                finally:
                    task_done = True
            inner_task = self.new_task(loop, inner())
            await asyncio.wait_for(inner_task, timeout=_EPSILON)
        with self.assertRaises(asyncio.TimeoutError) as cm:
            loop.run_until_complete(foo())
        self.assertTrue(task_done)
        # The TimeoutError must chain from the inner CancelledError.
        chained = cm.exception.__context__
        self.assertEqual(type(chained), asyncio.CancelledError)
def test_wait_for_waits_for_task_cancellation_w_timeout_0(self):
        """Same as test_wait_for_waits_for_task_cancellation but with timeout=0."""
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)
        task_done = False
        async def foo():
            async def inner():
                nonlocal task_done
                try:
                    await asyncio.sleep(10)
                except asyncio.CancelledError:
                    await asyncio.sleep(_EPSILON)
                    raise
                finally:
                    task_done = True
            inner_task = self.new_task(loop, inner())
            # Let inner() start before timing it out with timeout=0.
            await asyncio.sleep(_EPSILON)
            await asyncio.wait_for(inner_task, timeout=0)
        with self.assertRaises(asyncio.TimeoutError) as cm:
            loop.run_until_complete(foo())
        self.assertTrue(task_done)
        chained = cm.exception.__context__
        self.assertEqual(type(chained), asyncio.CancelledError)
def test_wait_for_reraises_exception_during_cancellation(self):
        """An exception raised while the timed-out task is being cancelled
        propagates out of wait_for instead of TimeoutError."""
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)
        class FooException(Exception):
            pass
        async def foo():
            async def inner():
                try:
                    await asyncio.sleep(0.2)
                finally:
                    raise FooException
            inner_task = self.new_task(loop, inner())
            await asyncio.wait_for(inner_task, timeout=_EPSILON)
        with self.assertRaises(FooException):
            loop.run_until_complete(foo())
def test_wait_for_raises_timeout_error_if_returned_during_cancellation(self):
        """Even when the inner task swallows cancellation and returns a value,
        wait_for still raises TimeoutError (the return value is dropped)."""
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)
        async def foo():
            async def inner():
                try:
                    await asyncio.sleep(0.2)
                except asyncio.CancelledError:
                    return 42
            inner_task = self.new_task(loop, inner())
            await asyncio.wait_for(inner_task, timeout=_EPSILON)
        with self.assertRaises(asyncio.TimeoutError):
            loop.run_until_complete(foo())
def test_wait_for_self_cancellation(self):
        """wait_for itself stays cancellable even while its inner task keeps
        deferring cancellation; the inner task eventually completes with 42."""
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)
        async def foo():
            async def inner():
                # Repeatedly absorb cancellation to delay shutdown.
                try:
                    await asyncio.sleep(0.3)
                except asyncio.CancelledError:
                    try:
                        await asyncio.sleep(0.3)
                    except asyncio.CancelledError:
                        await asyncio.sleep(0.3)
                return 42
            inner_task = self.new_task(loop, inner())
            wait = asyncio.wait_for(inner_task, timeout=0.1)
            # Test that wait_for itself is properly cancellable
            # even when the initial task holds up the initial cancellation.
            task = self.new_task(loop, wait)
            await asyncio.sleep(0.2)
            task.cancel()
            with self.assertRaises(asyncio.CancelledError):
                await task
            self.assertEqual(await inner_task, 42)
        loop.run_until_complete(foo())
def test_wait(self):
        """asyncio.wait returns both tasks done after the longer sleep elapses."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0
            self.assertAlmostEqual(0.15, when)
            yield 0.15
        loop = self.new_test_loop(gen)
        a = self.new_task(loop, asyncio.sleep(0.1))
        b = self.new_task(loop, asyncio.sleep(0.15))
        async def foo():
            done, pending = await asyncio.wait([b, a])
            self.assertEqual(done, set([a, b]))
            self.assertEqual(pending, set())
            return 42
        res = loop.run_until_complete(self.new_task(loop, foo()))
        self.assertEqual(res, 42)
        self.assertAlmostEqual(0.15, loop.time())
        # Doing it again should take no time and exercise a different path.
        res = loop.run_until_complete(self.new_task(loop, foo()))
        self.assertAlmostEqual(0.15, loop.time())
        self.assertEqual(res, 42)
def test_wait_duplicate_coroutines(self):
        """Passing the same coroutine object twice to asyncio.wait() dedupes it."""
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def coro(s):
                return s
        c = coro('test')
        task = self.new_task(
            self.loop,
            asyncio.wait([c, c, coro('spam')]))
        with self.assertWarns(DeprecationWarning):
            done, pending = self.loop.run_until_complete(task)
        self.assertFalse(pending)
        # Only two results: the duplicated coroutine counts once.
        self.assertEqual(set(f.result() for f in done), {'test', 'spam'})
def test_wait_errors(self):
        """asyncio.wait rejects an empty set and an invalid return_when value."""
        self.assertRaises(
            ValueError, self.loop.run_until_complete,
            asyncio.wait(set()))
        # -1 is an invalid return_when value
        sleep_coro = asyncio.sleep(10.0)
        wait_coro = asyncio.wait([sleep_coro], return_when=-1)
        self.assertRaises(ValueError,
                          self.loop.run_until_complete, wait_coro)
        sleep_coro.close()
def test_wait_first_completed(self):
        """FIRST_COMPLETED returns as soon as the quicker task finishes."""
        def gen():
            when = yield
            self.assertAlmostEqual(10.0, when)
            when = yield 0
            self.assertAlmostEqual(0.1, when)
            yield 0.1
        loop = self.new_test_loop(gen)
        a = self.new_task(loop, asyncio.sleep(10.0))
        b = self.new_task(loop, asyncio.sleep(0.1))
        task = self.new_task(
            loop,
            asyncio.wait([b, a], return_when=asyncio.FIRST_COMPLETED))
        done, pending = loop.run_until_complete(task)
        self.assertEqual({b}, done)
        self.assertEqual({a}, pending)
        self.assertFalse(a.done())
        self.assertTrue(b.done())
        self.assertIsNone(b.result())
        self.assertAlmostEqual(0.1, loop.time())
        # move forward to close generator
        loop.advance_time(10)
        loop.run_until_complete(asyncio.wait([a, b]))
def test_wait_really_done(self):
        # there is possibility that some tasks in the pending list
        # became done but their callbacks haven't all been called yet
        async def coro1():
            await asyncio.sleep(0)
        async def coro2():
            await asyncio.sleep(0)
            await asyncio.sleep(0)
        a = self.new_task(self.loop, coro1())
        b = self.new_task(self.loop, coro2())
        task = self.new_task(
            self.loop,
            asyncio.wait([b, a], return_when=asyncio.FIRST_COMPLETED))
        done, pending = self.loop.run_until_complete(task)
        # Both tasks must be reported done even though b finished a tick later.
        self.assertEqual({a, b}, done)
        self.assertTrue(a.done())
        self.assertIsNone(a.result())
        self.assertTrue(b.done())
        self.assertIsNone(b.result())
def test_wait_first_exception(self):
        """FIRST_EXCEPTION returns immediately when a task already holds an exception."""
        def gen():
            when = yield
            self.assertAlmostEqual(10.0, when)
            yield 0
        loop = self.new_test_loop(gen)
        # first_exception, task already has exception
        a = self.new_task(loop, asyncio.sleep(10.0))
        async def exc():
            raise ZeroDivisionError('err')
        b = self.new_task(loop, exc())
        task = self.new_task(
            loop,
            asyncio.wait([b, a], return_when=asyncio.FIRST_EXCEPTION))
        done, pending = loop.run_until_complete(task)
        self.assertEqual({b}, done)
        self.assertEqual({a}, pending)
        self.assertAlmostEqual(0, loop.time())
        # move forward to close generator
        loop.advance_time(10)
        loop.run_until_complete(asyncio.wait([a, b]))
def test_wait_first_exception_in_wait(self):
        """FIRST_EXCEPTION returns when a task raises while wait() is pending."""
        def gen():
            when = yield
            self.assertAlmostEqual(10.0, when)
            when = yield 0
            self.assertAlmostEqual(0.01, when)
            yield 0.01
        loop = self.new_test_loop(gen)
        # first_exception, exception during waiting
        a = self.new_task(loop, asyncio.sleep(10.0))
        async def exc():
            await asyncio.sleep(0.01)
            raise ZeroDivisionError('err')
        b = self.new_task(loop, exc())
        task = asyncio.wait([b, a], return_when=asyncio.FIRST_EXCEPTION)
        done, pending = loop.run_until_complete(task)
        self.assertEqual({b}, done)
        self.assertEqual({a}, pending)
        self.assertAlmostEqual(0.01, loop.time())
        # move forward to close generator
        loop.advance_time(10)
        loop.run_until_complete(asyncio.wait([a, b]))
def test_wait_with_exception(self):
        """Default ALL_COMPLETED wait reports a failed task in done, not raised."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0
            self.assertAlmostEqual(0.15, when)
            yield 0.15
        loop = self.new_test_loop(gen)
        a = self.new_task(loop, asyncio.sleep(0.1))
        async def sleeper():
            await asyncio.sleep(0.15)
            raise ZeroDivisionError('really')
        b = self.new_task(loop, sleeper())
        async def foo():
            done, pending = await asyncio.wait([b, a])
            self.assertEqual(len(done), 2)
            self.assertEqual(pending, set())
            # Exactly one of the done futures carries the exception.
            errors = set(f for f in done if f.exception() is not None)
            self.assertEqual(len(errors), 1)
        loop.run_until_complete(self.new_task(loop, foo()))
        self.assertAlmostEqual(0.15, loop.time())
        loop.run_until_complete(self.new_task(loop, foo()))
        self.assertAlmostEqual(0.15, loop.time())
def test_wait_with_timeout(self):
        """wait(timeout=...) splits tasks into done and pending at the deadline."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0
            self.assertAlmostEqual(0.15, when)
            when = yield 0
            self.assertAlmostEqual(0.11, when)
            yield 0.11
        loop = self.new_test_loop(gen)
        a = self.new_task(loop, asyncio.sleep(0.1))
        b = self.new_task(loop, asyncio.sleep(0.15))
        async def foo():
            # At t=0.11 only the 0.1s sleep has finished.
            done, pending = await asyncio.wait([b, a], timeout=0.11)
            self.assertEqual(done, set([a]))
            self.assertEqual(pending, set([b]))
        loop.run_until_complete(self.new_task(loop, foo()))
        self.assertAlmostEqual(0.11, loop.time())
        # move forward to close generator
        loop.advance_time(10)
        loop.run_until_complete(asyncio.wait([a, b]))
def test_wait_concurrent_complete(self):
        """A task completing exactly at the wait() timeout counts as done."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0
            self.assertAlmostEqual(0.15, when)
            when = yield 0
            self.assertAlmostEqual(0.1, when)
            yield 0.1
        loop = self.new_test_loop(gen)
        a = self.new_task(loop, asyncio.sleep(0.1))
        b = self.new_task(loop, asyncio.sleep(0.15))
        done, pending = loop.run_until_complete(
            asyncio.wait([b, a], timeout=0.1))
        self.assertEqual(done, set([a]))
        self.assertEqual(pending, set([b]))
        self.assertAlmostEqual(0.1, loop.time())
        # move forward to close generator
        loop.advance_time(10)
        loop.run_until_complete(asyncio.wait([a, b]))
def test_wait_with_iterator_of_tasks(self):
        """asyncio.wait accepts any iterable of tasks, not just a list/set."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0
            self.assertAlmostEqual(0.15, when)
            yield 0.15
        loop = self.new_test_loop(gen)
        a = self.new_task(loop, asyncio.sleep(0.1))
        b = self.new_task(loop, asyncio.sleep(0.15))
        async def foo():
            done, pending = await asyncio.wait(iter([b, a]))
            self.assertEqual(done, set([a, b]))
            self.assertEqual(pending, set())
            return 42
        res = loop.run_until_complete(self.new_task(loop, foo()))
        self.assertEqual(res, 42)
        self.assertAlmostEqual(0.15, loop.time())
def test_as_completed(self):
        """as_completed yields results in completion order ('a'/'b' first, 'c' last)."""
        def gen():
            yield 0
            yield 0
            yield 0.01
            yield 0
        loop = self.new_test_loop(gen)
        # disable "slow callback" warning
        loop.slow_callback_duration = 1.0
        completed = set()
        time_shifted = False
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def sleeper(dt, x):
                nonlocal time_shifted
                yield from asyncio.sleep(dt)
                completed.add(x)
                # Once the two quick sleepers are done, jump the mock clock
                # past c's deadline so it completes on the next iteration.
                if not time_shifted and 'a' in completed and 'b' in completed:
                    time_shifted = True
                    loop.advance_time(0.14)
                return x
        a = sleeper(0.01, 'a')
        b = sleeper(0.01, 'b')
        c = sleeper(0.15, 'c')
        async def foo():
            values = []
            for f in asyncio.as_completed([b, c, a]):
                values.append(await f)
            return values
        res = loop.run_until_complete(self.new_task(loop, foo()))
        self.assertAlmostEqual(0.15, loop.time())
        self.assertTrue('a' in res[:2])
        self.assertTrue('b' in res[:2])
        self.assertEqual(res[2], 'c')
        # Doing it again should take no time and exercise a different path.
        res = loop.run_until_complete(self.new_task(loop, foo()))
        self.assertAlmostEqual(0.15, loop.time())
def test_as_completed_with_timeout(self):
        """An as_completed timeout delivers TimeoutError for the unfinished future."""
        def gen():
            yield
            yield 0
            yield 0
            yield 0.1
        loop = self.new_test_loop(gen)
        a = loop.create_task(asyncio.sleep(0.1, 'a'))
        b = loop.create_task(asyncio.sleep(0.15, 'b'))
        async def foo():
            values = []
            for f in asyncio.as_completed([a, b], timeout=0.12):
                if values:
                    loop.advance_time(0.02)
                try:
                    v = await f
                    values.append((1, v))
                except asyncio.TimeoutError as exc:
                    values.append((2, exc))
            return values
        res = loop.run_until_complete(self.new_task(loop, foo()))
        self.assertEqual(len(res), 2, res)
        # 'a' (0.1s) succeeded; 'b' (0.15s) hit the 0.12s timeout.
        self.assertEqual(res[0], (1, 'a'))
        self.assertEqual(res[1][0], 2)
        self.assertIsInstance(res[1][1], asyncio.TimeoutError)
        self.assertAlmostEqual(0.12, loop.time())
        # move forward to close generator
        loop.advance_time(10)
        loop.run_until_complete(asyncio.wait([a, b]))
def test_as_completed_with_unused_timeout(self):
        """A timeout that never fires does not affect as_completed results."""
        def gen():
            yield
            yield 0
            yield 0.01
        loop = self.new_test_loop(gen)
        a = asyncio.sleep(0.01, 'a')
        async def foo():
            for f in asyncio.as_completed([a], timeout=1):
                v = await f
                self.assertEqual(v, 'a')
        loop.run_until_complete(self.new_task(loop, foo()))
def test_as_completed_reverse_wait(self):
        """as_completed futures may be awaited out of list order; each still
        yields the next result to complete."""
        def gen():
            yield 0
            yield 0.05
            yield 0
        loop = self.new_test_loop(gen)
        a = asyncio.sleep(0.05, 'a')
        b = asyncio.sleep(0.10, 'b')
        fs = {a, b}
        async def test():
            futs = list(asyncio.as_completed(fs))
            self.assertEqual(len(futs), 2)
            # Awaiting the second placeholder first still yields the
            # earliest completion ('a').
            x = await futs[1]
            self.assertEqual(x, 'a')
            self.assertAlmostEqual(0.05, loop.time())
            loop.advance_time(0.05)
            y = await futs[0]
            self.assertEqual(y, 'b')
            self.assertAlmostEqual(0.10, loop.time())
        loop.run_until_complete(test())
def test_as_completed_concurrent(self):
        """Two coroutines finishing simultaneously both resolve via as_completed."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.05, when)
            when = yield 0
            self.assertAlmostEqual(0.05, when)
            yield 0.05
        a = asyncio.sleep(0.05, 'a')
        b = asyncio.sleep(0.05, 'b')
        fs = {a, b}
        async def test():
            futs = list(asyncio.as_completed(fs))
            self.assertEqual(len(futs), 2)
            waiter = asyncio.wait(futs)
            # Deprecation from passing coros in futs to asyncio.wait()
            with self.assertWarns(DeprecationWarning) as cm:
                done, pending = await waiter
            self.assertEqual(cm.warnings[0].filename, __file__)
            self.assertEqual(set(f.result() for f in done), {'a', 'b'})
        loop = self.new_test_loop(gen)
        loop.run_until_complete(test())
def test_as_completed_duplicate_coroutines(self):
        """Passing the same coroutine object twice to as_completed dedupes it."""
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def coro(s):
                return s
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def runner():
                result = []
                c = coro('ham')
                for f in asyncio.as_completed([c, c, coro('spam')]):
                    result.append((yield from f))
                return result
        fut = self.new_task(self.loop, runner())
        self.loop.run_until_complete(fut)
        result = fut.result()
        # Only two results: the duplicated 'ham' coroutine counts once.
        self.assertEqual(set(result), {'ham', 'spam'})
        self.assertEqual(len(result), 2)
def test_as_completed_coroutine_without_loop(self):
        """as_completed on a coroutine with no event loop warns then raises."""
        async def coro():
            return 42
        a = coro()
        self.addCleanup(a.close)
        futs = asyncio.as_completed([a])
        with self.assertWarns(DeprecationWarning) as cm:
            with self.assertRaisesRegex(RuntimeError, 'There is no current event loop'):
                list(futs)
        self.assertEqual(cm.warnings[0].filename, __file__)
def test_as_completed_coroutine_use_running_loop(self):
        """Inside a running loop, as_completed picks up that loop implicitly."""
        loop = self.new_test_loop()
        async def coro():
            return 42
        async def test():
            futs = list(asyncio.as_completed([coro()]))
            self.assertEqual(len(futs), 1)
            self.assertEqual(await futs[0], 42)
        loop.run_until_complete(test())
def test_as_completed_coroutine_use_global_loop(self):
        # Deprecated in 3.10
        """Falling back to the global event loop emits DeprecationWarning but works."""
        async def coro():
            return 42
        loop = self.new_test_loop()
        asyncio.set_event_loop(loop)
        self.addCleanup(asyncio.set_event_loop, None)
        futs = asyncio.as_completed([coro()])
        with self.assertWarns(DeprecationWarning) as cm:
            futs = list(futs)
        self.assertEqual(cm.warnings[0].filename, __file__)
        self.assertEqual(len(futs), 1)
        self.assertEqual(loop.run_until_complete(futs[0]), 42)
def test_sleep(self):
        """asyncio.sleep(dt, result) sleeps for dt and returns the given result."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.05, when)
            when = yield 0.05
            self.assertAlmostEqual(0.1, when)
            yield 0.05
        loop = self.new_test_loop(gen)
        async def sleeper(dt, arg):
            # Split the sleep in two to exercise consecutive sleeps.
            await asyncio.sleep(dt/2)
            res = await asyncio.sleep(dt/2, arg)
            return res
        t = self.new_task(loop, sleeper(0.1, 'yeah'))
        loop.run_until_complete(t)
        self.assertTrue(t.done())
        self.assertEqual(t.result(), 'yeah')
        self.assertAlmostEqual(0.1, loop.time())
def test_sleep_cancel(self):
        """Cancelling a sleeping task cancels the timer handle sleep() scheduled."""
        def gen():
            when = yield
            self.assertAlmostEqual(10.0, when)
            yield 0
        loop = self.new_test_loop(gen)
        t = self.new_task(loop, asyncio.sleep(10.0, 'yeah'))
        handle = None
        orig_call_later = loop.call_later
        # Intercept call_later to capture the TimerHandle sleep() creates.
        def call_later(delay, callback, *args):
            nonlocal handle
            handle = orig_call_later(delay, callback, *args)
            return handle
        loop.call_later = call_later
        test_utils.run_briefly(loop)
        self.assertFalse(handle._cancelled)
        t.cancel()
        test_utils.run_briefly(loop)
        self.assertTrue(handle._cancelled)
def test_task_cancel_sleeping_task(self):
        """A long sleep cancelled from a timer fires CancelledError promptly."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0
            self.assertAlmostEqual(5000, when)
            yield 0.1
        loop = self.new_test_loop(gen)
        async def sleep(dt):
            await asyncio.sleep(dt)
        async def doit():
            sleeper = self.new_task(loop, sleep(5000))
            loop.call_later(0.1, sleeper.cancel)
            try:
                await sleeper
            except asyncio.CancelledError:
                return 'cancelled'
            else:
                return 'slept in'
        doer = doit()
        self.assertEqual(loop.run_until_complete(doer), 'cancelled')
        # Only 0.1s elapsed on the mock clock, not the 5000s sleep.
        self.assertAlmostEqual(0.1, loop.time())
def test_task_cancel_waiter_future(self):
        """Cancelling a task clears its _fut_waiter and cancels that future."""
        fut = self.new_future(self.loop)
        async def coro():
            await fut
        task = self.new_task(self.loop, coro())
        test_utils.run_briefly(self.loop)
        self.assertIs(task._fut_waiter, fut)
        task.cancel()
        test_utils.run_briefly(self.loop)
        self.assertRaises(
            asyncio.CancelledError, self.loop.run_until_complete, task)
        self.assertIsNone(task._fut_waiter)
        self.assertTrue(fut.cancelled())
def test_task_set_methods(self):
        """Tasks reject Future.set_result()/set_exception(); the coroutine's
        own return value still wins."""
        async def notmuch():
            return 'ko'
        gen = notmuch()
        task = self.new_task(self.loop, gen)
        with self.assertRaisesRegex(RuntimeError, 'not support set_result'):
            task.set_result('ok')
        with self.assertRaisesRegex(RuntimeError, 'not support set_exception'):
            task.set_exception(ValueError())
        self.assertEqual(
            self.loop.run_until_complete(task),
            'ko')
    def test_step_result(self):
        """Yielding a non-Future, non-None value from a legacy coroutine is an error."""
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def notmuch():
                yield None
                yield 1  # yielding a bare int is not allowed
                return 'ko'

        self.assertRaises(
            RuntimeError, self.loop.run_until_complete, notmuch())
    def test_step_result_future(self):
        """A task awaiting a future registers a done callback and resumes with its result."""
        # If coroutine returns future, task waits on this future.

        class Fut(asyncio.Future):
            # Records whether add_done_callback was invoked by the task machinery.
            def __init__(self, *args, **kwds):
                self.cb_added = False
                super().__init__(*args, **kwds)

            def add_done_callback(self, *args, **kwargs):
                self.cb_added = True
                super().add_done_callback(*args, **kwargs)

        fut = Fut(loop=self.loop)
        result = None

        async def wait_for_future():
            nonlocal result
            result = await fut

        t = self.new_task(self.loop, wait_for_future())
        test_utils.run_briefly(self.loop)
        self.assertTrue(fut.cb_added)

        res = object()
        fut.set_result(res)
        test_utils.run_briefly(self.loop)
        # The awaited future's result is delivered to the coroutine; the
        # task itself finishes with None (the coroutine returns nothing).
        self.assertIs(res, result)
        self.assertTrue(t.done())
        self.assertIsNone(t.result())
    def test_baseexception_during_cancel(self):
        """A BaseException raised while handling cancellation becomes the task's exception."""
        def gen():
            when = yield
            self.assertAlmostEqual(10.0, when)
            yield 0

        loop = self.new_test_loop(gen)

        async def sleeper():
            await asyncio.sleep(10)

        base_exc = SystemExit()

        async def notmutch():
            try:
                await sleeper()
            except asyncio.CancelledError:
                # Replace the cancellation with a BaseException (SystemExit).
                raise base_exc

        task = self.new_task(loop, notmutch())
        test_utils.run_briefly(loop)

        task.cancel()
        self.assertFalse(task.done())

        # SystemExit propagates out of the loop step...
        self.assertRaises(SystemExit, test_utils.run_briefly, loop)

        # ...and the task ends with that exception, NOT as cancelled.
        self.assertTrue(task.done())
        self.assertFalse(task.cancelled())
        self.assertIs(task.exception(), base_exc)
    def test_iscoroutinefunction(self):
        """iscoroutinefunction() is True only for decorated/async functions, not plain ones."""
        def fn():
            pass

        self.assertFalse(asyncio.iscoroutinefunction(fn))

        def fn1():
            yield
        # A bare generator function is not a coroutine function.
        self.assertFalse(asyncio.iscoroutinefunction(fn1))

        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def fn2():
                yield
        self.assertTrue(asyncio.iscoroutinefunction(fn2))

        # Mock objects must not be mistaken for coroutine functions.
        self.assertFalse(asyncio.iscoroutinefunction(mock.Mock()))
    def test_yield_vs_yield_from(self):
        """`yield fut` (instead of `yield from fut`) in a legacy coroutine raises RuntimeError."""
        fut = self.new_future(self.loop)

        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def wait_for_future():
                yield fut  # wrong: should be `yield from fut`

        task = wait_for_future()
        with self.assertRaises(RuntimeError):
            self.loop.run_until_complete(task)

        # The future must be left untouched by the failed task.
        self.assertFalse(fut.done())
    def test_yield_vs_yield_from_generator(self):
        """`yield gen` (instead of `yield from gen`) of a coroutine object raises RuntimeError."""
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def coro():
                yield

        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def wait_for_future():
                gen = coro()
                try:
                    yield gen  # wrong: should be `yield from gen`
                finally:
                    gen.close()

        task = wait_for_future()
        self.assertRaises(
            RuntimeError,
            self.loop.run_until_complete, task)
    def test_coroutine_non_gen_function(self):
        """@asyncio.coroutine turns a plain (non-generator) function into a coroutine."""
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def func():
                return 'test'

        self.assertTrue(asyncio.iscoroutinefunction(func))

        coro = func()
        self.assertTrue(asyncio.iscoroutine(coro))

        res = self.loop.run_until_complete(coro)
        self.assertEqual(res, 'test')
    def test_coroutine_non_gen_function_return_future(self):
        """A decorated plain function returning a Future resolves to that future's result."""
        fut = self.new_future(self.loop)

        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def func():
                return fut

        async def coro():
            # Completes the future that func()'s task is waiting on.
            fut.set_result('test')

        t1 = self.new_task(self.loop, func())
        t2 = self.new_task(self.loop, coro())
        res = self.loop.run_until_complete(t1)
        self.assertEqual(res, 'test')
        self.assertIsNone(t2.result())
    def test_current_task(self):
        """current_task() is the running task inside a coroutine, None outside."""
        self.assertIsNone(asyncio.current_task(loop=self.loop))

        async def coro(loop):
            # All three spellings resolve to the running task.
            self.assertIs(asyncio.current_task(), task)

            self.assertIs(asyncio.current_task(None), task)
            self.assertIs(asyncio.current_task(), task)

        task = self.new_task(self.loop, coro(self.loop))
        self.loop.run_until_complete(task)
        self.assertIsNone(asyncio.current_task(loop=self.loop))
    def test_current_task_with_interleaving_tasks(self):
        """current_task() tracks the correct task across interleaved suspensions."""
        self.assertIsNone(asyncio.current_task(loop=self.loop))

        # Each task unblocks the other via these futures, forcing interleaving.
        fut1 = self.new_future(self.loop)
        fut2 = self.new_future(self.loop)

        async def coro1(loop):
            self.assertTrue(asyncio.current_task() is task1)
            await fut1
            self.assertTrue(asyncio.current_task() is task1)
            fut2.set_result(True)

        async def coro2(loop):
            self.assertTrue(asyncio.current_task() is task2)
            fut1.set_result(True)
            await fut2
            self.assertTrue(asyncio.current_task() is task2)

        task1 = self.new_task(self.loop, coro1(self.loop))
        task2 = self.new_task(self.loop, coro2(self.loop))

        self.loop.run_until_complete(asyncio.wait((task1, task2)))
        self.assertIsNone(asyncio.current_task(loop=self.loop))
# Some thorough tests for cancellation propagation through
# coroutines, tasks and wait().
    def test_yield_future_passes_cancel(self):
        """Cancellation propagates through nested awaits down to the innermost future."""
        # Cancelling outer() cancels inner() cancels waiter.
        proof = 0
        waiter = self.new_future(self.loop)

        async def inner():
            nonlocal proof
            try:
                await waiter
            except asyncio.CancelledError:
                proof += 1
                raise
            else:
                self.fail('got past sleep() in inner()')

        async def outer():
            nonlocal proof
            try:
                await inner()
            except asyncio.CancelledError:
                proof += 100  # Expect this path.
            else:
                proof += 10

        f = asyncio.ensure_future(outer(), loop=self.loop)
        test_utils.run_briefly(self.loop)
        f.cancel()
        self.loop.run_until_complete(f)
        # 1 from inner()'s except, 100 from outer()'s except.
        self.assertEqual(proof, 101)
        self.assertTrue(waiter.cancelled())
    def test_yield_wait_does_not_shield_cancel(self):
        """Cancelling a task blocked in wait() cancels the waiter but not the children."""
        # Cancelling outer() makes wait() return early, leaves inner()
        # running.
        proof = 0
        waiter = self.new_future(self.loop)

        async def inner():
            nonlocal proof
            await waiter
            proof += 1

        async def outer():
            nonlocal proof
            with self.assertWarns(DeprecationWarning):
                d, p = await asyncio.wait([inner()])
            proof += 100

        f = asyncio.ensure_future(outer(), loop=self.loop)
        test_utils.run_briefly(self.loop)
        f.cancel()
        self.assertRaises(
            asyncio.CancelledError, self.loop.run_until_complete, f)
        waiter.set_result(None)
        test_utils.run_briefly(self.loop)
        # inner() ran to completion (proof == 1); outer() never resumed.
        self.assertEqual(proof, 1)
def test_shield_result(self):
inner = self.new_future(self.loop)
outer = asyncio.shield(inner)
inner.set_result(42)
res = self.loop.run_until_complete(outer)
self.assertEqual(res, 42)
    def test_shield_exception(self):
        """An exception set on the inner future propagates to the shield wrapper."""
        inner = self.new_future(self.loop)
        outer = asyncio.shield(inner)
        test_utils.run_briefly(self.loop)
        exc = RuntimeError('expected')
        inner.set_exception(exc)
        test_utils.run_briefly(self.loop)
        self.assertIs(outer.exception(), exc)
    def test_shield_cancel_inner(self):
        """Cancelling the inner future cancels the shield wrapper too."""
        inner = self.new_future(self.loop)
        outer = asyncio.shield(inner)
        test_utils.run_briefly(self.loop)
        inner.cancel()
        test_utils.run_briefly(self.loop)
        self.assertTrue(outer.cancelled())
    def test_shield_cancel_outer(self):
        """Cancelling the wrapper cancels only the wrapper and detaches its callback."""
        inner = self.new_future(self.loop)
        outer = asyncio.shield(inner)
        test_utils.run_briefly(self.loop)
        outer.cancel()
        test_utils.run_briefly(self.loop)
        self.assertTrue(outer.cancelled())
        # The wrapper's done callback must have been removed from the inner
        # future (._callbacks may be None or an empty list).
        self.assertEqual(0, 0 if outer._callbacks is None else len(outer._callbacks))
def test_shield_shortcut(self):
fut = self.new_future(self.loop)
fut.set_result(42)
res = self.loop.run_until_complete(asyncio.shield(fut))
self.assertEqual(res, 42)
    def test_shield_effect(self):
        """shield() protects the inner coroutine from the outer task's cancellation."""
        # Cancelling outer() does not affect inner().
        proof = 0
        waiter = self.new_future(self.loop)

        async def inner():
            nonlocal proof
            await waiter
            proof += 1

        async def outer():
            nonlocal proof
            await asyncio.shield(inner())
            proof += 100

        f = asyncio.ensure_future(outer(), loop=self.loop)
        test_utils.run_briefly(self.loop)
        f.cancel()
        with self.assertRaises(asyncio.CancelledError):
            self.loop.run_until_complete(f)
        waiter.set_result(None)
        test_utils.run_briefly(self.loop)
        # inner() still completed (proof == 1) despite outer()'s cancellation.
        self.assertEqual(proof, 1)
    def test_shield_gather(self):
        """Cancelling a shield around gather() leaves the gather and its children alive."""
        child1 = self.new_future(self.loop)
        child2 = self.new_future(self.loop)
        parent = asyncio.gather(child1, child2)
        outer = asyncio.shield(parent)
        test_utils.run_briefly(self.loop)
        outer.cancel()
        test_utils.run_briefly(self.loop)
        self.assertTrue(outer.cancelled())
        child1.set_result(1)
        child2.set_result(2)
        test_utils.run_briefly(self.loop)
        # The shielded gather still collects both results.
        self.assertEqual(parent.result(), [1, 2])
    def test_gather_shield(self):
        """Cancelling a gather() of shields cancels the shields, not the children."""
        child1 = self.new_future(self.loop)
        child2 = self.new_future(self.loop)
        inner1 = asyncio.shield(child1)
        inner2 = asyncio.shield(child2)
        parent = asyncio.gather(inner1, inner2)
        test_utils.run_briefly(self.loop)
        parent.cancel()
        # This should cancel inner1 and inner2 but not both child1 and child2.
        test_utils.run_briefly(self.loop)
        self.assertIsInstance(parent.exception(), asyncio.CancelledError)
        self.assertTrue(inner1.cancelled())
        self.assertTrue(inner2.cancelled())
        # The children can still complete normally afterwards.
        child1.set_result(1)
        child2.set_result(2)
        test_utils.run_briefly(self.loop)
    def test_shield_coroutine_without_loop(self):
        """shield() with no running loop warns and raises RuntimeError."""
        async def coro():
            return 42

        inner = coro()
        self.addCleanup(inner.close)
        with self.assertWarns(DeprecationWarning) as cm:
            with self.assertRaisesRegex(RuntimeError, 'There is no current event loop'):
                asyncio.shield(inner)
        # The warning must point at this test file, not asyncio internals.
        self.assertEqual(cm.warnings[0].filename, __file__)
    def test_shield_coroutine_use_running_loop(self):
        """shield() called inside a coroutine binds to the running loop."""
        async def coro():
            return 42

        async def test():
            return asyncio.shield(coro())
        outer = self.loop.run_until_complete(test())
        self.assertEqual(outer._loop, self.loop)
        res = self.loop.run_until_complete(outer)
        self.assertEqual(res, 42)
    def test_shield_coroutine_use_global_loop(self):
        """shield() falling back to the global event loop warns (deprecated in 3.10)."""
        # Deprecated in 3.10
        async def coro():
            return 42

        asyncio.set_event_loop(self.loop)
        self.addCleanup(asyncio.set_event_loop, None)
        with self.assertWarns(DeprecationWarning) as cm:
            outer = asyncio.shield(coro())
        self.assertEqual(cm.warnings[0].filename, __file__)
        self.assertEqual(outer._loop, self.loop)
        res = self.loop.run_until_complete(outer)
        self.assertEqual(res, 42)
    def test_as_completed_invalid_args(self):
        """as_completed() raises TypeError for a bare future or coroutine argument."""
        fut = self.new_future(self.loop)

        # as_completed() expects a list of futures, not a future instance
        self.assertRaises(TypeError, self.loop.run_until_complete,
            asyncio.as_completed(fut))
        coro = coroutine_function()
        self.assertRaises(TypeError, self.loop.run_until_complete,
            asyncio.as_completed(coro))
        # Close the unused coroutine to avoid a "never awaited" warning.
        coro.close()
    def test_wait_invalid_args(self):
        """wait() raises TypeError for bare future/coroutine args and ValueError for []."""
        fut = self.new_future(self.loop)

        # wait() expects a list of futures, not a future instance
        self.assertRaises(TypeError, self.loop.run_until_complete,
            asyncio.wait(fut))
        coro = coroutine_function()
        self.assertRaises(TypeError, self.loop.run_until_complete,
            asyncio.wait(coro))
        # Close the unused coroutine to avoid a "never awaited" warning.
        coro.close()

        # wait() expects at least a future
        self.assertRaises(ValueError, self.loop.run_until_complete,
            asyncio.wait([]))
    def test_corowrapper_mocks_generator(self):
        """CoroWrapper exposes the generator protocol (gi_* attrs) in both debug modes."""

        def check():
            # A function that asserts various things.
            # Called twice, with different debug flag values.

            with self.assertWarns(DeprecationWarning):
                @asyncio.coroutine
                def coro():
                    # The actual coroutine.
                    self.assertTrue(gen.gi_running)
                    yield from fut

            # A completed Future used to run the coroutine.
            fut = self.new_future(self.loop)
            fut.set_result(None)

            # Call the coroutine.
            gen = coro()

            # Check some properties.
            self.assertTrue(asyncio.iscoroutine(gen))
            self.assertIsInstance(gen.gi_frame, types.FrameType)
            self.assertFalse(gen.gi_running)
            self.assertIsInstance(gen.gi_code, types.CodeType)

            # Run it.
            self.loop.run_until_complete(gen)

            # The frame should have changed.
            self.assertIsNone(gen.gi_frame)

        # Test with debug flag cleared.
        with set_coroutine_debug(False):
            check()

        # Test with debug flag set.
        with set_coroutine_debug(True):
            check()
    def test_yield_from_corowrapper(self):
        """`yield from` chains through CoroWrapper correctly in debug mode."""
        with set_coroutine_debug(True):
            with self.assertWarns(DeprecationWarning):
                @asyncio.coroutine
                def t1():
                    return (yield from t2())

            with self.assertWarns(DeprecationWarning):
                @asyncio.coroutine
                def t2():
                    f = self.new_future(self.loop)
                    # t3 sets f's result once the loop schedules it.
                    self.new_task(self.loop, t3(f))
                    return (yield from f)

            with self.assertWarns(DeprecationWarning):
                @asyncio.coroutine
                def t3(f):
                    f.set_result((1, 2, 3))

            task = self.new_task(self.loop, t1())
            val = self.loop.run_until_complete(task)
            self.assertEqual(val, (1, 2, 3))
    def test_yield_from_corowrapper_send(self):
        """CoroWrapper.send() delivers values into the wrapped generator unchanged."""
        def foo():
            a = yield
            return a

        def call(arg):
            cw = asyncio.coroutines.CoroWrapper(foo())
            cw.send(None)  # prime the generator
            try:
                cw.send(arg)
            except StopIteration as ex:
                # The generator's return value rides on StopIteration.args.
                return ex.args[0]
            else:
                raise AssertionError('StopIteration was expected')

        self.assertEqual(call((1, 2)), (1, 2))
        self.assertEqual(call('spam'), 'spam')
    def test_corowrapper_weakref(self):
        """CoroWrapper instances must be weak-referenceable (have a __weakref__ slot)."""
        wd = weakref.WeakValueDictionary()
        def foo(): yield from []
        cw = asyncio.coroutines.CoroWrapper(foo())
        wd['cw'] = cw  # Would fail without __weakref__ slot.
        cw.gen = None  # Suppress warning from __del__.
    def test_corowrapper_throw(self):
        """CoroWrapper.throw supports all gen.throw signatures (exc / type,val / type,val,tb)."""
        # Issue 429: CoroWrapper.throw must be compatible with gen.throw
        def foo():
            value = None
            while True:
                try:
                    value = yield value
                except Exception as e:
                    # Echo the thrown exception back as the next yielded value.
                    value = e

        exception = Exception("foo")
        cw = asyncio.coroutines.CoroWrapper(foo())
        cw.send(None)
        self.assertIs(exception, cw.throw(exception))

        cw = asyncio.coroutines.CoroWrapper(foo())
        cw.send(None)
        self.assertIs(exception, cw.throw(Exception, exception))

        cw = asyncio.coroutines.CoroWrapper(foo())
        cw.send(None)
        exception = cw.throw(Exception, "foo")
        self.assertIsInstance(exception, Exception)
        self.assertEqual(exception.args, ("foo", ))

        cw = asyncio.coroutines.CoroWrapper(foo())
        cw.send(None)
        exception = cw.throw(Exception, "foo", None)
        self.assertIsInstance(exception, Exception)
        self.assertEqual(exception.args, ("foo", ))
    def test_log_destroyed_pending_task(self):
        """GC'ing a still-pending task reports 'Task was destroyed but it is pending!'."""
        Task = self.__class__.Task

        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def kill_me(loop):
                future = self.new_future(loop)
                yield from future
                # at this point, the only reference to kill_me() task is
                # the Task._wakeup() method in future._callbacks
                raise Exception("code never reached")

        mock_handler = mock.Mock()
        self.loop.set_debug(True)
        self.loop.set_exception_handler(mock_handler)

        # schedule the task
        coro = kill_me(self.loop)
        task = asyncio.ensure_future(coro, loop=self.loop)

        self.assertEqual(asyncio.all_tasks(loop=self.loop), {task})

        asyncio.set_event_loop(None)

        # execute the task so it waits for future
        self.loop._run_once()
        self.assertEqual(len(self.loop._ready), 0)

        # remove the future used in kill_me(), and references to the task
        del coro.gi_frame.f_locals['future']
        coro = None
        source_traceback = task._source_traceback
        task = None

        # no more reference to kill_me() task: the task is destroyed by the GC
        support.gc_collect()

        self.assertEqual(asyncio.all_tasks(loop=self.loop), set())

        mock_handler.assert_called_with(self.loop, {
            'message': 'Task was destroyed but it is pending!',
            'task': mock.ANY,
            'source_traceback': source_traceback,
        })
        mock_handler.reset_mock()
    @mock.patch('asyncio.base_events.logger')
    def test_tb_logger_not_called_after_cancel(self, m_log):
        """A cancelled task's unretrieved exception must not be logged at loop shutdown."""
        loop = asyncio.new_event_loop()
        self.set_event_loop(loop)

        async def coro():
            raise TypeError

        async def runner():
            task = self.new_task(loop, coro())
            await asyncio.sleep(0.05)
            task.cancel()
            # Drop the last reference; no error log should follow.
            task = None

        loop.run_until_complete(runner())
        self.assertFalse(m_log.error.called)
    @mock.patch('asyncio.coroutines.logger')
    def test_coroutine_never_yielded(self, m_log):
        """In debug mode, creating but never awaiting a coroutine logs a traceback."""
        with set_coroutine_debug(True):
            with self.assertWarns(DeprecationWarning):
                @asyncio.coroutine
                def coro_noop():
                    pass

        tb_filename = __file__
        # NOTE: +2 hardcodes the distance to the coro_noop() call below —
        # keep these two lines adjacent.
        tb_lineno = sys._getframe().f_lineno + 2
        # create a coroutine object but don't use it
        coro_noop()
        support.gc_collect()

        self.assertTrue(m_log.error.called)
        message = m_log.error.call_args[0][0]
        func_filename, func_lineno = test_utils.get_function_source(coro_noop)

        regex = (r'^<CoroWrapper %s\(?\)? .* at %s:%s, .*> '
                    r'was never yielded from\n'
                 r'Coroutine object created at \(most recent call last, truncated to \d+ last lines\):\n'
                 r'.*\n'
                 r'  File "%s", line %s, in test_coroutine_never_yielded\n'
                 r'    coro_noop\(\)$'
                 % (re.escape(coro_noop.__qualname__),
                    re.escape(func_filename), func_lineno,
                    re.escape(tb_filename), tb_lineno))

        self.assertRegex(message, re.compile(regex, re.DOTALL))
    def test_return_coroutine_from_coroutine(self):
        """Return of @asyncio.coroutine()-wrapped function generator object
        from @asyncio.coroutine()-wrapped function should have same effect as
        returning generator object or Future."""
        def check():
            with self.assertWarns(DeprecationWarning):
                @asyncio.coroutine
                def outer_coro():
                    with self.assertWarns(DeprecationWarning):
                        @asyncio.coroutine
                        def inner_coro():
                            return 1

                    return inner_coro()

            result = self.loop.run_until_complete(outer_coro())
            self.assertEqual(result, 1)

        # Test with debug flag cleared.
        with set_coroutine_debug(False):
            check()

        # Test with debug flag set.
        with set_coroutine_debug(True):
            check()
    def test_task_source_traceback(self):
        """In debug mode a task records the source traceback of its creation site."""
        self.loop.set_debug(True)

        task = self.new_task(self.loop, coroutine_function())
        # NOTE: -1 must point at the new_task() call line just above.
        lineno = sys._getframe().f_lineno - 1
        self.assertIsInstance(task._source_traceback, list)
        self.assertEqual(task._source_traceback[-2][:3],
                         (__file__,
                          lineno,
                          'test_task_source_traceback'))
        self.loop.run_until_complete(task)
    def _test_cancel_wait_for(self, timeout):
        """Helper: cancelling wait_for(task, timeout) must cancel the wrapped task too."""
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)

        async def blocking_coroutine():
            fut = self.new_future(loop)
            # Block: fut result is never set
            await fut

        task = loop.create_task(blocking_coroutine())

        wait = loop.create_task(asyncio.wait_for(task, timeout))
        loop.call_soon(wait.cancel)

        self.assertRaises(asyncio.CancelledError,
            loop.run_until_complete, wait)

        # Python issue #23219: cancelling the wait must also cancel the task
        self.assertTrue(task.cancelled())
    def test_cancel_blocking_wait_for(self):
        """wait_for() with no timeout: cancelling the wait cancels the inner task."""
        self._test_cancel_wait_for(None)
    def test_cancel_wait_for(self):
        """wait_for() with a long timeout: cancelling the wait cancels the inner task."""
        self._test_cancel_wait_for(60.0)
    def test_cancel_gather_1(self):
        """Ensure that a gathering future refuses to be cancelled once all
        children are done"""
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)

        fut = self.new_future(loop)

        async def create():
            # The indirection fut->child_coro is needed since otherwise the
            # gathering task is done at the same time as the child future
            def child_coro():
                return (yield from fut)
            gather_future = asyncio.gather(child_coro())
            return asyncio.ensure_future(gather_future)
        gather_task = loop.run_until_complete(create())

        cancel_result = None
        def cancelling_callback(_):
            nonlocal cancel_result
            cancel_result = gather_task.cancel()
        fut.add_done_callback(cancelling_callback)

        fut.set_result(42)  # calls the cancelling_callback after fut is done()

        # At this point the task should complete.
        loop.run_until_complete(gather_task)

        # Python issue #26923: asyncio.gather drops cancellation
        self.assertEqual(cancel_result, False)
        self.assertFalse(gather_task.cancelled())
        self.assertEqual(gather_task.result(), [42])
    def test_cancel_gather_2(self):
        """cancel(msg) arguments propagate through gather() to the CancelledError."""
        cases = [
            ((), ()),
            ((None,), ()),
            (('my message',), ('my message',)),
            # Non-string values should roundtrip.
            ((5,), (5,)),
        ]
        for cancel_args, expected_args in cases:
            with self.subTest(cancel_args=cancel_args):
                loop = asyncio.new_event_loop()
                self.addCleanup(loop.close)

                async def test():
                    time = 0
                    while True:
                        time += 0.05
                        await asyncio.gather(asyncio.sleep(0.05),
                                             return_exceptions=True)
                        if time > 1:
                            return

                async def main():
                    qwe = self.new_task(loop, test())
                    await asyncio.sleep(0.2)
                    qwe.cancel(*cancel_args)
                    await qwe

                try:
                    loop.run_until_complete(main())
                except asyncio.CancelledError as exc:
                    self.assertEqual(exc.args, ())
                    # The message lands on the innermost chained exception.
                    exc_type, exc_args, depth = get_innermost_context(exc)
                    self.assertEqual((exc_type, exc_args),
                        (asyncio.CancelledError, expected_args))
                    # The exact traceback seems to vary in CI.
                    self.assertIn(depth, (2, 3))
                else:
                    self.fail('gather did not propagate the cancellation '
                              'request')
    def test_exception_traceback(self):
        """A task's stored exception keeps its __traceback__ attached."""
        # See http://bugs.python.org/issue28843

        async def foo():
            1 / 0

        async def main():
            task = self.new_task(self.loop, foo())
            await asyncio.sleep(0)  # skip one loop iteration
            self.assertIsNotNone(task.exception().__traceback__)

        self.loop.run_until_complete(main())
    @mock.patch('asyncio.base_events.logger')
    def test_error_in_call_soon(self, m_log):
        """A call_soon failure during task creation logs 'destroyed but pending'."""
        def call_soon(callback, *args, **kwargs):
            raise ValueError
        self.loop.call_soon = call_soon

        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def coro():
                pass

        self.assertFalse(m_log.error.called)

        with self.assertRaises(ValueError):
            gen = coro()
            try:
                self.new_task(self.loop, gen)
            finally:
                gen.close()

        self.assertTrue(m_log.error.called)
        message = m_log.error.call_args[0][0]
        self.assertIn('Task was destroyed but it is pending', message)

        # The broken task must not linger in the registry.
        self.assertEqual(asyncio.all_tasks(self.loop), set())
    def test_create_task_with_noncoroutine(self):
        """Creating a task from a non-coroutine raises TypeError (twice, for the cache)."""
        with self.assertRaisesRegex(TypeError,
                                    "a coroutine was expected, got 123"):
            self.new_task(self.loop, 123)

        # test it for the second time to ensure that caching
        # in asyncio.iscoroutine() doesn't break things.
        with self.assertRaisesRegex(TypeError,
                                    "a coroutine was expected, got 123"):
            self.new_task(self.loop, 123)
    def test_create_task_with_oldstyle_coroutine(self):
        """Tasks can be created from legacy @asyncio.coroutine callables."""
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def coro():
                pass

        task = self.new_task(self.loop, coro())
        self.assertIsInstance(task, self.Task)
        self.loop.run_until_complete(task)

        # test it for the second time to ensure that caching
        # in asyncio.iscoroutine() doesn't break things.
        task = self.new_task(self.loop, coro())
        self.assertIsInstance(task, self.Task)
        self.loop.run_until_complete(task)
    def test_create_task_with_async_function(self):
        """Tasks can be created from native `async def` coroutines."""
        async def coro():
            pass

        task = self.new_task(self.loop, coro())
        self.assertIsInstance(task, self.Task)
        self.loop.run_until_complete(task)

        # test it for the second time to ensure that caching
        # in asyncio.iscoroutine() doesn't break things.
        task = self.new_task(self.loop, coro())
        self.assertIsInstance(task, self.Task)
        self.loop.run_until_complete(task)
    def test_create_task_with_asynclike_function(self):
        """Tasks can be created from duck-typed awaitable objects (CoroLikeObject)."""
        task = self.new_task(self.loop, CoroLikeObject())
        self.assertIsInstance(task, self.Task)
        self.assertEqual(self.loop.run_until_complete(task), 42)

        # test it for the second time to ensure that caching
        # in asyncio.iscoroutine() doesn't break things.
        task = self.new_task(self.loop, CoroLikeObject())
        self.assertIsInstance(task, self.Task)
        self.assertEqual(self.loop.run_until_complete(task), 42)
    def test_bare_create_task(self):
        """asyncio.create_task() inside a coroutine uses the running loop."""
        async def inner():
            return 1

        async def coro():
            task = asyncio.create_task(inner())
            self.assertIsInstance(task, self.Task)
            ret = await task
            self.assertEqual(1, ret)

        self.loop.run_until_complete(coro())
    def test_bare_create_named_task(self):
        """asyncio.create_task(name=...) sets the task's name."""
        async def coro_noop():
            pass

        async def coro():
            task = asyncio.create_task(coro_noop(), name='No-op')
            self.assertEqual(task.get_name(), 'No-op')
            await task

        self.loop.run_until_complete(coro())
    def test_context_1(self):
        """Each task runs in its own contextvars context; a subtask's set() is isolated."""
        cvar = contextvars.ContextVar('cvar', default='nope')

        async def sub():
            await asyncio.sleep(0.01)
            # Runs in a copy of main()'s context taken BEFORE main() set 'yes'.
            self.assertEqual(cvar.get(), 'nope')
            cvar.set('something else')

        async def main():
            self.assertEqual(cvar.get(), 'nope')
            subtask = self.new_task(loop, sub())
            cvar.set('yes')
            self.assertEqual(cvar.get(), 'yes')
            await subtask
            # sub()'s mutation must not leak back into main()'s context.
            self.assertEqual(cvar.get(), 'yes')

        loop = asyncio.new_event_loop()
        try:
            task = self.new_task(loop, main())
            loop.run_until_complete(task)
        finally:
            loop.close()
    def test_context_2(self):
        """Done callbacks run in their own context copy and don't pollute the task's."""
        cvar = contextvars.ContextVar('cvar', default='nope')

        async def main():
            def fut_on_done(fut):
                # This change must not pollute the context
                # of the "main()" task.
                cvar.set('something else')

            self.assertEqual(cvar.get(), 'nope')

            for j in range(2):
                fut = self.new_future(loop)
                fut.add_done_callback(fut_on_done)
                cvar.set(f'yes{j}')
                loop.call_soon(fut.set_result, None)
                await fut
                self.assertEqual(cvar.get(), f'yes{j}')

                for i in range(3):
                    # Test that task passed its context to add_done_callback:
                    cvar.set(f'yes{i}-{j}')
                    await asyncio.sleep(0.001)
                    self.assertEqual(cvar.get(), f'yes{i}-{j}')

        loop = asyncio.new_event_loop()
        try:
            task = self.new_task(loop, main())
            loop.run_until_complete(task)
        finally:
            loop.close()

        # Outside the task, the variable still has its default.
        self.assertEqual(cvar.get(), 'nope')
    def test_context_3(self):
        """Stress test: 100 parallel tasks each keep an isolated view of the same ContextVar."""
        # Run 100 Tasks in parallel, each modifying cvar.

        cvar = contextvars.ContextVar('cvar', default=-1)

        async def sub(num):
            for i in range(10):
                cvar.set(num + i)
                # Random sleeps force arbitrary interleavings between tasks.
                await asyncio.sleep(random.uniform(0.001, 0.05))
                self.assertEqual(cvar.get(), num + i)

        async def main():
            tasks = []
            for i in range(100):
                task = loop.create_task(sub(random.randint(0, 10)))
                tasks.append(task)

            await asyncio.gather(*tasks)

        loop = asyncio.new_event_loop()
        try:
            loop.run_until_complete(main())
        finally:
            loop.close()

        self.assertEqual(cvar.get(), -1)
    def test_get_coro(self):
        """Task.get_coro() returns the exact coroutine object the task wraps."""
        loop = asyncio.new_event_loop()
        coro = coroutine_function()
        try:
            task = self.new_task(loop, coro)
            loop.run_until_complete(task)
            self.assertIs(task.get_coro(), coro)
        finally:
            loop.close()
def add_subclass_tests(cls):
    """Class decorator: swap in call-counting Task/Future subclasses and add a test.

    Replaces ``cls.Task``/``cls.Future`` with subclasses that count
    ``add_done_callback`` calls, attaches ``test_subclasses_ctask_cfuture``,
    and disables the call-stack-sensitive ``test_task_source_traceback``.
    Returns ``cls`` unchanged when either attribute is None (C module absent).
    """
    BaseTask = cls.Task
    BaseFuture = cls.Future

    if BaseTask is None or BaseFuture is None:
        return cls

    class CommonFuture:
        # Mixin that records how often add_done_callback is invoked.
        def __init__(self, *args, **kwargs):
            self.calls = collections.defaultdict(lambda: 0)
            super().__init__(*args, **kwargs)

        def add_done_callback(self, *args, **kwargs):
            self.calls['add_done_callback'] += 1
            return super().add_done_callback(*args, **kwargs)

    class Task(CommonFuture, BaseTask):
        pass

    class Future(CommonFuture, BaseFuture):
        pass

    def test_subclasses_ctask_cfuture(self):
        fut = self.Future(loop=self.loop)

        async def func():
            self.loop.call_soon(lambda: fut.set_result('spam'))
            return await fut

        task = self.Task(func(), loop=self.loop)

        result = self.loop.run_until_complete(task)

        self.assertEqual(result, 'spam')

        # Both subclass instances registered exactly one done callback.
        self.assertEqual(
            dict(task.calls),
            {'add_done_callback': 1})

        self.assertEqual(
            dict(fut.calls),
            {'add_done_callback': 1})

    # Add patched Task & Future back to the test case
    cls.Task = Task
    cls.Future = Future

    # Add an extra unit-test
    cls.test_subclasses_ctask_cfuture = test_subclasses_ctask_cfuture

    # Disable the "test_task_source_traceback" test
    # (the test is hardcoded for a particular call stack, which
    # is slightly different for Task subclasses)
    cls.test_task_source_traceback = None

    return cls
class SetMethodsTest:
    """Mixin: forcing Future.set_result/set_exception onto a Task yields InvalidStateError."""

    def test_set_result_causes_invalid_state(self):
        """Bypassing Task's set_result guard makes the next step report 'already done'."""
        Future = type(self).Future
        self.loop.call_exception_handler = exc_handler = mock.Mock()

        async def foo():
            await asyncio.sleep(0.1)
            return 10

        coro = foo()
        task = self.new_task(self.loop, coro)
        # Call the base Future implementation directly, skipping Task's guard.
        Future.set_result(task, 'spam')

        self.assertEqual(
            self.loop.run_until_complete(task),
            'spam')

        exc_handler.assert_called_once()
        exc = exc_handler.call_args[0][0]['exception']
        with self.assertRaisesRegex(asyncio.InvalidStateError,
                                    r'step\(\): already done'):
            raise exc

        coro.close()

    def test_set_exception_causes_invalid_state(self):
        """Bypassing Task's set_exception guard makes the next step report 'already done'."""
        class MyExc(Exception):
            pass

        Future = type(self).Future
        self.loop.call_exception_handler = exc_handler = mock.Mock()

        async def foo():
            await asyncio.sleep(0.1)
            return 10

        coro = foo()
        task = self.new_task(self.loop, coro)
        # Call the base Future implementation directly, skipping Task's guard.
        Future.set_exception(task, MyExc())

        with self.assertRaises(MyExc):
            self.loop.run_until_complete(task)

        exc_handler.assert_called_once()
        exc = exc_handler.call_args[0][0]['exception']
        with self.assertRaisesRegex(asyncio.InvalidStateError,
                                    r'step\(\): already done'):
            raise exc

        coro.close()
@unittest.skipUnless(hasattr(futures, '_CFuture') and
                     hasattr(tasks, '_CTask'),
                     'requires the C _asyncio module')
class CTask_CFuture_Tests(BaseTaskTests, SetMethodsTest,
                          test_utils.TestCase):
    """Run the full task test suite against the C Task + C Future implementations."""

    Task = getattr(tasks, '_CTask', None)
    Future = getattr(futures, '_CFuture', None)

    @support.refcount_test
    def test_refleaks_in_task___init__(self):
        """Re-running Task.__init__ on the same object must not leak references."""
        gettotalrefcount = support.get_attribute(sys, 'gettotalrefcount')
        async def coro():
            pass
        task = self.new_task(self.loop, coro())
        self.loop.run_until_complete(task)
        refs_before = gettotalrefcount()
        for i in range(100):
            task.__init__(coro(), loop=self.loop)
            self.loop.run_until_complete(task)
        # Allow a small tolerance for unrelated interpreter allocations.
        self.assertAlmostEqual(gettotalrefcount() - refs_before, 0, delta=10)

    def test_del__log_destroy_pending_segfault(self):
        """Deleting the C slot _log_destroy_pending must raise, not crash."""
        async def coro():
            pass
        task = self.new_task(self.loop, coro())
        self.loop.run_until_complete(task)
        with self.assertRaises(AttributeError):
            del task._log_destroy_pending
@unittest.skipUnless(hasattr(futures, '_CFuture') and
                     hasattr(tasks, '_CTask'),
                     'requires the C _asyncio module')
@add_subclass_tests
class CTask_CFuture_SubclassTests(BaseTaskTests, test_utils.TestCase):
    """Task suite with subclasses of the C Task + C Future implementations."""

    Task = getattr(tasks, '_CTask', None)
    Future = getattr(futures, '_CFuture', None)
@unittest.skipUnless(hasattr(tasks, '_CTask'),
                     'requires the C _asyncio module')
@add_subclass_tests
class CTaskSubclass_PyFuture_Tests(BaseTaskTests, test_utils.TestCase):
    """Task suite with a C Task subclass paired with the pure-Python Future."""

    Task = getattr(tasks, '_CTask', None)
    Future = futures._PyFuture
@unittest.skipUnless(hasattr(futures, '_CFuture'),
                     'requires the C _asyncio module')
@add_subclass_tests
class PyTask_CFutureSubclass_Tests(BaseTaskTests, test_utils.TestCase):
    """Task suite with the pure-Python Task paired with a C Future subclass."""

    Future = getattr(futures, '_CFuture', None)
    Task = tasks._PyTask
@unittest.skipUnless(hasattr(tasks, '_CTask'),
                     'requires the C _asyncio module')
class CTask_PyFuture_Tests(BaseTaskTests, test_utils.TestCase):
    """Task suite with the C Task paired with the pure-Python Future."""

    Task = getattr(tasks, '_CTask', None)
    Future = futures._PyFuture
@unittest.skipUnless(hasattr(futures, '_CFuture'),
                     'requires the C _asyncio module')
class PyTask_CFuture_Tests(BaseTaskTests, test_utils.TestCase):
    """Task suite with the pure-Python Task paired with the C Future."""

    Task = tasks._PyTask
    Future = getattr(futures, '_CFuture', None)
class PyTask_PyFuture_Tests(BaseTaskTests, SetMethodsTest,
                            test_utils.TestCase):
    """Task suite with the pure-Python Task + pure-Python Future implementations."""

    Task = tasks._PyTask
    Future = futures._PyFuture
@add_subclass_tests
class PyTask_PyFuture_SubclassTests(BaseTaskTests, test_utils.TestCase):
    """Task suite with subclasses of the pure-Python Task + Future implementations."""

    Task = tasks._PyTask
    Future = futures._PyFuture
@unittest.skipUnless(hasattr(tasks, '_CTask'),
                     'requires the C _asyncio module')
class CTask_Future_Tests(test_utils.TestCase):
    """C Task must tolerate Future subclasses whose get_loop attribute is broken."""

    def test_foobar(self):
        class Fut(asyncio.Future):
            # Simulate a Future whose get_loop lookup itself raises.
            @property
            def get_loop(self):
                raise AttributeError

        async def coro():
            await fut
            return 'spam'

        self.loop = asyncio.new_event_loop()
        try:
            fut = Fut(loop=self.loop)
            self.loop.call_later(0.1, fut.set_result, 1)
            task = self.loop.create_task(coro())
            res = self.loop.run_until_complete(task)
        finally:
            self.loop.close()

        self.assertEqual(res, 'spam')
class BaseTaskIntrospectionTests:
    """Shared tests for the task-registry hooks (_register/_unregister/_enter/_leave_task).

    Subclasses bind the four staticmethods to either the pure-Python or the
    C implementation of the registry functions.
    """
    _register_task = None
    _unregister_task = None
    _enter_task = None
    _leave_task = None

    def test__register_task_1(self):
        """Registering a task-like with a ._loop property makes it visible to all_tasks()."""
        class TaskLike:
            @property
            def _loop(self):
                return loop

            def done(self):
                return False

        task = TaskLike()
        loop = mock.Mock()

        self.assertEqual(asyncio.all_tasks(loop), set())
        self._register_task(task)
        self.assertEqual(asyncio.all_tasks(loop), {task})
        self._unregister_task(task)

    def test__register_task_2(self):
        """Registering a task-like with a .get_loop() method works the same way."""
        class TaskLike:
            def get_loop(self):
                return loop

            def done(self):
                return False

        task = TaskLike()
        loop = mock.Mock()

        self.assertEqual(asyncio.all_tasks(loop), set())
        self._register_task(task)
        self.assertEqual(asyncio.all_tasks(loop), {task})
        self._unregister_task(task)

    def test__register_task_3(self):
        """A task that is already done() is filtered out of all_tasks()."""
        class TaskLike:
            def get_loop(self):
                return loop

            def done(self):
                return True

        task = TaskLike()
        loop = mock.Mock()

        self.assertEqual(asyncio.all_tasks(loop), set())
        self._register_task(task)
        self.assertEqual(asyncio.all_tasks(loop), set())
        self._unregister_task(task)

    def test__enter_task(self):
        """_enter_task makes the task current for its loop; _leave_task clears it."""
        task = mock.Mock()
        loop = mock.Mock()
        self.assertIsNone(asyncio.current_task(loop))
        self._enter_task(loop, task)
        self.assertIs(asyncio.current_task(loop), task)
        self._leave_task(loop, task)

    def test__enter_task_failure(self):
        """Entering a second task on the same loop raises and keeps the first current."""
        task1 = mock.Mock()
        task2 = mock.Mock()
        loop = mock.Mock()
        self._enter_task(loop, task1)
        with self.assertRaises(RuntimeError):
            self._enter_task(loop, task2)
        self.assertIs(asyncio.current_task(loop), task1)
        self._leave_task(loop, task1)

    def test__leave_task(self):
        """A matched enter/leave pair leaves no current task."""
        task = mock.Mock()
        loop = mock.Mock()
        self._enter_task(loop, task)
        self._leave_task(loop, task)
        self.assertIsNone(asyncio.current_task(loop))

    def test__leave_task_failure1(self):
        """Leaving with the wrong task raises and keeps the entered task current."""
        task1 = mock.Mock()
        task2 = mock.Mock()
        loop = mock.Mock()
        self._enter_task(loop, task1)
        with self.assertRaises(RuntimeError):
            self._leave_task(loop, task2)
        self.assertIs(asyncio.current_task(loop), task1)
        self._leave_task(loop, task1)

    def test__leave_task_failure2(self):
        """Leaving a task that was never entered raises RuntimeError."""
        task = mock.Mock()
        loop = mock.Mock()
        with self.assertRaises(RuntimeError):
            self._leave_task(loop, task)
        self.assertIsNone(asyncio.current_task(loop))

    def test__unregister_task(self):
        """Unregistering removes the task from all_tasks()."""
        task = mock.Mock()
        loop = mock.Mock()
        task.get_loop = lambda: loop
        self._register_task(task)
        self._unregister_task(task)
        self.assertEqual(asyncio.all_tasks(loop), set())

    def test__unregister_task_not_registered(self):
        """Unregistering a never-registered task is a harmless no-op."""
        task = mock.Mock()
        loop = mock.Mock()
        self._unregister_task(task)
        self.assertEqual(asyncio.all_tasks(loop), set())
class PyIntrospectionTests(test_utils.TestCase, BaseTaskIntrospectionTests):
    """Run the registry tests against the pure-Python implementations."""

    _register_task = staticmethod(tasks._py_register_task)
    _unregister_task = staticmethod(tasks._py_unregister_task)
    _enter_task = staticmethod(tasks._py_enter_task)
    _leave_task = staticmethod(tasks._py_leave_task)
@unittest.skipUnless(hasattr(tasks, '_c_register_task'),
                     'requires the C _asyncio module')
class CIntrospectionTests(test_utils.TestCase, BaseTaskIntrospectionTests):
    """Run the task-introspection tests against the C-accelerated helpers."""
    if hasattr(tasks, '_c_register_task'):
        _register_task = staticmethod(tasks._c_register_task)
        _unregister_task = staticmethod(tasks._c_unregister_task)
        _enter_task = staticmethod(tasks._c_enter_task)
        _leave_task = staticmethod(tasks._c_leave_task)
    else:
        # Placeholders keep the class body valid when _asyncio is missing;
        # the skipUnless decorator prevents the tests from running then.
        _register_task = _unregister_task = _enter_task = _leave_task = None
class BaseCurrentLoopTests:
    """Shared asyncio.current_task() tests; subclasses supply new_task()."""

    def setUp(self):
        super().setUp()
        self.loop = asyncio.new_event_loop()
        self.set_event_loop(self.loop)

    def new_task(self, coro):
        # Subclasses create either a pure-Python or a C Task here.
        raise NotImplementedError

    def test_current_task_no_running_loop(self):
        # With an explicit loop argument and no task running, there is
        # no current task.
        self.assertIsNone(asyncio.current_task(loop=self.loop))

    def test_current_task_no_running_loop_implicit(self):
        # Without a loop argument, current_task() needs a running loop.
        with self.assertRaises(RuntimeError):
            asyncio.current_task()

    def test_current_task_with_implicit_loop(self):
        async def coro():
            # Inside the task, all three call styles resolve to the task.
            self.assertIs(asyncio.current_task(loop=self.loop), task)
            self.assertIs(asyncio.current_task(None), task)
            self.assertIs(asyncio.current_task(), task)

        task = self.new_task(coro())
        self.loop.run_until_complete(task)
        # Once the task finished, there is no current task again.
        self.assertIsNone(asyncio.current_task(loop=self.loop))
class PyCurrentLoopTests(BaseCurrentLoopTests, test_utils.TestCase):
    """current_task() tests using the pure-Python Task implementation."""

    def new_task(self, coro):
        return tasks._PyTask(coro, loop=self.loop)
@unittest.skipUnless(hasattr(tasks, '_CTask'),
                     'requires the C _asyncio module')
class CCurrentLoopTests(BaseCurrentLoopTests, test_utils.TestCase):
    """current_task() tests using the C Task implementation."""

    def new_task(self, coro):
        # Fetched via getattr(); the skipUnless above guarantees the
        # attribute exists when this method actually runs.
        return getattr(tasks, '_CTask')(coro, loop=self.loop)
class GenericTaskTests(test_utils.TestCase):
    """Implementation-independent sanity checks on asyncio.Task."""

    def test_future_subclass(self):
        # Task is documented to be a subclass of Future.
        self.assertTrue(issubclass(asyncio.Task, asyncio.Future))

    @support.cpython_only
    def test_asyncio_module_compiled(self):
        # Because of circular imports it's easy to make _asyncio
        # module non-importable. This is a simple test that will
        # fail on systems where C modules were successfully compiled
        # (hence the test for _functools etc), but _asyncio somehow didn't.
        try:
            import _functools
            import _json
            import _pickle
        except ImportError:
            self.skipTest('C modules are not available')
        else:
            try:
                import _asyncio
            except ImportError:
                self.fail('_asyncio module is missing')
class GatherTestsBase:
    """Shared gather() tests; subclasses define wrap_futures()/_gather()."""

    def setUp(self):
        super().setUp()
        self.one_loop = self.new_test_loop()
        self.other_loop = self.new_test_loop()
        self.set_event_loop(self.one_loop, cleanup=False)

    def _run_loop(self, loop):
        # Drain every already-scheduled callback without blocking.
        while loop._ready:
            test_utils.run_briefly(loop)

    def _check_success(self, **kwargs):
        a, b, c = [self.one_loop.create_future() for i in range(3)]
        fut = self._gather(*self.wrap_futures(a, b, c), **kwargs)
        cb = test_utils.MockCallback()
        fut.add_done_callback(cb)
        # Complete two of the three children (out of order on purpose).
        b.set_result(1)
        a.set_result(2)
        self._run_loop(self.one_loop)
        # The gather future must not complete until every child has.
        self.assertEqual(cb.called, False)
        self.assertFalse(fut.done())
        c.set_result(3)
        self._run_loop(self.one_loop)
        cb.assert_called_once_with(fut)
        # Results come back in submission order (a, b, c), not in
        # completion order.
        self.assertEqual(fut.result(), [2, 1, 3])

    def test_success(self):
        self._check_success()
        self._check_success(return_exceptions=False)

    def test_result_exception_success(self):
        self._check_success(return_exceptions=True)

    def test_one_exception(self):
        a, b, c, d, e = [self.one_loop.create_future() for i in range(5)]
        fut = self._gather(*self.wrap_futures(a, b, c, d, e))
        cb = test_utils.MockCallback()
        fut.add_done_callback(cb)
        exc = ZeroDivisionError()
        a.set_result(1)
        # Without return_exceptions, the first child exception completes
        # the gather future immediately.
        b.set_exception(exc)
        self._run_loop(self.one_loop)
        self.assertTrue(fut.done())
        cb.assert_called_once_with(fut)
        self.assertIs(fut.exception(), exc)
        # Does nothing
        c.set_result(3)
        d.cancel()
        e.set_exception(RuntimeError())
        e.exception()

    def test_return_exceptions(self):
        a, b, c, d = [self.one_loop.create_future() for i in range(4)]
        fut = self._gather(*self.wrap_futures(a, b, c, d),
                           return_exceptions=True)
        cb = test_utils.MockCallback()
        fut.add_done_callback(cb)
        exc = ZeroDivisionError()
        exc2 = RuntimeError()
        b.set_result(1)
        c.set_exception(exc)
        a.set_result(3)
        self._run_loop(self.one_loop)
        # With return_exceptions=True the gather keeps waiting for all
        # children even after some have failed.
        self.assertFalse(fut.done())
        d.set_exception(exc2)
        self._run_loop(self.one_loop)
        self.assertTrue(fut.done())
        cb.assert_called_once_with(fut)
        # Exceptions appear in the result list in submission order.
        self.assertEqual(fut.result(), [3, 1, exc, exc2])

    def test_env_var_debug(self):
        code = '\n'.join((
            'import asyncio.coroutines',
            'print(asyncio.coroutines._DEBUG)'))

        # Test with -E to not fail if the unit test was run with
        # PYTHONASYNCIODEBUG set to a non-empty string
        sts, stdout, stderr = assert_python_ok('-E', '-c', code)
        self.assertEqual(stdout.rstrip(), b'False')

        sts, stdout, stderr = assert_python_ok('-c', code,
                                               PYTHONASYNCIODEBUG='',
                                               PYTHONDEVMODE='')
        self.assertEqual(stdout.rstrip(), b'False')

        sts, stdout, stderr = assert_python_ok('-c', code,
                                               PYTHONASYNCIODEBUG='1',
                                               PYTHONDEVMODE='')
        self.assertEqual(stdout.rstrip(), b'True')

        # -E ignores the environment variable even when it is set.
        sts, stdout, stderr = assert_python_ok('-E', '-c', code,
                                               PYTHONASYNCIODEBUG='1',
                                               PYTHONDEVMODE='')
        self.assertEqual(stdout.rstrip(), b'False')

        # -X dev
        sts, stdout, stderr = assert_python_ok('-E', '-X', 'dev',
                                               '-c', code)
        self.assertEqual(stdout.rstrip(), b'True')
class FutureGatherTests(GatherTestsBase, test_utils.TestCase):
    """gather() tests where the awaitables are plain Futures."""

    def wrap_futures(self, *futures):
        # Futures are passed to gather() unchanged.
        return futures

    def _gather(self, *args, **kwargs):
        return asyncio.gather(*args, **kwargs)

    def test_constructor_empty_sequence_without_loop(self):
        # gather() with no arguments and no running loop warns (implicit
        # loop lookup is deprecated) and then fails to find a loop.
        with self.assertWarns(DeprecationWarning) as cm:
            with self.assertRaises(RuntimeError):
                asyncio.gather()
        self.assertEqual(cm.warnings[0].filename, __file__)

    def test_constructor_empty_sequence_use_running_loop(self):
        async def gather():
            return asyncio.gather()
        fut = self.one_loop.run_until_complete(gather())
        self.assertIsInstance(fut, asyncio.Future)
        self.assertIs(fut._loop, self.one_loop)
        self._run_loop(self.one_loop)
        self.assertTrue(fut.done())
        # Empty gather resolves immediately to an empty list.
        self.assertEqual(fut.result(), [])

    def test_constructor_empty_sequence_use_global_loop(self):
        # Deprecated in 3.10
        asyncio.set_event_loop(self.one_loop)
        self.addCleanup(asyncio.set_event_loop, None)
        with self.assertWarns(DeprecationWarning) as cm:
            fut = asyncio.gather()
        self.assertEqual(cm.warnings[0].filename, __file__)
        self.assertIsInstance(fut, asyncio.Future)
        self.assertIs(fut._loop, self.one_loop)
        self._run_loop(self.one_loop)
        self.assertTrue(fut.done())
        self.assertEqual(fut.result(), [])

    def test_constructor_heterogenous_futures(self):
        # Futures attached to different loops cannot be gathered together.
        fut1 = self.one_loop.create_future()
        fut2 = self.other_loop.create_future()
        with self.assertRaises(ValueError):
            asyncio.gather(fut1, fut2)

    def test_constructor_homogenous_futures(self):
        children = [self.other_loop.create_future() for i in range(3)]
        fut = asyncio.gather(*children)
        # The gather future binds to the children's loop.
        self.assertIs(fut._loop, self.other_loop)
        self._run_loop(self.other_loop)
        self.assertFalse(fut.done())
        # Gathering the same (still-pending) children again behaves the same.
        fut = asyncio.gather(*children)
        self.assertIs(fut._loop, self.other_loop)
        self._run_loop(self.other_loop)
        self.assertFalse(fut.done())

    def test_one_cancellation(self):
        a, b, c, d, e = [self.one_loop.create_future() for i in range(5)]
        fut = asyncio.gather(a, b, c, d, e)
        cb = test_utils.MockCallback()
        fut.add_done_callback(cb)
        a.set_result(1)
        b.cancel()
        self._run_loop(self.one_loop)
        self.assertTrue(fut.done())
        cb.assert_called_once_with(fut)
        # A cancelled child completes the gather with CancelledError,
        # but the gather future itself is not marked cancelled.
        self.assertFalse(fut.cancelled())
        self.assertIsInstance(fut.exception(), asyncio.CancelledError)
        # Does nothing
        c.set_result(3)
        d.cancel()
        e.set_exception(RuntimeError())
        e.exception()

    def test_result_exception_one_cancellation(self):
        a, b, c, d, e, f = [self.one_loop.create_future()
                            for i in range(6)]
        fut = asyncio.gather(a, b, c, d, e, f, return_exceptions=True)
        cb = test_utils.MockCallback()
        fut.add_done_callback(cb)
        a.set_result(1)
        zde = ZeroDivisionError()
        b.set_exception(zde)
        c.cancel()
        self._run_loop(self.one_loop)
        # With return_exceptions=True a cancelled child does not finish
        # the gather early.
        self.assertFalse(fut.done())
        d.set_result(3)
        e.cancel()
        rte = RuntimeError()
        f.set_exception(rte)
        res = self.one_loop.run_until_complete(fut)
        # Cancelled children surface as CancelledError entries in place.
        self.assertIsInstance(res[2], asyncio.CancelledError)
        self.assertIsInstance(res[4], asyncio.CancelledError)
        res[2] = res[4] = None
        self.assertEqual(res, [1, zde, None, 3, None, rte])
        cb.assert_called_once_with(fut)
class CoroutineGatherTests(GatherTestsBase, test_utils.TestCase):
    """gather() tests where each Future is wrapped in a coroutine."""

    def wrap_futures(self, *futures):
        coros = []
        for fut in futures:
            # Bind fut as a default argument to avoid the late-binding
            # closure problem inside the loop.
            async def coro(fut=fut):
                return await fut
            coros.append(coro())
        return coros

    def _gather(self, *args, **kwargs):
        # gather() must be called while the loop is running so the
        # coroutines can be wrapped into tasks.
        async def coro():
            return asyncio.gather(*args, **kwargs)
        return self.one_loop.run_until_complete(coro())

    def test_constructor_without_loop(self):
        async def coro():
            return 'abc'
        gen1 = coro()
        self.addCleanup(gen1.close)
        gen2 = coro()
        self.addCleanup(gen2.close)
        # No running loop: implicit loop lookup warns, then fails.
        with self.assertWarns(DeprecationWarning) as cm:
            with self.assertRaises(RuntimeError):
                asyncio.gather(gen1, gen2)
        self.assertEqual(cm.warnings[0].filename, __file__)

    def test_constructor_use_running_loop(self):
        async def coro():
            return 'abc'
        gen1 = coro()
        gen2 = coro()
        async def gather():
            return asyncio.gather(gen1, gen2)
        fut = self.one_loop.run_until_complete(gather())
        self.assertIs(fut._loop, self.one_loop)
        self.one_loop.run_until_complete(fut)

    def test_constructor_use_global_loop(self):
        # Deprecated in 3.10
        async def coro():
            return 'abc'
        asyncio.set_event_loop(self.other_loop)
        self.addCleanup(asyncio.set_event_loop, None)
        gen1 = coro()
        gen2 = coro()
        with self.assertWarns(DeprecationWarning) as cm:
            fut = asyncio.gather(gen1, gen2)
        self.assertEqual(cm.warnings[0].filename, __file__)
        self.assertIs(fut._loop, self.other_loop)
        self.other_loop.run_until_complete(fut)

    def test_duplicate_coroutines(self):
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def coro(s):
                return s
        c = coro('abc')
        # Passing the same coroutine object several times must yield its
        # result once per position.
        fut = self._gather(c, c, coro('def'), c)
        self._run_loop(self.one_loop)
        self.assertEqual(fut.result(), ['abc', 'abc', 'def', 'abc'])

    def test_cancellation_broadcast(self):
        # Cancelling outer() cancels all children.
        proof = 0
        waiter = self.one_loop.create_future()

        async def inner():
            nonlocal proof
            await waiter
            proof += 1

        child1 = asyncio.ensure_future(inner(), loop=self.one_loop)
        child2 = asyncio.ensure_future(inner(), loop=self.one_loop)
        gatherer = None

        async def outer():
            nonlocal proof, gatherer
            gatherer = asyncio.gather(child1, child2)
            await gatherer
            proof += 100

        f = asyncio.ensure_future(outer(), loop=self.one_loop)
        test_utils.run_briefly(self.one_loop)
        self.assertTrue(f.cancel())
        with self.assertRaises(asyncio.CancelledError):
            self.one_loop.run_until_complete(f)
        # The gather was already cancelled via f, so a second cancel
        # reports False.
        self.assertFalse(gatherer.cancel())
        self.assertTrue(waiter.cancelled())
        self.assertTrue(child1.cancelled())
        self.assertTrue(child2.cancelled())
        test_utils.run_briefly(self.one_loop)
        # Neither inner() nor outer() reached the code after the await.
        self.assertEqual(proof, 0)

    def test_exception_marking(self):
        # Test for the first line marked "Mark exception retrieved."

        async def inner(f):
            await f
            raise RuntimeError('should not be ignored')

        a = self.one_loop.create_future()
        b = self.one_loop.create_future()

        async def outer():
            await asyncio.gather(inner(a), inner(b))

        f = asyncio.ensure_future(outer(), loop=self.one_loop)
        test_utils.run_briefly(self.one_loop)
        a.set_result(None)
        test_utils.run_briefly(self.one_loop)
        b.set_result(None)
        test_utils.run_briefly(self.one_loop)
        self.assertIsInstance(f.exception(), RuntimeError)
class RunCoroutineThreadsafeTests(test_utils.TestCase):
    """Test case for asyncio.run_coroutine_threadsafe."""

    def setUp(self):
        super().setUp()
        self.loop = asyncio.new_event_loop()
        self.set_event_loop(self.loop) # Will cleanup properly

    async def add(self, a, b, fail=False, cancel=False):
        """Wait 0.05 second and return a + b.

        Optionally raise RuntimeError (fail=True) or cancel the running
        task (cancel=True) before returning.
        """
        await asyncio.sleep(0.05)
        if fail:
            raise RuntimeError("Fail!")
        if cancel:
            asyncio.current_task(self.loop).cancel()
            await asyncio.sleep(0)
        return a + b

    def target(self, fail=False, cancel=False, timeout=None,
               advance_coro=False):
        """Run the add coroutine in the event loop from a worker thread."""
        coro = self.add(1, 2, fail=fail, cancel=cancel)
        future = asyncio.run_coroutine_threadsafe(coro, self.loop)
        if advance_coro:
            # this is for test_run_coroutine_threadsafe_task_factory_exception;
            # otherwise it spills errors and breaks **other** unittests, since
            # 'target' is interacting with threads.
            # With this call, `coro` will be advanced, so that
            # CoroWrapper.__del__ won't do anything when asyncio tests run
            # in debug mode.
            self.loop.call_soon_threadsafe(coro.send, None)
        try:
            return future.result(timeout)
        finally:
            # Make sure the future never outlives the test un-finished.
            future.done() or future.cancel()

    def test_run_coroutine_threadsafe(self):
        """Test coroutine submission from a thread to an event loop."""
        future = self.loop.run_in_executor(None, self.target)
        result = self.loop.run_until_complete(future)
        self.assertEqual(result, 3)

    def test_run_coroutine_threadsafe_with_exception(self):
        """Test coroutine submission from a thread to an event loop
        when an exception is raised."""
        future = self.loop.run_in_executor(None, self.target, True)
        with self.assertRaises(RuntimeError) as exc_context:
            self.loop.run_until_complete(future)
        self.assertIn("Fail!", exc_context.exception.args)

    def test_run_coroutine_threadsafe_with_timeout(self):
        """Test coroutine submission from a thread to an event loop
        when a timeout is raised."""
        callback = lambda: self.target(timeout=0)
        future = self.loop.run_in_executor(None, callback)
        with self.assertRaises(asyncio.TimeoutError):
            self.loop.run_until_complete(future)
        test_utils.run_briefly(self.loop)
        # Check that there's no pending task (add has been cancelled)
        for task in asyncio.all_tasks(self.loop):
            self.assertTrue(task.done())

    def test_run_coroutine_threadsafe_task_cancelled(self):
        """Test coroutine submission from a thread to an event loop
        when the task is cancelled."""
        callback = lambda: self.target(cancel=True)
        future = self.loop.run_in_executor(None, callback)
        with self.assertRaises(asyncio.CancelledError):
            self.loop.run_until_complete(future)

    def test_run_coroutine_threadsafe_task_factory_exception(self):
        """Test coroutine submission from a thread to an event loop
        when the task factory raises an exception."""
        def task_factory(loop, coro):
            raise NameError

        run = self.loop.run_in_executor(
            None, lambda: self.target(advance_coro=True))

        # Set exception handler
        callback = test_utils.MockCallback()
        self.loop.set_exception_handler(callback)

        # Set corrupted task factory
        self.addCleanup(self.loop.set_task_factory,
                        self.loop.get_task_factory())
        self.loop.set_task_factory(task_factory)

        # Run event loop
        with self.assertRaises(NameError) as exc_context:
            self.loop.run_until_complete(run)

        # Check exceptions
        self.assertEqual(len(callback.call_args_list), 1)
        (loop, context), kwargs = callback.call_args
        self.assertEqual(context['exception'], exc_context.exception)
class SleepTests(test_utils.TestCase):
    """Tests for asyncio.sleep()."""

    def setUp(self):
        super().setUp()
        self.loop = asyncio.new_event_loop()
        self.set_event_loop(self.loop)

    def tearDown(self):
        self.loop.close()
        self.loop = None
        super().tearDown()

    def test_sleep_zero(self):
        # sleep(0) must yield to the event loop exactly once, letting
        # already-scheduled callbacks run before the coroutine resumes.
        result = 0

        def inc_result(num):
            nonlocal result
            result += num

        async def coro():
            self.loop.call_soon(inc_result, 1)
            self.assertEqual(result, 0)
            num = await asyncio.sleep(0, result=10)
            self.assertEqual(result, 1) # inc'ed by call_soon
            inc_result(num) # num should be 11

        self.loop.run_until_complete(coro())
        self.assertEqual(result, 11)
class WaitTests(test_utils.TestCase):
    """Tests for asyncio.wait() argument handling."""

    def setUp(self):
        super().setUp()
        self.loop = asyncio.new_event_loop()
        self.set_event_loop(self.loop)

    def tearDown(self):
        self.loop.close()
        self.loop = None
        super().tearDown()

    def test_coro_is_deprecated_in_wait(self):
        # Remove test when passing coros to asyncio.wait() is removed in 3.11
        with self.assertWarns(DeprecationWarning):
            self.loop.run_until_complete(
                asyncio.wait([coroutine_function()]))

        # Mixing tasks and bare coroutines also warns.
        task = self.loop.create_task(coroutine_function())
        with self.assertWarns(DeprecationWarning):
            self.loop.run_until_complete(
                asyncio.wait([task, coroutine_function()]))
class CompatibilityTests(test_utils.TestCase):
    # Tests for checking a bridge between old-styled coroutines
    # and async/await syntax

    def setUp(self):
        super().setUp()
        self.loop = asyncio.new_event_loop()
        self.set_event_loop(self.loop)

    def tearDown(self):
        self.loop.close()
        self.loop = None
        super().tearDown()

    def test_yield_from_awaitable(self):
        # An @asyncio.coroutine generator may `yield from` a native
        # awaitable; defining one emits a DeprecationWarning.
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def coro():
                yield from asyncio.sleep(0)
                return 'ok'

        result = self.loop.run_until_complete(coro())
        self.assertEqual('ok', result)

    def test_await_old_style_coro(self):
        # Native coroutines can await old-style @asyncio.coroutine ones.
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def coro1():
                return 'ok1'

        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def coro2():
                yield from asyncio.sleep(0)
                return 'ok2'

        async def inner():
            return await asyncio.gather(coro1(), coro2())

        result = self.loop.run_until_complete(inner())
        self.assertEqual(['ok1', 'ok2'], result)

    def test_debug_mode_interop(self):
        # https://bugs.python.org/issue32636
        code = textwrap.dedent("""
            import asyncio

            async def native_coro():
                pass

            @asyncio.coroutine
            def old_style_coro():
                yield from native_coro()

            asyncio.run(old_style_coro())
        """)
        assert_python_ok("-Wignore::DeprecationWarning", "-c", code,
                         PYTHONASYNCIODEBUG="1")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
# NOTE: extraction artifact — dataset metadata columns (avg line length,
# max line length, alphanum fraction) fused into the file; not Python code.
import collections
import contextlib
import contextvars
import functools
import gc
import io
import random
import re
import sys
import textwrap
import traceback
import types
import unittest
import weakref
from unittest import mock
import asyncio
from asyncio import coroutines
from asyncio import futures
from asyncio import tasks
from test.test_asyncio import utils as test_utils
from test import support
from test.support.script_helper import assert_python_ok
def tearDownModule():
    # Reset the global event loop policy that tests may have replaced.
    asyncio.set_event_loop_policy(None)
async def coroutine_function():
    """A do-nothing coroutine, used where tests just need an awaitable."""
    return None
@contextlib.contextmanager
def set_coroutine_debug(enabled):
    # Temporarily force asyncio's coroutine debug flag to *enabled*,
    # restoring the previous value on exit (even on exception).
    # NOTE(review): relies on the private asyncio.coroutines._DEBUG
    # attribute, which exists only on older CPython versions.
    coroutines = asyncio.coroutines

    old_debug = coroutines._DEBUG
    try:
        coroutines._DEBUG = enabled
        yield
    finally:
        coroutines._DEBUG = old_debug
def format_coroutine(qualname, state, src, source_traceback, generator=False):
    """Build the ``coro=<...>`` fragment expected in a Task repr.

    *generator* selects the bare state form; otherwise ", defined" is
    appended.  When *source_traceback* is given, its last frame supplies
    the "created at file:line" suffix.
    """
    suffix = state if generator else f'{state}, defined'
    base = f'coro=<{qualname}() {suffix} at {src}>'
    if source_traceback is None:
        return base
    frame = source_traceback[-1]
    return f'{base} created at {frame[0]}:{frame[1]}'
def get_innermost_context(exc):
    """Follow the ``__context__`` chain of *exc* to its innermost cause.

    Returns ``(type, args, depth)`` for the innermost exception, where
    *depth* is the number of ``__context__`` links traversed.
    """
    depth = 0
    while exc.__context__ is not None:
        exc = exc.__context__
        depth += 1
    return (type(exc), exc.args, depth)
class Dummy:
    """Callable placeholder with a stable repr for use in assertions."""

    def __call__(self, *args):
        # Accept and ignore any positional arguments.
        pass

    def __repr__(self):
        return '<Dummy>'
class CoroLikeObject:
    """Minimal coroutine look-alike: completes immediately with value 42."""

    def __await__(self):
        # Awaiting this object iterates the object itself.
        return self

    def send(self, v):
        # First step "finishes" the coroutine with result 42.
        raise StopIteration(42)

    def throw(self, *exc):
        # Swallow anything thrown into the "coroutine".
        pass

    def close(self):
        pass
_EPSILON = 0.0001  # small float tolerance; presumably for comparing loop times — verify at call sites
class BaseTaskTests:
Task = None
Future = None
def new_task(self, loop, coro, name='TestTask'):
return self.__class__.Task(coro, loop=loop, name=name)
def new_future(self, loop):
return self.__class__.Future(loop=loop)
def setUp(self):
super().setUp()
self.loop = self.new_test_loop()
self.loop.set_task_factory(self.new_task)
self.loop.create_future = lambda: self.new_future(self.loop)
def test_task_cancel_message_getter(self):
async def coro():
pass
t = self.new_task(self.loop, coro())
self.assertTrue(hasattr(t, '_cancel_message'))
self.assertEqual(t._cancel_message, None)
t.cancel('my message')
self.assertEqual(t._cancel_message, 'my message')
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(t)
def test_task_cancel_message_setter(self):
async def coro():
pass
t = self.new_task(self.loop, coro())
t.cancel('my message')
t._cancel_message = 'my new message'
self.assertEqual(t._cancel_message, 'my new message')
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(t)
def test_task_del_collect(self):
class Evil:
def __del__(self):
gc.collect()
async def run():
return Evil()
self.loop.run_until_complete(
asyncio.gather(*[
self.new_task(self.loop, run()) for _ in range(100)
]))
def test_other_loop_future(self):
other_loop = asyncio.new_event_loop()
fut = self.new_future(other_loop)
async def run(fut):
await fut
try:
with self.assertRaisesRegex(RuntimeError,
r'Task .* got Future .* attached'):
self.loop.run_until_complete(run(fut))
finally:
other_loop.close()
def test_task_awaits_on_itself(self):
async def test():
await task
task = asyncio.ensure_future(test(), loop=self.loop)
with self.assertRaisesRegex(RuntimeError,
'Task cannot await on itself'):
self.loop.run_until_complete(task)
def test_task_class(self):
async def notmuch():
return 'ok'
t = self.new_task(self.loop, notmuch())
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
self.assertIs(t._loop, self.loop)
self.assertIs(t.get_loop(), self.loop)
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
t = self.new_task(loop, notmuch())
self.assertIs(t._loop, loop)
loop.run_until_complete(t)
loop.close()
def test_ensure_future_coroutine(self):
async def notmuch():
return 'ok'
t = asyncio.ensure_future(notmuch(), loop=self.loop)
self.assertIs(t._loop, self.loop)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
a = notmuch()
self.addCleanup(a.close)
with self.assertWarns(DeprecationWarning) as cm:
with self.assertRaisesRegex(RuntimeError, 'There is no current event loop'):
asyncio.ensure_future(a)
self.assertEqual(cm.warnings[0].filename, __file__)
async def test():
return asyncio.ensure_future(notmuch())
t = self.loop.run_until_complete(test())
self.assertIs(t._loop, self.loop)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
asyncio.set_event_loop(self.loop)
self.addCleanup(asyncio.set_event_loop, None)
with self.assertWarns(DeprecationWarning) as cm:
t = asyncio.ensure_future(notmuch())
self.assertEqual(cm.warnings[0].filename, __file__)
self.assertIs(t._loop, self.loop)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
def test_ensure_future_coroutine_2(self):
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def notmuch():
return 'ok'
t = asyncio.ensure_future(notmuch(), loop=self.loop)
self.assertIs(t._loop, self.loop)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
a = notmuch()
self.addCleanup(a.close)
with self.assertWarns(DeprecationWarning) as cm:
with self.assertRaisesRegex(RuntimeError, 'There is no current event loop'):
asyncio.ensure_future(a)
self.assertEqual(cm.warnings[0].filename, __file__)
async def test():
return asyncio.ensure_future(notmuch())
t = self.loop.run_until_complete(test())
self.assertIs(t._loop, self.loop)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
asyncio.set_event_loop(self.loop)
self.addCleanup(asyncio.set_event_loop, None)
with self.assertWarns(DeprecationWarning) as cm:
t = asyncio.ensure_future(notmuch())
self.assertEqual(cm.warnings[0].filename, __file__)
self.assertIs(t._loop, self.loop)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
def test_ensure_future_future(self):
f_orig = self.new_future(self.loop)
f_orig.set_result('ko')
f = asyncio.ensure_future(f_orig)
self.loop.run_until_complete(f)
self.assertTrue(f.done())
self.assertEqual(f.result(), 'ko')
self.assertIs(f, f_orig)
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
with self.assertRaises(ValueError):
f = asyncio.ensure_future(f_orig, loop=loop)
loop.close()
f = asyncio.ensure_future(f_orig, loop=self.loop)
self.assertIs(f, f_orig)
def test_ensure_future_task(self):
async def notmuch():
return 'ok'
t_orig = self.new_task(self.loop, notmuch())
t = asyncio.ensure_future(t_orig)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
self.assertIs(t, t_orig)
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
with self.assertRaises(ValueError):
t = asyncio.ensure_future(t_orig, loop=loop)
loop.close()
t = asyncio.ensure_future(t_orig, loop=self.loop)
self.assertIs(t, t_orig)
def test_ensure_future_awaitable(self):
class Aw:
def __init__(self, coro):
self.coro = coro
def __await__(self):
return (yield from self.coro)
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def coro():
return 'ok'
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
fut = asyncio.ensure_future(Aw(coro()), loop=loop)
loop.run_until_complete(fut)
assert fut.result() == 'ok'
def test_ensure_future_neither(self):
with self.assertRaises(TypeError):
asyncio.ensure_future('ok')
def test_ensure_future_error_msg(self):
loop = asyncio.new_event_loop()
f = self.new_future(self.loop)
with self.assertRaisesRegex(ValueError, 'The future belongs to a '
'different loop than the one specified as '
'the loop argument'):
asyncio.ensure_future(f, loop=loop)
loop.close()
def test_get_stack(self):
T = None
async def foo():
await bar()
async def bar():
f = T.get_stack(limit=1)
try:
self.assertEqual(f[0].f_code.co_name, 'foo')
finally:
f = None
file = io.StringIO()
T.print_stack(limit=1, file=file)
file.seek(0)
tb = file.read()
self.assertRegex(tb, r'foo\(\) running')
async def runner():
nonlocal T
T = asyncio.ensure_future(foo(), loop=self.loop)
await T
self.loop.run_until_complete(runner())
def test_task_repr(self):
self.loop.set_debug(False)
async def notmuch():
return 'abc'
self.assertEqual(notmuch.__name__, 'notmuch')
self.assertRegex(notmuch.__qualname__,
r'\w+.test_task_repr.<locals>.notmuch')
self.assertEqual(notmuch.__module__, __name__)
filename, lineno = test_utils.get_function_source(notmuch)
src = "%s:%s" % (filename, lineno)
gen = notmuch()
coro_qualname = 'BaseTaskTests.test_task_repr.<locals>.notmuch'
self.assertEqual(gen.__name__, 'notmuch')
self.assertEqual(gen.__qualname__, coro_qualname)
t = self.new_task(self.loop, gen)
t.add_done_callback(Dummy())
coro = format_coroutine(coro_qualname, 'running', src,
t._source_traceback, generator=True)
self.assertEqual(repr(t),
"<Task pending name='TestTask' %s cb=[<Dummy>()]>" % coro)
t.cancel()
self.assertEqual(repr(t),
"<Task cancelling name='TestTask' %s cb=[<Dummy>()]>" % coro)
self.assertRaises(asyncio.CancelledError,
self.loop.run_until_complete, t)
coro = format_coroutine(coro_qualname, 'done', src,
t._source_traceback)
self.assertEqual(repr(t),
"<Task cancelled name='TestTask' %s>" % coro)
t = self.new_task(self.loop, notmuch())
self.loop.run_until_complete(t)
coro = format_coroutine(coro_qualname, 'done', src,
t._source_traceback)
self.assertEqual(repr(t),
"<Task finished name='TestTask' %s result='abc'>" % coro)
def test_task_repr_autogenerated(self):
async def notmuch():
return 123
t1 = self.new_task(self.loop, notmuch(), None)
t2 = self.new_task(self.loop, notmuch(), None)
self.assertNotEqual(repr(t1), repr(t2))
match1 = re.match(r"^<Task pending name='Task-(\d+)'", repr(t1))
self.assertIsNotNone(match1)
match2 = re.match(r"^<Task pending name='Task-(\d+)'", repr(t2))
self.assertIsNotNone(match2)
self.assertLess(int(match1.group(1)), int(match2.group(1)))
self.loop.run_until_complete(t1)
self.loop.run_until_complete(t2)
def test_task_repr_name_not_str(self):
async def notmuch():
return 123
t = self.new_task(self.loop, notmuch())
t.set_name({6})
self.assertEqual(t.get_name(), '{6}')
self.loop.run_until_complete(t)
def test_task_repr_coro_decorator(self):
self.loop.set_debug(False)
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def notmuch():
# @coroutine decorator
return 123
# test coroutine function
self.assertEqual(notmuch.__name__, 'notmuch')
self.assertRegex(notmuch.__qualname__,
r'\w+.test_task_repr_coro_decorator'
r'\.<locals>\.notmuch')
self.assertEqual(notmuch.__module__, __name__)
# test coroutine object
gen = notmuch()
# On Python >= 3.5, generators now inherit the name of the
# function, as expected, and have a qualified name (__qualname__
# attribute).
coro_name = 'notmuch'
coro_qualname = ('BaseTaskTests.test_task_repr_coro_decorator'
'.<locals>.notmuch')
self.assertEqual(gen.__name__, coro_name)
self.assertEqual(gen.__qualname__, coro_qualname)
# test repr(CoroWrapper)
if coroutines._DEBUG:
# format the coroutine object
if coroutines._DEBUG:
filename, lineno = test_utils.get_function_source(notmuch)
frame = gen._source_traceback[-1]
coro = ('%s() running, defined at %s:%s, created at %s:%s'
% (coro_qualname, filename, lineno,
frame[0], frame[1]))
else:
code = gen.gi_code
coro = ('%s() running at %s:%s'
% (coro_qualname, code.co_filename,
code.co_firstlineno))
self.assertEqual(repr(gen), '<CoroWrapper %s>' % coro)
# test pending Task
t = self.new_task(self.loop, gen)
t.add_done_callback(Dummy())
# format the coroutine object
if coroutines._DEBUG:
src = '%s:%s' % test_utils.get_function_source(notmuch)
else:
code = gen.gi_code
src = '%s:%s' % (code.co_filename, code.co_firstlineno)
coro = format_coroutine(coro_qualname, 'running', src,
t._source_traceback,
generator=not coroutines._DEBUG)
self.assertEqual(repr(t),
"<Task pending name='TestTask' %s cb=[<Dummy>()]>" % coro)
self.loop.run_until_complete(t)
def test_task_repr_wait_for(self):
self.loop.set_debug(False)
async def wait_for(fut):
return await fut
fut = self.new_future(self.loop)
task = self.new_task(self.loop, wait_for(fut))
test_utils.run_briefly(self.loop)
self.assertRegex(repr(task),
'<Task .* wait_for=%s>' % re.escape(repr(fut)))
fut.set_result(None)
self.loop.run_until_complete(task)
def test_task_repr_partial_corowrapper(self):
# Issue #222: repr(CoroWrapper) must not fail in debug mode if the
# coroutine is a partial function
with set_coroutine_debug(True):
self.loop.set_debug(True)
async def func(x, y):
await asyncio.sleep(0)
with self.assertWarns(DeprecationWarning):
partial_func = asyncio.coroutine(functools.partial(func, 1))
task = self.loop.create_task(partial_func(2))
# make warnings quiet
task._log_destroy_pending = False
self.addCleanup(task._coro.close)
coro_repr = repr(task._coro)
expected = (
r'<coroutine object \w+\.test_task_repr_partial_corowrapper'
r'\.<locals>\.func at'
)
self.assertRegex(coro_repr, expected)
def test_task_basics(self):
async def outer():
a = await inner1()
b = await inner2()
return a+b
async def inner1():
return 42
async def inner2():
return 1000
t = outer()
self.assertEqual(self.loop.run_until_complete(t), 1042)
def test_exception_chaining_after_await(self):
# Test that when awaiting on a task when an exception is already
# active, if the task raises an exception it will be chained
# with the original.
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
async def raise_error():
raise ValueError
async def run():
try:
raise KeyError(3)
except Exception as exc:
task = self.new_task(loop, raise_error())
try:
await task
except Exception as exc:
self.assertEqual(type(exc), ValueError)
chained = exc.__context__
self.assertEqual((type(chained), chained.args),
(KeyError, (3,)))
try:
task = self.new_task(loop, run())
loop.run_until_complete(task)
finally:
loop.close()
def test_exception_chaining_after_await_with_context_cycle(self):
# Check trying to create an exception context cycle:
# https://bugs.python.org/issue40696
has_cycle = None
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
async def process_exc(exc):
raise exc
async def run():
nonlocal has_cycle
try:
raise KeyError('a')
except Exception as exc:
task = self.new_task(loop, process_exc(exc))
try:
await task
except BaseException as exc:
has_cycle = (exc is exc.__context__)
# Prevent a hang if has_cycle is True.
exc.__context__ = None
try:
task = self.new_task(loop, run())
loop.run_until_complete(task)
finally:
loop.close()
# This also distinguishes from the initial has_cycle=None.
self.assertEqual(has_cycle, False)
def test_cancel(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
yield 0
loop = self.new_test_loop(gen)
async def task():
await asyncio.sleep(10.0)
return 12
t = self.new_task(loop, task())
loop.call_soon(t.cancel)
with self.assertRaises(asyncio.CancelledError):
loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertTrue(t.cancelled())
self.assertFalse(t.cancel())
    def test_cancel_with_message_then_future_result(self):
        """cancel(msg)'s message survives into the innermost CancelledError
        observed via Future.result()."""
        # Test Future.result() after calling cancel() with a message.
        cases = [
            ((), ()),
            ((None,), ()),
            (('my message',), ('my message',)),
            # Non-string values should roundtrip.
            ((5,), (5,)),
        ]
        for cancel_args, expected_args in cases:
            with self.subTest(cancel_args=cancel_args):
                loop = asyncio.new_event_loop()
                self.set_event_loop(loop)
                async def sleep():
                    await asyncio.sleep(10)
                async def coro():
                    task = self.new_task(loop, sleep())
                    await asyncio.sleep(0)
                    task.cancel(*cancel_args)
                    done, pending = await asyncio.wait([task])
                    # result() re-raises the CancelledError.
                    task.result()
                task = self.new_task(loop, coro())
                with self.assertRaises(asyncio.CancelledError) as cm:
                    loop.run_until_complete(task)
                exc = cm.exception
                self.assertEqual(exc.args, ())
                # The message lives on the innermost chained exception,
                # two levels deep.
                actual = get_innermost_context(exc)
                self.assertEqual(actual,
                                 (asyncio.CancelledError, expected_args, 2))
    def test_cancel_with_message_then_future_exception(self):
        """cancel(msg)'s message survives into the innermost CancelledError
        observed via Future.exception()."""
        # Test Future.exception() after calling cancel() with a message.
        cases = [
            ((), ()),
            ((None,), ()),
            (('my message',), ('my message',)),
            # Non-string values should roundtrip.
            ((5,), (5,)),
        ]
        for cancel_args, expected_args in cases:
            with self.subTest(cancel_args=cancel_args):
                loop = asyncio.new_event_loop()
                self.set_event_loop(loop)
                async def sleep():
                    await asyncio.sleep(10)
                async def coro():
                    task = self.new_task(loop, sleep())
                    await asyncio.sleep(0)
                    task.cancel(*cancel_args)
                    done, pending = await asyncio.wait([task])
                    # exception() also raises for a cancelled task.
                    task.exception()
                task = self.new_task(loop, coro())
                with self.assertRaises(asyncio.CancelledError) as cm:
                    loop.run_until_complete(task)
                exc = cm.exception
                self.assertEqual(exc.args, ())
                actual = get_innermost_context(exc)
                self.assertEqual(actual,
                                 (asyncio.CancelledError, expected_args, 2))
    def test_cancel_with_message_before_starting_task(self):
        """cancel(msg) before the task has had a chance to run still
        propagates the message into the chained CancelledError."""
        loop = asyncio.new_event_loop()
        self.set_event_loop(loop)
        async def sleep():
            await asyncio.sleep(10)
        async def coro():
            task = self.new_task(loop, sleep())
            # We deliberately leave out the sleep here.
            task.cancel('my message')
            done, pending = await asyncio.wait([task])
            task.exception()
        task = self.new_task(loop, coro())
        with self.assertRaises(asyncio.CancelledError) as cm:
            loop.run_until_complete(task)
        exc = cm.exception
        self.assertEqual(exc.args, ())
        actual = get_innermost_context(exc)
        self.assertEqual(actual,
                         (asyncio.CancelledError, ('my message',), 2))
    def test_cancel_yield(self):
        """Cancelling a legacy generator-based coroutine task works the
        same as for native coroutines."""
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def task():
                yield
                yield
                return 12
        t = self.new_task(self.loop, task())
        test_utils.run_briefly(self.loop)  # start coro
        t.cancel()
        self.assertRaises(
            asyncio.CancelledError, self.loop.run_until_complete, t)
        self.assertTrue(t.done())
        self.assertTrue(t.cancelled())
        # Re-cancelling a finished task returns False.
        self.assertFalse(t.cancel())
    def test_cancel_inner_future(self):
        """Cancelling the future a task awaits cancels the task too."""
        f = self.new_future(self.loop)
        async def task():
            await f
            return 12
        t = self.new_task(self.loop, task())
        test_utils.run_briefly(self.loop)  # start task
        f.cancel()
        with self.assertRaises(asyncio.CancelledError):
            self.loop.run_until_complete(t)
        self.assertTrue(f.cancelled())
        self.assertTrue(t.cancelled())
    def test_cancel_both_task_and_inner_future(self):
        """Cancelling both the task and its awaited future is harmless:
        both end up cancelled."""
        f = self.new_future(self.loop)
        async def task():
            await f
            return 12
        t = self.new_task(self.loop, task())
        test_utils.run_briefly(self.loop)
        f.cancel()
        t.cancel()
        with self.assertRaises(asyncio.CancelledError):
            self.loop.run_until_complete(t)
        self.assertTrue(t.done())
        self.assertTrue(f.cancelled())
        self.assertTrue(t.cancelled())
    def test_cancel_task_catching(self):
        """A task that catches CancelledError and returns normally is not
        marked cancelled, even though its inner future was."""
        fut1 = self.new_future(self.loop)
        fut2 = self.new_future(self.loop)
        async def task():
            await fut1
            try:
                await fut2
            except asyncio.CancelledError:
                # Swallow the cancellation and finish normally.
                return 42
        t = self.new_task(self.loop, task())
        test_utils.run_briefly(self.loop)
        self.assertIs(t._fut_waiter, fut1)  # White-box test.
        fut1.set_result(None)
        test_utils.run_briefly(self.loop)
        self.assertIs(t._fut_waiter, fut2)  # White-box test.
        t.cancel()
        self.assertTrue(fut2.cancelled())
        res = self.loop.run_until_complete(t)
        self.assertEqual(res, 42)
        self.assertFalse(t.cancelled())
    def test_cancel_task_ignoring(self):
        """A task that swallows CancelledError may keep running and await
        further futures, which are then NOT cancelled."""
        fut1 = self.new_future(self.loop)
        fut2 = self.new_future(self.loop)
        fut3 = self.new_future(self.loop)
        async def task():
            await fut1
            try:
                await fut2
            except asyncio.CancelledError:
                # Ignore the cancellation and continue.
                pass
            res = await fut3
            return res
        t = self.new_task(self.loop, task())
        test_utils.run_briefly(self.loop)
        self.assertIs(t._fut_waiter, fut1)  # White-box test.
        fut1.set_result(None)
        test_utils.run_briefly(self.loop)
        self.assertIs(t._fut_waiter, fut2)  # White-box test.
        t.cancel()
        self.assertTrue(fut2.cancelled())
        test_utils.run_briefly(self.loop)
        self.assertIs(t._fut_waiter, fut3)  # White-box test.
        fut3.set_result(42)
        res = self.loop.run_until_complete(t)
        self.assertEqual(res, 42)
        # fut3 completed normally; only fut2 was cancelled.
        self.assertFalse(fut3.cancelled())
        self.assertFalse(t.cancelled())
    def test_cancel_current_task(self):
        """A task cancelling itself sets _must_cancel; the next await is
        interrupted immediately and the task ends up cancelled."""
        loop = asyncio.new_event_loop()
        self.set_event_loop(loop)
        async def task():
            t.cancel()
            self.assertTrue(t._must_cancel)  # White-box test.
            # The sleep should be cancelled immediately.
            await asyncio.sleep(100)
            return 12
        t = self.new_task(loop, task())
        self.assertFalse(t.cancelled())
        self.assertRaises(
            asyncio.CancelledError, loop.run_until_complete, t)
        self.assertTrue(t.done())
        self.assertTrue(t.cancelled())
        # The pending-cancel flag is cleared once delivered.
        self.assertFalse(t._must_cancel)  # White-box test.
        self.assertFalse(t.cancel())
    def test_cancel_at_end(self):
        """Self-cancellation right before returning still cancels the task:
        the return value is discarded in favor of CancelledError."""
        loop = asyncio.new_event_loop()
        self.set_event_loop(loop)
        async def task():
            t.cancel()
            self.assertTrue(t._must_cancel)  # White-box test.
            return 12
        t = self.new_task(loop, task())
        self.assertFalse(t.cancelled())
        self.assertRaises(
            asyncio.CancelledError, loop.run_until_complete, t)
        self.assertTrue(t.done())
        self.assertTrue(t.cancelled())
        self.assertFalse(t._must_cancel)  # White-box test.
        self.assertFalse(t.cancel())
    def test_cancel_awaited_task(self):
        """Cancelling a running (not blocked) task must also cancel the
        next future/task it blocks on. See bpo-34872."""
        # This tests for a relatively rare condition when
        # a task cancellation is requested for a task which is not
        # currently blocked, such as a task cancelling itself.
        # In this situation we must ensure that whatever next future
        # or task the cancelled task blocks on is cancelled correctly
        # as well.  See also bpo-34872.
        loop = asyncio.new_event_loop()
        self.addCleanup(lambda: loop.close())
        task = nested_task = None
        fut = self.new_future(loop)
        async def nested():
            await fut
        async def coro():
            nonlocal nested_task
            # Create a sub-task and wait for it to run.
            nested_task = self.new_task(loop, nested())
            await asyncio.sleep(0)
            # Request the current task to be cancelled.
            task.cancel()
            # Block on the nested task, which should be immediately
            # cancelled.
            await nested_task
        task = self.new_task(loop, coro())
        with self.assertRaises(asyncio.CancelledError):
            loop.run_until_complete(task)
        # Cancellation propagated all the way down to the inner future.
        self.assertTrue(task.cancelled())
        self.assertTrue(nested_task.cancelled())
        self.assertTrue(fut.cancelled())
def assert_text_contains(self, text, substr):
if substr not in text:
raise RuntimeError(f'text {substr!r} not found in:\n>>>{text}<<<')
    def test_cancel_traceback_for_future_result(self):
        """The traceback of a CancelledError from awaiting a cancelled task
        includes both the interrupted line and the intermediate await."""
        # When calling Future.result() on a cancelled task, check that the
        # line of code that was interrupted is included in the traceback.
        loop = asyncio.new_event_loop()
        self.set_event_loop(loop)
        async def nested():
            # This will get cancelled immediately.
            await asyncio.sleep(10)
        async def coro():
            task = self.new_task(loop, nested())
            await asyncio.sleep(0)
            task.cancel()
            await task  # search target
        task = self.new_task(loop, coro())
        try:
            loop.run_until_complete(task)
        except asyncio.CancelledError:
            tb = traceback.format_exc()
            self.assert_text_contains(tb, "await asyncio.sleep(10)")
            # The intermediate await should also be included.
            self.assert_text_contains(tb, "await task  # search target")
        else:
            self.fail('CancelledError did not occur')
    def test_cancel_traceback_for_future_exception(self):
        """Like test_cancel_traceback_for_future_result, but the traceback
        is obtained through Future.exception()."""
        # When calling Future.exception() on a cancelled task, check that the
        # line of code that was interrupted is included in the traceback.
        loop = asyncio.new_event_loop()
        self.set_event_loop(loop)
        async def nested():
            # This will get cancelled immediately.
            await asyncio.sleep(10)
        async def coro():
            task = self.new_task(loop, nested())
            await asyncio.sleep(0)
            task.cancel()
            done, pending = await asyncio.wait([task])
            task.exception()  # search target
        task = self.new_task(loop, coro())
        try:
            loop.run_until_complete(task)
        except asyncio.CancelledError:
            tb = traceback.format_exc()
            self.assert_text_contains(tb, "await asyncio.sleep(10)")
            # The intermediate await should also be included.
            self.assert_text_contains(tb,
                "task.exception()  # search target")
        else:
            self.fail('CancelledError did not occur')
    def test_stop_while_run_in_complete(self):
        """loop.stop() during run_until_complete() raises RuntimeError and
        leaves the task unfinished."""
        def gen():
            # Mock clock: three 0.1s sleeps are scheduled before the stop.
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0.1
            self.assertAlmostEqual(0.2, when)
            when = yield 0.1
            self.assertAlmostEqual(0.3, when)
            yield 0.1
        loop = self.new_test_loop(gen)
        x = 0
        async def task():
            nonlocal x
            while x < 10:
                await asyncio.sleep(0.1)
                x += 1
                if x == 2:
                    loop.stop()
        t = self.new_task(loop, task())
        with self.assertRaises(RuntimeError) as cm:
            loop.run_until_complete(t)
        self.assertEqual(str(cm.exception),
                         'Event loop stopped before Future completed.')
        self.assertFalse(t.done())
        self.assertEqual(x, 2)
        self.assertAlmostEqual(0.3, loop.time())
        # Clean up the still-pending task.
        t.cancel()
        self.assertRaises(asyncio.CancelledError, loop.run_until_complete, t)
    def test_log_traceback(self):
        """Task._log_traceback may only ever be set to False by user code."""
        async def coro():
            pass
        task = self.new_task(self.loop, coro())
        with self.assertRaisesRegex(ValueError, 'can only be set to False'):
            task._log_traceback = True
        self.loop.run_until_complete(task)
    def test_wait_for_timeout_less_then_0_or_0_future_done(self):
        """wait_for(fut, 0) on an already-done future returns its result
        without raising TimeoutError."""
        def gen():
            when = yield
            self.assertAlmostEqual(0, when)
        loop = self.new_test_loop(gen)
        fut = self.new_future(loop)
        fut.set_result('done')
        ret = loop.run_until_complete(asyncio.wait_for(fut, 0))
        self.assertEqual(ret, 'done')
        self.assertTrue(fut.done())
        # No virtual time should have elapsed.
        self.assertAlmostEqual(0, loop.time())
    def test_wait_for_timeout_less_then_0_or_0_coroutine_do_not_started(self):
        """wait_for(coro, 0) times out before the coroutine even starts."""
        def gen():
            when = yield
            self.assertAlmostEqual(0, when)
        loop = self.new_test_loop(gen)
        foo_started = False
        async def foo():
            nonlocal foo_started
            foo_started = True
        with self.assertRaises(asyncio.TimeoutError):
            loop.run_until_complete(asyncio.wait_for(foo(), 0))
        self.assertAlmostEqual(0, loop.time())
        # The coroutine body never ran.
        self.assertEqual(foo_started, False)
    def test_wait_for_timeout_less_then_0_or_0(self):
        """wait_for with timeout 0 or negative cancels an already-running
        task and raises TimeoutError immediately."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.2, when)
            when = yield 0
            self.assertAlmostEqual(0, when)
        for timeout in [0, -1]:
            with self.subTest(timeout=timeout):
                loop = self.new_test_loop(gen)
                foo_running = None
                async def foo():
                    nonlocal foo_running
                    foo_running = True
                    try:
                        await asyncio.sleep(0.2)
                    finally:
                        foo_running = False
                    return 'done'
                fut = self.new_task(loop, foo())
                with self.assertRaises(asyncio.TimeoutError):
                    loop.run_until_complete(asyncio.wait_for(fut, timeout))
                self.assertTrue(fut.done())
                # it should have been cancelled due to the timeout
                self.assertTrue(fut.cancelled())
                self.assertAlmostEqual(0, loop.time())
                self.assertEqual(foo_running, False)
    def test_wait_for(self):
        """wait_for with a positive timeout cancels the task once the
        timeout elapses and raises TimeoutError."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.2, when)
            when = yield 0
            self.assertAlmostEqual(0.1, when)
            when = yield 0.1
        loop = self.new_test_loop(gen)
        foo_running = None
        async def foo():
            nonlocal foo_running
            foo_running = True
            try:
                await asyncio.sleep(0.2)
            finally:
                foo_running = False
            return 'done'
        fut = self.new_task(loop, foo())
        with self.assertRaises(asyncio.TimeoutError):
            loop.run_until_complete(asyncio.wait_for(fut, 0.1))
        self.assertTrue(fut.done())
        # it should have been cancelled due to the timeout
        self.assertTrue(fut.cancelled())
        self.assertAlmostEqual(0.1, loop.time())
        self.assertEqual(foo_running, False)
    def test_wait_for_blocking(self):
        """wait_for(coro, timeout=None) simply awaits the coroutine."""
        loop = self.new_test_loop()
        async def coro():
            return 'done'
        res = loop.run_until_complete(asyncio.wait_for(coro(), timeout=None))
        self.assertEqual(res, 'done')
    def test_wait_for_race_condition(self):
        """A future completing just before wait_for's timeout yields the
        result, not TimeoutError."""
        def gen():
            yield 0.1
            yield 0.1
            yield 0.1
        loop = self.new_test_loop(gen)
        fut = self.new_future(loop)
        task = asyncio.wait_for(fut, timeout=0.2)
        # Complete the future before the 0.2s timeout fires.
        loop.call_later(0.1, fut.set_result, "ok")
        res = loop.run_until_complete(task)
        self.assertEqual(res, "ok")
    def test_wait_for_cancellation_race_condition(self):
        """Cancelling the wait_for task at the same moment the inner future
        completes still returns the future's result."""
        def gen():
            yield 0.1
            yield 0.1
            yield 0.1
            yield 0.1
        loop = self.new_test_loop(gen)
        fut = self.new_future(loop)
        loop.call_later(0.1, fut.set_result, "ok")
        task = loop.create_task(asyncio.wait_for(fut, timeout=1))
        # Cancel at the same virtual time the future completes.
        loop.call_later(0.1, task.cancel)
        res = loop.run_until_complete(task)
        self.assertEqual(res, "ok")
    def test_wait_for_waits_for_task_cancellation(self):
        """On timeout, wait_for waits for the inner task's cancellation to
        finish (even if it does more awaiting) before raising TimeoutError."""
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)
        task_done = False
        async def foo():
            async def inner():
                nonlocal task_done
                try:
                    await asyncio.sleep(0.2)
                except asyncio.CancelledError:
                    # Delay the cancellation to prove wait_for waits for it.
                    await asyncio.sleep(_EPSILON)
                    raise
                finally:
                    task_done = True
            inner_task = self.new_task(loop, inner())
            await asyncio.wait_for(inner_task, timeout=_EPSILON)
        with self.assertRaises(asyncio.TimeoutError) as cm:
            loop.run_until_complete(foo())
        self.assertTrue(task_done)
        # The CancelledError is chained as __context__ of the TimeoutError.
        chained = cm.exception.__context__
        self.assertEqual(type(chained), asyncio.CancelledError)
    def test_wait_for_waits_for_task_cancellation_w_timeout_0(self):
        """Same as test_wait_for_waits_for_task_cancellation, but with a
        zero timeout on an already-started task."""
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)
        task_done = False
        async def foo():
            async def inner():
                nonlocal task_done
                try:
                    await asyncio.sleep(10)
                except asyncio.CancelledError:
                    # Delay the cancellation to prove wait_for waits for it.
                    await asyncio.sleep(_EPSILON)
                    raise
                finally:
                    task_done = True
            inner_task = self.new_task(loop, inner())
            # Let inner() start before applying the zero timeout.
            await asyncio.sleep(_EPSILON)
            await asyncio.wait_for(inner_task, timeout=0)
        with self.assertRaises(asyncio.TimeoutError) as cm:
            loop.run_until_complete(foo())
        self.assertTrue(task_done)
        chained = cm.exception.__context__
        self.assertEqual(type(chained), asyncio.CancelledError)
    def test_wait_for_reraises_exception_during_cancellation(self):
        """An exception raised while the inner task is being cancelled is
        propagated instead of TimeoutError."""
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)
        class FooException(Exception):
            pass
        async def foo():
            async def inner():
                try:
                    await asyncio.sleep(0.2)
                finally:
                    # Raised during cancellation cleanup.
                    raise FooException
            inner_task = self.new_task(loop, inner())
            await asyncio.wait_for(inner_task, timeout=_EPSILON)
        with self.assertRaises(FooException):
            loop.run_until_complete(foo())
    def test_wait_for_raises_timeout_error_if_returned_during_cancellation(self):
        """If the inner task swallows cancellation and returns a value,
        wait_for still raises TimeoutError (the value is discarded)."""
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)
        async def foo():
            async def inner():
                try:
                    await asyncio.sleep(0.2)
                except asyncio.CancelledError:
                    # Ignore cancellation and return a value anyway.
                    return 42
            inner_task = self.new_task(loop, inner())
            await asyncio.wait_for(inner_task, timeout=_EPSILON)
        with self.assertRaises(asyncio.TimeoutError):
            loop.run_until_complete(foo())
    def test_wait_for_self_cancellation(self):
        """wait_for itself is cancellable even while the inner task keeps
        deferring its own cancellation; the inner task survives."""
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)
        async def foo():
            async def inner():
                # Repeatedly absorb cancellation attempts.
                try:
                    await asyncio.sleep(0.3)
                except asyncio.CancelledError:
                    try:
                        await asyncio.sleep(0.3)
                    except asyncio.CancelledError:
                        await asyncio.sleep(0.3)
                return 42
            inner_task = self.new_task(loop, inner())
            wait = asyncio.wait_for(inner_task, timeout=0.1)
            # Test that wait_for itself is properly cancellable
            # even when the initial task holds up the initial cancellation.
            task = self.new_task(loop, wait)
            await asyncio.sleep(0.2)
            task.cancel()
            with self.assertRaises(asyncio.CancelledError):
                await task
            # The inner task outlives the cancelled wait_for.
            self.assertEqual(await inner_task, 42)
        loop.run_until_complete(foo())
    def test_wait(self):
        """asyncio.wait returns all tasks as done once the slowest finishes;
        re-waiting on done tasks is instantaneous."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0
            self.assertAlmostEqual(0.15, when)
            yield 0.15
        loop = self.new_test_loop(gen)
        a = self.new_task(loop, asyncio.sleep(0.1))
        b = self.new_task(loop, asyncio.sleep(0.15))
        async def foo():
            done, pending = await asyncio.wait([b, a])
            self.assertEqual(done, set([a, b]))
            self.assertEqual(pending, set())
            return 42
        res = loop.run_until_complete(self.new_task(loop, foo()))
        self.assertEqual(res, 42)
        self.assertAlmostEqual(0.15, loop.time())
        # Doing it again should take no time and exercise a different path.
        res = loop.run_until_complete(self.new_task(loop, foo()))
        self.assertAlmostEqual(0.15, loop.time())
        self.assertEqual(res, 42)
    def test_wait_duplicate_coroutines(self):
        """Passing the same coroutine object twice to wait() only runs it
        once; duplicates are de-duplicated."""
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def coro(s):
                return s
        c = coro('test')
        task = self.new_task(
            self.loop,
            asyncio.wait([c, c, coro('spam')]))
        # Passing bare coroutines to wait() is deprecated.
        with self.assertWarns(DeprecationWarning):
            done, pending = self.loop.run_until_complete(task)
        self.assertFalse(pending)
        self.assertEqual(set(f.result() for f in done), {'test', 'spam'})
    def test_wait_errors(self):
        """wait() rejects an empty set and an invalid return_when value."""
        self.assertRaises(
            ValueError, self.loop.run_until_complete,
            asyncio.wait(set()))
        # -1 is an invalid return_when value
        sleep_coro = asyncio.sleep(10.0)
        wait_coro = asyncio.wait([sleep_coro], return_when=-1)
        self.assertRaises(ValueError,
                          self.loop.run_until_complete, wait_coro)
        # Avoid a "coroutine was never awaited" warning.
        sleep_coro.close()
    def test_wait_first_completed(self):
        """wait(..., FIRST_COMPLETED) returns as soon as one task finishes,
        leaving the slower one pending."""
        def gen():
            when = yield
            self.assertAlmostEqual(10.0, when)
            when = yield 0
            self.assertAlmostEqual(0.1, when)
            yield 0.1
        loop = self.new_test_loop(gen)
        a = self.new_task(loop, asyncio.sleep(10.0))
        b = self.new_task(loop, asyncio.sleep(0.1))
        task = self.new_task(
            loop,
            asyncio.wait([b, a], return_when=asyncio.FIRST_COMPLETED))
        done, pending = loop.run_until_complete(task)
        self.assertEqual({b}, done)
        self.assertEqual({a}, pending)
        self.assertFalse(a.done())
        self.assertTrue(b.done())
        self.assertIsNone(b.result())
        self.assertAlmostEqual(0.1, loop.time())
        # move forward to close generator
        loop.advance_time(10)
        loop.run_until_complete(asyncio.wait([a, b]))
    def test_wait_really_done(self):
        """Tasks that completed but whose callbacks haven't run yet are
        still reported as done by wait(FIRST_COMPLETED)."""
        # there is possibility that some tasks in the pending list
        # became done but their callbacks haven't all been called yet
        async def coro1():
            await asyncio.sleep(0)
        async def coro2():
            await asyncio.sleep(0)
            await asyncio.sleep(0)
        a = self.new_task(self.loop, coro1())
        b = self.new_task(self.loop, coro2())
        task = self.new_task(
            self.loop,
            asyncio.wait([b, a], return_when=asyncio.FIRST_COMPLETED))
        done, pending = self.loop.run_until_complete(task)
        self.assertEqual({a, b}, done)
        self.assertTrue(a.done())
        self.assertIsNone(a.result())
        self.assertTrue(b.done())
        self.assertIsNone(b.result())
    def test_wait_first_exception(self):
        """wait(..., FIRST_EXCEPTION) returns immediately when a task
        raises, before any timer fires."""
        def gen():
            when = yield
            self.assertAlmostEqual(10.0, when)
            yield 0
        loop = self.new_test_loop(gen)
        a = self.new_task(loop, asyncio.sleep(10.0))
        async def exc():
            raise ZeroDivisionError('err')
        b = self.new_task(loop, exc())
        task = self.new_task(
            loop,
            asyncio.wait([b, a], return_when=asyncio.FIRST_EXCEPTION))
        done, pending = loop.run_until_complete(task)
        self.assertEqual({b}, done)
        self.assertEqual({a}, pending)
        # Returned instantly: no virtual time elapsed.
        self.assertAlmostEqual(0, loop.time())
        # move forward to close generator / finish the pending task
        loop.advance_time(10)
        loop.run_until_complete(asyncio.wait([a, b]))
    def test_wait_first_exception_in_wait(self):
        """FIRST_EXCEPTION also triggers when the exception happens while
        wait() is already blocked."""
        def gen():
            when = yield
            self.assertAlmostEqual(10.0, when)
            when = yield 0
            self.assertAlmostEqual(0.01, when)
            yield 0.01
        loop = self.new_test_loop(gen)
        a = self.new_task(loop, asyncio.sleep(10.0))
        async def exc():
            # Raise only after wait() has started waiting.
            await asyncio.sleep(0.01)
            raise ZeroDivisionError('err')
        b = self.new_task(loop, exc())
        task = asyncio.wait([b, a], return_when=asyncio.FIRST_EXCEPTION)
        done, pending = loop.run_until_complete(task)
        self.assertEqual({b}, done)
        self.assertEqual({a}, pending)
        self.assertAlmostEqual(0.01, loop.time())
        # move forward to close generator / finish the pending task
        loop.advance_time(10)
        loop.run_until_complete(asyncio.wait([a, b]))
    def test_wait_with_exception(self):
        """By default wait() does not raise; a failed task is returned in
        the done set with its exception attached."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0
            self.assertAlmostEqual(0.15, when)
            yield 0.15
        loop = self.new_test_loop(gen)
        a = self.new_task(loop, asyncio.sleep(0.1))
        async def sleeper():
            await asyncio.sleep(0.15)
            raise ZeroDivisionError('really')
        b = self.new_task(loop, sleeper())
        async def foo():
            done, pending = await asyncio.wait([b, a])
            self.assertEqual(len(done), 2)
            self.assertEqual(pending, set())
            # Exactly one of the done tasks failed.
            errors = set(f for f in done if f.exception() is not None)
            self.assertEqual(len(errors), 1)
        loop.run_until_complete(self.new_task(loop, foo()))
        self.assertAlmostEqual(0.15, loop.time())
        # A second wait over the finished tasks takes no extra time.
        loop.run_until_complete(self.new_task(loop, foo()))
        self.assertAlmostEqual(0.15, loop.time())
    def test_wait_with_timeout(self):
        """wait(timeout=...) returns at the timeout with the unfinished
        task in the pending set."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0
            self.assertAlmostEqual(0.15, when)
            when = yield 0
            self.assertAlmostEqual(0.11, when)
            yield 0.11
        loop = self.new_test_loop(gen)
        a = self.new_task(loop, asyncio.sleep(0.1))
        b = self.new_task(loop, asyncio.sleep(0.15))
        async def foo():
            done, pending = await asyncio.wait([b, a], timeout=0.11)
            self.assertEqual(done, set([a]))
            self.assertEqual(pending, set([b]))
        loop.run_until_complete(self.new_task(loop, foo()))
        self.assertAlmostEqual(0.11, loop.time())
        # move forward to finish the remaining task
        loop.advance_time(10)
        loop.run_until_complete(asyncio.wait([a, b]))
    def test_wait_concurrent_complete(self):
        """A task completing exactly at the wait() timeout counts as done."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0
            self.assertAlmostEqual(0.15, when)
            when = yield 0
            self.assertAlmostEqual(0.1, when)
            yield 0.1
        loop = self.new_test_loop(gen)
        a = self.new_task(loop, asyncio.sleep(0.1))
        b = self.new_task(loop, asyncio.sleep(0.15))
        # Timeout coincides with a's completion time.
        done, pending = loop.run_until_complete(
            asyncio.wait([b, a], timeout=0.1))
        self.assertEqual(done, set([a]))
        self.assertEqual(pending, set([b]))
        self.assertAlmostEqual(0.1, loop.time())
        # move forward to finish the remaining task
        loop.advance_time(10)
        loop.run_until_complete(asyncio.wait([a, b]))
    def test_wait_with_iterator_of_tasks(self):
        """wait() accepts any iterable of tasks, not just a set or list."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0
            self.assertAlmostEqual(0.15, when)
            yield 0.15
        loop = self.new_test_loop(gen)
        a = self.new_task(loop, asyncio.sleep(0.1))
        b = self.new_task(loop, asyncio.sleep(0.15))
        async def foo():
            # Pass a one-shot iterator instead of a collection.
            done, pending = await asyncio.wait(iter([b, a]))
            self.assertEqual(done, set([a, b]))
            self.assertEqual(pending, set())
            return 42
        res = loop.run_until_complete(self.new_task(loop, foo()))
        self.assertEqual(res, 42)
        self.assertAlmostEqual(0.15, loop.time())
    def test_as_completed(self):
        """as_completed yields results in completion order, not submission
        order; the slowest coroutine comes last."""
        def gen():
            yield 0
            yield 0
            yield 0.01
            yield 0
        loop = self.new_test_loop(gen)
        # avoid "slow callback" warnings while advancing virtual time
        loop.slow_callback_duration = 1.0
        completed = set()
        time_shifted = False
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def sleeper(dt, x):
                nonlocal time_shifted
                yield from asyncio.sleep(dt)
                completed.add(x)
                # Once 'a' and 'b' have finished, jump the clock so 'c'
                # can complete too.
                if not time_shifted and 'a' in completed and 'b' in completed:
                    time_shifted = True
                    loop.advance_time(0.14)
                return x
        a = sleeper(0.01, 'a')
        b = sleeper(0.01, 'b')
        c = sleeper(0.15, 'c')
        async def foo():
            values = []
            for f in asyncio.as_completed([b, c, a]):
                values.append(await f)
            return values
        res = loop.run_until_complete(self.new_task(loop, foo()))
        self.assertAlmostEqual(0.15, loop.time())
        # 'a' and 'b' finish first (in either order), 'c' last.
        self.assertTrue('a' in res[:2])
        self.assertTrue('b' in res[:2])
        self.assertEqual(res[2], 'c')
        res = loop.run_until_complete(self.new_task(loop, foo()))
        self.assertAlmostEqual(0.15, loop.time())
    def test_as_completed_with_timeout(self):
        """as_completed(timeout=...) yields a TimeoutError-raising future
        for work still unfinished at the deadline."""
        def gen():
            yield
            yield 0
            yield 0
            yield 0.1
        loop = self.new_test_loop(gen)
        a = loop.create_task(asyncio.sleep(0.1, 'a'))
        b = loop.create_task(asyncio.sleep(0.15, 'b'))
        async def foo():
            values = []
            for f in asyncio.as_completed([a, b], timeout=0.12):
                if values:
                    # Push virtual time past the 0.12s deadline.
                    loop.advance_time(0.02)
                try:
                    v = await f
                    values.append((1, v))
                except asyncio.TimeoutError as exc:
                    values.append((2, exc))
            return values
        res = loop.run_until_complete(self.new_task(loop, foo()))
        self.assertEqual(len(res), 2, res)
        self.assertEqual(res[0], (1, 'a'))
        self.assertEqual(res[1][0], 2)
        self.assertIsInstance(res[1][1], asyncio.TimeoutError)
        self.assertAlmostEqual(0.12, loop.time())
        # move forward to close generator / finish task b
        loop.advance_time(10)
        loop.run_until_complete(asyncio.wait([a, b]))
    def test_as_completed_with_unused_timeout(self):
        """A timeout that never fires does not disturb normal results."""
        def gen():
            yield
            yield 0
            yield 0.01
        loop = self.new_test_loop(gen)
        a = asyncio.sleep(0.01, 'a')
        async def foo():
            for f in asyncio.as_completed([a], timeout=1):
                v = await f
                self.assertEqual(v, 'a')
        loop.run_until_complete(self.new_task(loop, foo()))
    def test_as_completed_reverse_wait(self):
        """The futures from as_completed can be awaited in any order; each
        delivers the next-completed result regardless of which is awaited."""
        def gen():
            yield 0
            yield 0.05
            yield 0
        loop = self.new_test_loop(gen)
        a = asyncio.sleep(0.05, 'a')
        b = asyncio.sleep(0.10, 'b')
        fs = {a, b}
        async def test():
            futs = list(asyncio.as_completed(fs))
            self.assertEqual(len(futs), 2)
            # Awaiting the second future first still yields the earliest
            # completion ('a').
            x = await futs[1]
            self.assertEqual(x, 'a')
            self.assertAlmostEqual(0.05, loop.time())
            loop.advance_time(0.05)
            y = await futs[0]
            self.assertEqual(y, 'b')
            self.assertAlmostEqual(0.10, loop.time())
        loop.run_until_complete(test())
    def test_as_completed_concurrent(self):
        """Two coroutines completing at the same instant are both delivered
        when the as_completed futures are awaited concurrently via wait()."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.05, when)
            when = yield 0
            self.assertAlmostEqual(0.05, when)
            yield 0.05
        a = asyncio.sleep(0.05, 'a')
        b = asyncio.sleep(0.05, 'b')
        fs = {a, b}
        async def test():
            futs = list(asyncio.as_completed(fs))
            self.assertEqual(len(futs), 2)
            waiter = asyncio.wait(futs)
            # Passing futures in `waiter` without binding them to the loop
            # emits a DeprecationWarning.
            with self.assertWarns(DeprecationWarning) as cm:
                done, pending = await waiter
            self.assertEqual(cm.warnings[0].filename, __file__)
            self.assertEqual(set(f.result() for f in done), {'a', 'b'})
        loop = self.new_test_loop(gen)
        loop.run_until_complete(test())
    def test_as_completed_duplicate_coroutines(self):
        """Duplicate coroutine objects passed to as_completed are run only
        once; the result set is de-duplicated."""
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def coro(s):
                return s
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def runner():
                result = []
                c = coro('ham')
                # 'c' appears twice but yields only one result.
                for f in asyncio.as_completed([c, c, coro('spam')]):
                    result.append((yield from f))
                return result
        fut = self.new_task(self.loop, runner())
        self.loop.run_until_complete(fut)
        result = fut.result()
        self.assertEqual(set(result), {'ham', 'spam'})
        self.assertEqual(len(result), 2)
    def test_as_completed_coroutine_without_loop(self):
        """Iterating as_completed outside a running loop (and with no set
        event loop) warns and then raises RuntimeError."""
        async def coro():
            return 42
        a = coro()
        self.addCleanup(a.close)
        futs = asyncio.as_completed([a])
        with self.assertWarns(DeprecationWarning) as cm:
            with self.assertRaisesRegex(RuntimeError, 'There is no current event loop'):
                list(futs)
        self.assertEqual(cm.warnings[0].filename, __file__)
    def test_as_completed_coroutine_use_running_loop(self):
        """Inside a running loop, as_completed picks up that loop without
        warnings."""
        loop = self.new_test_loop()
        async def coro():
            return 42
        async def test():
            futs = list(asyncio.as_completed([coro()]))
            self.assertEqual(len(futs), 1)
            self.assertEqual(await futs[0], 42)
        loop.run_until_complete(test())
    def test_as_completed_coroutine_use_global_loop(self):
        """Outside a running loop, as_completed falls back to the global
        event loop but emits a DeprecationWarning."""
        async def coro():
            return 42
        loop = self.new_test_loop()
        asyncio.set_event_loop(loop)
        self.addCleanup(asyncio.set_event_loop, None)
        futs = asyncio.as_completed([coro()])
        with self.assertWarns(DeprecationWarning) as cm:
            futs = list(futs)
        self.assertEqual(cm.warnings[0].filename, __file__)
        self.assertEqual(len(futs), 1)
        self.assertEqual(loop.run_until_complete(futs[0]), 42)
    def test_sleep(self):
        """sleep(dt, result) delays for dt and returns the given result;
        two half-sleeps accumulate to the full duration."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.05, when)
            when = yield 0.05
            self.assertAlmostEqual(0.1, when)
            yield 0.05
        loop = self.new_test_loop(gen)
        async def sleeper(dt, arg):
            await asyncio.sleep(dt/2)
            res = await asyncio.sleep(dt/2, arg)
            return res
        t = self.new_task(loop, sleeper(0.1, 'yeah'))
        loop.run_until_complete(t)
        self.assertTrue(t.done())
        self.assertEqual(t.result(), 'yeah')
        self.assertAlmostEqual(0.1, loop.time())
    def test_sleep_cancel(self):
        """Cancelling a sleeping task also cancels the underlying timer
        handle registered with call_later."""
        def gen():
            when = yield
            self.assertAlmostEqual(10.0, when)
            yield 0
        loop = self.new_test_loop(gen)
        t = self.new_task(loop, asyncio.sleep(10.0, 'yeah'))
        handle = None
        orig_call_later = loop.call_later
        # Wrap call_later to capture the TimerHandle sleep() creates.
        def call_later(delay, callback, *args):
            nonlocal handle
            handle = orig_call_later(delay, callback, *args)
            return handle
        loop.call_later = call_later
        test_utils.run_briefly(loop)
        self.assertFalse(handle._cancelled)
        t.cancel()
        test_utils.run_briefly(loop)
        self.assertTrue(handle._cancelled)
    def test_task_cancel_sleeping_task(self):
        """A scheduled cancel interrupts a long sleep; awaiting the
        cancelled task raises CancelledError at the cancel time."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0
            self.assertAlmostEqual(5000, when)
            yield 0.1
        loop = self.new_test_loop(gen)
        async def sleep(dt):
            await asyncio.sleep(dt)
        async def doit():
            sleeper = self.new_task(loop, sleep(5000))
            # Cancel the 5000s sleep after 0.1s.
            loop.call_later(0.1, sleeper.cancel)
            try:
                await sleeper
            except asyncio.CancelledError:
                return 'cancelled'
            else:
                return 'slept in'
        doer = doit()
        self.assertEqual(loop.run_until_complete(doer), 'cancelled')
        self.assertAlmostEqual(0.1, loop.time())
    def test_task_cancel_waiter_future(self):
        """Cancelling a task awaiting a future cancels the future and
        clears the task's _fut_waiter."""
        fut = self.new_future(self.loop)
        async def coro():
            await fut
        task = self.new_task(self.loop, coro())
        test_utils.run_briefly(self.loop)
        self.assertIs(task._fut_waiter, fut)  # White-box check.
        task.cancel()
        test_utils.run_briefly(self.loop)
        self.assertRaises(
            asyncio.CancelledError, self.loop.run_until_complete, task)
        self.assertIsNone(task._fut_waiter)
        self.assertTrue(fut.cancelled())
    def test_task_set_methods(self):
        """Task forbids the Future mutators set_result()/set_exception();
        its outcome comes only from the wrapped coroutine."""
        async def notmuch():
            return 'ko'
        gen = notmuch()
        task = self.new_task(self.loop, gen)
        with self.assertRaisesRegex(RuntimeError, 'not support set_result'):
            task.set_result('ok')
        with self.assertRaisesRegex(RuntimeError, 'not support set_exception'):
            task.set_exception(ValueError())
        # The coroutine's own return value is the result.
        self.assertEqual(
            self.loop.run_until_complete(task),
            'ko')
    def test_step_result(self):
        """A legacy coroutine yielding a non-Future, non-None value makes
        the task raise RuntimeError."""
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def notmuch():
                yield None
                yield 1
                return 'ko'
        self.assertRaises(
            RuntimeError, self.loop.run_until_complete, notmuch())
    def test_step_result_future(self):
        """Awaiting a Future subclass registers a done callback on it and
        resumes the task with the future's result."""
        class Fut(asyncio.Future):
            # Records whether the task attached a done callback.
            def __init__(self, *args, **kwds):
                self.cb_added = False
                super().__init__(*args, **kwds)
            def add_done_callback(self, *args, **kwargs):
                self.cb_added = True
                super().add_done_callback(*args, **kwargs)
        fut = Fut(loop=self.loop)
        result = None
        async def wait_for_future():
            nonlocal result
            result = await fut
        t = self.new_task(self.loop, wait_for_future())
        test_utils.run_briefly(self.loop)
        self.assertTrue(fut.cb_added)
        res = object()
        fut.set_result(res)
        test_utils.run_briefly(self.loop)
        # The awaited value is exactly the object set on the future.
        self.assertIs(res, result)
        self.assertTrue(t.done())
        self.assertIsNone(t.result())
    def test_baseexception_during_cancel(self):
        """A BaseException (SystemExit) raised while handling cancellation
        becomes the task's exception; the task is done but NOT cancelled."""
        def gen():
            when = yield
            self.assertAlmostEqual(10.0, when)
            yield 0
        loop = self.new_test_loop(gen)
        async def sleeper():
            await asyncio.sleep(10)
        base_exc = SystemExit()
        async def notmutch():
            try:
                await sleeper()
            except asyncio.CancelledError:
                # Replace the cancellation with a BaseException.
                raise base_exc
        task = self.new_task(loop, notmutch())
        test_utils.run_briefly(loop)
        task.cancel()
        self.assertFalse(task.done())
        # SystemExit propagates out of the event loop step.
        self.assertRaises(SystemExit, test_utils.run_briefly, loop)
        self.assertTrue(task.done())
        self.assertFalse(task.cancelled())
        self.assertIs(task.exception(), base_exc)
    def test_iscoroutinefunction(self):
        """iscoroutinefunction is False for plain functions, generators,
        and mocks, and True for @asyncio.coroutine-decorated functions."""
        def fn():
            pass
        self.assertFalse(asyncio.iscoroutinefunction(fn))
        def fn1():
            yield
        # A bare generator function is not a coroutine function.
        self.assertFalse(asyncio.iscoroutinefunction(fn1))
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def fn2():
                yield
        self.assertTrue(asyncio.iscoroutinefunction(fn2))
        self.assertFalse(asyncio.iscoroutinefunction(mock.Mock()))
    def test_yield_vs_yield_from(self):
        """Using bare `yield fut` (instead of `yield from`) inside a legacy
        coroutine raises RuntimeError and leaves the future untouched."""
        fut = self.new_future(self.loop)
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def wait_for_future():
                yield fut
        task = wait_for_future()
        with self.assertRaises(RuntimeError):
            self.loop.run_until_complete(task)
        self.assertFalse(fut.done())
    def test_yield_vs_yield_from_generator(self):
        """Bare-yielding a generator object (instead of delegating with
        `yield from`) also raises RuntimeError."""
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def coro():
                yield
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def wait_for_future():
                gen = coro()
                try:
                    yield gen
                finally:
                    # Avoid a "never awaited" warning on failure.
                    gen.close()
        task = wait_for_future()
        self.assertRaises(
            RuntimeError,
            self.loop.run_until_complete, task)
    def test_coroutine_non_gen_function(self):
        """@asyncio.coroutine turns a plain (non-generator) function into a
        coroutine function whose result is the return value."""
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def func():
                return 'test'
        self.assertTrue(asyncio.iscoroutinefunction(func))
        coro = func()
        self.assertTrue(asyncio.iscoroutine(coro))
        res = self.loop.run_until_complete(coro)
        self.assertEqual(res, 'test')
    def test_coroutine_non_gen_function_return_future(self):
        """A decorated plain function returning a Future awaits that
        future: the task's result is the future's result."""
        fut = self.new_future(self.loop)
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def func():
                return fut
        async def coro():
            # Complete the future so t1 can finish.
            fut.set_result('test')
        t1 = self.new_task(self.loop, func())
        t2 = self.new_task(self.loop, coro())
        res = self.loop.run_until_complete(t1)
        self.assertEqual(res, 'test')
        self.assertIsNone(t2.result())
    def test_current_task(self):
        """current_task() is the running task inside a coroutine and None
        outside of one."""
        self.assertIsNone(asyncio.current_task(loop=self.loop))
        async def coro(loop):
            self.assertIs(asyncio.current_task(), task)
            # Passing loop=None is equivalent to the default.
            self.assertIs(asyncio.current_task(None), task)
            self.assertIs(asyncio.current_task(), task)
        task = self.new_task(self.loop, coro(self.loop))
        self.loop.run_until_complete(task)
        self.assertIsNone(asyncio.current_task(loop=self.loop))
    def test_current_task_with_interleaving_tasks(self):
        """current_task() is always the task actually running, even while
        two tasks interleave via futures."""
        self.assertIsNone(asyncio.current_task(loop=self.loop))
        fut1 = self.new_future(self.loop)
        fut2 = self.new_future(self.loop)
        async def coro1(loop):
            self.assertTrue(asyncio.current_task() is task1)
            await fut1
            # Still task1 after being resumed.
            self.assertTrue(asyncio.current_task() is task1)
            fut2.set_result(True)
        async def coro2(loop):
            self.assertTrue(asyncio.current_task() is task2)
            fut1.set_result(True)
            await fut2
            self.assertTrue(asyncio.current_task() is task2)
        task1 = self.new_task(self.loop, coro1(self.loop))
        task2 = self.new_task(self.loop, coro2(self.loop))
        self.loop.run_until_complete(asyncio.wait((task1, task2)))
        self.assertIsNone(asyncio.current_task(loop=self.loop))
    def test_yield_future_passes_cancel(self):
        # Cancelling an outer task must propagate into the future being
        # awaited by the inner coroutine (proof ends at 101: inner saw the
        # CancelledError once, outer caught it once).
        proof = 0
        waiter = self.new_future(self.loop)

        async def inner():
            nonlocal proof
            try:
                await waiter
            except asyncio.CancelledError:
                proof += 1
                raise
            else:
                self.fail('got past sleep() in inner()')

        async def outer():
            nonlocal proof
            try:
                await inner()
            except asyncio.CancelledError:
                proof += 100  # Expect this path.
            else:
                proof += 10

        f = asyncio.ensure_future(outer(), loop=self.loop)
        test_utils.run_briefly(self.loop)
        f.cancel()
        self.loop.run_until_complete(f)
        self.assertEqual(proof, 101)
        self.assertTrue(waiter.cancelled())

    def test_yield_wait_does_not_shield_cancel(self):
        # asyncio.wait() must NOT shield its awaitables from cancellation of
        # the waiting task: outer is cancelled, but inner still completes
        # once the waiter future is resolved (proof == 1, not 101).
        proof = 0
        waiter = self.new_future(self.loop)

        async def inner():
            nonlocal proof
            await waiter
            proof += 1

        async def outer():
            nonlocal proof
            with self.assertWarns(DeprecationWarning):
                d, p = await asyncio.wait([inner()])
            proof += 100

        f = asyncio.ensure_future(outer(), loop=self.loop)
        test_utils.run_briefly(self.loop)
        f.cancel()
        self.assertRaises(
            asyncio.CancelledError, self.loop.run_until_complete, f)
        waiter.set_result(None)
        test_utils.run_briefly(self.loop)
        self.assertEqual(proof, 1)
    def test_shield_result(self):
        # shield() forwards the inner future's result.
        inner = self.new_future(self.loop)
        outer = asyncio.shield(inner)
        inner.set_result(42)
        res = self.loop.run_until_complete(outer)
        self.assertEqual(res, 42)

    def test_shield_exception(self):
        # shield() forwards the inner future's exception.
        inner = self.new_future(self.loop)
        outer = asyncio.shield(inner)
        test_utils.run_briefly(self.loop)
        exc = RuntimeError('expected')
        inner.set_exception(exc)
        test_utils.run_briefly(self.loop)
        self.assertIs(outer.exception(), exc)

    def test_shield_cancel_inner(self):
        # Cancelling the *inner* future cancels the shield wrapper too.
        inner = self.new_future(self.loop)
        outer = asyncio.shield(inner)
        test_utils.run_briefly(self.loop)
        inner.cancel()
        test_utils.run_briefly(self.loop)
        self.assertTrue(outer.cancelled())

    def test_shield_cancel_outer(self):
        # Cancelling the *outer* wrapper does not touch the inner future and
        # detaches the wrapper's done-callback from it.
        inner = self.new_future(self.loop)
        outer = asyncio.shield(inner)
        test_utils.run_briefly(self.loop)
        outer.cancel()
        test_utils.run_briefly(self.loop)
        self.assertTrue(outer.cancelled())
        self.assertEqual(0, 0 if outer._callbacks is None else len(outer._callbacks))

    def test_shield_shortcut(self):
        # shield() of an already-finished future resolves immediately.
        fut = self.new_future(self.loop)
        fut.set_result(42)
        res = self.loop.run_until_complete(asyncio.shield(fut))
        self.assertEqual(res, 42)

    def test_shield_effect(self):
        # Cancelling a task awaiting shield(inner()) raises in the task but
        # leaves the inner coroutine running to completion (proof == 1).
        proof = 0
        waiter = self.new_future(self.loop)

        async def inner():
            nonlocal proof
            await waiter
            proof += 1

        async def outer():
            nonlocal proof
            await asyncio.shield(inner())
            proof += 100

        f = asyncio.ensure_future(outer(), loop=self.loop)
        test_utils.run_briefly(self.loop)
        f.cancel()
        with self.assertRaises(asyncio.CancelledError):
            self.loop.run_until_complete(f)
        waiter.set_result(None)
        test_utils.run_briefly(self.loop)
        self.assertEqual(proof, 1)

    def test_shield_gather(self):
        # Cancelling shield(gather(...)) cancels only the wrapper; the
        # gather and its children still complete normally.
        child1 = self.new_future(self.loop)
        child2 = self.new_future(self.loop)
        parent = asyncio.gather(child1, child2)
        outer = asyncio.shield(parent)
        test_utils.run_briefly(self.loop)
        outer.cancel()
        test_utils.run_briefly(self.loop)
        self.assertTrue(outer.cancelled())
        child1.set_result(1)
        child2.set_result(2)
        test_utils.run_briefly(self.loop)
        self.assertEqual(parent.result(), [1, 2])

    def test_gather_shield(self):
        # Cancelling gather(shield(a), shield(b)) cancels the shield
        # wrappers, but the shielded child futures stay usable.
        child1 = self.new_future(self.loop)
        child2 = self.new_future(self.loop)
        inner1 = asyncio.shield(child1)
        inner2 = asyncio.shield(child2)
        parent = asyncio.gather(inner1, inner2)
        test_utils.run_briefly(self.loop)
        parent.cancel()
        test_utils.run_briefly(self.loop)
        self.assertIsInstance(parent.exception(), asyncio.CancelledError)
        self.assertTrue(inner1.cancelled())
        self.assertTrue(inner2.cancelled())
        child1.set_result(1)
        child2.set_result(2)
        test_utils.run_briefly(self.loop)

    def test_shield_coroutine_without_loop(self):
        # Without a running loop, shield() warns (implicit-loop deprecation)
        # and then fails because no event loop exists.
        async def coro():
            return 42

        inner = coro()
        self.addCleanup(inner.close)
        with self.assertWarns(DeprecationWarning) as cm:
            with self.assertRaisesRegex(RuntimeError, 'There is no current event loop'):
                asyncio.shield(inner)
        self.assertEqual(cm.warnings[0].filename, __file__)

    def test_shield_coroutine_use_running_loop(self):
        # Inside a running loop, shield() picks up that loop without warning.
        async def coro():
            return 42

        async def test():
            return asyncio.shield(coro())

        outer = self.loop.run_until_complete(test())
        self.assertEqual(outer._loop, self.loop)
        res = self.loop.run_until_complete(outer)
        self.assertEqual(res, 42)

    def test_shield_coroutine_use_global_loop(self):
        # Falling back to the global event loop is deprecated but must still
        # work and use the loop set via set_event_loop().
        async def coro():
            return 42

        asyncio.set_event_loop(self.loop)
        self.addCleanup(asyncio.set_event_loop, None)
        with self.assertWarns(DeprecationWarning) as cm:
            outer = asyncio.shield(coro())
        self.assertEqual(cm.warnings[0].filename, __file__)
        self.assertEqual(outer._loop, self.loop)
        res = self.loop.run_until_complete(outer)
        self.assertEqual(res, 42)
    def test_as_completed_invalid_args(self):
        # as_completed() expects an iterable of awaitables; passing a bare
        # future or coroutine must raise TypeError.
        fut = self.new_future(self.loop)
        self.assertRaises(TypeError, self.loop.run_until_complete,
                          asyncio.as_completed(fut))
        coro = coroutine_function()
        self.assertRaises(TypeError, self.loop.run_until_complete,
                          asyncio.as_completed(coro))
        coro.close()

    def test_wait_invalid_args(self):
        # wait() likewise rejects a bare future/coroutine (TypeError) and an
        # empty sequence (ValueError).
        fut = self.new_future(self.loop)
        self.assertRaises(TypeError, self.loop.run_until_complete,
                          asyncio.wait(fut))
        coro = coroutine_function()
        self.assertRaises(TypeError, self.loop.run_until_complete,
                          asyncio.wait(coro))
        coro.close()
        self.assertRaises(ValueError, self.loop.run_until_complete,
                          asyncio.wait([]))
    def test_corowrapper_mocks_generator(self):
        # With and without debug mode, a wrapped legacy coroutine must expose
        # the generator introspection attributes (gi_frame/gi_running/gi_code)
        # and clear gi_frame once exhausted.
        def check():
            with self.assertWarns(DeprecationWarning):
                @asyncio.coroutine
                def coro():
                    # A coroutine object is not an iterator while running.
                    self.assertTrue(gen.gi_running)
                    yield from fut

            # A completed future is needed for the coroutine to finish.
            fut = self.new_future(self.loop)
            fut.set_result(None)

            gen = coro()
            self.assertTrue(asyncio.iscoroutine(gen))
            self.assertIsInstance(gen.gi_frame, types.FrameType)
            self.assertFalse(gen.gi_running)
            self.assertIsInstance(gen.gi_code, types.CodeType)
            self.loop.run_until_complete(gen)
            self.assertIsNone(gen.gi_frame)

        with set_coroutine_debug(False):
            check()
        with set_coroutine_debug(True):
            check()

    def test_yield_from_corowrapper(self):
        # Debug-mode CoroWrapper must be transparent to chained
        # ``yield from`` delegation across coroutines and futures.
        with set_coroutine_debug(True):
            with self.assertWarns(DeprecationWarning):
                @asyncio.coroutine
                def t1():
                    return (yield from t2())

            with self.assertWarns(DeprecationWarning):
                @asyncio.coroutine
                def t2():
                    f = self.new_future(self.loop)
                    self.new_task(self.loop, t3(f))
                    return (yield from f)

            with self.assertWarns(DeprecationWarning):
                @asyncio.coroutine
                def t3(f):
                    f.set_result((1, 2, 3))

            task = self.new_task(self.loop, t1())
            val = self.loop.run_until_complete(task)
            self.assertEqual(val, (1, 2, 3))

    def test_yield_from_corowrapper_send(self):
        # CoroWrapper.send() must forward values (including tuples) without
        # unpacking them as *args.
        def foo():
            a = yield
            return a

        def call(arg):
            cw = asyncio.coroutines.CoroWrapper(foo())
            cw.send(None)  # Prime the generator.
            try:
                cw.send(arg)
            except StopIteration as ex:
                return ex.args[0]
            else:
                raise AssertionError('StopIteration was expected')

        self.assertEqual(call((1, 2)), (1, 2))
        self.assertEqual(call('spam'), 'spam')

    def test_corowrapper_weakref(self):
        # CoroWrapper instances must support weak references.
        wd = weakref.WeakValueDictionary()
        def foo(): yield from []
        cw = asyncio.coroutines.CoroWrapper(foo())
        wd['cw'] = cw  # Would fail without __weakref__ slot.
        cw.gen = None  # Suppress warning from __del__.

    def test_corowrapper_throw(self):
        # CoroWrapper.throw() must accept every legacy signature:
        # throw(exc_instance), throw(type, instance), throw(type, value)
        # and throw(type, value, traceback).
        def foo():
            value = None
            while True:
                try:
                    value = yield value
                except Exception as e:
                    value = e

        exception = Exception("foo")
        cw = asyncio.coroutines.CoroWrapper(foo())
        cw.send(None)
        self.assertIs(exception, cw.throw(exception))

        cw = asyncio.coroutines.CoroWrapper(foo())
        cw.send(None)
        self.assertIs(exception, cw.throw(Exception, exception))

        cw = asyncio.coroutines.CoroWrapper(foo())
        cw.send(None)
        exception = cw.throw(Exception, "foo")
        self.assertIsInstance(exception, Exception)
        self.assertEqual(exception.args, ("foo", ))

        cw = asyncio.coroutines.CoroWrapper(foo())
        cw.send(None)
        exception = cw.throw(Exception, "foo", None)
        self.assertIsInstance(exception, Exception)
        self.assertEqual(exception.args, ("foo", ))
    def test_log_destroyed_pending_task(self):
        # Garbage-collecting a still-pending task must call the loop's
        # exception handler with a "Task was destroyed but it is pending!"
        # report carrying the task's creation traceback.
        Task = self.__class__.Task

        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def kill_me(loop):
                future = self.new_future(loop)
                yield from future
                # Never reached: the future is never resolved.
                raise Exception("code never reached")

        mock_handler = mock.Mock()
        self.loop.set_debug(True)
        self.loop.set_exception_handler(mock_handler)

        # Schedule the task; it blocks on the unresolved future.
        coro = kill_me(self.loop)
        task = asyncio.ensure_future(coro, loop=self.loop)
        self.assertEqual(asyncio.all_tasks(loop=self.loop), {task})
        asyncio.set_event_loop(None)

        # Execute the task so it waits for the future.
        self.loop._run_once()
        self.assertEqual(len(self.loop._ready), 0)

        # Drop every reference that keeps the task alive, then force GC.
        del coro.gi_frame.f_locals['future']
        coro = None
        source_traceback = task._source_traceback
        task = None
        support.gc_collect()
        self.assertEqual(asyncio.all_tasks(loop=self.loop), set())
        mock_handler.assert_called_with(self.loop, {
            'message': 'Task was destroyed but it is pending!',
            'task': mock.ANY,
            'source_traceback': source_traceback,
        })
        mock_handler.reset_mock()

    @mock.patch('asyncio.base_events.logger')
    def test_tb_logger_not_called_after_cancel(self, m_log):
        # Cancelling a task whose exception was never retrieved must not
        # trigger the "exception was never retrieved" log message.
        loop = asyncio.new_event_loop()
        self.set_event_loop(loop)

        async def coro():
            raise TypeError

        async def runner():
            task = self.new_task(loop, coro())
            await asyncio.sleep(0.05)
            task.cancel()
            task = None

        loop.run_until_complete(runner())
        self.assertFalse(m_log.error.called)

    @mock.patch('asyncio.coroutines.logger')
    def test_coroutine_never_yielded(self, m_log):
        # In debug mode, a coroutine that is created but never awaited must
        # be reported (with the creation location) when it is collected.
        with set_coroutine_debug(True):
            with self.assertWarns(DeprecationWarning):
                @asyncio.coroutine
                def coro_noop():
                    pass

        tb_filename = __file__
        tb_lineno = sys._getframe().f_lineno + 2
        # Create a coroutine object and forget it immediately.
        coro_noop()
        support.gc_collect()

        self.assertTrue(m_log.error.called)
        message = m_log.error.call_args[0][0]
        func_filename, func_lineno = test_utils.get_function_source(coro_noop)

        regex = (r'^<CoroWrapper %s\(?\)? .* at %s:%s, .*> '
                 r'was never yielded from\n'
                 r'Coroutine object created at \(most recent call last, truncated to \d+ last lines\):\n'
                 r'.*\n'
                 r'  File "%s", line %s, in test_coroutine_never_yielded\n'
                 r'    coro_noop\(\)$'
                 % (re.escape(coro_noop.__qualname__),
                    re.escape(func_filename), func_lineno,
                    re.escape(tb_filename), tb_lineno))

        self.assertRegex(message, re.compile(regex, re.DOTALL))

    def test_return_coroutine_from_coroutine(self):
        """Return of @asyncio.coroutine()-wrapped coroutine works in debug
        and non-debug mode: the returned inner coroutine is awaited and its
        value propagated."""
        def check():
            with self.assertWarns(DeprecationWarning):
                @asyncio.coroutine
                def outer_coro():
                    with self.assertWarns(DeprecationWarning):
                        @asyncio.coroutine
                        def inner_coro():
                            return 1

                    return inner_coro()

            result = self.loop.run_until_complete(outer_coro())
            self.assertEqual(result, 1)

        # Test with debug flag cleared.
        with set_coroutine_debug(False):
            check()

        # Test with debug flag set.
        with set_coroutine_debug(True):
            check()
    def test_task_source_traceback(self):
        # In debug mode, a task records where it was created; the second
        # frame from the end must point at the ``new_task`` call line below.
        self.loop.set_debug(True)
        task = self.new_task(self.loop, coroutine_function())
        lineno = sys._getframe().f_lineno - 1
        self.assertIsInstance(task._source_traceback, list)
        self.assertEqual(task._source_traceback[-2][:3],
                         (__file__,
                          lineno,
                          'test_task_source_traceback'))
        self.loop.run_until_complete(task)

    def _test_cancel_wait_for(self, timeout):
        # Helper shared by the two tests below: cancelling wait_for() must
        # also cancel the wrapped task, for both blocking and timed waits.
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)

        async def blocking_coroutine():
            fut = self.new_future(loop)
            # Block: fut result is never set
            await fut

        task = loop.create_task(blocking_coroutine())

        wait = loop.create_task(asyncio.wait_for(task, timeout))
        loop.call_soon(wait.cancel)

        self.assertRaises(asyncio.CancelledError,
                          loop.run_until_complete, wait)

        # Python issue #23219: cancelling the wait must also cancel the task
        self.assertTrue(task.cancelled())

    def test_cancel_blocking_wait_for(self):
        # wait_for() with no timeout (blocks forever).
        self._test_cancel_wait_for(None)

    def test_cancel_wait_for(self):
        # wait_for() with a long timeout that never fires.
        self._test_cancel_wait_for(60.0)
    def test_cancel_gather_1(self):
        # Python issue #26923: a cancel() arriving while gather() is already
        # completing must be rejected (return False) and not drop the result.
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)

        fut = self.new_future(loop)
        async def create():
            # The indirection fut->child_coro is needed since otherwise the
            # gathering task is done at the same time as the child future
            def child_coro():
                return (yield from fut)
            gather_future = asyncio.gather(child_coro())
            return asyncio.ensure_future(gather_future)
        gather_task = loop.run_until_complete(create())

        cancel_result = None
        def cancelling_callback(_):
            nonlocal cancel_result
            cancel_result = gather_task.cancel()
        fut.add_done_callback(cancelling_callback)

        fut.set_result(42) # calls the cancelling_callback after fut is done()

        # At this point the task should complete.
        loop.run_until_complete(gather_task)

        # Python issue #26923: asyncio.gather drops cancellation
        self.assertEqual(cancel_result, False)
        self.assertFalse(gather_task.cancelled())
        self.assertEqual(gather_task.result(), [42])

    def test_cancel_gather_2(self):
        # cancel(msg) arguments must propagate to the innermost
        # CancelledError raised out of the gathered awaitables.
        cases = [
            ((), ()),
            ((None,), ()),
            (('my message',), ('my message',)),
            # Non-string values should roundtrip.
            ((5,), (5,)),
        ]
        for cancel_args, expected_args in cases:
            with self.subTest(cancel_args=cancel_args):

                loop = asyncio.new_event_loop()
                self.addCleanup(loop.close)

                async def test():
                    time = 0
                    while True:
                        time += 0.05
                        await asyncio.gather(asyncio.sleep(0.05),
                                             return_exceptions=True)
                        if time > 1:
                            return

                async def main():
                    qwe = self.new_task(loop, test())
                    await asyncio.sleep(0.2)
                    qwe.cancel(*cancel_args)
                    await qwe

                try:
                    loop.run_until_complete(main())
                except asyncio.CancelledError as exc:
                    self.assertEqual(exc.args, ())
                    exc_type, exc_args, depth = get_innermost_context(exc)
                    self.assertEqual((exc_type, exc_args),
                                     (asyncio.CancelledError, expected_args))
                    # The exact traceback seems to vary in CI.
                    self.assertIn(depth, (2, 3))
                else:
                    self.fail('gather did not propagate the cancellation '
                              'request')
    def test_exception_traceback(self):
        # See http://bugs.python.org/issue28843
        # A task's stored exception must keep its __traceback__.
        async def foo():
            1 / 0

        async def main():
            task = self.new_task(self.loop, foo())
            await asyncio.sleep(0)  # skip one loop iteration
            self.assertIsNotNone(task.exception().__traceback__)

        self.loop.run_until_complete(main())

    @mock.patch('asyncio.base_events.logger')
    def test_error_in_call_soon(self, m_log):
        # If loop.call_soon raises while scheduling the task's first step,
        # the error propagates and the orphaned task is reported as
        # "destroyed but pending" when collected.
        def call_soon(callback, *args, **kwargs):
            raise ValueError
        self.loop.call_soon = call_soon

        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def coro():
                pass

        self.assertFalse(m_log.error.called)

        with self.assertRaises(ValueError):
            gen = coro()
            try:
                self.new_task(self.loop, gen)
            finally:
                gen.close()

        self.assertTrue(m_log.error.called)
        message = m_log.error.call_args[0][0]
        self.assertIn('Task was destroyed but it is pending', message)

        self.assertEqual(asyncio.all_tasks(self.loop), set())
    def test_create_task_with_noncoroutine(self):
        # Non-awaitable arguments are rejected with a descriptive TypeError.
        with self.assertRaisesRegex(TypeError,
                                    "a coroutine was expected, got 123"):
            self.new_task(self.loop, 123)

        # test it for the second time to ensure that caching
        # in asyncio.iscoroutine() doesn't break things.
        with self.assertRaisesRegex(TypeError,
                                    "a coroutine was expected, got 123"):
            self.new_task(self.loop, 123)

    def test_create_task_with_oldstyle_coroutine(self):
        # Legacy @asyncio.coroutine objects are accepted (twice, to exercise
        # the iscoroutine() type cache).
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def coro():
                pass

        task = self.new_task(self.loop, coro())
        self.assertIsInstance(task, self.Task)
        self.loop.run_until_complete(task)

        task = self.new_task(self.loop, coro())
        self.assertIsInstance(task, self.Task)
        self.loop.run_until_complete(task)

    def test_create_task_with_async_function(self):
        # Native ``async def`` coroutines are accepted.
        async def coro():
            pass

        task = self.new_task(self.loop, coro())
        self.assertIsInstance(task, self.Task)
        self.loop.run_until_complete(task)

        # test it for the second time to ensure that caching
        # in asyncio.iscoroutine() doesn't break things.
        task = self.new_task(self.loop, coro())
        self.assertIsInstance(task, self.Task)
        self.loop.run_until_complete(task)

    def test_create_task_with_asynclike_function(self):
        # Objects that merely implement the coroutine protocol (__await__)
        # are accepted too.
        task = self.new_task(self.loop, CoroLikeObject())
        self.assertIsInstance(task, self.Task)
        self.assertEqual(self.loop.run_until_complete(task), 42)

        task = self.new_task(self.loop, CoroLikeObject())
        self.assertIsInstance(task, self.Task)
        self.assertEqual(self.loop.run_until_complete(task), 42)

    def test_bare_create_task(self):
        # asyncio.create_task() inside a running loop returns this suite's
        # Task type and produces the coroutine's result.
        async def inner():
            return 1

        async def coro():
            task = asyncio.create_task(inner())
            self.assertIsInstance(task, self.Task)
            ret = await task
            self.assertEqual(1, ret)

        self.loop.run_until_complete(coro())

    def test_bare_create_named_task(self):
        # The ``name`` keyword is honoured by create_task().
        async def coro_noop():
            pass

        async def coro():
            task = asyncio.create_task(coro_noop(), name='No-op')
            self.assertEqual(task.get_name(), 'No-op')
            await task

        self.loop.run_until_complete(coro())
    def test_context_1(self):
        # A subtask runs in a *copy* of its parent's context: changes made by
        # the child are invisible to the parent and vice versa.
        cvar = contextvars.ContextVar('cvar', default='nope')

        async def sub():
            await asyncio.sleep(0.01)
            self.assertEqual(cvar.get(), 'nope')
            cvar.set('something else')

        async def main():
            self.assertEqual(cvar.get(), 'nope')
            subtask = self.new_task(loop, sub())
            cvar.set('yes')
            self.assertEqual(cvar.get(), 'yes')
            await subtask
            self.assertEqual(cvar.get(), 'yes')

        loop = asyncio.new_event_loop()
        try:
            task = self.new_task(loop, main())
            loop.run_until_complete(task)
        finally:
            loop.close()

    def test_context_2(self):
        # Done-callbacks run in the context captured at add_done_callback()
        # time and must not pollute the awaiting task's context.
        cvar = contextvars.ContextVar('cvar', default='nope')

        async def main():
            def fut_on_done(fut):
                # This change must not pollute the context
                # of the "main()" task.
                cvar.set('something else')

            self.assertEqual(cvar.get(), 'nope')

            for j in range(2):
                fut = self.new_future(loop)
                fut.add_done_callback(fut_on_done)
                cvar.set(f'yes{j}')
                loop.call_soon(fut.set_result, None)
                await fut
                self.assertEqual(cvar.get(), f'yes{j}')

                for i in range(3):
                    # Test that task passed its context to add_done_callback:
                    cvar.set(f'yes{i}-{j}')
                    await asyncio.sleep(0.001)
                    self.assertEqual(cvar.get(), f'yes{i}-{j}')

        loop = asyncio.new_event_loop()
        try:
            task = self.new_task(loop, main())
            loop.run_until_complete(task)
        finally:
            loop.close()

        self.assertEqual(cvar.get(), 'nope')

    def test_context_3(self):
        # Run 100 Tasks in parallel, each modifying cvar.
        cvar = contextvars.ContextVar('cvar', default=-1)

        async def sub(num):
            for i in range(10):
                cvar.set(num + i)
                await asyncio.sleep(random.uniform(0.001, 0.05))
                self.assertEqual(cvar.get(), num + i)

        async def main():
            tasks = []
            for i in range(100):
                task = loop.create_task(sub(random.randint(0, 10)))
                tasks.append(task)

            await asyncio.gather(*tasks)

        loop = asyncio.new_event_loop()
        try:
            loop.run_until_complete(main())
        finally:
            loop.close()

        # The enclosing (non-task) context is untouched.
        self.assertEqual(cvar.get(), -1)
    def test_get_coro(self):
        # Task.get_coro() returns the exact coroutine object the task wraps.
        loop = asyncio.new_event_loop()
        coro = coroutine_function()
        try:
            task = self.new_task(loop, coro)
            loop.run_until_complete(task)
            self.assertIs(task.get_coro(), coro)
        finally:
            loop.close()
def add_subclass_tests(cls):
    """Class decorator: patch *cls* with call-counting Task/Future subclasses.

    When either ``cls.Task`` or ``cls.Future`` is None (the C implementation
    is unavailable) the class is returned untouched.  Otherwise both
    attributes are replaced by subclasses that count ``add_done_callback``
    invocations, an extra unit test exercising the subclasses is attached,
    and the stack-sensitive ``test_task_source_traceback`` test is disabled
    (its hard-coded call stack differs for subclasses).
    """
    task_base, future_base = cls.Task, cls.Future

    if task_base is None or future_base is None:
        # Nothing to subclass -- leave the test case as-is.
        return cls

    class _CountingMixin:
        # Records per-method call counts in ``self.calls``.
        def __init__(self, *args, **kwargs):
            self.calls = collections.defaultdict(int)
            super().__init__(*args, **kwargs)

        def add_done_callback(self, *args, **kwargs):
            self.calls['add_done_callback'] += 1
            return super().add_done_callback(*args, **kwargs)

    class Task(_CountingMixin, task_base):
        pass

    class Future(_CountingMixin, future_base):
        pass

    def test_subclasses_ctask_cfuture(self):
        # Run a task through the subclasses and verify exactly one
        # add_done_callback() call was seen on each object.
        fut = self.Future(loop=self.loop)

        async def func():
            self.loop.call_soon(lambda: fut.set_result('spam'))
            return await fut

        task = self.Task(func(), loop=self.loop)
        result = self.loop.run_until_complete(task)

        self.assertEqual(result, 'spam')
        self.assertEqual(dict(task.calls), {'add_done_callback': 1})
        self.assertEqual(dict(fut.calls), {'add_done_callback': 1})

    # Install the patched Task & Future plus the extra unit test, and
    # disable the source-traceback test for subclasses.
    cls.Task = Task
    cls.Future = Future
    cls.test_subclasses_ctask_cfuture = test_subclasses_ctask_cfuture
    cls.test_task_source_traceback = None
    return cls
class SetMethodsTest:
    # Mixin verifying that calling Future.set_result()/set_exception()
    # directly on a running Task puts it in an invalid state which is
    # reported through the loop's exception handler.

    def test_set_result_causes_invalid_state(self):
        Future = type(self).Future
        self.loop.call_exception_handler = exc_handler = mock.Mock()

        async def foo():
            await asyncio.sleep(0.1)
            return 10

        coro = foo()
        task = self.new_task(self.loop, coro)
        # Bypass Task and force a result directly on the underlying Future.
        Future.set_result(task, 'spam')

        self.assertEqual(
            self.loop.run_until_complete(task),
            'spam')

        exc_handler.assert_called_once()
        exc = exc_handler.call_args[0][0]['exception']
        with self.assertRaisesRegex(asyncio.InvalidStateError,
                                    r'step\(\): already done'):
            raise exc

        coro.close()

    def test_set_exception_causes_invalid_state(self):
        class MyExc(Exception):
            pass

        Future = type(self).Future
        self.loop.call_exception_handler = exc_handler = mock.Mock()

        async def foo():
            await asyncio.sleep(0.1)
            return 10

        coro = foo()
        task = self.new_task(self.loop, coro)
        # Bypass Task and force an exception directly on the Future.
        Future.set_exception(task, MyExc())

        with self.assertRaises(MyExc):
            self.loop.run_until_complete(task)

        exc_handler.assert_called_once()
        exc = exc_handler.call_args[0][0]['exception']
        with self.assertRaisesRegex(asyncio.InvalidStateError,
                                    r'step\(\): already done'):
            raise exc

        coro.close()
@unittest.skipUnless(hasattr(futures, '_CFuture') and
                     hasattr(tasks, '_CTask'),
                     'requires the C _asyncio module')
class CTask_CFuture_Tests(BaseTaskTests, SetMethodsTest,
                          test_utils.TestCase):
    """Run the whole task test suite against the C Task + C Future pair."""

    Task = getattr(tasks, '_CTask', None)
    Future = getattr(futures, '_CFuture', None)

    @support.refcount_test
    def test_refleaks_in_task___init__(self):
        # Re-running Task.__init__ on the same object must not leak refs.
        gettotalrefcount = support.get_attribute(sys, 'gettotalrefcount')
        async def coro():
            pass
        task = self.new_task(self.loop, coro())
        self.loop.run_until_complete(task)
        refs_before = gettotalrefcount()
        for i in range(100):
            task.__init__(coro(), loop=self.loop)
            self.loop.run_until_complete(task)
        self.assertAlmostEqual(gettotalrefcount() - refs_before, 0, delta=10)

    def test_del__log_destroy_pending_segfault(self):
        # Deleting _log_destroy_pending must raise cleanly, not crash.
        async def coro():
            pass
        task = self.new_task(self.loop, coro())
        self.loop.run_until_complete(task)
        with self.assertRaises(AttributeError):
            del task._log_destroy_pending
# The classes below run BaseTaskTests against every combination of the C and
# pure-Python Task/Future implementations (and their subclasses, via the
# @add_subclass_tests decorator), so all four code paths stay in sync.

@unittest.skipUnless(hasattr(futures, '_CFuture') and
                     hasattr(tasks, '_CTask'),
                     'requires the C _asyncio module')
@add_subclass_tests
class CTask_CFuture_SubclassTests(BaseTaskTests, test_utils.TestCase):
    # C Task subclass + C Future subclass.
    Task = getattr(tasks, '_CTask', None)
    Future = getattr(futures, '_CFuture', None)


@unittest.skipUnless(hasattr(tasks, '_CTask'),
                     'requires the C _asyncio module')
@add_subclass_tests
class CTaskSubclass_PyFuture_Tests(BaseTaskTests, test_utils.TestCase):
    # C Task subclass + pure-Python Future.
    Task = getattr(tasks, '_CTask', None)
    Future = futures._PyFuture


@unittest.skipUnless(hasattr(futures, '_CFuture'),
                     'requires the C _asyncio module')
@add_subclass_tests
class PyTask_CFutureSubclass_Tests(BaseTaskTests, test_utils.TestCase):
    # Pure-Python Task + C Future subclass.
    Future = getattr(futures, '_CFuture', None)
    Task = tasks._PyTask


@unittest.skipUnless(hasattr(tasks, '_CTask'),
                     'requires the C _asyncio module')
class CTask_PyFuture_Tests(BaseTaskTests, test_utils.TestCase):
    # C Task + pure-Python Future.
    Task = getattr(tasks, '_CTask', None)
    Future = futures._PyFuture


@unittest.skipUnless(hasattr(futures, '_CFuture'),
                     'requires the C _asyncio module')
class PyTask_CFuture_Tests(BaseTaskTests, test_utils.TestCase):
    # Pure-Python Task + C Future.
    Task = tasks._PyTask
    Future = getattr(futures, '_CFuture', None)


class PyTask_PyFuture_Tests(BaseTaskTests, SetMethodsTest,
                            test_utils.TestCase):
    # Pure-Python Task + pure-Python Future (always available).
    Task = tasks._PyTask
    Future = futures._PyFuture


@add_subclass_tests
class PyTask_PyFuture_SubclassTests(BaseTaskTests, test_utils.TestCase):
    # Pure-Python Task subclass + pure-Python Future subclass.
    Task = tasks._PyTask
    Future = futures._PyFuture
@unittest.skipUnless(hasattr(tasks, '_CTask'),
                     'requires the C _asyncio module')
class CTask_Future_Tests(test_utils.TestCase):

    def test_foobar(self):
        # The C Task must tolerate a Future whose get_loop attribute raises
        # AttributeError (falling back to the private _loop slot).
        class Fut(asyncio.Future):
            @property
            def get_loop(self):
                raise AttributeError

        async def coro():
            await fut
            return 'spam'

        self.loop = asyncio.new_event_loop()
        try:
            fut = Fut(loop=self.loop)
            self.loop.call_later(0.1, fut.set_result, 1)
            task = self.loop.create_task(coro())
            res = self.loop.run_until_complete(task)
        finally:
            self.loop.close()

        self.assertEqual(res, 'spam')
class BaseTaskIntrospectionTests:
    """Shared tests for the task-registry helpers.

    Subclasses bind the four hooks below to either the pure-Python or the C
    implementations of _register_task / _unregister_task / _enter_task /
    _leave_task.
    """

    _register_task = None
    _unregister_task = None
    _enter_task = None
    _leave_task = None

    def test__register_task_1(self):
        # Duck-typed task exposing the loop via the ``_loop`` attribute.
        class TaskLike:
            @property
            def _loop(self):
                return loop

            def done(self):
                return False

        task = TaskLike()
        loop = mock.Mock()

        self.assertEqual(asyncio.all_tasks(loop), set())
        self._register_task(task)
        self.assertEqual(asyncio.all_tasks(loop), {task})
        self._unregister_task(task)

    def test__register_task_2(self):
        # Duck-typed task exposing the loop via get_loop().
        class TaskLike:
            def get_loop(self):
                return loop

            def done(self):
                return False

        task = TaskLike()
        loop = mock.Mock()

        self.assertEqual(asyncio.all_tasks(loop), set())
        self._register_task(task)
        self.assertEqual(asyncio.all_tasks(loop), {task})
        self._unregister_task(task)

    def test__register_task_3(self):
        # Already-done tasks are excluded from all_tasks().
        class TaskLike:
            def get_loop(self):
                return loop

            def done(self):
                return True

        task = TaskLike()
        loop = mock.Mock()

        self.assertEqual(asyncio.all_tasks(loop), set())
        self._register_task(task)
        self.assertEqual(asyncio.all_tasks(loop), set())
        self._unregister_task(task)

    def test__enter_task(self):
        # Entering makes the task current for its loop; leaving clears it.
        task = mock.Mock()
        loop = mock.Mock()
        self.assertIsNone(asyncio.current_task(loop))
        self._enter_task(loop, task)
        self.assertIs(asyncio.current_task(loop), task)
        self._leave_task(loop, task)

    def test__enter_task_failure(self):
        # A second enter on the same loop must fail and keep the first task.
        task1 = mock.Mock()
        task2 = mock.Mock()
        loop = mock.Mock()
        self._enter_task(loop, task1)
        with self.assertRaises(RuntimeError):
            self._enter_task(loop, task2)
        self.assertIs(asyncio.current_task(loop), task1)
        self._leave_task(loop, task1)

    def test__leave_task(self):
        task = mock.Mock()
        loop = mock.Mock()
        self._enter_task(loop, task)
        self._leave_task(loop, task)
        self.assertIsNone(asyncio.current_task(loop))

    def test__leave_task_failure1(self):
        # Leaving with a different task than the current one must fail.
        task1 = mock.Mock()
        task2 = mock.Mock()
        loop = mock.Mock()
        self._enter_task(loop, task1)
        with self.assertRaises(RuntimeError):
            self._leave_task(loop, task2)
        self.assertIs(asyncio.current_task(loop), task1)
        self._leave_task(loop, task1)

    def test__leave_task_failure2(self):
        # Leaving when no task was entered must fail.
        task = mock.Mock()
        loop = mock.Mock()
        with self.assertRaises(RuntimeError):
            self._leave_task(loop, task)
        self.assertIsNone(asyncio.current_task(loop))

    def test__unregister_task(self):
        task = mock.Mock()
        loop = mock.Mock()
        task.get_loop = lambda: loop
        self._register_task(task)
        self._unregister_task(task)
        self.assertEqual(asyncio.all_tasks(loop), set())

    def test__unregister_task_not_registered(self):
        # Unregistering an unknown task is a harmless no-op.
        task = mock.Mock()
        loop = mock.Mock()
        self._unregister_task(task)
        self.assertEqual(asyncio.all_tasks(loop), set())
class PyIntrospectionTests(test_utils.TestCase, BaseTaskIntrospectionTests):
    """Introspection tests against the pure-Python registry helpers."""
    _register_task = staticmethod(tasks._py_register_task)
    _unregister_task = staticmethod(tasks._py_unregister_task)
    _enter_task = staticmethod(tasks._py_enter_task)
    _leave_task = staticmethod(tasks._py_leave_task)


@unittest.skipUnless(hasattr(tasks, '_c_register_task'),
                     'requires the C _asyncio module')
class CIntrospectionTests(test_utils.TestCase, BaseTaskIntrospectionTests):
    """Introspection tests against the C-accelerated registry helpers."""
    if hasattr(tasks, '_c_register_task'):
        _register_task = staticmethod(tasks._c_register_task)
        _unregister_task = staticmethod(tasks._c_unregister_task)
        _enter_task = staticmethod(tasks._c_enter_task)
        _leave_task = staticmethod(tasks._c_leave_task)
    else:
        # Attributes must exist even when the class is skipped at runtime.
        _register_task = _unregister_task = _enter_task = _leave_task = None
class BaseCurrentLoopTests:
    """Shared current_task() tests; subclasses supply new_task()."""

    def setUp(self):
        super().setUp()
        self.loop = asyncio.new_event_loop()
        self.set_event_loop(self.loop)

    def new_task(self, coro):
        # Overridden to build a pure-Python or C Task.
        raise NotImplementedError

    def test_current_task_no_running_loop(self):
        # With an explicit (non-running) loop, current_task() is None.
        self.assertIsNone(asyncio.current_task(loop=self.loop))

    def test_current_task_no_running_loop_implicit(self):
        # Without any running loop, current_task() raises.
        with self.assertRaises(RuntimeError):
            asyncio.current_task()

    def test_current_task_with_implicit_loop(self):
        # Inside a running task, all three calling conventions agree.
        async def coro():
            self.assertIs(asyncio.current_task(loop=self.loop), task)

            self.assertIs(asyncio.current_task(None), task)
            self.assertIs(asyncio.current_task(), task)

        task = self.new_task(coro())
        self.loop.run_until_complete(task)
        self.assertIsNone(asyncio.current_task(loop=self.loop))


class PyCurrentLoopTests(BaseCurrentLoopTests, test_utils.TestCase):
    """current_task() tests with the pure-Python Task."""

    def new_task(self, coro):
        return tasks._PyTask(coro, loop=self.loop)


@unittest.skipUnless(hasattr(tasks, '_CTask'),
                     'requires the C _asyncio module')
class CCurrentLoopTests(BaseCurrentLoopTests, test_utils.TestCase):
    """current_task() tests with the C Task."""

    def new_task(self, coro):
        return getattr(tasks, '_CTask')(coro, loop=self.loop)
class GenericTaskTests(test_utils.TestCase):

    def test_future_subclass(self):
        # Task is (still) a subclass of Future.
        self.assertTrue(issubclass(asyncio.Task, asyncio.Future))

    @support.cpython_only
    def test_asyncio_module_compiled(self):
        # Because of circular imports it's easy to make the _asyncio
        # extension module fail to build/import without noticing; only run
        # the check when the other C accelerator modules are available.
        try:
            import _functools
            import _json
            import _pickle
        except ImportError:
            self.skipTest('C modules are not available')
        else:
            try:
                import _asyncio
            except ImportError:
                self.fail('_asyncio module is missing')
class GatherTestsBase:
    """Shared gather() tests.

    Subclasses implement wrap_futures() (identity or coroutine wrapping)
    and _gather() so the same scenarios run against both future-based and
    coroutine-based gathering.
    """

    def setUp(self):
        super().setUp()
        self.one_loop = self.new_test_loop()
        self.other_loop = self.new_test_loop()
        self.set_event_loop(self.one_loop, cleanup=False)

    def _run_loop(self, loop):
        # Drain the loop's ready queue completely.
        while loop._ready:
            test_utils.run_briefly(loop)

    def _check_success(self, **kwargs):
        # gather() completes only when ALL children are done, and returns
        # results in the order the children were passed, not completion
        # order.
        a, b, c = [self.one_loop.create_future() for i in range(3)]
        fut = self._gather(*self.wrap_futures(a, b, c), **kwargs)
        cb = test_utils.MockCallback()
        fut.add_done_callback(cb)
        b.set_result(1)
        a.set_result(2)
        self._run_loop(self.one_loop)
        self.assertEqual(cb.called, False)
        self.assertFalse(fut.done())
        c.set_result(3)
        self._run_loop(self.one_loop)
        cb.assert_called_once_with(fut)
        self.assertEqual(fut.result(), [2, 1, 3])

    def test_success(self):
        self._check_success()
        self._check_success(return_exceptions=False)

    def test_result_exception_success(self):
        self._check_success(return_exceptions=True)

    def test_one_exception(self):
        # Without return_exceptions, the first child exception finishes the
        # gather immediately; later results/cancellations are ignored.
        a, b, c, d, e = [self.one_loop.create_future() for i in range(5)]
        fut = self._gather(*self.wrap_futures(a, b, c, d, e))
        cb = test_utils.MockCallback()
        fut.add_done_callback(cb)
        exc = ZeroDivisionError()
        a.set_result(1)
        b.set_exception(exc)
        self._run_loop(self.one_loop)
        self.assertTrue(fut.done())
        cb.assert_called_once_with(fut)
        self.assertIs(fut.exception(), exc)
        # Does nothing
        c.set_result(3)
        d.cancel()
        e.set_exception(RuntimeError())
        e.exception()

    def test_return_exceptions(self):
        # With return_exceptions=True, exceptions are collected in-place and
        # the gather waits for every child.
        a, b, c, d = [self.one_loop.create_future() for i in range(4)]
        fut = self._gather(*self.wrap_futures(a, b, c, d),
                           return_exceptions=True)
        cb = test_utils.MockCallback()
        fut.add_done_callback(cb)
        exc = ZeroDivisionError()
        exc2 = RuntimeError()
        b.set_result(1)
        c.set_exception(exc)
        a.set_result(3)
        self._run_loop(self.one_loop)
        self.assertFalse(fut.done())
        d.set_exception(exc2)
        self._run_loop(self.one_loop)
        self.assertTrue(fut.done())
        cb.assert_called_once_with(fut)
        self.assertEqual(fut.result(), [3, 1, exc, exc2])

    def test_env_var_debug(self):
        # asyncio.coroutines._DEBUG must follow PYTHONASYNCIODEBUG and
        # -X dev, and -E must make the env var inert.
        code = '\n'.join((
            'import asyncio.coroutines',
            'print(asyncio.coroutines._DEBUG)'))

        # Test with -E to not fail if the unit test was run with
        # PYTHONASYNCIODEBUG set to a non-empty string
        sts, stdout, stderr = assert_python_ok('-E', '-c', code)
        self.assertEqual(stdout.rstrip(), b'False')

        sts, stdout, stderr = assert_python_ok('-c', code,
                                               PYTHONASYNCIODEBUG='',
                                               PYTHONDEVMODE='')
        self.assertEqual(stdout.rstrip(), b'False')

        sts, stdout, stderr = assert_python_ok('-c', code,
                                               PYTHONASYNCIODEBUG='1',
                                               PYTHONDEVMODE='')
        self.assertEqual(stdout.rstrip(), b'True')

        sts, stdout, stderr = assert_python_ok('-E', '-c', code,
                                               PYTHONASYNCIODEBUG='1',
                                               PYTHONDEVMODE='')
        self.assertEqual(stdout.rstrip(), b'False')

        # -X dev
        sts, stdout, stderr = assert_python_ok('-E', '-X', 'dev',
                                               '-c', code)
        self.assertEqual(stdout.rstrip(), b'True')
class FutureGatherTests(GatherTestsBase, test_utils.TestCase):
    def wrap_futures(self, *futures):
        # Future-based variant: gather the futures directly.
        return futures

    def _gather(self, *args, **kwargs):
        return asyncio.gather(*args, **kwargs)

    def test_constructor_empty_sequence_without_loop(self):
        # gather() of nothing outside a loop warns (implicit-loop
        # deprecation) and then fails for lack of an event loop.
        with self.assertWarns(DeprecationWarning) as cm:
            with self.assertRaises(RuntimeError):
                asyncio.gather()
        self.assertEqual(cm.warnings[0].filename, __file__)

    def test_constructor_empty_sequence_use_running_loop(self):
        # gather() of nothing inside a running loop yields an
        # already-completing future with an empty result list.
        async def gather():
            return asyncio.gather()
        fut = self.one_loop.run_until_complete(gather())
        self.assertIsInstance(fut, asyncio.Future)
        self.assertIs(fut._loop, self.one_loop)
        self._run_loop(self.one_loop)
        self.assertTrue(fut.done())
        self.assertEqual(fut.result(), [])

    def test_constructor_empty_sequence_use_global_loop(self):
        # Deprecated in 3.10
        asyncio.set_event_loop(self.one_loop)
        self.addCleanup(asyncio.set_event_loop, None)
        with self.assertWarns(DeprecationWarning) as cm:
            fut = asyncio.gather()
        self.assertEqual(cm.warnings[0].filename, __file__)
        self.assertIsInstance(fut, asyncio.Future)
        self.assertIs(fut._loop, self.one_loop)
        self._run_loop(self.one_loop)
        self.assertTrue(fut.done())
        self.assertEqual(fut.result(), [])

    def test_constructor_heterogenous_futures(self):
        # Futures from different loops cannot be gathered together.
        fut1 = self.one_loop.create_future()
        fut2 = self.other_loop.create_future()
        with self.assertRaises(ValueError):
            asyncio.gather(fut1, fut2)

    def test_constructor_homogenous_futures(self):
        # Futures from the same (non-default) loop gather fine, repeatedly.
        children = [self.other_loop.create_future() for i in range(3)]
        fut = asyncio.gather(*children)
        self.assertIs(fut._loop, self.other_loop)
        self._run_loop(self.other_loop)
        self.assertFalse(fut.done())
        fut = asyncio.gather(*children)
        self.assertIs(fut._loop, self.other_loop)
        self._run_loop(self.other_loop)
        self.assertFalse(fut.done())

    def test_one_cancellation(self):
        # A cancelled child (without return_exceptions) finishes the gather
        # with CancelledError, but the gather itself is not "cancelled".
        a, b, c, d, e = [self.one_loop.create_future() for i in range(5)]
        fut = asyncio.gather(a, b, c, d, e)
        cb = test_utils.MockCallback()
        fut.add_done_callback(cb)
        a.set_result(1)
        b.cancel()
        self._run_loop(self.one_loop)
        self.assertTrue(fut.done())
        cb.assert_called_once_with(fut)
        self.assertFalse(fut.cancelled())
        self.assertIsInstance(fut.exception(), asyncio.CancelledError)
        # Does nothing
        c.set_result(3)
        d.cancel()
        e.set_exception(RuntimeError())
        e.exception()
def test_result_exception_one_cancellation(self):
a, b, c, d, e, f = [self.one_loop.create_future()
for i in range(6)]
fut = asyncio.gather(a, b, c, d, e, f, return_exceptions=True)
cb = test_utils.MockCallback()
fut.add_done_callback(cb)
a.set_result(1)
zde = ZeroDivisionError()
b.set_exception(zde)
c.cancel()
self._run_loop(self.one_loop)
self.assertFalse(fut.done())
d.set_result(3)
e.cancel()
rte = RuntimeError()
f.set_exception(rte)
res = self.one_loop.run_until_complete(fut)
self.assertIsInstance(res[2], asyncio.CancelledError)
self.assertIsInstance(res[4], asyncio.CancelledError)
res[2] = res[4] = None
self.assertEqual(res, [1, zde, None, 3, None, rte])
cb.assert_called_once_with(fut)
class CoroutineGatherTests(GatherTestsBase, test_utils.TestCase):
    """gather() behaviour when the awaitables are coroutine objects."""
    def wrap_futures(self, *futures):
        # Wrap each future in a coroutine that simply awaits it, so the
        # shared base-class tests exercise the coroutine code path.
        coros = []
        for fut in futures:
            async def coro(fut=fut):
                return await fut
            coros.append(coro())
        return coros
    def _gather(self, *args, **kwargs):
        # gather() must be called while a loop is running to pick it up.
        async def coro():
            return asyncio.gather(*args, **kwargs)
        return self.one_loop.run_until_complete(coro())
    def test_constructor_without_loop(self):
        """gather() over coroutines with no running loop warns, then raises."""
        async def coro():
            return 'abc'
        gen1 = coro()
        self.addCleanup(gen1.close)
        gen2 = coro()
        self.addCleanup(gen2.close)
        with self.assertWarns(DeprecationWarning) as cm:
            with self.assertRaises(RuntimeError):
                asyncio.gather(gen1, gen2)
        self.assertEqual(cm.warnings[0].filename, __file__)
    def test_constructor_use_running_loop(self):
        """gather() inside a coroutine binds to the running loop."""
        async def coro():
            return 'abc'
        gen1 = coro()
        gen2 = coro()
        async def gather():
            return asyncio.gather(gen1, gen2)
        fut = self.one_loop.run_until_complete(gather())
        self.assertIs(fut._loop, self.one_loop)
        self.one_loop.run_until_complete(fut)
    def test_constructor_use_global_loop(self):
        # Deprecated in 3.10
        async def coro():
            return 'abc'
        asyncio.set_event_loop(self.other_loop)
        self.addCleanup(asyncio.set_event_loop, None)
        gen1 = coro()
        gen2 = coro()
        with self.assertWarns(DeprecationWarning) as cm:
            fut = asyncio.gather(gen1, gen2)
        self.assertEqual(cm.warnings[0].filename, __file__)
        self.assertIs(fut._loop, self.other_loop)
        self.other_loop.run_until_complete(fut)
    def test_duplicate_coroutines(self):
        """Passing the same coroutine object several times dedups correctly."""
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def coro(s):
                return s
        c = coro('abc')
        fut = self._gather(c, c, coro('def'), c)
        self._run_loop(self.one_loop)
        # The duplicate positions all receive the single shared result.
        self.assertEqual(fut.result(), ['abc', 'abc', 'def', 'abc'])
    def test_cancellation_broadcast(self):
        # Cancelling outer() cancels all children.
        proof = 0
        waiter = self.one_loop.create_future()
        async def inner():
            nonlocal proof
            await waiter
            proof += 1
        child1 = asyncio.ensure_future(inner(), loop=self.one_loop)
        child2 = asyncio.ensure_future(inner(), loop=self.one_loop)
        gatherer = None
        async def outer():
            nonlocal proof, gatherer
            gatherer = asyncio.gather(child1, child2)
            await gatherer
            proof += 100
        f = asyncio.ensure_future(outer(), loop=self.one_loop)
        test_utils.run_briefly(self.one_loop)
        self.assertTrue(f.cancel())
        with self.assertRaises(asyncio.CancelledError):
            self.one_loop.run_until_complete(f)
        # The gather future was already cancelled, so cancel() returns False.
        self.assertFalse(gatherer.cancel())
        self.assertTrue(waiter.cancelled())
        self.assertTrue(child1.cancelled())
        self.assertTrue(child2.cancelled())
        test_utils.run_briefly(self.one_loop)
        # Neither the children nor outer() ran to completion.
        self.assertEqual(proof, 0)
    def test_exception_marking(self):
        # Test for the first line marked "Mark exception retrieved."
        async def inner(f):
            await f
            raise RuntimeError('should not be ignored')
        a = self.one_loop.create_future()
        b = self.one_loop.create_future()
        async def outer():
            await asyncio.gather(inner(a), inner(b))
        f = asyncio.ensure_future(outer(), loop=self.one_loop)
        test_utils.run_briefly(self.one_loop)
        a.set_result(None)
        test_utils.run_briefly(self.one_loop)
        b.set_result(None)
        test_utils.run_briefly(self.one_loop)
        self.assertIsInstance(f.exception(), RuntimeError)
class RunCoroutineThreadsafeTests(test_utils.TestCase):
    """Tests for asyncio.run_coroutine_threadsafe (called from another thread)."""
    def setUp(self):
        super().setUp()
        self.loop = asyncio.new_event_loop()
        self.set_event_loop(self.loop) # Will cleanup properly
    async def add(self, a, b, fail=False, cancel=False):
        """Sample coroutine: return a + b, or raise/cancel on demand."""
        await asyncio.sleep(0.05)
        if fail:
            raise RuntimeError("Fail!")
        if cancel:
            asyncio.current_task(self.loop).cancel()
            await asyncio.sleep(0)
        return a + b
    def target(self, fail=False, cancel=False, timeout=None,
               advance_coro=False):
        """Run add() on self.loop from the calling (executor) thread."""
        coro = self.add(1, 2, fail=fail, cancel=cancel)
        future = asyncio.run_coroutine_threadsafe(coro, self.loop)
        if advance_coro:
            # this is for test_run_coroutine_threadsafe_task_factory_exception;
            # otherwise it spills errors and breaks **other** unittests, since
            # 'target' is interacting with threads.
            # With this call, `coro` will be advanced, so that
            # CoroWrapper.__del__ won't do anything when asyncio tests run
            self.loop.call_soon_threadsafe(coro.send, None)
        try:
            return future.result(timeout)
        finally:
            # Make sure a still-pending future is cancelled on the way out.
            future.done() or future.cancel()
    def test_run_coroutine_threadsafe(self):
        """Happy path: the scheduled coroutine's result is propagated."""
        future = self.loop.run_in_executor(None, self.target)
        result = self.loop.run_until_complete(future)
        self.assertEqual(result, 3)
    def test_run_coroutine_threadsafe_with_exception(self):
        """An exception raised in the coroutine surfaces in the caller."""
        future = self.loop.run_in_executor(None, self.target, True)
        with self.assertRaises(RuntimeError) as exc_context:
            self.loop.run_until_complete(future)
        self.assertIn("Fail!", exc_context.exception.args)
    def test_run_coroutine_threadsafe_with_timeout(self):
        """future.result(0) times out and cancels the pending task."""
        callback = lambda: self.target(timeout=0)
        future = self.loop.run_in_executor(None, callback)
        with self.assertRaises(asyncio.TimeoutError):
            self.loop.run_until_complete(future)
        test_utils.run_briefly(self.loop)
        # No task may be left pending after the timeout-driven cancel.
        for task in asyncio.all_tasks(self.loop):
            self.assertTrue(task.done())
    def test_run_coroutine_threadsafe_task_cancelled(self):
        """Self-cancellation inside the coroutine propagates to the caller."""
        callback = lambda: self.target(cancel=True)
        future = self.loop.run_in_executor(None, callback)
        with self.assertRaises(asyncio.CancelledError):
            self.loop.run_until_complete(future)
    def test_run_coroutine_threadsafe_task_factory_exception(self):
        """A raising task factory reaches both the caller and the handler."""
        def task_factory(loop, coro):
            raise NameError
        run = self.loop.run_in_executor(
            None, lambda: self.target(advance_coro=True))
        # Set exception handler
        callback = test_utils.MockCallback()
        self.loop.set_exception_handler(callback)
        # Set corrupted task factory
        self.addCleanup(self.loop.set_task_factory,
                        self.loop.get_task_factory())
        self.loop.set_task_factory(task_factory)
        # Run event loop
        with self.assertRaises(NameError) as exc_context:
            self.loop.run_until_complete(run)
        # Check exceptions
        self.assertEqual(len(callback.call_args_list), 1)
        (loop, context), kwargs = callback.call_args
        self.assertEqual(context['exception'], exc_context.exception)
class SleepTests(test_utils.TestCase):
    """Tests for asyncio.sleep."""
    def setUp(self):
        super().setUp()
        self.loop = asyncio.new_event_loop()
        self.set_event_loop(self.loop)
    def tearDown(self):
        self.loop.close()
        self.loop = None
        super().tearDown()
    def test_sleep_zero(self):
        """sleep(0) yields to the loop exactly once and returns `result`."""
        result = 0
        def inc_result(num):
            nonlocal result
            result += num
        async def coro():
            self.loop.call_soon(inc_result, 1)
            # The callback must not have run before we yield.
            self.assertEqual(result, 0)
            num = await asyncio.sleep(0, result=10)
            self.assertEqual(result, 1) # inc'ed by call_soon
            inc_result(num)
        self.loop.run_until_complete(coro())
        self.assertEqual(result, 11)
class WaitTests(test_utils.TestCase):
    """Deprecation behaviour of asyncio.wait with raw coroutines."""
    def setUp(self):
        super().setUp()
        self.loop = asyncio.new_event_loop()
        self.set_event_loop(self.loop)
    def tearDown(self):
        self.loop.close()
        self.loop = None
        super().tearDown()
    def test_coro_is_deprecated_in_wait(self):
        """Passing bare coroutines (not Tasks) to wait() warns."""
        with self.assertWarns(DeprecationWarning):
            self.loop.run_until_complete(
                asyncio.wait([coroutine_function()]))
        task = self.loop.create_task(coroutine_function())
        # Mixing a Task with a bare coroutine still triggers the warning.
        with self.assertWarns(DeprecationWarning):
            self.loop.run_until_complete(
                asyncio.wait([task, coroutine_function()]))
class CompatibilityTests(test_utils.TestCase):
    """Interop between old-style @asyncio.coroutine and native coroutines."""
    def setUp(self):
        super().setUp()
        self.loop = asyncio.new_event_loop()
        self.set_event_loop(self.loop)
    def tearDown(self):
        self.loop.close()
        self.loop = None
        super().tearDown()
    def test_yield_from_awaitable(self):
        """An old-style coroutine can `yield from` an awaitable."""
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def coro():
                yield from asyncio.sleep(0)
                return 'ok'
        result = self.loop.run_until_complete(coro())
        self.assertEqual('ok', result)
    def test_await_old_style_coro(self):
        """Native code can await (gather) old-style coroutines."""
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def coro1():
                return 'ok1'
        with self.assertWarns(DeprecationWarning):
            @asyncio.coroutine
            def coro2():
                yield from asyncio.sleep(0)
                return 'ok2'
        async def inner():
            return await asyncio.gather(coro1(), coro2())
        result = self.loop.run_until_complete(inner())
        self.assertEqual(['ok1', 'ok2'], result)
    def test_debug_mode_interop(self):
        # Run the mixed old/new coroutine program in a child interpreter
        # with PYTHONASYNCIODEBUG=1; it must exit cleanly.
        code = textwrap.dedent("""
            import asyncio
            async def native_coro():
                pass
            @asyncio.coroutine
            def old_style_coro():
                yield from native_coro()
            asyncio.run(old_style_coro())
        """)
        assert_python_ok("-Wignore::DeprecationWarning", "-c", code,
                         PYTHONASYNCIODEBUG="1")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| true | true |
f7345ed683d8e259c5cd6ab20ecb5ca4fbafbe94 | 6,149 | py | Python | lantz/drivers/thorlabs/cld101xlp.py | 0xInfty/lantz-drivers | 8ea49bbe247547356533bbbdb4af09581f9b44b5 | [
"BSD-3-Clause"
] | 6 | 2016-04-13T12:59:18.000Z | 2020-06-24T17:43:04.000Z | lantz/drivers/thorlabs/cld101xlp.py | 0xInfty/lantz-drivers | 8ea49bbe247547356533bbbdb4af09581f9b44b5 | [
"BSD-3-Clause"
] | null | null | null | lantz/drivers/thorlabs/cld101xlp.py | 0xInfty/lantz-drivers | 8ea49bbe247547356533bbbdb4af09581f9b44b5 | [
"BSD-3-Clause"
] | 6 | 2015-12-14T19:30:36.000Z | 2020-06-29T21:16:01.000Z | from lantz import Feat, DictFeat, Action
from lantz.errors import InstrumentError
from lantz.messagebased import MessageBasedDriver
from pint import UnitRegistry
from time import sleep
class CLD101XLP(MessageBasedDriver):
    """Thorlabs CLD101xLP compact laser-diode / TEC controller.

    Talks SCPI over a VISA message-based resource.  Output channel 1 is the
    laser diode (LD); channel 2 is the thermoelectric cooler (TEC).
    """

    DEFAULTS = {
        'COMMON': {
            'write_termination': '\n',
            'read_termination': '\n'
        }
    }

    # Pause (seconds) inserted after every write so the instrument can
    # process one command before the next arrives.
    COM_DELAY = 0.2

    ureg = UnitRegistry()

    def write(self, *args, **kwargs):
        """Send a command, then wait COM_DELAY seconds."""
        super().write(*args, **kwargs)
        sleep(self.COM_DELAY)
        return

    @Feat(read_once=True)
    def idn(self):
        """Instrument identification string (*IDN? reply)."""
        return self.query('*IDN?')

    @Feat()
    def key_locked(self):
        """True if the front-panel key-lock protection has tripped."""
        return bool(int(self.query('OUTP:PROT:KEYL:TRIP?')))

    @Feat(values={'C': 'CEL', 'F': 'FAR', 'K': 'K'})
    def temperature_unit(self):
        """Temperature unit used by the instrument ('C', 'F' or 'K')."""
        self.t_unit = self.query('UNIT:TEMP?')
        return self.t_unit

    @temperature_unit.setter
    def temperature_unit(self, value):
        self.write('UNIT:TEMP {}'.format(value))

    @Feat()
    def temperature(self):
        """Measured TEC temperature (in the current temperature unit)."""
        return float(self.query('MEAS:SCAL:TEMP?'))

    @Feat()
    def temperature_setpoint(self):
        """TEC temperature setpoint (in the current temperature unit)."""
        return float(self.query('SOUR2:TEMP?'))

    @Action()
    def read_error_queue(self):
        """Print and drain the instrument's error queue."""
        no_error = "+0,'No error'"
        # BUG FIX: previously queried via the module-level name ``inst``
        # (only bound in the __main__ demo), raising NameError at runtime.
        error = self.query('SYST:ERR:NEXT?')
        while(error != no_error):
            print(error)
            error = self.query('SYST:ERR:NEXT?')

    @Feat(values={False: '0', True: '1'})
    def ld_state(self):
        """Laser-diode output enable state."""
        return self.query('OUTP1:STAT?')

    @ld_state.setter
    def ld_state(self, value):
        self.write('OUTP1:STAT {}'.format(value))

    @Feat(units='A', limits=(0, 0.9))
    def ld_current_setpoint(self):
        """Laser-diode current setpoint."""
        return float(self.query('SOUR:CURR?'))

    @Feat(units='A', limits=(0.0, 0.9))
    def ld_current(self):
        """Measured laser-diode current."""
        return float(self.query('MEAS:CURR?'))

    @ld_current.setter
    def ld_current(self, value):
        # BUG FIX: was ``inst.write(...)``, which referenced the __main__
        # demo variable instead of this driver instance.
        self.write('SOUR:CURR {:.5f}'.format(value))

    @DictFeat(units='W', keys={'photodiode', 'pd', 'thermopile', 'tp', 'power meter'})
    def ld_power(self, method):
        """Optical power measured via the photodiode (ch 2) or thermopile (ch 3)."""
        query = 'MEAS:POW{}?'
        ml = method.lower()
        if ml in {'photodiode', 'pd'}:
            method_val = 2
        elif ml in {'thermopile', 'tp', 'power meter'}:
            # DictFeat's ``keys`` restricts inputs, so one branch always fires.
            method_val = 3
        return float(self.query(query.format(method_val)))

    @Feat(values={False: '0', True: '1'})
    def tec_state(self):
        """TEC output enable state."""
        return self.query('OUTP2:STAT?')

    @tec_state.setter
    def tec_state(self, value):
        self.write('OUTP2:STAT {}'.format(value))

    @Action()
    def turn_on_seq(self, temp_error=0.05, current_error=0.005*ureg.milliamp):
        """Safely enable the laser: TEC first, then the LD.

        Busy-waits until the temperature is within ``temp_error`` of its
        setpoint, then until the LD current reaches its setpoint.
        NOTE(review): the current comparison uses bare magnitudes, so
        ``current_error`` is effectively interpreted in amperes — confirm
        the intended tolerance.
        """
        if self.ld_state == 1:
            print("Laser is already ON!")
            return
        # 1. TEC ON
        self.tec_state = 1
        # 2. Wait for the temperature to settle at the setpoint.
        setpoint = self.temperature_setpoint
        while(abs(setpoint-self.temperature)>temp_error):pass
        # 3. LD ON
        self.ld_state = 1
        # 4. Wait for the LD current to reach the setpoint.
        setpoint = self.ld_current_setpoint
        while(abs(setpoint.m-self.ld_current.m)>current_error.m):pass

    @Action()
    def turn_off_seq(self, current_error=0.005*ureg.milliamp):
        """Safely disable the laser: LD first, then the TEC."""
        # 1. LD OFF
        self.ld_state = 0
        # 2. Wait for the LD current to decay to ~0.
        while(abs(self.ld_current.m) > current_error.m):pass
        # 3. TEC OFF
        self.tec_state = 0
# Manual smoke test: query (and optionally drive) each listed controller.
if __name__ == '__main__':
    import logging
    import sys
    from lantz.log import log_to_screen
    log_to_screen(logging.CRITICAL)
    #res_name = sys.argv[1]
    # Placeholder VISA resource names — replace SERIALNUMBER before use.
    res_names = ['USB0::0x1313::0x804F::SERIALNUMBER::INSTR', 'USB0::0x1313::0x804F::SERIALNUMBER::INSTR']
    print('update res_names!')
    fmt_str = "{:<30}|{:>30}"
    on_time = 20
    for resource in res_names:
        with CLD101XLP(resource) as inst:
        # with CLD101XLP(res_name) as inst:
            print(fmt_str.format("Temperature unit", inst.temperature_unit))
            print(fmt_str.format("Device name", inst.query('*IDN?')))
            print(fmt_str.format("LD state", inst.ld_state))
            print(fmt_str.format("TEC state", inst.tec_state))
            print(fmt_str.format("Temp setpoint", inst.temperature_setpoint))
#            inst.ld_current = .0885
            print(fmt_str.format("LD current", inst.ld_current))
            print(fmt_str.format("LD current setpoint", inst.ld_current_setpoint))
            print(fmt_str.format("LD state", inst.ld_state))
            print(fmt_str.format("TEC state", inst.tec_state))
            print(fmt_str.format("LD temperature", inst.temperature))
#
#            print("Turning on TEC and LD...")
#            inst.turn_on_seq()
#            #print(fmt_str.format("LD power (via photodiode)", inst.ld_power['pd']))
#            #print(fmt_str.format("LD power (via thermopile)", inst.ld_power['tp']))
            print(fmt_str.format("LD state", inst.ld_state))
            print(fmt_str.format("TEC state", inst.tec_state))
            print(fmt_str.format("LD temperature", inst.temperature))
#            print(fmt_str.format("LD current", inst.ld_current))
#            print(fmt_str.format("LD current setpoint", inst.ld_current_setpoint))
#
#
#            inst.ld_current = .025
#            print(fmt_str.format("LD current", inst.ld_current))
#            print(fmt_str.format("LD current setpoint", inst.ld_current_setpoint))
#            #print(fmt_str.format("LD power (via photodiode)", inst.ld_power['pd']))
#            #print(fmt_str.format("LD power (via thermopile)", inst.ld_power['tp']))
#            sleep(on_time)
#
#            print("Turning off TEC and LD...")
#            inst.turn_off_seq()
#            print(fmt_str.format("LD state", inst.ld_state))
#            print(fmt_str.format("TEC state", inst.tec_state))
#            print(fmt_str.format("LD current", inst.ld_current))
#            print(fmt_str.format("LD current setpoint", inst.ld_current_setpoint))
| 30.59204 | 106 | 0.595381 | from lantz import Feat, DictFeat, Action
from lantz.errors import InstrumentError
from lantz.messagebased import MessageBasedDriver
from pint import UnitRegistry
from time import sleep
class CLD101XLP(MessageBasedDriver):
DEFAULTS = {
'COMMON': {
'write_termination': '\n',
'read_termination': '\n'
}
}
COM_DELAY = 0.2
ureg = UnitRegistry()
def write(self, *args, **kwargs):
super().write(*args, **kwargs)
sleep(self.COM_DELAY)
return
@Feat(read_once=True)
def idn(self):
return self.query('*IDN?')
@Feat()
def key_locked(self):
return bool(int(self.query('OUTP:PROT:KEYL:TRIP?')))
@Feat(values={'C': 'CEL', 'F': 'FAR', 'K': 'K'})
def temperature_unit(self):
self.t_unit = self.query('UNIT:TEMP?')
return self.t_unit
@temperature_unit.setter
def temperature_unit(self, value):
self.write('UNIT:TEMP {}'.format(value))
@Feat()
def temperature(self):
return float(self.query('MEAS:SCAL:TEMP?'))
@Feat()
def temperature_setpoint(self):
return float(self.query('SOUR2:TEMP?'))
@Action()
def read_error_queue(self):
no_error = "+0,'No error'"
error = inst.query('SYST:ERR:NEXT?')
while(error != no_error):
print(error)
error = inst.query('SYST:ERR:NEXT?')
@Feat(values={False: '0', True: '1'})
def ld_state(self):
return self.query('OUTP1:STAT?')
@ld_state.setter
def ld_state(self, value):
self.write('OUTP1:STAT {}'.format(value))
@Feat(units='A', limits=(0,0.9))
def ld_current_setpoint(self):
return float(self.query('SOUR:CURR?'))
@Feat(units='A', limits=(0.0, 0.9))
def ld_current(self):
return float(self.query('MEAS:CURR?'))
@ld_current.setter
def ld_current(self, value):
inst.write('SOUR:CURR {:.5f}'.format(value))
@DictFeat(units='W', keys={'photodiode', 'pd', 'thermopile', 'tp', 'power meter'})
def ld_power(self, method):
query = 'MEAS:POW{}?'
ml = method.lower()
if ml in {'photodiode', 'pd'}:
method_val = 2
elif ml in {'thermopile', 'tp', 'power meter'}:
method_val = 3
return float(self.query(query.format(method_val)))
@Feat(values={False: '0', True: '1'})
def tec_state(self):
return self.query('OUTP2:STAT?')
@tec_state.setter
def tec_state(self, value):
self.write('OUTP2:STAT {}'.format(value))
@Action()
def turn_on_seq(self, temp_error=0.05, current_error=0.005*ureg.milliamp):
if self.ld_state == 1:
print("Laser is already ON!")
return
self.tec_state = 1
setpoint = self.temperature_setpoint
while(abs(setpoint-self.temperature)>temp_error):pass
self.ld_state = 1
setpoint = self.ld_current_setpoint
while(abs(setpoint.m-self.ld_current.m)>current_error.m):pass
@Action()
def turn_off_seq(self, current_error=0.005*ureg.milliamp):
self.ld_state = 0
while(abs(self.ld_current.m) > current_error.m):pass
self.tec_state = 0
if __name__ == '__main__':
import logging
import sys
from lantz.log import log_to_screen
log_to_screen(logging.CRITICAL)
res_names = ['USB0::0x1313::0x804F::SERIALNUMBER::INSTR', 'USB0::0x1313::0x804F::SERIALNUMBER::INSTR']
print('update res_names!')
fmt_str = "{:<30}|{:>30}"
on_time = 20
for resource in res_names:
with CLD101XLP(resource) as inst:
print(fmt_str.format("Temperature unit", inst.temperature_unit))
print(fmt_str.format("Device name", inst.query('*IDN?')))
print(fmt_str.format("LD state", inst.ld_state))
print(fmt_str.format("TEC state", inst.tec_state))
print(fmt_str.format("Temp setpoint", inst.temperature_setpoint))
print(fmt_str.format("LD current", inst.ld_current))
print(fmt_str.format("LD current setpoint", inst.ld_current_setpoint))
print(fmt_str.format("LD state", inst.ld_state))
print(fmt_str.format("TEC state", inst.tec_state))
print(fmt_str.format("LD temperature", inst.temperature))
int(fmt_str.format("LD temperature", inst.temperature))
| true | true |
f7345ef0369ef2031d2687b9f2fb079fc03cd7e7 | 2,132 | py | Python | vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/c644ec62c585_nsxv3_add_nsx_dhcp_service_tables.py | mail2nsrajesh/vmware-nsx | 63154b510b9fd95c10fffae86bfc49073cafeb40 | [
"Apache-2.0"
] | null | null | null | vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/c644ec62c585_nsxv3_add_nsx_dhcp_service_tables.py | mail2nsrajesh/vmware-nsx | 63154b510b9fd95c10fffae86bfc49073cafeb40 | [
"Apache-2.0"
] | null | null | null | vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/c644ec62c585_nsxv3_add_nsx_dhcp_service_tables.py | mail2nsrajesh/vmware-nsx | 63154b510b9fd95c10fffae86bfc49073cafeb40 | [
"Apache-2.0"
] | 1 | 2019-06-21T18:07:53.000Z | 2019-06-21T18:07:53.000Z | # Copyright 2016 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""NSXv3 add nsx_service_bindings and nsx_dhcp_bindings tables
Revision ID: c644ec62c585
Revises: c288bb6a7252
Create Date: 2016-04-29 23:19:39.523196
"""
# revision identifiers, used by Alembic.
revision = 'c644ec62c585'
down_revision = 'c288bb6a7252'
from alembic import op
import sqlalchemy as sa
from vmware_nsxlib.v3 import nsx_constants
nsx_service_type_enum = sa.Enum(
nsx_constants.SERVICE_DHCP,
name='neutron_nsx_service_bindings_service_type')
def upgrade():
    """Create the NSXv3 service- and DHCP-binding tables."""
    # One row per (network, service type): which NSX backend service
    # (currently only DHCP) serves a given neutron network.
    op.create_table(
        'neutron_nsx_service_bindings',
        sa.Column('network_id', sa.String(36), nullable=False),
        sa.Column('port_id', sa.String(36), nullable=True),
        sa.Column('nsx_service_type', nsx_service_type_enum, nullable=False),
        sa.Column('nsx_service_id', sa.String(36), nullable=False),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('network_id', 'nsx_service_type'))
    # Per-port static DHCP bindings pushed to the NSX DHCP service.
    op.create_table(
        'neutron_nsx_dhcp_bindings',
        sa.Column('port_id', sa.String(36), nullable=False),
        sa.Column('subnet_id', sa.String(36), nullable=False),
        sa.Column('ip_address', sa.String(64), nullable=False),
        sa.Column('nsx_service_id', sa.String(36), nullable=False),
        sa.Column('nsx_binding_id', sa.String(36), nullable=False),
        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('port_id', 'nsx_binding_id'))
| 36.758621 | 79 | 0.70075 |
revision = 'c644ec62c585'
down_revision = 'c288bb6a7252'
from alembic import op
import sqlalchemy as sa
from vmware_nsxlib.v3 import nsx_constants
nsx_service_type_enum = sa.Enum(
nsx_constants.SERVICE_DHCP,
name='neutron_nsx_service_bindings_service_type')
def upgrade():
op.create_table(
'neutron_nsx_service_bindings',
sa.Column('network_id', sa.String(36), nullable=False),
sa.Column('port_id', sa.String(36), nullable=True),
sa.Column('nsx_service_type', nsx_service_type_enum, nullable=False),
sa.Column('nsx_service_id', sa.String(36), nullable=False),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id', 'nsx_service_type'))
op.create_table(
'neutron_nsx_dhcp_bindings',
sa.Column('port_id', sa.String(36), nullable=False),
sa.Column('subnet_id', sa.String(36), nullable=False),
sa.Column('ip_address', sa.String(64), nullable=False),
sa.Column('nsx_service_id', sa.String(36), nullable=False),
sa.Column('nsx_binding_id', sa.String(36), nullable=False),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('port_id', 'nsx_binding_id'))
| true | true |
f7345f2923e39ea9f4a7d509e635b842f0a95693 | 314 | py | Python | items.py | haowg/scrapy_api | 06930e58a363de3007fb5550f8c1da7a8effd094 | [
"Apache-2.0"
] | null | null | null | items.py | haowg/scrapy_api | 06930e58a363de3007fb5550f8c1da7a8effd094 | [
"Apache-2.0"
] | null | null | null | items.py | haowg/scrapy_api | 06930e58a363de3007fb5550f8c1da7a8effd094 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
class ResumeItem(scrapy.Item):
time = scrapy.Field()
org_name = scrapy.Field()
job_title = scrapy.Field()
location = scrapy.Field()
product_name = scrapy.Field()
company_name = scrapy.Field()
person_name = scrapy.Field()
id = scrapy.Field()
| 20.933333 | 33 | 0.640127 |
import scrapy
class ResumeItem(scrapy.Item):
time = scrapy.Field()
org_name = scrapy.Field()
job_title = scrapy.Field()
location = scrapy.Field()
product_name = scrapy.Field()
company_name = scrapy.Field()
person_name = scrapy.Field()
id = scrapy.Field()
| true | true |
f73460d20d6a0dad6b30903362fd64dcff72ef26 | 1,177 | py | Python | django_webapp/views.py | Alexmhack/bootstrap_django | 8a07c980834f3cd5e63fde19b5a989f1f2baa48f | [
"Apache-2.0"
] | null | null | null | django_webapp/views.py | Alexmhack/bootstrap_django | 8a07c980834f3cd5e63fde19b5a989f1f2baa48f | [
"Apache-2.0"
] | null | null | null | django_webapp/views.py | Alexmhack/bootstrap_django | 8a07c980834f3cd5e63fde19b5a989f1f2baa48f | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render, redirect
from django.template.loader import get_template
from django.core.mail import EmailMessage
def home_page(request):
return render(request, 'bootstrap/home_page.html')
def features_page(request):
return render(request, 'bootstrap/features_page.html')
def pricing_page(request):
return render(request, 'bootstrap/pricing_page.html')
def about_page(request):
return render(request, 'bootstrap/about_page.html')
def contact_page(request):
if request.method == 'POST':
user_name = request.POST.get('name')
user_email = request.POST.get('email')
user_subject = request.POST.get('subject')
user_message = request.POST.get('message')
template = get_template('contact_form.txt')
context = {
'contact_name': user_name,
'contact_email': user_email,
'contact_subject': user_subject,
'contact_message': user_message
}
content = template.render(context)
email = EmailMessage(
"New contact form submission",
content,
"FirebaseDjangoApp",
['78030psg@gmail.com'],
headers={'Reply-To': user_email})
email.send()
return redirect('contact_page')
return render(request, 'contact.html') | 24.520833 | 55 | 0.742566 | from django.shortcuts import render, redirect
from django.template.loader import get_template
from django.core.mail import EmailMessage
def home_page(request):
return render(request, 'bootstrap/home_page.html')
def features_page(request):
return render(request, 'bootstrap/features_page.html')
def pricing_page(request):
return render(request, 'bootstrap/pricing_page.html')
def about_page(request):
return render(request, 'bootstrap/about_page.html')
def contact_page(request):
if request.method == 'POST':
user_name = request.POST.get('name')
user_email = request.POST.get('email')
user_subject = request.POST.get('subject')
user_message = request.POST.get('message')
template = get_template('contact_form.txt')
context = {
'contact_name': user_name,
'contact_email': user_email,
'contact_subject': user_subject,
'contact_message': user_message
}
content = template.render(context)
email = EmailMessage(
"New contact form submission",
content,
"FirebaseDjangoApp",
['78030psg@gmail.com'],
headers={'Reply-To': user_email})
email.send()
return redirect('contact_page')
return render(request, 'contact.html') | true | true |
f73460e62cafe319971f36bb5ab832036fec4708 | 4,158 | py | Python | src/twisted/trial/_dist/test/test_workertrial.py | yan12125/twisted | 97e9d612cff41b5a83a6a6ddd2c37f616428cfde | [
"MIT",
"Unlicense"
] | null | null | null | src/twisted/trial/_dist/test/test_workertrial.py | yan12125/twisted | 97e9d612cff41b5a83a6a6ddd2c37f616428cfde | [
"MIT",
"Unlicense"
] | null | null | null | src/twisted/trial/_dist/test/test_workertrial.py | yan12125/twisted | 97e9d612cff41b5a83a6a6ddd2c37f616428cfde | [
"MIT",
"Unlicense"
] | null | null | null | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.trial._dist.workertrial}.
"""
import errno
import sys
from io import BytesIO
from twisted.protocols.amp import AMP
from twisted.test.proto_helpers import StringTransport
from twisted.trial._dist import (
_WORKER_AMP_STDIN,
_WORKER_AMP_STDOUT,
managercommands,
workercommands,
workertrial,
)
from twisted.trial._dist.workertrial import WorkerLogObserver, main
from twisted.trial.unittest import TestCase
class FakeAMP(AMP):
    """
    A minimal AMP protocol subclass used as a stand-in client in tests.
    """
class WorkerLogObserverTests(TestCase):
    """
    Tests for L{WorkerLogObserver}.
    """
    def test_emit(self):
        """
        L{WorkerLogObserver} forwards data to L{managercommands.TestWrite}.
        """
        calls = []
        # Records every callRemote invocation instead of sending it.
        class FakeClient:
            def callRemote(self, method, **kwargs):
                calls.append((method, kwargs))
        observer = WorkerLogObserver(FakeClient())
        observer.emit({"message": ["Some log"]})
        self.assertEqual(calls, [(managercommands.TestWrite, {"out": "Some log"})])
class MainTests(TestCase):
    """
    Tests for L{main}.
    """
    def setUp(self):
        # In-memory stand-ins for the worker's stdin/stdout AMP streams.
        self.readStream = BytesIO()
        self.writeStream = BytesIO()
        # Prevent the real logging system from being started.
        self.patch(
            workertrial, "startLoggingWithObserver", self.startLoggingWithObserver
        )
        self.addCleanup(setattr, sys, "argv", sys.argv)
        sys.argv = ["trial"]
    def fdopen(self, fd, mode=None):
        """
        Fake C{os.fdopen} implementation which returns C{self.readStream} for
        the stdin fd and C{self.writeStream} for the stdout fd.
        """
        if fd == _WORKER_AMP_STDIN:
            self.assertEqual("rb", mode)
            return self.readStream
        elif fd == _WORKER_AMP_STDOUT:
            self.assertEqual("wb", mode)
            return self.writeStream
        else:
            raise AssertionError(f"Unexpected fd {fd!r}")
    def startLoggingWithObserver(self, emit, setStdout):
        """
        Override C{startLoggingWithObserver} for not starting logging.
        """
        self.assertFalse(setStdout)
    def test_empty(self):
        """
        If no data is ever written, L{main} exits without writing data out.
        """
        main(self.fdopen)
        self.assertEqual(b"", self.writeStream.getvalue())
    def test_forwardCommand(self):
        """
        L{main} forwards data from its input stream to a L{WorkerProtocol}
        instance which writes data to the output stream.
        """
        client = FakeAMP()
        clientTransport = StringTransport()
        client.makeConnection(clientTransport)
        client.callRemote(workercommands.Run, testCase="doesntexist")
        # Feed the serialized AMP command to main() as its stdin.
        self.readStream = clientTransport.io
        self.readStream.seek(0, 0)
        main(self.fdopen)
        self.assertIn(b"No module named 'doesntexist'", self.writeStream.getvalue())
    def test_readInterrupted(self):
        """
        If reading the input stream fails with a C{IOError} with errno
        C{EINTR}, L{main} ignores it and continues reading.
        """
        excInfos = []
        class FakeStream:
            count = 0
            def read(oself, size):
                oself.count += 1
                if oself.count == 1:
                    # First read is interrupted; main() must retry.
                    raise OSError(errno.EINTR)
                else:
                    excInfos.append(sys.exc_info())
                    return b""
        self.readStream = FakeStream()
        main(self.fdopen)
        self.assertEqual(b"", self.writeStream.getvalue())
        # The EINTR was swallowed: no active exception during the retry.
        self.assertEqual([(None, None, None)], excInfos)
    def test_otherReadError(self):
        """
        L{main} only ignores C{IOError} with C{EINTR} errno: otherwise, the
        error pops out.
        """
        class FakeStream:
            count = 0
            def read(oself, size):
                oself.count += 1
                if oself.count == 1:
                    raise OSError("Something else")
                return ""
        self.readStream = FakeStream()
        self.assertRaises(IOError, main, self.fdopen)
| 28.479452 | 84 | 0.595719 |
import errno
import sys
from io import BytesIO
from twisted.protocols.amp import AMP
from twisted.test.proto_helpers import StringTransport
from twisted.trial._dist import (
_WORKER_AMP_STDIN,
_WORKER_AMP_STDOUT,
managercommands,
workercommands,
workertrial,
)
from twisted.trial._dist.workertrial import WorkerLogObserver, main
from twisted.trial.unittest import TestCase
class FakeAMP(AMP):
class WorkerLogObserverTests(TestCase):
def test_emit(self):
calls = []
class FakeClient:
def callRemote(self, method, **kwargs):
calls.append((method, kwargs))
observer = WorkerLogObserver(FakeClient())
observer.emit({"message": ["Some log"]})
self.assertEqual(calls, [(managercommands.TestWrite, {"out": "Some log"})])
class MainTests(TestCase):
    """Tests for the worker-process entry point L{main}."""
    def setUp(self):
        # In-memory stand-ins for the worker's stdin/stdout AMP pipes.
        self.readStream = BytesIO()
        self.writeStream = BytesIO()
        self.patch(
            workertrial, "startLoggingWithObserver", self.startLoggingWithObserver
        )
        self.addCleanup(setattr, sys, "argv", sys.argv)
        sys.argv = ["trial"]
    def fdopen(self, fd, mode=None):
        """Fake C{os.fdopen} mapping the worker AMP fds onto the BytesIOs."""
        if fd == _WORKER_AMP_STDIN:
            self.assertEqual("rb", mode)
            return self.readStream
        elif fd == _WORKER_AMP_STDOUT:
            self.assertEqual("wb", mode)
            return self.writeStream
        else:
            raise AssertionError(f"Unexpected fd {fd!r}")
    def startLoggingWithObserver(self, emit, setStdout):
        """Patched-in logging starter; the worker must not claim stdout."""
        self.assertFalse(setStdout)
    def test_empty(self):
        """An empty input stream produces no output."""
        main(self.fdopen)
        self.assertEqual(b"", self.writeStream.getvalue())
    def test_forwardCommand(self):
        """A Run command read from stdin is executed and answered on stdout."""
        client = FakeAMP()
        clientTransport = StringTransport()
        client.makeConnection(clientTransport)
        client.callRemote(workercommands.Run, testCase="doesntexist")
        # Replay the serialized AMP command as the worker's input.
        self.readStream = clientTransport.io
        self.readStream.seek(0, 0)
        main(self.fdopen)
        self.assertIn(b"No module named 'doesntexist'", self.writeStream.getvalue())
    def test_readInterrupted(self):
        """EINTR while reading is ignored and reading continues."""
        excInfos = []
        class FakeStream:
            count = 0
            def read(oself, size):
                oself.count += 1
                if oself.count == 1:
                    raise OSError(errno.EINTR)
                else:
                    excInfos.append(sys.exc_info())
                return b""
        self.readStream = FakeStream()
        main(self.fdopen)
        self.assertEqual(b"", self.writeStream.getvalue())
        self.assertEqual([(None, None, None)], excInfos)
    def test_otherReadError(self):
        """A non-EINTR read error propagates out of main()."""
        class FakeStream:
            count = 0
            def read(oself, size):
                oself.count += 1
                if oself.count == 1:
                    raise OSError("Something else")
                return ""
        self.readStream = FakeStream()
        self.assertRaises(IOError, main, self.fdopen)
| true | true |
f734614dbacbcea2945b58a22500b3b5d0fe1098 | 3,239 | py | Python | installers/wix/genfiles.py | d-salodki/BitCoenWallet | e5a68e3c95b17413242a2e39bbb60661794299db | [
"Apache-2.0"
] | 7 | 2017-12-10T23:25:26.000Z | 2020-02-18T11:05:17.000Z | installers/wix/genfiles.py | d-salodki/BitCoenWallet | e5a68e3c95b17413242a2e39bbb60661794299db | [
"Apache-2.0"
] | 4 | 2021-01-28T19:31:45.000Z | 2022-03-25T18:19:52.000Z | installers/wix/genfiles.py | d-salodki/BitCoenWallet | e5a68e3c95b17413242a2e39bbb60661794299db | [
"Apache-2.0"
] | 4 | 2018-01-28T00:05:30.000Z | 2021-03-06T18:08:48.000Z | #!/usr/bin/env python
import os
import uuid
from xml.sax.saxutils import escape
# This script lives in installers/wix; packaged artifacts are expected
# under <repo>/build.
scriptDir = os.path.dirname(os.path.normpath(os.path.abspath(__file__)))
buildDir = os.path.join(os.path.dirname(os.path.dirname(scriptDir)), 'build')
# Maps a WiX <Directory> id to the build subdirectory whose files it receives.
dirs = {
    'ELECTRONDIR': 'BitcoenWallet-win32-x64',
    'COREDIR': 'core',
}
def print2(str):  # parameter name kept for interface compatibility, though it shadows the builtin
    """Best-effort print of *str*, ignoring console write errors.

    On Windows `print` sometimes causes "IOError [errno 0]"; progress
    output is not critical, so such failures are swallowed.
    """
    try:
        print(str)
    except Exception:
        # Narrowed from a bare `except:` so that SystemExit and
        # KeyboardInterrupt are no longer silently swallowed.
        pass
def toIdentifier(string):
    """Map *string* to a WiX-safe identifier.

    ASCII letters, digits, underscores and dots pass through unchanged;
    every other character is replaced with an underscore.

    (Cleaned up: removed the dead `first` flag and the commented-out
    digit-prefix logic, and replaced the quadratic `+=` string build
    with a single join.)
    """
    def _keep(ch):
        # Explicit ASCII ranges on purpose: str.isalnum() would also
        # accept non-ASCII letters, which WiX identifiers reject.
        return ('a' <= ch <= 'z') or ('A' <= ch <= 'Z') \
            or ('0' <= ch <= '9') or ch == '_' or ch == '.'
    return ''.join(ch if _keep(ch) else '_' for ch in string)
def guidForFile(name):
    """Return a stable GUID string for *name*.

    GUIDs are persisted in guids.lst (one "<guid> <name>" per line) so
    rebuilding the installer reuses the same component GUIDs, which WiX
    requires for upgrades to work.

    Fix: previously a cache hit returned a str while a freshly minted
    GUID was returned as uuid.UUID; now a str is returned in both cases
    (backward compatible -- the caller already wrapped it in str()).
    """
    if os.path.exists('guids.lst'):
        with open('guids.lst', "r") as f:
            for line in f.readlines():
                s = line.rstrip().split(' ', 1)
                if s[1] == name:
                    return s[0]
    # Not seen before: mint a new GUID and append it to the cache file.
    guid = str(uuid.uuid1())
    with open('guids.lst', "a") as f:
        f.write('%s %s\n' % (guid, name))
    return guid
def addDir(dirId, path, indent = '    '):
    # Emit WiX <Directory>/<Component> entries for every file under
    # build/<path>, recursing into subdirectories.  Writes into the global
    # output file `f` and records component ids in the global `allIds`.
    print2(path)
    for file in os.listdir(os.path.join(buildDir, path)):
        # The main executable is referenced by a hand-written ComponentRef
        # in the Feature section, so it is skipped here.
        if file == 'BitcoenWallet.exe':
            continue
        relpath = os.path.join(path, file)
        # NOTE(review): srcpath is relative to the cwd -- assumes the script
        # is run from installers/wix.
        srcpath = os.path.join('..', '..', 'build', relpath)
        fileId = '%s.%s' % (dirId, toIdentifier(file))
        # WiX identifiers are limited to 72 characters; keep the tail, which
        # is the most distinctive part of the dotted id.
        if len(fileId) > 72:
            fileId = '_' + fileId[len(fileId)-71:]
        if os.path.isdir(srcpath):
            f.write('%s<Directory Id="%s" Name="%s">\n' % (indent, fileId, escape(file)))
            addDir(fileId, relpath, indent + '  ')
            f.write('%s</Directory>\n' % (indent))
        else:
            allIds.append(fileId)
            guid = guidForFile(relpath)
            f.write('%s<Component Id="%s" Guid="%s">\n' % (indent, fileId, str(guid)))
            f.write('%s  <File Id="%s" Name="%s" Source="%s" KeyPath="yes" />\n' % (indent, fileId, escape(file), escape(srcpath)))
            f.write('%s</Component>\n' % indent)
# Generate files.wxi: one <DirectoryRef> per configured build directory,
# then a single "Complete" Feature referencing every emitted component.
allIds = []
with open("files.wxi", "w") as f:
    f.write('<?xml version="1.0" encoding="windows-1251"?>\n')
    f.write('<Include>\n')
    for dirId, path in dirs.items():
        f.write('  <DirectoryRef Id="%s">\n' % dirId)
        addDir(dirId, path)
        f.write('  </DirectoryRef>\n')
    f.write('  <Feature\n')
    f.write('      Id="Complete"\n')
    f.write('      Level="1"\n')
    f.write('      Title="BitCoen Wallet"\n')
    f.write('      Description="The complete package."\n')
    f.write('      AllowAdvertise="no"\n')
    f.write('      InstallDefault="local"\n')
    f.write('      Absent="disallow"\n')
    f.write('      ConfigurableDirectory="INSTALLDIR">\n')
    f.write('    <ComponentRef Id="BitcoenWallet.exe" />\n')
    # allIds was filled by the addDir() calls above.
    for id in allIds:
        f.write('    <ComponentRef Id="%s" />\n' % id)
    f.write('    <ComponentRef Id="ProgramMenuDir" />\n')
    f.write('  </Feature>\n')
    f.write('</Include>\n')
import os
import uuid
from xml.sax.saxutils import escape
# This script lives in installers/wix; packaged artifacts are expected
# under <repo>/build.
scriptDir = os.path.dirname(os.path.normpath(os.path.abspath(__file__)))
buildDir = os.path.join(os.path.dirname(os.path.dirname(scriptDir)), 'build')
# Maps a WiX <Directory> id to the build subdirectory whose files it receives.
dirs = {
    'ELECTRONDIR': 'BitcoenWallet-win32-x64',
    'COREDIR': 'core',
}
def print2(str):
    # Best-effort print: console writes can fail (e.g. on Windows) and
    # progress output is not critical, so errors are ignored.
    # NOTE(review): the bare `except:` also swallows KeyboardInterrupt and
    # SystemExit -- consider narrowing to `except Exception:`.
    try:
        print(str)
    except:
        pass
def toIdentifier(string):
    """Translate *string* into a WiX-safe identifier.

    ASCII letters, digits, underscores and dots are kept; every other
    character becomes an underscore.
    """
    pieces = []
    for ch in string:
        keep = ('a' <= ch <= 'z') or ('A' <= ch <= 'Z') \
            or ('0' <= ch <= '9') or ch == '_' or ch == '.'
        pieces.append(ch if keep else '_')
    return ''.join(pieces)
def guidForFile(name):
    # Return the GUID recorded for *name* in guids.lst, or mint a new one
    # and append it.  Persisting GUIDs keeps WiX component GUIDs stable
    # across rebuilds.
    # NOTE(review): cache hits return a str while fresh GUIDs are returned
    # as uuid.UUID -- callers must wrap the result in str().
    if os.path.exists('guids.lst'):
        with open('guids.lst', "r") as f:
            for line in f.readlines():
                s = line.rstrip().split(' ', 1)
                if s[1] == name:
                    return s[0]
    guid = uuid.uuid1()
    with open('guids.lst', "a") as f:
        f.write('%s %s\n' % (guid, name))
    return guid
def addDir(dirId, path, indent = '    '):
    # Emit WiX <Directory>/<Component> entries for every file under
    # build/<path>, recursing into subdirectories.  Writes into the global
    # output file `f` and records component ids in the global `allIds`.
    # The main executable is skipped: it is referenced by a hand-written
    # ComponentRef in the Feature section.
    print2(path)
    for file in os.listdir(os.path.join(buildDir, path)):
        if file == 'BitcoenWallet.exe':
            continue
        relpath = os.path.join(path, file)
        srcpath = os.path.join('..', '..', 'build', relpath)
        fileId = '%s.%s' % (dirId, toIdentifier(file))
        # WiX identifiers are limited to 72 characters; keep the tail.
        if len(fileId) > 72:
            fileId = '_' + fileId[len(fileId)-71:]
        if os.path.isdir(srcpath):
            f.write('%s<Directory Id="%s" Name="%s">\n' % (indent, fileId, escape(file)))
            addDir(fileId, relpath, indent + '  ')
            f.write('%s</Directory>\n' % (indent))
        else:
            allIds.append(fileId)
            guid = guidForFile(relpath)
            f.write('%s<Component Id="%s" Guid="%s">\n' % (indent, fileId, str(guid)))
            f.write('%s  <File Id="%s" Name="%s" Source="%s" KeyPath="yes" />\n' % (indent, fileId, escape(file), escape(srcpath)))
            f.write('%s</Component>\n' % indent)
# Generate files.wxi: one <DirectoryRef> per configured build directory,
# then a single "Complete" Feature referencing every emitted component.
allIds = []
with open("files.wxi", "w") as f:
    f.write('<?xml version="1.0" encoding="windows-1251"?>\n')
    f.write('<Include>\n')
    for dirId, path in dirs.items():
        f.write('  <DirectoryRef Id="%s">\n' % dirId)
        addDir(dirId, path)
        f.write('  </DirectoryRef>\n')
    f.write('  <Feature\n')
    f.write('      Id="Complete"\n')
    f.write('      Level="1"\n')
    f.write('      Title="BitCoen Wallet"\n')
    f.write('      Description="The complete package."\n')
    f.write('      AllowAdvertise="no"\n')
    f.write('      InstallDefault="local"\n')
    f.write('      Absent="disallow"\n')
    f.write('      ConfigurableDirectory="INSTALLDIR">\n')
    f.write('    <ComponentRef Id="BitcoenWallet.exe" />\n')
    # allIds was filled by the addDir() calls above.
    for id in allIds:
        f.write('    <ComponentRef Id="%s" />\n' % id)
    f.write('    <ComponentRef Id="ProgramMenuDir" />\n')
    f.write('  </Feature>\n')
    f.write('</Include>\n')
f7346162b45a7962aacf380692b2de379af58981 | 3,143 | py | Python | copyexcel.py | RaviTejaKomma/Automate-Boring-Stuff-Python | e5d8df1b060f20e50691f824ecabc3a30dc845c7 | [
"MIT"
] | null | null | null | copyexcel.py | RaviTejaKomma/Automate-Boring-Stuff-Python | e5d8df1b060f20e50691f824ecabc3a30dc845c7 | [
"MIT"
] | null | null | null | copyexcel.py | RaviTejaKomma/Automate-Boring-Stuff-Python | e5d8df1b060f20e50691f824ecabc3a30dc845c7 | [
"MIT"
] | null | null | null | '''
This script is about reading and writing excel files through python. Refer to the guidelines in @33.
This builds on assignment 1 (on using click).
Here's the high level spec, you have to figure out all the details and get this done:
Create a script called copyexcel.py which uses openpyxl and click.
It copies all data from one excel file to another and transforms them according to specified criteria.
If the input has more than one worksheet, then all of them are copied.
Script usage is as follows:
copyexcel.py --capitalize --preservestyles <source_excel> <dest_excel>
--capitalize -> boolean flag. default false. If specified, all string data will be capitalized during copy into destination
--preservestyles -> boolean flag. default false. If specified, even the cell styles will be copied, else only data.
source_excel -> argument that specifies the input file
dest_excel -> argument that specifies the output file. It already exists, prompt if user wants to overwrite. If yes, overwrite it.
You can use this as the sample data file for testing: students.xlsx
'''
import click
from openpyxl import *
from os.path import exists
from copy import copy
def copy_the_content(src_filepath, dest_filepath, capitalize, preservestyles):
    """Copy every worksheet of *src_filepath* into *dest_filepath*.

    capitalize: if true, string cells are copied via str.capitalize().
    preservestyles: if true, cell styles (font, border, alignment, fill,
    protection) are copied as well; otherwise only the data.
    """
    src_wb = load_workbook(src_filepath)
    dest_wb = Workbook()
    dest_wb.remove_sheet(dest_wb.active)  # drop the default empty sheet
    for sheet in src_wb:
        dest_curr_ws = dest_wb.create_sheet(sheet.title)
        alph = 65  # ord('A'); advances one letter per source column
        # NOTE(review): chr(alph) breaks past column 'Z' (chr(91) == '[')
        # -- fine for sheets up to 26 columns; confirm against input data.
        for col in sheet.iter_cols():
            dest_curr_ws.column_dimensions[chr(alph)].width = sheet.column_dimensions[chr(alph)].width
            alph += 1
            for cell in col:
                # BUG FIX: values were previously assigned only inside
                # `if capitalize:`, so a plain copy produced an empty
                # workbook.  Copy the value unconditionally.
                value = cell.value
                if capitalize and cell.data_type == 's':
                    value = value.capitalize()
                dest_curr_ws[cell.coordinate] = value
                if preservestyles:
                    dest_curr_ws[cell.coordinate].style = copy(cell.style)
                    dest_curr_ws[cell.coordinate].font = copy(cell.font)
                    dest_curr_ws[cell.coordinate].border = copy(cell.border)
                    dest_curr_ws[cell.coordinate].alignment = copy(cell.alignment)
                    dest_curr_ws[cell.coordinate].fill = copy(cell.fill)
                    dest_curr_ws[cell.coordinate].protection = copy(cell.protection)
    dest_wb.save(dest_filepath)
@click.command()
@click.option("--capitalize",is_flag=True,help="If specified, all string data will be capitalized during copy into destination.")
@click.option("--preservestyles",is_flag=True,help="If specified, even the cell styles will be copied, else only data.")
@click.argument("source_excel",nargs=1)
@click.argument("dest_excel",nargs=1)
def copying_excel_data(capitalize,preservestyles,source_excel,dest_excel):
    # CLI entry point: copy SOURCE_EXCEL into DEST_EXCEL, prompting before
    # overwriting an existing destination.
    if exists(dest_excel):
        # NOTE(review): raw_input is Python 2 only (use input() on Py3), and
        # the check is case-sensitive -- any answer other than exactly 'Y'
        # silently does nothing.
        overwrite = raw_input("The destination file already exists, do you want to overwrite it : Y/N ?\n")
        if overwrite == 'Y':
            copy_the_content(source_excel,dest_excel,capitalize,preservestyles)
    else:
        copy_the_content(source_excel,dest_excel,capitalize,preservestyles)
if __name__ == "__main__":
    copying_excel_data()
| 45.550725 | 130 | 0.715558 |
import click
from openpyxl import *
from os.path import exists
from copy import copy
def copy_the_content(src_filepath,dest_filepath,capitalize,preservestyles):
    # Copy every worksheet (data, column widths, optionally styles) from the
    # source workbook into a new workbook saved at dest_filepath.
    src_wb = load_workbook(src_filepath)
    dest_wb = Workbook()
    # Drop the default empty sheet so only copied sheets remain.
    dest_wb.remove_sheet(dest_wb.active)
    for sheet in src_wb:
        dest_curr_ws = dest_wb.create_sheet(sheet.title)
        alph = 65
        num=1
        for col in sheet.iter_cols():
            # Mirror the source column width; alph is ord('A') advancing per column.
            dest_curr_ws.column_dimensions[chr(alph)].width = sheet.column_dimensions[chr(alph)].width
            alph+=1
            for cell in col:
                # BUG: the value assignment sits inside `if capitalize:`, so
                # without --capitalize no cell values are copied at all.
                if capitalize:
                    dest_curr_ws[cell.coordinate] = cell.value.capitalize() if cell.data_type=='s' else cell.value
                if preservestyles:
                    dest_curr_ws[cell.coordinate].style = copy(cell.style)
                    dest_curr_ws[cell.coordinate].font = copy(cell.font)
                    dest_curr_ws[cell.coordinate].border = copy(cell.border)
                    dest_curr_ws[cell.coordinate].alignment = copy(cell.alignment)
                    dest_curr_ws[cell.coordinate].fill = copy(cell.fill)
                    dest_curr_ws[cell.coordinate].protection = copy(cell.protection)
    dest_wb.save(dest_filepath)
@click.command()
@click.option("--capitalize",is_flag=True,help="If specified, all string data will be capitalized during copy into destination.")
@click.option("--preservestyles",is_flag=True,help="If specified, even the cell styles will be copied, else only data.")
@click.argument("source_excel",nargs=1)
@click.argument("dest_excel",nargs=1)
def copying_excel_data(capitalize,preservestyles,source_excel,dest_excel):
    # CLI entry point: copy SOURCE_EXCEL into DEST_EXCEL, prompting before
    # overwriting an existing destination.
    # NOTE(review): raw_input is Python 2 only; answers other than exactly
    # 'Y' silently skip the copy.
    if exists(dest_excel):
        overwrite = raw_input("The destination file already exists, do you want to overwrite it : Y/N ?\n")
        if overwrite == 'Y':
            copy_the_content(source_excel,dest_excel,capitalize,preservestyles)
    else:
        copy_the_content(source_excel,dest_excel,capitalize,preservestyles)
if __name__ == "__main__":
    copying_excel_data()
| true | true |
f734620a92c87bf2d9b8868aa2af756591238d37 | 159 | py | Python | models.py | mwang87/GNPS_Batch_Validator | 97a8b0c1f2a0977eec8c7eaf842e15392d865676 | [
"Apache-2.0"
] | null | null | null | models.py | mwang87/GNPS_Batch_Validator | 97a8b0c1f2a0977eec8c7eaf842e15392d865676 | [
"Apache-2.0"
] | null | null | null | models.py | mwang87/GNPS_Batch_Validator | 97a8b0c1f2a0977eec8c7eaf842e15392d865676 | [
"Apache-2.0"
] | null | null | null | # models.py
from peewee import *
from app import db
class Filename(Model):
filepath = TextField(primary_key=True)
class Meta:
database = db
| 14.454545 | 42 | 0.679245 |
from peewee import *
from app import db
class Filename(Model):
filepath = TextField(primary_key=True)
class Meta:
database = db
| true | true |
f73462b423a2598169b46014fc637480ebb7aabe | 19,095 | py | Python | example/myshop/migrations/simple/0001_initial.py | Iv/django-shop | aa52dce6e9115d3b7a913ffa6027f978260b324c | [
"BSD-3-Clause"
] | null | null | null | example/myshop/migrations/simple/0001_initial.py | Iv/django-shop | aa52dce6e9115d3b7a913ffa6027f978260b324c | [
"BSD-3-Clause"
] | null | null | null | example/myshop/migrations/simple/0001_initial.py | Iv/django-shop | aa52dce6e9115d3b7a913ffa6027f978260b324c | [
"BSD-3-Clause"
] | 1 | 2020-01-10T01:51:07.000Z | 2020-01-10T01:51:07.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import shop.payment.defaults
import filer.fields.image
import django_fsm
import django.db.models.deletion
import jsonfield.fields
import djangocms_text_ckeditor.fields
import django.utils.timezone
from django.conf import settings
import django.core.validators
import shop_stripe.payment
class Migration(migrations.Migration):
dependencies = [
('email_auth', '0001_initial'),
('cms', '0013_urlconfrevision'),
('contenttypes', '0002_remove_content_type_name'),
('filer', '0002_auto_20150606_2003'),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('priority_shipping', models.SmallIntegerField(default=None, help_text='Priority of using this address for shipping', null=True)),
('priority_billing', models.SmallIntegerField(default=None, help_text='Priority of using this address for invoicing', null=True)),
('addressee', models.CharField(max_length=50, verbose_name='Addressee')),
('supplement', models.CharField(max_length=50, null=True, verbose_name='Supplement', blank=True)),
('street', models.CharField(max_length=50, verbose_name='Street')),
('zip_code', models.CharField(max_length=10, verbose_name='ZIP')),
('location', models.CharField(max_length=50, verbose_name='Location')),
('country', models.CharField(max_length=3, verbose_name='Country', choices=[('AF', 'Afghanistan'), ('AX', 'Aland Islands'), ('AL', 'Albania'), ('DZ', 'Algeria'), ('AS', 'American Samoa'), ('AD', 'Andorra'), ('AO', 'Angola'), ('AI', 'Anguilla'), ('AQ', 'Antarctica'), ('AG', 'Antigua And Barbuda'), ('AR', 'Argentina'), ('AM', 'Armenia'), ('AW', 'Aruba'), ('AU', 'Australia'), ('AT', 'Austria'), ('AZ', 'Azerbaijan'), ('BS', 'Bahamas'), ('BH', 'Bahrain'), ('BD', 'Bangladesh'), ('BB', 'Barbados'), ('BY', 'Belarus'), ('BE', 'Belgium'), ('BZ', 'Belize'), ('BJ', 'Benin'), ('BM', 'Bermuda'), ('BT', 'Bhutan'), ('BO', 'Bolivia, Plurinational State Of'), ('BQ', 'Bonaire, Saint Eustatius And Saba'), ('BA', 'Bosnia And Herzegovina'), ('BW', 'Botswana'), ('BV', 'Bouvet Island'), ('BR', 'Brazil'), ('IO', 'British Indian Ocean Territory'), ('BN', 'Brunei Darussalam'), ('BG', 'Bulgaria'), ('BF', 'Burkina Faso'), ('BI', 'Burundi'), ('KH', 'Cambodia'), ('CM', 'Cameroon'), ('CA', 'Canada'), ('CV', 'Cape Verde'), ('KY', 'Cayman Islands'), ('CF', 'Central African Republic'), ('TD', 'Chad'), ('CL', 'Chile'), ('CN', 'China'), ('CX', 'Christmas Island'), ('CC', 'Cocos (Keeling) Islands'), ('CO', 'Colombia'), ('KM', 'Comoros'), ('CG', 'Congo'), ('CD', 'Congo, The Democratic Republic Of The'), ('CK', 'Cook Islands'), ('CR', 'Costa Rica'), ('HR', 'Croatia'), ('CU', 'Cuba'), ('CW', 'Curacao'), ('CY', 'Cyprus'), ('CZ', 'Czech Republic'), ('DK', 'Denmark'), ('DJ', 'Djibouti'), ('DM', 'Dominica'), ('DO', 'Dominican Republic'), ('EC', 'Ecuador'), ('EG', 'Egypt'), ('SV', 'El Salvador'), ('GQ', 'Equatorial Guinea'), ('ER', 'Eritrea'), ('EE', 'Estonia'), ('ET', 'Ethiopia'), ('FK', 'Falkland Islands (Malvinas)'), ('FO', 'Faroe Islands'), ('FJ', 'Fiji'), ('FI', 'Finland'), ('FR', 'France'), ('GF', 'French Guiana'), ('PF', 'French Polynesia'), ('TF', 'French Southern Territories'), ('GA', 'Gabon'), ('GM', 'Gambia'), ('DE', 'Germany'), ('GH', 'Ghana'), ('GI', 'Gibraltar'), ('GR', 'Greece'), ('GL', 
'Greenland'), ('GD', 'Grenada'), ('GP', 'Guadeloupe'), ('GU', 'Guam'), ('GT', 'Guatemala'), ('GG', 'Guernsey'), ('GN', 'Guinea'), ('GW', 'Guinea-Bissau'), ('GY', 'Guyana'), ('HT', 'Haiti'), ('HM', 'Heard Island and McDonald Islands'), ('VA', 'Holy See (Vatican City State)'), ('HN', 'Honduras'), ('HK', 'Hong Kong'), ('HU', 'Hungary'), ('IS', 'Iceland'), ('IN', 'India'), ('ID', 'Indonesia'), ('IR', 'Iran, Islamic Republic Of'), ('IQ', 'Iraq'), ('IE', 'Ireland'), ('IL', 'Israel'), ('IT', 'Italy'), ('CI', 'Ivory Coast'), ('JM', 'Jamaica'), ('JP', 'Japan'), ('JE', 'Jersey'), ('JO', 'Jordan'), ('KZ', 'Kazakhstan'), ('KE', 'Kenya'), ('KI', 'Kiribati'), ('KP', "Korea, Democratic People's Republic Of"), ('KR', 'Korea, Republic Of'), ('KS', 'Kosovo'), ('KW', 'Kuwait'), ('KG', 'Kyrgyzstan'), ('LA', "Lao People's Democratic Republic"), ('LV', 'Latvia'), ('LB', 'Lebanon'), ('LS', 'Lesotho'), ('LR', 'Liberia'), ('LY', 'Libyan Arab Jamahiriya'), ('LI', 'Liechtenstein'), ('LT', 'Lithuania'), ('LU', 'Luxembourg'), ('MO', 'Macao'), ('MK', 'Macedonia'), ('MG', 'Madagascar'), ('MW', 'Malawi'), ('MY', 'Malaysia'), ('MV', 'Maldives'), ('ML', 'Mali'), ('ML', 'Malta'), ('MH', 'Marshall Islands'), ('MQ', 'Martinique'), ('MR', 'Mauritania'), ('MU', 'Mauritius'), ('YT', 'Mayotte'), ('MX', 'Mexico'), ('FM', 'Micronesia'), ('MD', 'Moldova'), ('MC', 'Monaco'), ('MN', 'Mongolia'), ('ME', 'Montenegro'), ('MS', 'Montserrat'), ('MA', 'Morocco'), ('MZ', 'Mozambique'), ('MM', 'Myanmar'), ('NA', 'Namibia'), ('NR', 'Nauru'), ('NP', 'Nepal'), ('NL', 'Netherlands'), ('AN', 'Netherlands Antilles'), ('NC', 'New Caledonia'), ('NZ', 'New Zealand'), ('NI', 'Nicaragua'), ('NE', 'Niger'), ('NG', 'Nigeria'), ('NU', 'Niue'), ('NF', 'Norfolk Island'), ('MP', 'Northern Mariana Islands'), ('NO', 'Norway'), ('OM', 'Oman'), ('PK', 'Pakistan'), ('PW', 'Palau'), ('PS', 'Palestinian Territory, Occupied'), ('PA', 'Panama'), ('PG', 'Papua New Guinea'), ('PY', 'Paraguay'), ('PE', 'Peru'), ('PH', 'Philippines'), ('PN', 
'Pitcairn'), ('PL', 'Poland'), ('PT', 'Portugal'), ('PR', 'Puerto Rico'), ('QA', 'Qatar'), ('RE', 'Reunion'), ('RO', 'Romania'), ('RU', 'Russian Federation'), ('RW', 'Rwanda'), ('BL', 'Saint Barthelemy'), ('SH', 'Saint Helena, Ascension & Tristan Da Cunha'), ('KN', 'Saint Kitts and Nevis'), ('LC', 'Saint Lucia'), ('MF', 'Saint Martin (French Part)'), ('PM', 'Saint Pierre and Miquelon'), ('VC', 'Saint Vincent And The Grenadines'), ('WS', 'Samoa'), ('SM', 'San Marino'), ('ST', 'Sao Tome And Principe'), ('SA', 'Saudi Arabia'), ('SN', 'Senegal'), ('RS', 'Serbia'), ('SC', 'Seychelles'), ('SL', 'Sierra Leone'), ('SG', 'Singapore'), ('SX', 'Sint Maarten (Dutch Part)'), ('SK', 'Slovakia'), ('SI', 'Slovenia'), ('SB', 'Solomon Islands'), ('SO', 'Somalia'), ('ZA', 'South Africa'), ('GS', 'South Georgia And The South Sandwich Islands'), ('ES', 'Spain'), ('LK', 'Sri Lanka'), ('SD', 'Sudan'), ('SR', 'Suriname'), ('SJ', 'Svalbard And Jan Mayen'), ('SZ', 'Swaziland'), ('SE', 'Sweden'), ('CH', 'Switzerland'), ('SY', 'Syrian Arab Republic'), ('TW', 'Taiwan'), ('TJ', 'Tajikistan'), ('TZ', 'Tanzania'), ('TH', 'Thailand'), ('TL', 'Timor-Leste'), ('TG', 'Togo'), ('TK', 'Tokelau'), ('TO', 'Tonga'), ('TT', 'Trinidad and Tobago'), ('TN', 'Tunisia'), ('TR', 'Turkey'), ('TM', 'Turkmenistan'), ('TC', 'Turks And Caicos Islands'), ('TV', 'Tuvalu'), ('UG', 'Uganda'), ('UA', 'Ukraine'), ('AE', 'United Arab Emirates'), ('GB', 'United Kingdom'), ('US', 'United States'), ('UM', 'United States Minor Outlying Islands'), ('UY', 'Uruguay'), ('UZ', 'Uzbekistan'), ('VU', 'Vanuatu'), ('VE', 'Venezuela, Bolivarian Republic Of'), ('VN', 'Viet Nam'), ('VG', 'Virgin Islands, British'), ('VI', 'Virgin Islands, U.S.'), ('WF', 'Wallis and Futuna'), ('EH', 'Western Sahara'), ('YE', 'Yemen'), ('ZM', 'Zambia'), ('ZW', 'Zimbabwe')])),
],
),
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated at')),
('extra', jsonfield.fields.JSONField(default={}, verbose_name='Arbitrary information for this cart')),
('billing_address', models.ForeignKey(related_name='+', default=None, to='myshop.Address', null=True)),
],
),
migrations.CreateModel(
name='CartItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('quantity', models.IntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('extra', jsonfield.fields.JSONField(default={}, verbose_name='Arbitrary information for this cart item')),
('cart', models.ForeignKey(related_name='items', to='myshop.Cart')),
],
),
migrations.CreateModel(
name='Customer',
fields=[
('user', models.OneToOneField(primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('recognized', models.PositiveSmallIntegerField(default=0, help_text='Designates the state the customer is recognized as.', verbose_name='Recognized as', choices=[(0, 'Unrecognized'), (1, 'Guest'), (2, 'Registered')])),
('salutation', models.CharField(max_length=5, verbose_name='Salutation', choices=[('mrs', 'Mrs.'), ('mr', 'Mr.'), ('na', '(n/a)')])),
('last_access', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Last accessed')),
('extra', jsonfield.fields.JSONField(default={}, verbose_name='Extra information about this customer', editable=False)),
('number', models.PositiveIntegerField(default=None, unique=True, null=True, verbose_name='Customer Number')),
],
),
migrations.CreateModel(
name='Manufacturer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50, verbose_name='Name')),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', django_fsm.FSMField(default='new', protected=True, max_length=50, verbose_name='Status')),
('currency', models.CharField(help_text='Currency in which this order was concluded', max_length=7, editable=False)),
('_subtotal', models.DecimalField(verbose_name='Subtotal', max_digits=30, decimal_places=2)),
('_total', models.DecimalField(verbose_name='Total', max_digits=30, decimal_places=2)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated at')),
('extra', jsonfield.fields.JSONField(default={}, help_text='Arbitrary information for this order object on the moment of purchase.', verbose_name='Extra fields')),
('stored_request', jsonfield.fields.JSONField(default={}, help_text='Parts of the Request objects on the moment of purchase.')),
('number', models.PositiveIntegerField(default=None, unique=True, null=True, verbose_name='Order Number')),
('shipping_address_text', models.TextField(help_text='Shipping address at the moment of purchase.', null=True, verbose_name='Shipping Address', blank=True)),
('billing_address_text', models.TextField(help_text='Billing address at the moment of purchase.', null=True, verbose_name='Billing Address', blank=True)),
('customer', models.ForeignKey(related_name='orders', verbose_name='Customer', to='myshop.Customer')),
],
options={
'verbose_name': 'Order',
'verbose_name_plural': 'Orders',
},
bases=(shop.payment.defaults.PayInAdvanceWorkflowMixin, shop.payment.defaults.CommissionGoodsWorkflowMixin, shop_stripe.payment.OrderWorkflowMixin, models.Model),
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('product_name', models.CharField(help_text='Product name at the moment of purchase.', max_length=255, null=True, verbose_name='Product name', blank=True)),
('product_code', models.CharField(help_text='Product code at the moment of purchase.', max_length=255, null=True, verbose_name='Product code', blank=True)),
('_unit_price', models.DecimalField(help_text='Products unit price at the moment of purchase.', null=True, verbose_name='Unit price', max_digits=30, decimal_places=2)),
('_line_total', models.DecimalField(help_text='Line total on the invoice at the moment of purchase.', null=True, verbose_name='Line Total', max_digits=30, decimal_places=2)),
('quantity', models.IntegerField(verbose_name='Ordered quantity')),
('extra', jsonfield.fields.JSONField(default={}, help_text='Arbitrary information for this order item', verbose_name='Extra fields')),
('order', models.ForeignKey(related_name='items', verbose_name='Order', to='myshop.Order')),
],
),
migrations.CreateModel(
name='OrderPayment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', django_fsm.FSMField(default='new', protected=True, max_length=50, verbose_name='Status')),
('amount', models.DecimalField(default='0', help_text='How much was paid with this particular transfer.', max_digits=30, decimal_places=2)),
('transaction_id', models.CharField(help_text="The transaction processor's reference", max_length=255, verbose_name='Transaction ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Received at')),
('payment_method', models.CharField(help_text='The payment backend used to process the purchase', max_length=255, verbose_name='Payment method')),
('order', models.ForeignKey(verbose_name='Order', to='myshop.Order')),
],
options={
'verbose_name': 'Order payment',
'verbose_name_plural': 'Order payments',
},
),
migrations.CreateModel(
name='ProductImage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('order', models.SmallIntegerField(default=0)),
('image', filer.fields.image.FilerImageField(to='filer.Image')),
],
),
migrations.CreateModel(
name='ProductPage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('page', models.ForeignKey(to='cms.Page')),
],
),
migrations.CreateModel(
name='SmartCard',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated at')),
('active', models.BooleanField(default=True, help_text='Is this product publicly visible.', verbose_name='Active')),
('product_name', models.CharField(max_length=255, verbose_name='Product Name')),
('slug', models.SlugField(verbose_name='Slug')),
('unit_price', models.DecimalField(default='0', help_text='Net price for this product', max_digits=30, decimal_places=3)),
('description', djangocms_text_ckeditor.fields.HTMLField(help_text='Description for the list view of products.', verbose_name='Description')),
('card_type', models.CharField(max_length=15, verbose_name='Card Type', choices=[('SD', 'SD'), ('micro SD', 'micro SD'), ('SDXC', 'SDXC'), ('micro SDXC', 'micro SDXC'), ('SDHC', 'SDHC'), ('micro SDHC', 'micro SDHC'), ('SDHC II', 'SDHC II'), ('micro SDHC II', 'micro SDHC II')])),
('speed', models.CharField(max_length=8, verbose_name='Transfer Speed', choices=[(b'4', '4 MB/s'), (b'20', '20 MB/s'), (b'30', '30 MB/s'), (b'40', '40 MB/s'), (b'48', '48 MB/s'), (b'80', '80 MB/s'), (b'95', '95 MB/s'), (b'280', '280 MB/s')])),
('product_code', models.CharField(unique=True, max_length=255, verbose_name='Product code')),
('storage', models.PositiveIntegerField(help_text='Storage capacity in GB', verbose_name='Storage Capacity')),
('order', models.PositiveIntegerField(verbose_name='Sort by', db_index=True)),
('cms_pages', models.ManyToManyField(help_text='Choose list view this product shall appear on.', to='cms.Page', through='myshop.ProductPage')),
('images', models.ManyToManyField(to='filer.Image', through='myshop.ProductImage')),
('manufacturer', models.ForeignKey(verbose_name='Manufacturer', to='myshop.Manufacturer')),
('polymorphic_ctype', models.ForeignKey(related_name='polymorphic_myshop.smartcard_set+', editable=False, to='contenttypes.ContentType', null=True)),
],
options={
'ordering': ('order',),
'verbose_name': 'Smart Card',
'verbose_name_plural': 'Smart Cards',
},
),
migrations.AddField(
model_name='productpage',
name='product',
field=models.ForeignKey(to='myshop.SmartCard'),
),
migrations.AddField(
model_name='productimage',
name='product',
field=models.ForeignKey(to='myshop.SmartCard'),
),
migrations.AddField(
model_name='orderitem',
name='product',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, verbose_name='Product', blank=True, to='myshop.SmartCard', null=True),
),
migrations.AddField(
model_name='cartitem',
name='product',
field=models.ForeignKey(to='myshop.SmartCard'),
),
migrations.AddField(
model_name='cart',
name='customer',
field=models.OneToOneField(related_name='cart', verbose_name='Customer', to='myshop.Customer'),
),
migrations.AddField(
model_name='cart',
name='shipping_address',
field=models.ForeignKey(related_name='+', default=None, to='myshop.Address', null=True),
),
migrations.AddField(
model_name='address',
name='customer',
field=models.ForeignKey(to='myshop.Customer'),
),
]
| 91.802885 | 5,814 | 0.595391 |
from __future__ import unicode_literals
from django.db import migrations, models
import shop.payment.defaults
import filer.fields.image
import django_fsm
import django.db.models.deletion
import jsonfield.fields
import djangocms_text_ckeditor.fields
import django.utils.timezone
from django.conf import settings
import django.core.validators
import shop_stripe.payment
class Migration(migrations.Migration):
dependencies = [
('email_auth', '0001_initial'),
('cms', '0013_urlconfrevision'),
('contenttypes', '0002_remove_content_type_name'),
('filer', '0002_auto_20150606_2003'),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('priority_shipping', models.SmallIntegerField(default=None, help_text='Priority of using this address for shipping', null=True)),
('priority_billing', models.SmallIntegerField(default=None, help_text='Priority of using this address for invoicing', null=True)),
('addressee', models.CharField(max_length=50, verbose_name='Addressee')),
('supplement', models.CharField(max_length=50, null=True, verbose_name='Supplement', blank=True)),
('street', models.CharField(max_length=50, verbose_name='Street')),
('zip_code', models.CharField(max_length=10, verbose_name='ZIP')),
('location', models.CharField(max_length=50, verbose_name='Location')),
('country', models.CharField(max_length=3, verbose_name='Country', choices=[('AF', 'Afghanistan'), ('AX', 'Aland Islands'), ('AL', 'Albania'), ('DZ', 'Algeria'), ('AS', 'American Samoa'), ('AD', 'Andorra'), ('AO', 'Angola'), ('AI', 'Anguilla'), ('AQ', 'Antarctica'), ('AG', 'Antigua And Barbuda'), ('AR', 'Argentina'), ('AM', 'Armenia'), ('AW', 'Aruba'), ('AU', 'Australia'), ('AT', 'Austria'), ('AZ', 'Azerbaijan'), ('BS', 'Bahamas'), ('BH', 'Bahrain'), ('BD', 'Bangladesh'), ('BB', 'Barbados'), ('BY', 'Belarus'), ('BE', 'Belgium'), ('BZ', 'Belize'), ('BJ', 'Benin'), ('BM', 'Bermuda'), ('BT', 'Bhutan'), ('BO', 'Bolivia, Plurinational State Of'), ('BQ', 'Bonaire, Saint Eustatius And Saba'), ('BA', 'Bosnia And Herzegovina'), ('BW', 'Botswana'), ('BV', 'Bouvet Island'), ('BR', 'Brazil'), ('IO', 'British Indian Ocean Territory'), ('BN', 'Brunei Darussalam'), ('BG', 'Bulgaria'), ('BF', 'Burkina Faso'), ('BI', 'Burundi'), ('KH', 'Cambodia'), ('CM', 'Cameroon'), ('CA', 'Canada'), ('CV', 'Cape Verde'), ('KY', 'Cayman Islands'), ('CF', 'Central African Republic'), ('TD', 'Chad'), ('CL', 'Chile'), ('CN', 'China'), ('CX', 'Christmas Island'), ('CC', 'Cocos (Keeling) Islands'), ('CO', 'Colombia'), ('KM', 'Comoros'), ('CG', 'Congo'), ('CD', 'Congo, The Democratic Republic Of The'), ('CK', 'Cook Islands'), ('CR', 'Costa Rica'), ('HR', 'Croatia'), ('CU', 'Cuba'), ('CW', 'Curacao'), ('CY', 'Cyprus'), ('CZ', 'Czech Republic'), ('DK', 'Denmark'), ('DJ', 'Djibouti'), ('DM', 'Dominica'), ('DO', 'Dominican Republic'), ('EC', 'Ecuador'), ('EG', 'Egypt'), ('SV', 'El Salvador'), ('GQ', 'Equatorial Guinea'), ('ER', 'Eritrea'), ('EE', 'Estonia'), ('ET', 'Ethiopia'), ('FK', 'Falkland Islands (Malvinas)'), ('FO', 'Faroe Islands'), ('FJ', 'Fiji'), ('FI', 'Finland'), ('FR', 'France'), ('GF', 'French Guiana'), ('PF', 'French Polynesia'), ('TF', 'French Southern Territories'), ('GA', 'Gabon'), ('GM', 'Gambia'), ('DE', 'Germany'), ('GH', 'Ghana'), ('GI', 'Gibraltar'), ('GR', 'Greece'), ('GL', 
'Greenland'), ('GD', 'Grenada'), ('GP', 'Guadeloupe'), ('GU', 'Guam'), ('GT', 'Guatemala'), ('GG', 'Guernsey'), ('GN', 'Guinea'), ('GW', 'Guinea-Bissau'), ('GY', 'Guyana'), ('HT', 'Haiti'), ('HM', 'Heard Island and McDonald Islands'), ('VA', 'Holy See (Vatican City State)'), ('HN', 'Honduras'), ('HK', 'Hong Kong'), ('HU', 'Hungary'), ('IS', 'Iceland'), ('IN', 'India'), ('ID', 'Indonesia'), ('IR', 'Iran, Islamic Republic Of'), ('IQ', 'Iraq'), ('IE', 'Ireland'), ('IL', 'Israel'), ('IT', 'Italy'), ('CI', 'Ivory Coast'), ('JM', 'Jamaica'), ('JP', 'Japan'), ('JE', 'Jersey'), ('JO', 'Jordan'), ('KZ', 'Kazakhstan'), ('KE', 'Kenya'), ('KI', 'Kiribati'), ('KP', "Korea, Democratic People's Republic Of"), ('KR', 'Korea, Republic Of'), ('KS', 'Kosovo'), ('KW', 'Kuwait'), ('KG', 'Kyrgyzstan'), ('LA', "Lao People's Democratic Republic"), ('LV', 'Latvia'), ('LB', 'Lebanon'), ('LS', 'Lesotho'), ('LR', 'Liberia'), ('LY', 'Libyan Arab Jamahiriya'), ('LI', 'Liechtenstein'), ('LT', 'Lithuania'), ('LU', 'Luxembourg'), ('MO', 'Macao'), ('MK', 'Macedonia'), ('MG', 'Madagascar'), ('MW', 'Malawi'), ('MY', 'Malaysia'), ('MV', 'Maldives'), ('ML', 'Mali'), ('ML', 'Malta'), ('MH', 'Marshall Islands'), ('MQ', 'Martinique'), ('MR', 'Mauritania'), ('MU', 'Mauritius'), ('YT', 'Mayotte'), ('MX', 'Mexico'), ('FM', 'Micronesia'), ('MD', 'Moldova'), ('MC', 'Monaco'), ('MN', 'Mongolia'), ('ME', 'Montenegro'), ('MS', 'Montserrat'), ('MA', 'Morocco'), ('MZ', 'Mozambique'), ('MM', 'Myanmar'), ('NA', 'Namibia'), ('NR', 'Nauru'), ('NP', 'Nepal'), ('NL', 'Netherlands'), ('AN', 'Netherlands Antilles'), ('NC', 'New Caledonia'), ('NZ', 'New Zealand'), ('NI', 'Nicaragua'), ('NE', 'Niger'), ('NG', 'Nigeria'), ('NU', 'Niue'), ('NF', 'Norfolk Island'), ('MP', 'Northern Mariana Islands'), ('NO', 'Norway'), ('OM', 'Oman'), ('PK', 'Pakistan'), ('PW', 'Palau'), ('PS', 'Palestinian Territory, Occupied'), ('PA', 'Panama'), ('PG', 'Papua New Guinea'), ('PY', 'Paraguay'), ('PE', 'Peru'), ('PH', 'Philippines'), ('PN', 
'Pitcairn'), ('PL', 'Poland'), ('PT', 'Portugal'), ('PR', 'Puerto Rico'), ('QA', 'Qatar'), ('RE', 'Reunion'), ('RO', 'Romania'), ('RU', 'Russian Federation'), ('RW', 'Rwanda'), ('BL', 'Saint Barthelemy'), ('SH', 'Saint Helena, Ascension & Tristan Da Cunha'), ('KN', 'Saint Kitts and Nevis'), ('LC', 'Saint Lucia'), ('MF', 'Saint Martin (French Part)'), ('PM', 'Saint Pierre and Miquelon'), ('VC', 'Saint Vincent And The Grenadines'), ('WS', 'Samoa'), ('SM', 'San Marino'), ('ST', 'Sao Tome And Principe'), ('SA', 'Saudi Arabia'), ('SN', 'Senegal'), ('RS', 'Serbia'), ('SC', 'Seychelles'), ('SL', 'Sierra Leone'), ('SG', 'Singapore'), ('SX', 'Sint Maarten (Dutch Part)'), ('SK', 'Slovakia'), ('SI', 'Slovenia'), ('SB', 'Solomon Islands'), ('SO', 'Somalia'), ('ZA', 'South Africa'), ('GS', 'South Georgia And The South Sandwich Islands'), ('ES', 'Spain'), ('LK', 'Sri Lanka'), ('SD', 'Sudan'), ('SR', 'Suriname'), ('SJ', 'Svalbard And Jan Mayen'), ('SZ', 'Swaziland'), ('SE', 'Sweden'), ('CH', 'Switzerland'), ('SY', 'Syrian Arab Republic'), ('TW', 'Taiwan'), ('TJ', 'Tajikistan'), ('TZ', 'Tanzania'), ('TH', 'Thailand'), ('TL', 'Timor-Leste'), ('TG', 'Togo'), ('TK', 'Tokelau'), ('TO', 'Tonga'), ('TT', 'Trinidad and Tobago'), ('TN', 'Tunisia'), ('TR', 'Turkey'), ('TM', 'Turkmenistan'), ('TC', 'Turks And Caicos Islands'), ('TV', 'Tuvalu'), ('UG', 'Uganda'), ('UA', 'Ukraine'), ('AE', 'United Arab Emirates'), ('GB', 'United Kingdom'), ('US', 'United States'), ('UM', 'United States Minor Outlying Islands'), ('UY', 'Uruguay'), ('UZ', 'Uzbekistan'), ('VU', 'Vanuatu'), ('VE', 'Venezuela, Bolivarian Republic Of'), ('VN', 'Viet Nam'), ('VG', 'Virgin Islands, British'), ('VI', 'Virgin Islands, U.S.'), ('WF', 'Wallis and Futuna'), ('EH', 'Western Sahara'), ('YE', 'Yemen'), ('ZM', 'Zambia'), ('ZW', 'Zimbabwe')])),
],
),
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated at')),
('extra', jsonfield.fields.JSONField(default={}, verbose_name='Arbitrary information for this cart')),
('billing_address', models.ForeignKey(related_name='+', default=None, to='myshop.Address', null=True)),
],
),
migrations.CreateModel(
name='CartItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('quantity', models.IntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('extra', jsonfield.fields.JSONField(default={}, verbose_name='Arbitrary information for this cart item')),
('cart', models.ForeignKey(related_name='items', to='myshop.Cart')),
],
),
migrations.CreateModel(
name='Customer',
fields=[
('user', models.OneToOneField(primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('recognized', models.PositiveSmallIntegerField(default=0, help_text='Designates the state the customer is recognized as.', verbose_name='Recognized as', choices=[(0, 'Unrecognized'), (1, 'Guest'), (2, 'Registered')])),
('salutation', models.CharField(max_length=5, verbose_name='Salutation', choices=[('mrs', 'Mrs.'), ('mr', 'Mr.'), ('na', '(n/a)')])),
('last_access', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Last accessed')),
('extra', jsonfield.fields.JSONField(default={}, verbose_name='Extra information about this customer', editable=False)),
('number', models.PositiveIntegerField(default=None, unique=True, null=True, verbose_name='Customer Number')),
],
),
migrations.CreateModel(
name='Manufacturer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50, verbose_name='Name')),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', django_fsm.FSMField(default='new', protected=True, max_length=50, verbose_name='Status')),
('currency', models.CharField(help_text='Currency in which this order was concluded', max_length=7, editable=False)),
('_subtotal', models.DecimalField(verbose_name='Subtotal', max_digits=30, decimal_places=2)),
('_total', models.DecimalField(verbose_name='Total', max_digits=30, decimal_places=2)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated at')),
('extra', jsonfield.fields.JSONField(default={}, help_text='Arbitrary information for this order object on the moment of purchase.', verbose_name='Extra fields')),
('stored_request', jsonfield.fields.JSONField(default={}, help_text='Parts of the Request objects on the moment of purchase.')),
('number', models.PositiveIntegerField(default=None, unique=True, null=True, verbose_name='Order Number')),
('shipping_address_text', models.TextField(help_text='Shipping address at the moment of purchase.', null=True, verbose_name='Shipping Address', blank=True)),
('billing_address_text', models.TextField(help_text='Billing address at the moment of purchase.', null=True, verbose_name='Billing Address', blank=True)),
('customer', models.ForeignKey(related_name='orders', verbose_name='Customer', to='myshop.Customer')),
],
options={
'verbose_name': 'Order',
'verbose_name_plural': 'Orders',
},
bases=(shop.payment.defaults.PayInAdvanceWorkflowMixin, shop.payment.defaults.CommissionGoodsWorkflowMixin, shop_stripe.payment.OrderWorkflowMixin, models.Model),
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('product_name', models.CharField(help_text='Product name at the moment of purchase.', max_length=255, null=True, verbose_name='Product name', blank=True)),
('product_code', models.CharField(help_text='Product code at the moment of purchase.', max_length=255, null=True, verbose_name='Product code', blank=True)),
('_unit_price', models.DecimalField(help_text='Products unit price at the moment of purchase.', null=True, verbose_name='Unit price', max_digits=30, decimal_places=2)),
('_line_total', models.DecimalField(help_text='Line total on the invoice at the moment of purchase.', null=True, verbose_name='Line Total', max_digits=30, decimal_places=2)),
('quantity', models.IntegerField(verbose_name='Ordered quantity')),
('extra', jsonfield.fields.JSONField(default={}, help_text='Arbitrary information for this order item', verbose_name='Extra fields')),
('order', models.ForeignKey(related_name='items', verbose_name='Order', to='myshop.Order')),
],
),
migrations.CreateModel(
name='OrderPayment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', django_fsm.FSMField(default='new', protected=True, max_length=50, verbose_name='Status')),
('amount', models.DecimalField(default='0', help_text='How much was paid with this particular transfer.', max_digits=30, decimal_places=2)),
('transaction_id', models.CharField(help_text="The transaction processor's reference", max_length=255, verbose_name='Transaction ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Received at')),
('payment_method', models.CharField(help_text='The payment backend used to process the purchase', max_length=255, verbose_name='Payment method')),
('order', models.ForeignKey(verbose_name='Order', to='myshop.Order')),
],
options={
'verbose_name': 'Order payment',
'verbose_name_plural': 'Order payments',
},
),
migrations.CreateModel(
name='ProductImage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('order', models.SmallIntegerField(default=0)),
('image', filer.fields.image.FilerImageField(to='filer.Image')),
],
),
migrations.CreateModel(
name='ProductPage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('page', models.ForeignKey(to='cms.Page')),
],
),
migrations.CreateModel(
name='SmartCard',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated at')),
('active', models.BooleanField(default=True, help_text='Is this product publicly visible.', verbose_name='Active')),
('product_name', models.CharField(max_length=255, verbose_name='Product Name')),
('slug', models.SlugField(verbose_name='Slug')),
('unit_price', models.DecimalField(default='0', help_text='Net price for this product', max_digits=30, decimal_places=3)),
('description', djangocms_text_ckeditor.fields.HTMLField(help_text='Description for the list view of products.', verbose_name='Description')),
('card_type', models.CharField(max_length=15, verbose_name='Card Type', choices=[('SD', 'SD'), ('micro SD', 'micro SD'), ('SDXC', 'SDXC'), ('micro SDXC', 'micro SDXC'), ('SDHC', 'SDHC'), ('micro SDHC', 'micro SDHC'), ('SDHC II', 'SDHC II'), ('micro SDHC II', 'micro SDHC II')])),
('speed', models.CharField(max_length=8, verbose_name='Transfer Speed', choices=[(b'4', '4 MB/s'), (b'20', '20 MB/s'), (b'30', '30 MB/s'), (b'40', '40 MB/s'), (b'48', '48 MB/s'), (b'80', '80 MB/s'), (b'95', '95 MB/s'), (b'280', '280 MB/s')])),
('product_code', models.CharField(unique=True, max_length=255, verbose_name='Product code')),
('storage', models.PositiveIntegerField(help_text='Storage capacity in GB', verbose_name='Storage Capacity')),
('order', models.PositiveIntegerField(verbose_name='Sort by', db_index=True)),
('cms_pages', models.ManyToManyField(help_text='Choose list view this product shall appear on.', to='cms.Page', through='myshop.ProductPage')),
('images', models.ManyToManyField(to='filer.Image', through='myshop.ProductImage')),
('manufacturer', models.ForeignKey(verbose_name='Manufacturer', to='myshop.Manufacturer')),
('polymorphic_ctype', models.ForeignKey(related_name='polymorphic_myshop.smartcard_set+', editable=False, to='contenttypes.ContentType', null=True)),
],
options={
'ordering': ('order',),
'verbose_name': 'Smart Card',
'verbose_name_plural': 'Smart Cards',
},
),
migrations.AddField(
model_name='productpage',
name='product',
field=models.ForeignKey(to='myshop.SmartCard'),
),
migrations.AddField(
model_name='productimage',
name='product',
field=models.ForeignKey(to='myshop.SmartCard'),
),
migrations.AddField(
model_name='orderitem',
name='product',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, verbose_name='Product', blank=True, to='myshop.SmartCard', null=True),
),
migrations.AddField(
model_name='cartitem',
name='product',
field=models.ForeignKey(to='myshop.SmartCard'),
),
migrations.AddField(
model_name='cart',
name='customer',
field=models.OneToOneField(related_name='cart', verbose_name='Customer', to='myshop.Customer'),
),
migrations.AddField(
model_name='cart',
name='shipping_address',
field=models.ForeignKey(related_name='+', default=None, to='myshop.Address', null=True),
),
migrations.AddField(
model_name='address',
name='customer',
field=models.ForeignKey(to='myshop.Customer'),
),
]
| true | true |
f73463d08d01639f183f1999e62472356c36cbde | 1,551 | py | Python | Element/FlutterElement.py | sunnyyukaige/Automation-core | 96f22346069736a2ca7eab90083a64f226f43340 | [
"MIT"
] | null | null | null | Element/FlutterElement.py | sunnyyukaige/Automation-core | 96f22346069736a2ca7eab90083a64f226f43340 | [
"MIT"
] | null | null | null | Element/FlutterElement.py | sunnyyukaige/Automation-core | 96f22346069736a2ca7eab90083a64f226f43340 | [
"MIT"
] | null | null | null | from Element.FlutterFind import FlutterFind
from selenium.common.exceptions import WebDriverException, NoSuchElementException
from Utilitys.WaitUtils import WaitUtils
class FlutterElement(FlutterFind):
def __init__(self, driver):
FlutterFind.__init__(self)
self.driver = driver
self.interval = 0.5
self.timeout = 20
def find_flutter_element_and_click(self, value):
try:
self.driver.find_flutter_element(value).click()
except Exception as e:
raise NoSuchElementException
def flutter_scroll_to_text(self, value):
try:
WaitUtils.flutter_wait_for_element(self.driver, value)
self.driver.execute_script(
"flutter:scrollIntoView", value, 0.1)
except Exception as handleRetry:
try:
WaitUtils.flutter_wait_for_element(self.driver, value)
self.driver.execute_script(
"flutter:scrollIntoView", value, 0.1)
except Exception as e:
raise NoSuchElementException
def find_flutter_element_sendkeys(self, locator, value):
try:
WaitUtils.flutter_wait_for_element(self.driver, value)
self.driver.elementSendKeys(locator, value)
except Exception as handleRetry:
try:
WaitUtils.flutter_wait_for_element(self.driver, value)
self.driver.elementSendKeys(locator, value)
except Exception as e:
raise NoSuchElementException
| 36.928571 | 81 | 0.648614 | from Element.FlutterFind import FlutterFind
from selenium.common.exceptions import WebDriverException, NoSuchElementException
from Utilitys.WaitUtils import WaitUtils
class FlutterElement(FlutterFind):
def __init__(self, driver):
FlutterFind.__init__(self)
self.driver = driver
self.interval = 0.5
self.timeout = 20
def find_flutter_element_and_click(self, value):
try:
self.driver.find_flutter_element(value).click()
except Exception as e:
raise NoSuchElementException
def flutter_scroll_to_text(self, value):
try:
WaitUtils.flutter_wait_for_element(self.driver, value)
self.driver.execute_script(
"flutter:scrollIntoView", value, 0.1)
except Exception as handleRetry:
try:
WaitUtils.flutter_wait_for_element(self.driver, value)
self.driver.execute_script(
"flutter:scrollIntoView", value, 0.1)
except Exception as e:
raise NoSuchElementException
def find_flutter_element_sendkeys(self, locator, value):
try:
WaitUtils.flutter_wait_for_element(self.driver, value)
self.driver.elementSendKeys(locator, value)
except Exception as handleRetry:
try:
WaitUtils.flutter_wait_for_element(self.driver, value)
self.driver.elementSendKeys(locator, value)
except Exception as e:
raise NoSuchElementException
| true | true |
f73465b37d26d4f56850b11cee68f7c815e9a9f3 | 2,970 | py | Python | hknweb/candidate/admin/requirements/requirements.py | Boomaa23/hknweb | 2c2ce38b5f1c0c6e04ba46282141557357bd5326 | [
"MIT"
] | null | null | null | hknweb/candidate/admin/requirements/requirements.py | Boomaa23/hknweb | 2c2ce38b5f1c0c6e04ba46282141557357bd5326 | [
"MIT"
] | null | null | null | hknweb/candidate/admin/requirements/requirements.py | Boomaa23/hknweb | 2c2ce38b5f1c0c6e04ba46282141557357bd5326 | [
"MIT"
] | null | null | null | from django.contrib import admin
from hknweb.candidate.models import (
CandidateForm,
CandidateFormDoneEntry,
CommitteeProject,
CommitteeProjectDoneEntry,
DuePayment,
DuePaymentPaidEntry,
RequirementBitByteActivity,
RequriementEvent,
RequirementHangout,
RequirementMandatory,
RequirementMergeRequirement,
)
from hknweb.candidate.admin.requirements.mixins import SetSemesterMixin, SetVisibleAndSemesterMixin
@admin.register(RequriementEvent)
@admin.register(RequirementHangout)
@admin.register(RequirementBitByteActivity)
class RequirementAdminGeneral(admin.ModelAdmin, SetSemesterMixin):
actions = [
"set_enable",
"set_disable",
"set_fall_this_year",
"set_spring_this_year",
"set_summer_this_year",
]
def set_enable(self, request, queryset):
queryset.update(enable=True)
set_enable.short_description = "Enable selected"
def set_disable(self, request, queryset):
queryset.update(enable=False)
set_disable.short_description = "Disable selected"
@admin.register(CandidateForm)
class CandidateFormAdmin(admin.ModelAdmin, SetVisibleAndSemesterMixin):
fields = ["name", "link", "visible", "duedate", "candidateSemesterActive"]
list_display = ("name", "link", "visible", "duedate", "candidateSemesterActive")
list_filter = ["visible", "duedate", "candidateSemesterActive"]
search_fields = ["name", "link"]
@admin.register(DuePayment)
@admin.register(CommitteeProject)
class MiscRequirementAdmin(admin.ModelAdmin, SetVisibleAndSemesterMixin):
fields = ["name", "instructions", "visible", "duedate", "candidateSemesterActive"]
list_display = (
"name",
"instructions",
"visible",
"duedate",
"candidateSemesterActive",
)
list_filter = ["visible", "duedate", "candidateSemesterActive"]
search_fields = ["name", "instructions"]
@admin.register(RequirementMandatory)
class RequirementMandatoryAdmin(RequirementAdminGeneral):
filter_horizontal = ("events",)
@admin.register(RequirementMergeRequirement)
class RequirementMergeAdmin(RequirementAdminGeneral):
actions = RequirementAdminGeneral.actions + ["link", "clear_links"]
def link(self, request, queryset):
queryset = list(queryset)
for i, node in enumerate(queryset):
print(i, node)
node.linkedRequirement = (
queryset[i + 1] if (i + 1 < len(queryset)) else None
)
node.save()
link.short_description = "Link together selected (overwrites current links)"
def clear_links(self, request, queryset):
queryset.update(linkedRequirement=None)
clear_links.short_description = "Clear links of Merges"
@admin.register(DuePaymentPaidEntry)
@admin.register(CandidateFormDoneEntry)
@admin.register(CommitteeProjectDoneEntry)
class MiscRequirementEntryAdmin(admin.ModelAdmin):
filter_horizontal = ("users",)
| 30.9375 | 99 | 0.718182 | from django.contrib import admin
from hknweb.candidate.models import (
CandidateForm,
CandidateFormDoneEntry,
CommitteeProject,
CommitteeProjectDoneEntry,
DuePayment,
DuePaymentPaidEntry,
RequirementBitByteActivity,
RequriementEvent,
RequirementHangout,
RequirementMandatory,
RequirementMergeRequirement,
)
from hknweb.candidate.admin.requirements.mixins import SetSemesterMixin, SetVisibleAndSemesterMixin
@admin.register(RequriementEvent)
@admin.register(RequirementHangout)
@admin.register(RequirementBitByteActivity)
class RequirementAdminGeneral(admin.ModelAdmin, SetSemesterMixin):
actions = [
"set_enable",
"set_disable",
"set_fall_this_year",
"set_spring_this_year",
"set_summer_this_year",
]
def set_enable(self, request, queryset):
queryset.update(enable=True)
set_enable.short_description = "Enable selected"
def set_disable(self, request, queryset):
queryset.update(enable=False)
set_disable.short_description = "Disable selected"
@admin.register(CandidateForm)
class CandidateFormAdmin(admin.ModelAdmin, SetVisibleAndSemesterMixin):
fields = ["name", "link", "visible", "duedate", "candidateSemesterActive"]
list_display = ("name", "link", "visible", "duedate", "candidateSemesterActive")
list_filter = ["visible", "duedate", "candidateSemesterActive"]
search_fields = ["name", "link"]
@admin.register(DuePayment)
@admin.register(CommitteeProject)
class MiscRequirementAdmin(admin.ModelAdmin, SetVisibleAndSemesterMixin):
fields = ["name", "instructions", "visible", "duedate", "candidateSemesterActive"]
list_display = (
"name",
"instructions",
"visible",
"duedate",
"candidateSemesterActive",
)
list_filter = ["visible", "duedate", "candidateSemesterActive"]
search_fields = ["name", "instructions"]
@admin.register(RequirementMandatory)
class RequirementMandatoryAdmin(RequirementAdminGeneral):
filter_horizontal = ("events",)
@admin.register(RequirementMergeRequirement)
class RequirementMergeAdmin(RequirementAdminGeneral):
actions = RequirementAdminGeneral.actions + ["link", "clear_links"]
def link(self, request, queryset):
queryset = list(queryset)
for i, node in enumerate(queryset):
print(i, node)
node.linkedRequirement = (
queryset[i + 1] if (i + 1 < len(queryset)) else None
)
node.save()
link.short_description = "Link together selected (overwrites current links)"
def clear_links(self, request, queryset):
queryset.update(linkedRequirement=None)
clear_links.short_description = "Clear links of Merges"
@admin.register(DuePaymentPaidEntry)
@admin.register(CandidateFormDoneEntry)
@admin.register(CommitteeProjectDoneEntry)
class MiscRequirementEntryAdmin(admin.ModelAdmin):
filter_horizontal = ("users",)
| true | true |
f734679649791ee4b1095d9eedf1e71818a2b695 | 489 | py | Python | lego/apps/followers/migrations/0002_auto_20170831_1103.py | ollfkaih/lego | b15aacaf09efe90e7f984d25b0e7bddbe12647e8 | [
"MIT"
] | 45 | 2017-10-24T12:09:06.000Z | 2021-11-03T21:21:03.000Z | lego/apps/followers/migrations/0002_auto_20170831_1103.py | ollfkaih/lego | b15aacaf09efe90e7f984d25b0e7bddbe12647e8 | [
"MIT"
] | 980 | 2017-10-24T12:29:07.000Z | 2022-03-31T04:04:31.000Z | lego/apps/followers/migrations/0002_auto_20170831_1103.py | ollfkaih/lego | b15aacaf09efe90e7f984d25b0e7bddbe12647e8 | [
"MIT"
] | 23 | 2018-04-11T16:34:22.000Z | 2021-11-23T12:28:30.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-31 11:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("followers", "0001_initial")]
operations = [
migrations.RemoveField(model_name="followcompany", name="deleted"),
migrations.RemoveField(model_name="followevent", name="deleted"),
migrations.RemoveField(model_name="followuser", name="deleted"),
]
| 28.764706 | 75 | 0.705521 |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("followers", "0001_initial")]
operations = [
migrations.RemoveField(model_name="followcompany", name="deleted"),
migrations.RemoveField(model_name="followevent", name="deleted"),
migrations.RemoveField(model_name="followuser", name="deleted"),
]
| true | true |
f7346818a39b5a17a62b2966aaea4541f1d02cd3 | 55,751 | py | Python | operators/elastic-cloud-eck/python/pulumi_pulumi_kubernetes_crds_operators_elastic_cloud_eck/enterprisesearch/v1beta1/_inputs.py | pulumi/pulumi-kubernetes-crds | 372c4c0182f6b899af82d6edaad521aa14f22150 | [
"Apache-2.0"
] | null | null | null | operators/elastic-cloud-eck/python/pulumi_pulumi_kubernetes_crds_operators_elastic_cloud_eck/enterprisesearch/v1beta1/_inputs.py | pulumi/pulumi-kubernetes-crds | 372c4c0182f6b899af82d6edaad521aa14f22150 | [
"Apache-2.0"
] | 2 | 2020-09-18T17:12:23.000Z | 2020-12-30T19:40:56.000Z | operators/elastic-cloud-eck/python/pulumi_pulumi_kubernetes_crds_operators_elastic_cloud_eck/enterprisesearch/v1beta1/_inputs.py | pulumi/pulumi-kubernetes-crds | 372c4c0182f6b899af82d6edaad521aa14f22150 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'EnterpriseSearchSpecArgs',
'EnterpriseSearchSpecConfigRefArgs',
'EnterpriseSearchSpecElasticsearchRefArgs',
'EnterpriseSearchSpecHttpArgs',
'EnterpriseSearchSpecHttpServiceArgs',
'EnterpriseSearchSpecHttpServiceSpecArgs',
'EnterpriseSearchSpecHttpServiceSpecPortsArgs',
'EnterpriseSearchSpecHttpServiceSpecPortsTargetPortArgs',
'EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigArgs',
'EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigClientIPArgs',
'EnterpriseSearchSpecHttpTlsArgs',
'EnterpriseSearchSpecHttpTlsCertificateArgs',
'EnterpriseSearchSpecHttpTlsSelfSignedCertificateArgs',
'EnterpriseSearchSpecHttpTlsSelfSignedCertificateSubjectAltNamesArgs',
'EnterpriseSearchStatusArgs',
]
@pulumi.input_type
class EnterpriseSearchSpecArgs:
def __init__(__self__, *,
config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
config_ref: Optional[pulumi.Input['EnterpriseSearchSpecConfigRefArgs']] = None,
count: Optional[pulumi.Input[int]] = None,
elasticsearch_ref: Optional[pulumi.Input['EnterpriseSearchSpecElasticsearchRefArgs']] = None,
http: Optional[pulumi.Input['EnterpriseSearchSpecHttpArgs']] = None,
image: Optional[pulumi.Input[str]] = None,
pod_template: Optional[pulumi.Input[Mapping[str, Any]]] = None,
service_account_name: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[str]] = None):
"""
EnterpriseSearchSpec holds the specification of an Enterprise Search resource.
:param pulumi.Input[Mapping[str, Any]] config: Config holds the Enterprise Search configuration.
:param pulumi.Input['EnterpriseSearchSpecConfigRefArgs'] config_ref: ConfigRef contains a reference to an existing Kubernetes Secret holding the Enterprise Search configuration. Configuration settings are merged and have precedence over settings specified in `config`.
:param pulumi.Input[int] count: Count of Enterprise Search instances to deploy.
:param pulumi.Input['EnterpriseSearchSpecElasticsearchRefArgs'] elasticsearch_ref: ElasticsearchRef is a reference to the Elasticsearch cluster running in the same Kubernetes cluster.
:param pulumi.Input['EnterpriseSearchSpecHttpArgs'] http: HTTP holds the HTTP layer configuration for Enterprise Search resource.
:param pulumi.Input[str] image: Image is the Enterprise Search Docker image to deploy.
:param pulumi.Input[Mapping[str, Any]] pod_template: PodTemplate provides customisation options (labels, annotations, affinity rules, resource requests, and so on) for the Enterprise Search pods.
:param pulumi.Input[str] service_account_name: ServiceAccountName is used to check access from the current resource to a resource (eg. Elasticsearch) in a different namespace. Can only be used if ECK is enforcing RBAC on references.
:param pulumi.Input[str] version: Version of Enterprise Search.
"""
if config is not None:
pulumi.set(__self__, "config", config)
if config_ref is not None:
pulumi.set(__self__, "config_ref", config_ref)
if count is not None:
pulumi.set(__self__, "count", count)
if elasticsearch_ref is not None:
pulumi.set(__self__, "elasticsearch_ref", elasticsearch_ref)
if http is not None:
pulumi.set(__self__, "http", http)
if image is not None:
pulumi.set(__self__, "image", image)
if pod_template is not None:
pulumi.set(__self__, "pod_template", pod_template)
if service_account_name is not None:
pulumi.set(__self__, "service_account_name", service_account_name)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def config(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Config holds the Enterprise Search configuration.
"""
return pulumi.get(self, "config")
@config.setter
def config(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "config", value)
@property
@pulumi.getter(name="configRef")
def config_ref(self) -> Optional[pulumi.Input['EnterpriseSearchSpecConfigRefArgs']]:
"""
ConfigRef contains a reference to an existing Kubernetes Secret holding the Enterprise Search configuration. Configuration settings are merged and have precedence over settings specified in `config`.
"""
return pulumi.get(self, "config_ref")
@config_ref.setter
def config_ref(self, value: Optional[pulumi.Input['EnterpriseSearchSpecConfigRefArgs']]):
pulumi.set(self, "config_ref", value)
@property
@pulumi.getter
def count(self) -> Optional[pulumi.Input[int]]:
"""
Count of Enterprise Search instances to deploy.
"""
return pulumi.get(self, "count")
@count.setter
def count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "count", value)
@property
@pulumi.getter(name="elasticsearchRef")
def elasticsearch_ref(self) -> Optional[pulumi.Input['EnterpriseSearchSpecElasticsearchRefArgs']]:
"""
ElasticsearchRef is a reference to the Elasticsearch cluster running in the same Kubernetes cluster.
"""
return pulumi.get(self, "elasticsearch_ref")
@elasticsearch_ref.setter
def elasticsearch_ref(self, value: Optional[pulumi.Input['EnterpriseSearchSpecElasticsearchRefArgs']]):
pulumi.set(self, "elasticsearch_ref", value)
@property
@pulumi.getter
def http(self) -> Optional[pulumi.Input['EnterpriseSearchSpecHttpArgs']]:
"""
HTTP holds the HTTP layer configuration for Enterprise Search resource.
"""
return pulumi.get(self, "http")
@http.setter
def http(self, value: Optional[pulumi.Input['EnterpriseSearchSpecHttpArgs']]):
pulumi.set(self, "http", value)
@property
@pulumi.getter
def image(self) -> Optional[pulumi.Input[str]]:
"""
Image is the Enterprise Search Docker image to deploy.
"""
return pulumi.get(self, "image")
@image.setter
def image(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image", value)
@property
@pulumi.getter(name="podTemplate")
def pod_template(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
PodTemplate provides customisation options (labels, annotations, affinity rules, resource requests, and so on) for the Enterprise Search pods.
"""
return pulumi.get(self, "pod_template")
@pod_template.setter
def pod_template(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "pod_template", value)
@property
@pulumi.getter(name="serviceAccountName")
def service_account_name(self) -> Optional[pulumi.Input[str]]:
"""
ServiceAccountName is used to check access from the current resource to a resource (eg. Elasticsearch) in a different namespace. Can only be used if ECK is enforcing RBAC on references.
"""
return pulumi.get(self, "service_account_name")
@service_account_name.setter
def service_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_account_name", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
"""
Version of Enterprise Search.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version", value)
@pulumi.input_type
class EnterpriseSearchSpecConfigRefArgs:
    def __init__(__self__, *,
                 secret_name: Optional[pulumi.Input[str]] = None):
        """
        Reference to an existing Kubernetes Secret holding the Enterprise Search
        configuration. Settings from the referenced Secret are merged with, and
        take precedence over, those specified in `config`.

        :param pulumi.Input[str] secret_name: SecretName is the name of the secret.
        """
        if secret_name is not None:
            pulumi.set(__self__, "secret_name", secret_name)

    @property
    @pulumi.getter(name="secretName")
    def secret_name(self) -> Optional[pulumi.Input[str]]:
        """SecretName is the name of the secret."""
        return pulumi.get(self, "secret_name")

    @secret_name.setter
    def secret_name(self, new_secret_name: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secret_name", new_secret_name)
@pulumi.input_type
class EnterpriseSearchSpecElasticsearchRefArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 namespace: Optional[pulumi.Input[str]] = None):
        """
        Reference to the Elasticsearch cluster running in the same Kubernetes
        cluster.

        :param pulumi.Input[str] name: Name of the Kubernetes object.
        :param pulumi.Input[str] namespace: Namespace of the Kubernetes object.
               If empty, defaults to the current namespace.
        """
        # `name` is required; `namespace` is only recorded when supplied.
        pulumi.set(__self__, "name", name)
        if namespace is not None:
            pulumi.set(__self__, "namespace", namespace)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """Name of the Kubernetes object."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, new_name: pulumi.Input[str]):
        pulumi.set(self, "name", new_name)

    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        """Namespace of the Kubernetes object. If empty, defaults to the current namespace."""
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, new_namespace: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", new_namespace)
@pulumi.input_type
class EnterpriseSearchSpecHttpArgs:
    def __init__(__self__, *,
                 service: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceArgs']] = None,
                 tls: Optional[pulumi.Input['EnterpriseSearchSpecHttpTlsArgs']] = None):
        """
        HTTP layer configuration for the Enterprise Search resource.

        :param pulumi.Input['EnterpriseSearchSpecHttpServiceArgs'] service:
               Template for the associated Kubernetes Service object.
        :param pulumi.Input['EnterpriseSearchSpecHttpTlsArgs'] tls:
               Options for configuring TLS for HTTP.
        """
        # Only record fields that were actually provided.
        for attr, arg in (("service", service), ("tls", tls)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter
    def service(self) -> Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceArgs']]:
        """Service defines the template for the associated Kubernetes Service object."""
        return pulumi.get(self, "service")

    @service.setter
    def service(self, new_service: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceArgs']]):
        pulumi.set(self, "service", new_service)

    @property
    @pulumi.getter
    def tls(self) -> Optional[pulumi.Input['EnterpriseSearchSpecHttpTlsArgs']]:
        """TLS defines options for configuring TLS for HTTP."""
        return pulumi.get(self, "tls")

    @tls.setter
    def tls(self, new_tls: Optional[pulumi.Input['EnterpriseSearchSpecHttpTlsArgs']]):
        pulumi.set(self, "tls", new_tls)
@pulumi.input_type
class EnterpriseSearchSpecHttpServiceArgs:
    def __init__(__self__, *,
                 metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 spec: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecArgs']] = None):
        """
        Template for the associated Kubernetes Service object.

        :param pulumi.Input[Mapping[str, Any]] metadata: ObjectMeta of the service.
               The name and namespace provided here are managed by ECK and will be
               ignored.
        :param pulumi.Input['EnterpriseSearchSpecHttpServiceSpecArgs'] spec:
               Specification of the service.
        """
        # Only record fields that were actually provided.
        for attr, arg in (("metadata", metadata), ("spec", spec)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter
    def metadata(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        ObjectMeta is the metadata of the service. The name and namespace provided
        here are managed by ECK and will be ignored.
        """
        return pulumi.get(self, "metadata")

    @metadata.setter
    def metadata(self, new_metadata: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "metadata", new_metadata)

    @property
    @pulumi.getter
    def spec(self) -> Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecArgs']]:
        """Spec is the specification of the service."""
        return pulumi.get(self, "spec")

    @spec.setter
    def spec(self, new_spec: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecArgs']]):
        pulumi.set(self, "spec", new_spec)
@pulumi.input_type
class EnterpriseSearchSpecHttpServiceSpecArgs:
    def __init__(__self__, *,
                 cluster_ip: Optional[pulumi.Input[str]] = None,
                 external_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 external_name: Optional[pulumi.Input[str]] = None,
                 external_traffic_policy: Optional[pulumi.Input[str]] = None,
                 health_check_node_port: Optional[pulumi.Input[int]] = None,
                 ip_family: Optional[pulumi.Input[str]] = None,
                 load_balancer_ip: Optional[pulumi.Input[str]] = None,
                 load_balancer_source_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 ports: Optional[pulumi.Input[Sequence[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecPortsArgs']]]] = None,
                 publish_not_ready_addresses: Optional[pulumi.Input[bool]] = None,
                 selector: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 session_affinity: Optional[pulumi.Input[str]] = None,
                 session_affinity_config: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigArgs']] = None,
                 topology_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        Specification of the Kubernetes Service. Mirrors the core/v1 ServiceSpec
        fields.

        :param pulumi.Input[str] cluster_ip: Cluster-internal IP of the service; "None", "" or a valid IP. Only for ClusterIP, NodePort and LoadBalancer types; ignored for ExternalName.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] external_ips: Extra, externally managed IPs on which cluster nodes also accept traffic for this service.
        :param pulumi.Input[str] external_name: CNAME target returned by kubedns for this service; requires type ExternalName and a valid RFC-1123 hostname.
        :param pulumi.Input[str] external_traffic_policy: "Local" preserves the client source IP (LoadBalancer/NodePort only); "Cluster" obscures it but load-spreads better.
        :param pulumi.Input[int] health_check_node_port: Healthcheck nodePort; auto-allocated if unset. Only effective with type LoadBalancer and externalTrafficPolicy Local.
        :param pulumi.Input[str] ip_family: Preferred IP family (e.g. IPv4 vs. IPv6) for the service; immutable after creation.
        :param pulumi.Input[str] load_balancer_ip: Requested load-balancer IP (type LoadBalancer only); honoured only if the cloud provider supports it.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] load_balancer_source_ranges: Client IP ranges allowed through the cloud-provider load balancer, where supported.
        :param pulumi.Input[Sequence[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecPortsArgs']]] ports: Ports exposed by this service.
        :param pulumi.Input[bool] publish_not_ready_addresses: When true, DNS publishes not-ready Endpoints addresses (e.g. StatefulSet peer discovery). Defaults to false.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] selector: Pod label selector routing traffic; empty means endpoints are managed externally. Ignored for ExternalName.
        :param pulumi.Input[str] session_affinity: "ClientIP" or "None" (default) for client-IP-based session affinity.
        :param pulumi.Input['EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigArgs'] session_affinity_config: Configuration of session affinity.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] topology_keys: Preference-ordered topology keys (max 16) for endpoint selection; "*" matches any topology. Incompatible with externalTrafficPolicy=Local.
        :param pulumi.Input[str] type: How the service is exposed: ExternalName, ClusterIP (default), NodePort or LoadBalancer.
        """
        # Table-driven: store only the fields the caller actually supplied.
        for attr, arg in (("cluster_ip", cluster_ip),
                          ("external_ips", external_ips),
                          ("external_name", external_name),
                          ("external_traffic_policy", external_traffic_policy),
                          ("health_check_node_port", health_check_node_port),
                          ("ip_family", ip_family),
                          ("load_balancer_ip", load_balancer_ip),
                          ("load_balancer_source_ranges", load_balancer_source_ranges),
                          ("ports", ports),
                          ("publish_not_ready_addresses", publish_not_ready_addresses),
                          ("selector", selector),
                          ("session_affinity", session_affinity),
                          ("session_affinity_config", session_affinity_config),
                          ("topology_keys", topology_keys),
                          ("type", type)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="clusterIP")
    def cluster_ip(self) -> Optional[pulumi.Input[str]]:
        """
        Cluster-internal IP of the service, usually assigned by the master; may be
        "None" (headless), "" or a valid IP. Cannot be changed through updates.
        Only applies to types ClusterIP, NodePort and LoadBalancer; ignored for
        ExternalName.
        """
        return pulumi.get(self, "cluster_ip")

    @cluster_ip.setter
    def cluster_ip(self, new_cluster_ip: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_ip", new_cluster_ip)

    @property
    @pulumi.getter(name="externalIPs")
    def external_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        IPs, not managed by Kubernetes, for which cluster nodes also accept
        traffic for this service (e.g. external load balancers).
        """
        return pulumi.get(self, "external_ips")

    @external_ips.setter
    def external_ips(self, new_external_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "external_ips", new_external_ips)

    @property
    @pulumi.getter(name="externalName")
    def external_name(self) -> Optional[pulumi.Input[str]]:
        """
        CNAME target that kubedns (or equivalent) returns for this service; no
        proxying involved. Must be a valid RFC-1123 hostname and requires type
        ExternalName.
        """
        return pulumi.get(self, "external_name")

    @external_name.setter
    def external_name(self, new_external_name: Optional[pulumi.Input[str]]):
        pulumi.set(self, "external_name", new_external_name)

    @property
    @pulumi.getter(name="externalTrafficPolicy")
    def external_traffic_policy(self) -> Optional[pulumi.Input[str]]:
        """
        Routing policy for external traffic: "Local" preserves the client source
        IP for LoadBalancer/NodePort services but risks imbalanced spreading;
        "Cluster" obscures the source IP but load-spreads well.
        """
        return pulumi.get(self, "external_traffic_policy")

    @external_traffic_policy.setter
    def external_traffic_policy(self, new_policy: Optional[pulumi.Input[str]]):
        pulumi.set(self, "external_traffic_policy", new_policy)

    @property
    @pulumi.getter(name="healthCheckNodePort")
    def health_check_node_port(self) -> Optional[pulumi.Input[int]]:
        """
        Healthcheck nodePort for the service; allocated by the API backend if
        unset. Only effective when type is LoadBalancer and
        externalTrafficPolicy is Local.
        """
        return pulumi.get(self, "health_check_node_port")

    @health_check_node_port.setter
    def health_check_node_port(self, new_port: Optional[pulumi.Input[int]]):
        pulumi.set(self, "health_check_node_port", new_port)

    @property
    @pulumi.getter(name="ipFamily")
    def ip_family(self) -> Optional[pulumi.Input[str]]:
        """
        Preferred IP family (e.g. IPv4 vs. IPv6) for this service; the clusterIP
        and endpoints are allocated from it. Immutable after creation; requesting
        a family unavailable in the cluster fails clusterIP assignment.
        """
        return pulumi.get(self, "ip_family")

    @ip_family.setter
    def ip_family(self, new_family: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_family", new_family)

    @property
    @pulumi.getter(name="loadBalancerIP")
    def load_balancer_ip(self) -> Optional[pulumi.Input[str]]:
        """
        Requested IP for the load balancer (type LoadBalancer only); ignored when
        the cloud provider does not support specifying it.
        """
        return pulumi.get(self, "load_balancer_ip")

    @load_balancer_ip.setter
    def load_balancer_ip(self, new_ip: Optional[pulumi.Input[str]]):
        pulumi.set(self, "load_balancer_ip", new_ip)

    @property
    @pulumi.getter(name="loadBalancerSourceRanges")
    def load_balancer_source_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Client IP ranges to which traffic through the cloud-provider load
        balancer is restricted, where the platform supports it.
        """
        return pulumi.get(self, "load_balancer_source_ranges")

    @load_balancer_source_ranges.setter
    def load_balancer_source_ranges(self, new_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "load_balancer_source_ranges", new_ranges)

    @property
    @pulumi.getter
    def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecPortsArgs']]]]:
        """The list of ports that are exposed by this service."""
        return pulumi.get(self, "ports")

    @ports.setter
    def ports(self, new_ports: Optional[pulumi.Input[Sequence[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecPortsArgs']]]]):
        pulumi.set(self, "ports", new_ports)

    @property
    @pulumi.getter(name="publishNotReadyAddresses")
    def publish_not_ready_addresses(self) -> Optional[pulumi.Input[bool]]:
        """
        When true, DNS implementations must publish the notReadyAddresses of the
        service's Endpoints (defaults to false). Primarily used so a
        StatefulSet's headless Service propagates SRV records for peer discovery
        regardless of readiness.
        """
        return pulumi.get(self, "publish_not_ready_addresses")

    @publish_not_ready_addresses.setter
    def publish_not_ready_addresses(self, new_flag: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "publish_not_ready_addresses", new_flag)

    @property
    @pulumi.getter
    def selector(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Pod label selector used to route traffic; if empty, endpoints are assumed
        to be managed by an external process. Only applies to ClusterIP, NodePort
        and LoadBalancer types; ignored for ExternalName.
        """
        return pulumi.get(self, "selector")

    @selector.setter
    def selector(self, new_selector: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "selector", new_selector)

    @property
    @pulumi.getter(name="sessionAffinity")
    def session_affinity(self) -> Optional[pulumi.Input[str]]:
        """
        Session affinity mode: "ClientIP" for client-IP-based affinity or "None"
        (the default).
        """
        return pulumi.get(self, "session_affinity")

    @session_affinity.setter
    def session_affinity(self, new_affinity: Optional[pulumi.Input[str]]):
        pulumi.set(self, "session_affinity", new_affinity)

    @property
    @pulumi.getter(name="sessionAffinityConfig")
    def session_affinity_config(self) -> Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigArgs']]:
        """sessionAffinityConfig contains the configurations of session affinity."""
        return pulumi.get(self, "session_affinity_config")

    @session_affinity_config.setter
    def session_affinity_config(self, new_config: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigArgs']]):
        pulumi.set(self, "session_affinity_config", new_config)

    @property
    @pulumi.getter(name="topologyKeys")
    def topology_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Preference-ordered list of topology keys (valid label keys, at most 16)
        used to sort endpoints; the first key with available backends wins. "*"
        matches any topology and only makes sense last. Cannot be combined with
        externalTrafficPolicy=Local; empty means no topology constraints.
        """
        return pulumi.get(self, "topology_keys")

    @topology_keys.setter
    def topology_keys(self, new_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "topology_keys", new_keys)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        How the service is exposed; one of ExternalName, ClusterIP (default),
        NodePort or LoadBalancer. ExternalName maps to externalName; ClusterIP
        allocates a cluster-internal IP (or none, if clusterIP is "None");
        NodePort additionally opens a port on every node; LoadBalancer
        additionally provisions an external load balancer where supported.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, new_type: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", new_type)
@pulumi.input_type
class EnterpriseSearchSpecHttpServiceSpecPortsArgs:
def __init__(__self__, *,
port: pulumi.Input[int],
app_protocol: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
node_port: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[str]] = None,
target_port: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecPortsTargetPortArgs']] = None):
"""
ServicePort contains information on service's port.
:param pulumi.Input[int] port: The port that will be exposed by this service.
:param pulumi.Input[str] app_protocol: The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. Field can be enabled with ServiceAppProtocol feature gate.
:param pulumi.Input[str] name: The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.
:param pulumi.Input[int] node_port: The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
:param pulumi.Input[str] protocol: The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". Default is TCP.
:param pulumi.Input['EnterpriseSearchSpecHttpServiceSpecPortsTargetPortArgs'] target_port: Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
"""
pulumi.set(__self__, "port", port)
if app_protocol is not None:
pulumi.set(__self__, "app_protocol", app_protocol)
if name is not None:
pulumi.set(__self__, "name", name)
if node_port is not None:
pulumi.set(__self__, "node_port", node_port)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if target_port is not None:
pulumi.set(__self__, "target_port", target_port)
@property
@pulumi.getter
def port(self) -> pulumi.Input[int]:
"""
The port that will be exposed by this service.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[int]):
pulumi.set(self, "port", value)
@property
@pulumi.getter(name="appProtocol")
def app_protocol(self) -> Optional[pulumi.Input[str]]:
"""
The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. Field can be enabled with ServiceAppProtocol feature gate.
"""
return pulumi.get(self, "app_protocol")
@app_protocol.setter
def app_protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_protocol", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="nodePort")
def node_port(self) -> Optional[pulumi.Input[int]]:
"""
The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
"""
return pulumi.get(self, "node_port")
    @node_port.setter
    def node_port(self, value: Optional[pulumi.Input[int]]):
        # Record the new node-port value in the Pulumi runtime store.
        pulumi.set(self, "node_port", value)
@property
@pulumi.getter
def protocol(self) -> Optional[pulumi.Input[str]]:
"""
The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". Default is TCP.
"""
return pulumi.get(self, "protocol")
    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        # Record the new IP-protocol value in the Pulumi runtime store.
        pulumi.set(self, "protocol", value)
@property
@pulumi.getter(name="targetPort")
def target_port(self) -> Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecPortsTargetPortArgs']]:
"""
Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
"""
return pulumi.get(self, "target_port")
    @target_port.setter
    def target_port(self, value: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecPortsTargetPortArgs']]):
        # Record the new target-port value in the Pulumi runtime store.
        pulumi.set(self, "target_port", value)
@pulumi.input_type
class EnterpriseSearchSpecHttpServiceSpecPortsTargetPortArgs:
    """Empty placeholder input type for a port target value; carries no fields."""

    def __init__(__self__):
        # Nothing to initialize: this generated type declares no properties.
        pass
@pulumi.input_type
class EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigArgs:
    """sessionAffinityConfig contains the configurations of session affinity."""

    def __init__(__self__, *,
                 client_ip: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigClientIPArgs']] = None):
        """
        :param client_ip: clientIP contains the configurations of Client IP
            based session affinity.
        """
        if client_ip is None:
            return
        pulumi.set(__self__, "client_ip", client_ip)

    @property
    @pulumi.getter(name="clientIP")
    def client_ip(self) -> Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigClientIPArgs']]:
        """clientIP contains the configurations of Client IP based session affinity."""
        stored = pulumi.get(self, "client_ip")
        return stored

    @client_ip.setter
    def client_ip(self, value: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigClientIPArgs']]):
        pulumi.set(self, "client_ip", value)
@pulumi.input_type
class EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigClientIPArgs:
    """clientIP contains the configurations of Client IP based session affinity."""

    def __init__(__self__, *,
                 timeout_seconds: Optional[pulumi.Input[int]] = None):
        """
        :param timeout_seconds: seconds of ClientIP-type session sticky time.
            Must be >0 and <=86400 (1 day) when ServiceAffinity == "ClientIP".
            Default value is 10800 (3 hours).
        """
        if timeout_seconds is None:
            return
        pulumi.set(__self__, "timeout_seconds", timeout_seconds)

    @property
    @pulumi.getter(name="timeoutSeconds")
    def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
        """Seconds of ClientIP-type session sticky time; >0 and <=86400 when
        ServiceAffinity == "ClientIP". Default is 10800 (3 hours)."""
        stored = pulumi.get(self, "timeout_seconds")
        return stored

    @timeout_seconds.setter
    def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class EnterpriseSearchSpecHttpTlsArgs:
    """TLS defines options for configuring TLS for HTTP."""

    def __init__(__self__, *,
                 certificate: Optional[pulumi.Input['EnterpriseSearchSpecHttpTlsCertificateArgs']] = None,
                 self_signed_certificate: Optional[pulumi.Input['EnterpriseSearchSpecHttpTlsSelfSignedCertificateArgs']] = None):
        """
        :param certificate: reference to a Kubernetes secret holding the TLS
            certificate and private key (`ca.crt` optional, `tls.crt`,
            `tls.key`).
        :param self_signed_certificate: configuration for the operator's
            self-signed certificate.
        """
        # Only record fields that were actually supplied.
        for field_name, field_value in (
                ("certificate", certificate),
                ("self_signed_certificate", self_signed_certificate)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter
    def certificate(self) -> Optional[pulumi.Input['EnterpriseSearchSpecHttpTlsCertificateArgs']]:
        """Reference to a Kubernetes secret containing the certificate and
        private key for enabling TLS. The secret should contain:
        - `ca.crt`: the certificate authority (optional). - `tls.crt`: the
        certificate (or a chain). - `tls.key`: the private key to the first
        certificate in the chain."""
        stored = pulumi.get(self, "certificate")
        return stored

    @certificate.setter
    def certificate(self, value: Optional[pulumi.Input['EnterpriseSearchSpecHttpTlsCertificateArgs']]):
        pulumi.set(self, "certificate", value)

    @property
    @pulumi.getter(name="selfSignedCertificate")
    def self_signed_certificate(self) -> Optional[pulumi.Input['EnterpriseSearchSpecHttpTlsSelfSignedCertificateArgs']]:
        """Configuration of the self-signed certificate generated by the operator."""
        stored = pulumi.get(self, "self_signed_certificate")
        return stored

    @self_signed_certificate.setter
    def self_signed_certificate(self, value: Optional[pulumi.Input['EnterpriseSearchSpecHttpTlsSelfSignedCertificateArgs']]):
        pulumi.set(self, "self_signed_certificate", value)
@pulumi.input_type
class EnterpriseSearchSpecHttpTlsCertificateArgs:
    """Reference to a Kubernetes secret containing the certificate and private
    key for enabling TLS (`ca.crt` optional, `tls.crt`, `tls.key`)."""

    def __init__(__self__, *,
                 secret_name: Optional[pulumi.Input[str]] = None):
        """
        :param secret_name: name of the secret.
        """
        if secret_name is None:
            return
        pulumi.set(__self__, "secret_name", secret_name)

    @property
    @pulumi.getter(name="secretName")
    def secret_name(self) -> Optional[pulumi.Input[str]]:
        """SecretName is the name of the secret."""
        stored = pulumi.get(self, "secret_name")
        return stored

    @secret_name.setter
    def secret_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secret_name", value)
@pulumi.input_type
class EnterpriseSearchSpecHttpTlsSelfSignedCertificateArgs:
    """SelfSignedCertificate allows configuring the self-signed certificate
    generated by the operator."""

    def __init__(__self__, *,
                 disabled: Optional[pulumi.Input[bool]] = None,
                 subject_alt_names: Optional[pulumi.Input[Sequence[pulumi.Input['EnterpriseSearchSpecHttpTlsSelfSignedCertificateSubjectAltNamesArgs']]]] = None):
        """
        :param disabled: disables provisioning of the self-signed certificate.
        :param subject_alt_names: SANs to include in the generated HTTP TLS
            certificate.
        """
        # Only record fields that were actually supplied.
        for field_name, field_value in (
                ("disabled", disabled),
                ("subject_alt_names", subject_alt_names)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter
    def disabled(self) -> Optional[pulumi.Input[bool]]:
        """Disabled indicates that provisioning of the self-signed certificate
        should be disabled."""
        stored = pulumi.get(self, "disabled")
        return stored

    @disabled.setter
    def disabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disabled", value)

    @property
    @pulumi.getter(name="subjectAltNames")
    def subject_alt_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnterpriseSearchSpecHttpTlsSelfSignedCertificateSubjectAltNamesArgs']]]]:
        """List of SANs to include in the generated HTTP TLS certificate."""
        stored = pulumi.get(self, "subject_alt_names")
        return stored

    @subject_alt_names.setter
    def subject_alt_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EnterpriseSearchSpecHttpTlsSelfSignedCertificateSubjectAltNamesArgs']]]]):
        pulumi.set(self, "subject_alt_names", value)
@pulumi.input_type
class EnterpriseSearchSpecHttpTlsSelfSignedCertificateSubjectAltNamesArgs:
    """SubjectAlternativeName represents a SAN entry in a x509 certificate."""

    def __init__(__self__, *,
                 dns: Optional[pulumi.Input[str]] = None,
                 ip: Optional[pulumi.Input[str]] = None):
        """
        :param dns: DNS name of the subject.
        :param ip: IP address of the subject.
        """
        # Only record fields that were actually supplied.
        for field_name, field_value in (("dns", dns), ("ip", ip)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter
    def dns(self) -> Optional[pulumi.Input[str]]:
        """DNS is the DNS name of the subject."""
        stored = pulumi.get(self, "dns")
        return stored

    @dns.setter
    def dns(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "dns", value)

    @property
    @pulumi.getter
    def ip(self) -> Optional[pulumi.Input[str]]:
        """IP is the IP address of the subject."""
        stored = pulumi.get(self, "ip")
        return stored

    @ip.setter
    def ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip", value)
@pulumi.input_type
class EnterpriseSearchStatusArgs:
    """EnterpriseSearchStatus defines the observed state of EnterpriseSearch."""

    def __init__(__self__, *,
                 association_status: Optional[pulumi.Input[str]] = None,
                 available_nodes: Optional[pulumi.Input[int]] = None,
                 health: Optional[pulumi.Input[str]] = None,
                 service: Optional[pulumi.Input[str]] = None,
                 version: Optional[pulumi.Input[str]] = None):
        """
        :param association_status: status of any auto-linking to Elasticsearch
            clusters.
        :param available_nodes: number of available replicas in the deployment.
        :param health: health of the deployment.
        :param service: name of the service associated to the Enterprise
            Search Pods.
        :param version: lowest stack-resource version currently running.
        """
        # Only record fields that were actually supplied.
        supplied = (
            ("association_status", association_status),
            ("available_nodes", available_nodes),
            ("health", health),
            ("service", service),
            ("version", version),
        )
        for field_name, field_value in supplied:
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="associationStatus")
    def association_status(self) -> Optional[pulumi.Input[str]]:
        """Association is the status of any auto-linking to Elasticsearch clusters."""
        stored = pulumi.get(self, "association_status")
        return stored

    @association_status.setter
    def association_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "association_status", value)

    @property
    @pulumi.getter(name="availableNodes")
    def available_nodes(self) -> Optional[pulumi.Input[int]]:
        """AvailableNodes is the number of available replicas in the deployment."""
        stored = pulumi.get(self, "available_nodes")
        return stored

    @available_nodes.setter
    def available_nodes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "available_nodes", value)

    @property
    @pulumi.getter
    def health(self) -> Optional[pulumi.Input[str]]:
        """Health of the deployment."""
        stored = pulumi.get(self, "health")
        return stored

    @health.setter
    def health(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "health", value)

    @property
    @pulumi.getter
    def service(self) -> Optional[pulumi.Input[str]]:
        """ExternalService is the name of the service associated to the
        Enterprise Search Pods."""
        stored = pulumi.get(self, "service")
        return stored

    @service.setter
    def service(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service", value)

    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        """Version of the stack resource currently running. During version
        upgrades, multiple versions may run in parallel: this value is the
        lowest version currently running."""
        stored = pulumi.get(self, "version")
        return stored

    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)
| 57.832988 | 905 | 0.711413 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
# Public API of this module: the generated Pulumi input types for the
# EnterpriseSearch resource and its nested spec/status objects.
__all__ = [
    'EnterpriseSearchSpecArgs',
    'EnterpriseSearchSpecConfigRefArgs',
    'EnterpriseSearchSpecElasticsearchRefArgs',
    'EnterpriseSearchSpecHttpArgs',
    'EnterpriseSearchSpecHttpServiceArgs',
    'EnterpriseSearchSpecHttpServiceSpecArgs',
    'EnterpriseSearchSpecHttpServiceSpecPortsArgs',
    'EnterpriseSearchSpecHttpServiceSpecPortsTargetPortArgs',
    'EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigArgs',
    'EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigClientIPArgs',
    'EnterpriseSearchSpecHttpTlsArgs',
    'EnterpriseSearchSpecHttpTlsCertificateArgs',
    'EnterpriseSearchSpecHttpTlsSelfSignedCertificateArgs',
    'EnterpriseSearchSpecHttpTlsSelfSignedCertificateSubjectAltNamesArgs',
    'EnterpriseSearchStatusArgs',
]
@pulumi.input_type
class EnterpriseSearchSpecArgs:
    # Generated Pulumi input type for the EnterpriseSearch `spec` object.
    # Every field is optional; supplied values are recorded on the instance
    # via pulumi.set and re-read via pulumi.get in the properties below.
    def __init__(__self__, *,
                 config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 config_ref: Optional[pulumi.Input['EnterpriseSearchSpecConfigRefArgs']] = None,
                 count: Optional[pulumi.Input[int]] = None,
                 elasticsearch_ref: Optional[pulumi.Input['EnterpriseSearchSpecElasticsearchRefArgs']] = None,
                 http: Optional[pulumi.Input['EnterpriseSearchSpecHttpArgs']] = None,
                 image: Optional[pulumi.Input[str]] = None,
                 pod_template: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 service_account_name: Optional[pulumi.Input[str]] = None,
                 version: Optional[pulumi.Input[str]] = None):
        # Only fields that were actually supplied are stored.
        if config is not None:
            pulumi.set(__self__, "config", config)
        if config_ref is not None:
            pulumi.set(__self__, "config_ref", config_ref)
        if count is not None:
            pulumi.set(__self__, "count", count)
        if elasticsearch_ref is not None:
            pulumi.set(__self__, "elasticsearch_ref", elasticsearch_ref)
        if http is not None:
            pulumi.set(__self__, "http", http)
        if image is not None:
            pulumi.set(__self__, "image", image)
        if pod_template is not None:
            pulumi.set(__self__, "pod_template", pod_template)
        if service_account_name is not None:
            pulumi.set(__self__, "service_account_name", service_account_name)
        if version is not None:
            pulumi.set(__self__, "version", version)
    @property
    @pulumi.getter
    def config(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        return pulumi.get(self, "config")
    @config.setter
    def config(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "config", value)
    @property
    @pulumi.getter(name="configRef")
    def config_ref(self) -> Optional[pulumi.Input['EnterpriseSearchSpecConfigRefArgs']]:
        return pulumi.get(self, "config_ref")
    @config_ref.setter
    def config_ref(self, value: Optional[pulumi.Input['EnterpriseSearchSpecConfigRefArgs']]):
        pulumi.set(self, "config_ref", value)
    @property
    @pulumi.getter
    def count(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "count")
    @count.setter
    def count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "count", value)
    @property
    @pulumi.getter(name="elasticsearchRef")
    def elasticsearch_ref(self) -> Optional[pulumi.Input['EnterpriseSearchSpecElasticsearchRefArgs']]:
        return pulumi.get(self, "elasticsearch_ref")
    @elasticsearch_ref.setter
    def elasticsearch_ref(self, value: Optional[pulumi.Input['EnterpriseSearchSpecElasticsearchRefArgs']]):
        pulumi.set(self, "elasticsearch_ref", value)
    @property
    @pulumi.getter
    def http(self) -> Optional[pulumi.Input['EnterpriseSearchSpecHttpArgs']]:
        return pulumi.get(self, "http")
    @http.setter
    def http(self, value: Optional[pulumi.Input['EnterpriseSearchSpecHttpArgs']]):
        pulumi.set(self, "http", value)
    @property
    @pulumi.getter
    def image(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "image")
    @image.setter
    def image(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "image", value)
    @property
    @pulumi.getter(name="podTemplate")
    def pod_template(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        return pulumi.get(self, "pod_template")
    @pod_template.setter
    def pod_template(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "pod_template", value)
    @property
    @pulumi.getter(name="serviceAccountName")
    def service_account_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "service_account_name")
    @service_account_name.setter
    def service_account_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_account_name", value)
    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "version")
    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)
@pulumi.input_type
class EnterpriseSearchSpecConfigRefArgs:
    # Generated Pulumi input type referencing a Kubernetes secret by name
    # (field: secret_name / "secretName").
    def __init__(__self__, *,
                 secret_name: Optional[pulumi.Input[str]] = None):
        if secret_name is not None:
            pulumi.set(__self__, "secret_name", secret_name)
    @property
    @pulumi.getter(name="secretName")
    def secret_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "secret_name")
    @secret_name.setter
    def secret_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secret_name", value)
@pulumi.input_type
class EnterpriseSearchSpecElasticsearchRefArgs:
    # Generated Pulumi input type referencing an Elasticsearch resource by
    # name (required) and namespace (optional).
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 namespace: Optional[pulumi.Input[str]] = None):
        # `name` is the only required field of this type.
        pulumi.set(__self__, "name", name)
        if namespace is not None:
            pulumi.set(__self__, "namespace", namespace)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "namespace")
    @namespace.setter
    def namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", value)
@pulumi.input_type
class EnterpriseSearchSpecHttpArgs:
    # Generated Pulumi input type for the HTTP configuration block:
    # an optional `service` object and an optional `tls` object.
    def __init__(__self__, *,
                 service: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceArgs']] = None,
                 tls: Optional[pulumi.Input['EnterpriseSearchSpecHttpTlsArgs']] = None):
        if service is not None:
            pulumi.set(__self__, "service", service)
        if tls is not None:
            pulumi.set(__self__, "tls", tls)
    @property
    @pulumi.getter
    def service(self) -> Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceArgs']]:
        return pulumi.get(self, "service")
    @service.setter
    def service(self, value: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceArgs']]):
        pulumi.set(self, "service", value)
    @property
    @pulumi.getter
    def tls(self) -> Optional[pulumi.Input['EnterpriseSearchSpecHttpTlsArgs']]:
        return pulumi.get(self, "tls")
    @tls.setter
    def tls(self, value: Optional[pulumi.Input['EnterpriseSearchSpecHttpTlsArgs']]):
        pulumi.set(self, "tls", value)
@pulumi.input_type
class EnterpriseSearchSpecHttpServiceArgs:
    # Generated Pulumi input type for a Kubernetes Service template:
    # free-form `metadata` mapping plus a typed `spec` object.
    def __init__(__self__, *,
                 metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 spec: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecArgs']] = None):
        if metadata is not None:
            pulumi.set(__self__, "metadata", metadata)
        if spec is not None:
            pulumi.set(__self__, "spec", spec)
    @property
    @pulumi.getter
    def metadata(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        return pulumi.get(self, "metadata")
    @metadata.setter
    def metadata(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "metadata", value)
    @property
    @pulumi.getter
    def spec(self) -> Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecArgs']]:
        return pulumi.get(self, "spec")
    @spec.setter
    def spec(self, value: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecArgs']]):
        pulumi.set(self, "spec", value)
@pulumi.input_type
class EnterpriseSearchSpecHttpServiceSpecArgs:
    # Generated Pulumi input type mirroring the Kubernetes ServiceSpec:
    # every field is optional and stored via pulumi.set only when supplied.
    # Camel-cased wire names are declared on the @pulumi.getter decorators.
    def __init__(__self__, *,
                 cluster_ip: Optional[pulumi.Input[str]] = None,
                 external_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 external_name: Optional[pulumi.Input[str]] = None,
                 external_traffic_policy: Optional[pulumi.Input[str]] = None,
                 health_check_node_port: Optional[pulumi.Input[int]] = None,
                 ip_family: Optional[pulumi.Input[str]] = None,
                 load_balancer_ip: Optional[pulumi.Input[str]] = None,
                 load_balancer_source_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 ports: Optional[pulumi.Input[Sequence[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecPortsArgs']]]] = None,
                 publish_not_ready_addresses: Optional[pulumi.Input[bool]] = None,
                 selector: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 session_affinity: Optional[pulumi.Input[str]] = None,
                 session_affinity_config: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigArgs']] = None,
                 topology_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        if cluster_ip is not None:
            pulumi.set(__self__, "cluster_ip", cluster_ip)
        if external_ips is not None:
            pulumi.set(__self__, "external_ips", external_ips)
        if external_name is not None:
            pulumi.set(__self__, "external_name", external_name)
        if external_traffic_policy is not None:
            pulumi.set(__self__, "external_traffic_policy", external_traffic_policy)
        if health_check_node_port is not None:
            pulumi.set(__self__, "health_check_node_port", health_check_node_port)
        if ip_family is not None:
            pulumi.set(__self__, "ip_family", ip_family)
        if load_balancer_ip is not None:
            pulumi.set(__self__, "load_balancer_ip", load_balancer_ip)
        if load_balancer_source_ranges is not None:
            pulumi.set(__self__, "load_balancer_source_ranges", load_balancer_source_ranges)
        if ports is not None:
            pulumi.set(__self__, "ports", ports)
        if publish_not_ready_addresses is not None:
            pulumi.set(__self__, "publish_not_ready_addresses", publish_not_ready_addresses)
        if selector is not None:
            pulumi.set(__self__, "selector", selector)
        if session_affinity is not None:
            pulumi.set(__self__, "session_affinity", session_affinity)
        if session_affinity_config is not None:
            pulumi.set(__self__, "session_affinity_config", session_affinity_config)
        if topology_keys is not None:
            pulumi.set(__self__, "topology_keys", topology_keys)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="clusterIP")
    def cluster_ip(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "cluster_ip")
    @cluster_ip.setter
    def cluster_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_ip", value)
    @property
    @pulumi.getter(name="externalIPs")
    def external_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "external_ips")
    @external_ips.setter
    def external_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "external_ips", value)
    @property
    @pulumi.getter(name="externalName")
    def external_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "external_name")
    @external_name.setter
    def external_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "external_name", value)
    @property
    @pulumi.getter(name="externalTrafficPolicy")
    def external_traffic_policy(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "external_traffic_policy")
    @external_traffic_policy.setter
    def external_traffic_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "external_traffic_policy", value)
    @property
    @pulumi.getter(name="healthCheckNodePort")
    def health_check_node_port(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "health_check_node_port")
    @health_check_node_port.setter
    def health_check_node_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "health_check_node_port", value)
    @property
    @pulumi.getter(name="ipFamily")
    def ip_family(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "ip_family")
    @ip_family.setter
    def ip_family(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_family", value)
    @property
    @pulumi.getter(name="loadBalancerIP")
    def load_balancer_ip(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "load_balancer_ip")
    @load_balancer_ip.setter
    def load_balancer_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "load_balancer_ip", value)
    @property
    @pulumi.getter(name="loadBalancerSourceRanges")
    def load_balancer_source_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "load_balancer_source_ranges")
    @load_balancer_source_ranges.setter
    def load_balancer_source_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "load_balancer_source_ranges", value)
    @property
    @pulumi.getter
    def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecPortsArgs']]]]:
        return pulumi.get(self, "ports")
    @ports.setter
    def ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecPortsArgs']]]]):
        pulumi.set(self, "ports", value)
    @property
    @pulumi.getter(name="publishNotReadyAddresses")
    def publish_not_ready_addresses(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "publish_not_ready_addresses")
    @publish_not_ready_addresses.setter
    def publish_not_ready_addresses(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "publish_not_ready_addresses", value)
    @property
    @pulumi.getter
    def selector(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        return pulumi.get(self, "selector")
    @selector.setter
    def selector(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "selector", value)
    @property
    @pulumi.getter(name="sessionAffinity")
    def session_affinity(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "session_affinity")
    @session_affinity.setter
    def session_affinity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "session_affinity", value)
    @property
    @pulumi.getter(name="sessionAffinityConfig")
    def session_affinity_config(self) -> Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigArgs']]:
        return pulumi.get(self, "session_affinity_config")
    @session_affinity_config.setter
    def session_affinity_config(self, value: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigArgs']]):
        pulumi.set(self, "session_affinity_config", value)
    @property
    @pulumi.getter(name="topologyKeys")
    def topology_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "topology_keys")
    @topology_keys.setter
    def topology_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "topology_keys", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class EnterpriseSearchSpecHttpServiceSpecPortsArgs:
    # Generated Pulumi input type mirroring the Kubernetes ServicePort.
    # `port` is required; all other fields are optional.
    def __init__(__self__, *,
                 port: pulumi.Input[int],
                 app_protocol: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 node_port: Optional[pulumi.Input[int]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 target_port: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecPortsTargetPortArgs']] = None):
        # `port` is always stored; the rest only when supplied.
        pulumi.set(__self__, "port", port)
        if app_protocol is not None:
            pulumi.set(__self__, "app_protocol", app_protocol)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if node_port is not None:
            pulumi.set(__self__, "node_port", node_port)
        if protocol is not None:
            pulumi.set(__self__, "protocol", protocol)
        if target_port is not None:
            pulumi.set(__self__, "target_port", target_port)
    @property
    @pulumi.getter
    def port(self) -> pulumi.Input[int]:
        return pulumi.get(self, "port")
    @port.setter
    def port(self, value: pulumi.Input[int]):
        pulumi.set(self, "port", value)
    @property
    @pulumi.getter(name="appProtocol")
    def app_protocol(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "app_protocol")
    @app_protocol.setter
    def app_protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "app_protocol", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="nodePort")
    def node_port(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "node_port")
    @node_port.setter
    def node_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "node_port", value)
    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "protocol")
    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)
    @property
    @pulumi.getter(name="targetPort")
    def target_port(self) -> Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecPortsTargetPortArgs']]:
        return pulumi.get(self, "target_port")
    @target_port.setter
    def target_port(self, value: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecPortsTargetPortArgs']]):
        pulumi.set(self, "target_port", value)
@pulumi.input_type
class EnterpriseSearchSpecHttpServiceSpecPortsTargetPortArgs:
    # Generated empty Pulumi input type: declares no fields.
    def __init__(__self__):
        pass
@pulumi.input_type
class EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigArgs:
    # Generated Pulumi input type wrapping an optional client-IP
    # session-affinity configuration object.
    def __init__(__self__, *,
                 client_ip: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigClientIPArgs']] = None):
        if client_ip is not None:
            pulumi.set(__self__, "client_ip", client_ip)
    @property
    @pulumi.getter(name="clientIP")
    def client_ip(self) -> Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigClientIPArgs']]:
        return pulumi.get(self, "client_ip")
    @client_ip.setter
    def client_ip(self, value: Optional[pulumi.Input['EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigClientIPArgs']]):
        pulumi.set(self, "client_ip", value)
@pulumi.input_type
class EnterpriseSearchSpecHttpServiceSpecSessionAffinityConfigClientIPArgs:
    # Generated Pulumi input type carrying a single optional
    # timeout_seconds ("timeoutSeconds") value.
    def __init__(__self__, *,
                 timeout_seconds: Optional[pulumi.Input[int]] = None):
        if timeout_seconds is not None:
            pulumi.set(__self__, "timeout_seconds", timeout_seconds)
    @property
    @pulumi.getter(name="timeoutSeconds")
    def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "timeout_seconds")
    @timeout_seconds.setter
    def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class EnterpriseSearchSpecHttpTlsArgs:
    # Generated Pulumi input type for the HTTP TLS block: an optional
    # certificate secret reference and optional self-signed-certificate config.
    def __init__(__self__, *,
                 certificate: Optional[pulumi.Input['EnterpriseSearchSpecHttpTlsCertificateArgs']] = None,
                 self_signed_certificate: Optional[pulumi.Input['EnterpriseSearchSpecHttpTlsSelfSignedCertificateArgs']] = None):
        if certificate is not None:
            pulumi.set(__self__, "certificate", certificate)
        if self_signed_certificate is not None:
            pulumi.set(__self__, "self_signed_certificate", self_signed_certificate)
    @property
    @pulumi.getter
    def certificate(self) -> Optional[pulumi.Input['EnterpriseSearchSpecHttpTlsCertificateArgs']]:
        return pulumi.get(self, "certificate")
    @certificate.setter
    def certificate(self, value: Optional[pulumi.Input['EnterpriseSearchSpecHttpTlsCertificateArgs']]):
        pulumi.set(self, "certificate", value)
    @property
    @pulumi.getter(name="selfSignedCertificate")
    def self_signed_certificate(self) -> Optional[pulumi.Input['EnterpriseSearchSpecHttpTlsSelfSignedCertificateArgs']]:
        return pulumi.get(self, "self_signed_certificate")
    @self_signed_certificate.setter
    def self_signed_certificate(self, value: Optional[pulumi.Input['EnterpriseSearchSpecHttpTlsSelfSignedCertificateArgs']]):
        pulumi.set(self, "self_signed_certificate", value)
@pulumi.input_type
class EnterpriseSearchSpecHttpTlsCertificateArgs:
    # Auto-generated input type referencing the Kubernetes secret that holds
    # the TLS certificate.
    def __init__(__self__, *,
                 secret_name: Optional[pulumi.Input[str]] = None):
        if secret_name is not None:
            pulumi.set(__self__, "secret_name", secret_name)

    @property
    @pulumi.getter(name="secretName")  # camelCase key used by the Kubernetes API
    def secret_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "secret_name")

    @secret_name.setter
    def secret_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secret_name", value)
@pulumi.input_type
class EnterpriseSearchSpecHttpTlsSelfSignedCertificateArgs:
    # Auto-generated input type for the operator-managed self-signed
    # certificate: it can be disabled or extended with extra SANs.
    def __init__(__self__, *,
                 disabled: Optional[pulumi.Input[bool]] = None,
                 subject_alt_names: Optional[pulumi.Input[Sequence[pulumi.Input['EnterpriseSearchSpecHttpTlsSelfSignedCertificateSubjectAltNamesArgs']]]] = None):
        # Only set fields the caller supplied.
        if disabled is not None:
            pulumi.set(__self__, "disabled", disabled)
        if subject_alt_names is not None:
            pulumi.set(__self__, "subject_alt_names", subject_alt_names)

    @property
    @pulumi.getter
    def disabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "disabled")

    @disabled.setter
    def disabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disabled", value)

    @property
    @pulumi.getter(name="subjectAltNames")  # camelCase key used by the Kubernetes API
    def subject_alt_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnterpriseSearchSpecHttpTlsSelfSignedCertificateSubjectAltNamesArgs']]]]:
        return pulumi.get(self, "subject_alt_names")

    @subject_alt_names.setter
    def subject_alt_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EnterpriseSearchSpecHttpTlsSelfSignedCertificateSubjectAltNamesArgs']]]]):
        pulumi.set(self, "subject_alt_names", value)
@pulumi.input_type
class EnterpriseSearchSpecHttpTlsSelfSignedCertificateSubjectAltNamesArgs:
    # Auto-generated input type for one subject-alternative-name entry:
    # either a DNS name or an IP address.
    def __init__(__self__, *,
                 dns: Optional[pulumi.Input[str]] = None,
                 ip: Optional[pulumi.Input[str]] = None):
        # Only set fields the caller supplied.
        if dns is not None:
            pulumi.set(__self__, "dns", dns)
        if ip is not None:
            pulumi.set(__self__, "ip", ip)

    @property
    @pulumi.getter
    def dns(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "dns")

    @dns.setter
    def dns(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "dns", value)

    @property
    @pulumi.getter
    def ip(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "ip")

    @ip.setter
    def ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip", value)
@pulumi.input_type
class EnterpriseSearchStatusArgs:
    # Auto-generated input type mirroring the EnterpriseSearch CRD status
    # sub-resource (association status, node availability, health, service
    # name and stack version). All fields are optional strings/ints.
    def __init__(__self__, *,
                 association_status: Optional[pulumi.Input[str]] = None,
                 available_nodes: Optional[pulumi.Input[int]] = None,
                 health: Optional[pulumi.Input[str]] = None,
                 service: Optional[pulumi.Input[str]] = None,
                 version: Optional[pulumi.Input[str]] = None):
        # Only set fields the caller supplied.
        if association_status is not None:
            pulumi.set(__self__, "association_status", association_status)
        if available_nodes is not None:
            pulumi.set(__self__, "available_nodes", available_nodes)
        if health is not None:
            pulumi.set(__self__, "health", health)
        if service is not None:
            pulumi.set(__self__, "service", service)
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter(name="associationStatus")  # camelCase key used by the Kubernetes API
    def association_status(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "association_status")

    @association_status.setter
    def association_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "association_status", value)

    @property
    @pulumi.getter(name="availableNodes")  # camelCase key used by the Kubernetes API
    def available_nodes(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "available_nodes")

    @available_nodes.setter
    def available_nodes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "available_nodes", value)

    @property
    @pulumi.getter
    def health(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "health")

    @health.setter
    def health(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "health", value)

    @property
    @pulumi.getter
    def service(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "service")

    @service.setter
    def service(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service", value)

    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "version")

    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)
| true | true |
f7346a678b201b6551646b27b58b1a43bf3c84a7 | 2,627 | py | Python | flask_security/__init__.py | Identeco/flask-security | 0b2dc59767af72f435fceac69f72c04a7cd9cc13 | [
"MIT"
] | 317 | 2020-04-30T16:29:58.000Z | 2022-03-31T07:19:09.000Z | flask_security/__init__.py | Identeco/flask-security | 0b2dc59767af72f435fceac69f72c04a7cd9cc13 | [
"MIT"
] | 216 | 2020-04-30T16:49:01.000Z | 2022-03-31T19:02:37.000Z | flask_security/__init__.py | Identeco/flask-security | 0b2dc59767af72f435fceac69f72c04a7cd9cc13 | [
"MIT"
] | 82 | 2020-04-30T16:22:36.000Z | 2022-03-27T16:52:40.000Z | """
flask_security
~~~~~~~~~~~~~~
Flask-Security is a Flask extension that aims to add quick and simple
security via Flask-Login, Flask-Principal, Flask-WTF, and passlib.
:copyright: (c) 2012-2019 by Matt Wright.
:copyright: (c) 2019-2020 by J. Christopher Wagner.
:license: MIT, see LICENSE for more details.
"""
# flake8: noqa: F401
from .changeable import admin_change_password
from .core import Security, RoleMixin, UserMixin, AnonymousUser, current_user
from .datastore import (
UserDatastore,
SQLAlchemyUserDatastore,
MongoEngineUserDatastore,
PeeweeUserDatastore,
PonyUserDatastore,
SQLAlchemySessionUserDatastore,
)
from .decorators import (
auth_token_required,
anonymous_user_required,
handle_csrf,
http_auth_required,
login_required,
roles_accepted,
roles_required,
auth_required,
permissions_accepted,
permissions_required,
unauth_csrf,
)
from .forms import (
ChangePasswordForm,
ForgotPasswordForm,
LoginForm,
RegisterForm,
ResetPasswordForm,
PasswordlessLoginForm,
ConfirmRegisterForm,
SendConfirmationForm,
TwoFactorRescueForm,
TwoFactorSetupForm,
TwoFactorVerifyCodeForm,
VerifyForm,
unique_identity_attribute,
)
from .mail_util import MailUtil
from .password_util import PasswordUtil
from .phone_util import PhoneUtil
from .signals import (
confirm_instructions_sent,
login_instructions_sent,
password_changed,
password_reset,
reset_password_instructions_sent,
tf_code_confirmed,
tf_profile_changed,
tf_security_token_sent,
tf_disabled,
user_authenticated,
user_confirmed,
user_registered,
us_security_token_sent,
us_profile_changed,
)
from .totp import Totp
from .twofactor import tf_send_security_token
from .unified_signin import (
UnifiedSigninForm,
UnifiedSigninSetupForm,
UnifiedSigninSetupValidateForm,
UnifiedVerifyForm,
us_send_security_token,
)
from .username_util import UsernameUtil
from .utils import (
FsJsonEncoder,
SmsSenderBaseClass,
SmsSenderFactory,
check_and_get_token_status,
get_hmac,
get_request_attr,
get_token_status,
get_url,
hash_password,
check_and_update_authn_fresh,
login_user,
logout_user,
password_breached_validator,
password_complexity_validator,
password_length_validator,
pwned,
send_mail,
transform_url,
uia_phone_mapper,
uia_email_mapper,
uia_username_mapper,
url_for_security,
verify_password,
verify_and_update_password,
)
__version__ = "4.1.2"
| 24.100917 | 77 | 0.745337 |
from .changeable import admin_change_password
from .core import Security, RoleMixin, UserMixin, AnonymousUser, current_user
from .datastore import (
UserDatastore,
SQLAlchemyUserDatastore,
MongoEngineUserDatastore,
PeeweeUserDatastore,
PonyUserDatastore,
SQLAlchemySessionUserDatastore,
)
from .decorators import (
auth_token_required,
anonymous_user_required,
handle_csrf,
http_auth_required,
login_required,
roles_accepted,
roles_required,
auth_required,
permissions_accepted,
permissions_required,
unauth_csrf,
)
from .forms import (
ChangePasswordForm,
ForgotPasswordForm,
LoginForm,
RegisterForm,
ResetPasswordForm,
PasswordlessLoginForm,
ConfirmRegisterForm,
SendConfirmationForm,
TwoFactorRescueForm,
TwoFactorSetupForm,
TwoFactorVerifyCodeForm,
VerifyForm,
unique_identity_attribute,
)
from .mail_util import MailUtil
from .password_util import PasswordUtil
from .phone_util import PhoneUtil
from .signals import (
confirm_instructions_sent,
login_instructions_sent,
password_changed,
password_reset,
reset_password_instructions_sent,
tf_code_confirmed,
tf_profile_changed,
tf_security_token_sent,
tf_disabled,
user_authenticated,
user_confirmed,
user_registered,
us_security_token_sent,
us_profile_changed,
)
from .totp import Totp
from .twofactor import tf_send_security_token
from .unified_signin import (
UnifiedSigninForm,
UnifiedSigninSetupForm,
UnifiedSigninSetupValidateForm,
UnifiedVerifyForm,
us_send_security_token,
)
from .username_util import UsernameUtil
from .utils import (
FsJsonEncoder,
SmsSenderBaseClass,
SmsSenderFactory,
check_and_get_token_status,
get_hmac,
get_request_attr,
get_token_status,
get_url,
hash_password,
check_and_update_authn_fresh,
login_user,
logout_user,
password_breached_validator,
password_complexity_validator,
password_length_validator,
pwned,
send_mail,
transform_url,
uia_phone_mapper,
uia_email_mapper,
uia_username_mapper,
url_for_security,
verify_password,
verify_and_update_password,
)
__version__ = "4.1.2"
| true | true |
f7346a70e18759da1614021a2b5e7ca0c222edb2 | 6,747 | py | Python | torchbenchmark/models/BERT_pytorch/bert_pytorch/dataset/vocab.py | Chillee/benchmark | 91e1b2871327e44b9b7d24d173ca93720fb6565b | [
"BSD-3-Clause"
] | 5,013 | 2018-10-16T06:02:03.000Z | 2022-03-31T11:36:18.000Z | torchbenchmark/models/BERT_pytorch/bert_pytorch/dataset/vocab.py | Chillee/benchmark | 91e1b2871327e44b9b7d24d173ca93720fb6565b | [
"BSD-3-Clause"
] | 81 | 2018-10-15T14:28:32.000Z | 2022-02-07T14:21:53.000Z | torchbenchmark/models/BERT_pytorch/bert_pytorch/dataset/vocab.py | Chillee/benchmark | 91e1b2871327e44b9b7d24d173ca93720fb6565b | [
"BSD-3-Clause"
] | 1,129 | 2018-10-17T04:01:40.000Z | 2022-03-31T15:41:14.000Z | import pickle
import tqdm
from collections import Counter
class TorchVocab(object):
    """Defines a vocabulary object that will be used to numericalize a field.

    Attributes:
        freqs: A collections.Counter object holding the frequencies of tokens
            in the data used to build the Vocab.
        stoi: A dict mapping token strings to numerical identifiers.
        itos: A list of token strings indexed by their numerical identifiers.
        vectors: Pretrained vectors, or None when none were loaded.
    """

    def __init__(self, counter, max_size=None, min_freq=1, specials=('<pad>', '<oov>'),
                 vectors=None, unk_init=None, vectors_cache=None):
        """Create a Vocab object from a collections.Counter.

        Arguments:
            counter: collections.Counter object holding the frequencies of
                each value found in the data.
            max_size: The maximum size of the vocabulary, or None for no
                maximum. Default: None.
            min_freq: The minimum frequency needed to include a token in the
                vocabulary. Values less than 1 will be set to 1. Default: 1.
            specials: The special tokens (e.g. padding or eos) that are
                prepended to the vocabulary. Default: ('<pad>', '<oov>').
                (A tuple default avoids the mutable-default-argument pitfall;
                any iterable of strings is accepted.)
            vectors: Pretrained vectors or a list thereof; forwarded to
                ``load_vectors``.
            unk_init (callback): initializer for out-of-vocabulary word
                vectors; only valid together with ``vectors``.
            vectors_cache: directory for cached vectors; only valid together
                with ``vectors``.
        """
        self.freqs = counter
        counter = counter.copy()
        min_freq = max(min_freq, 1)
        self.itos = list(specials)
        # Frequencies of special tokens are not counted when building the
        # vocabulary in frequency order. Counter.__delitem__ silently ignores
        # missing keys, so this is safe even when a special never occurred.
        for tok in specials:
            del counter[tok]
        max_size = None if max_size is None else max_size + len(self.itos)
        # Sort alphabetically first, then (stably) by descending frequency,
        # so equally-frequent words end up in alphabetical order.
        words_and_frequencies = sorted(counter.items(), key=lambda tup: tup[0])
        words_and_frequencies.sort(key=lambda tup: tup[1], reverse=True)
        for word, freq in words_and_frequencies:
            if freq < min_freq or len(self.itos) == max_size:
                break
            self.itos.append(word)
        # stoi is simply a reverse dict for itos.
        self.stoi = {tok: i for i, tok in enumerate(self.itos)}
        self.vectors = None
        if vectors is not None:
            # NOTE(review): `load_vectors` is not defined in this class; it is
            # presumably provided by a subclass or torchtext mixin — confirm
            # before passing `vectors`.
            self.load_vectors(vectors, unk_init=unk_init, cache=vectors_cache)
        else:
            assert unk_init is None and vectors_cache is None, \
                "unk_init/vectors_cache are only meaningful together with vectors"

    def __eq__(self, other):
        # Fix: comparing against a non-vocabulary object used to raise
        # AttributeError. Returning NotImplemented lets Python fall back to
        # the default comparison, so `vocab == something_else` is just False.
        if not isinstance(other, TorchVocab):
            return NotImplemented
        if self.freqs != other.freqs:
            return False
        if self.stoi != other.stoi:
            return False
        if self.itos != other.itos:
            return False
        if self.vectors != other.vectors:
            return False
        return True

    # Defining __eq__ implicitly sets __hash__ to None (instances are
    # unhashable), matching the original behavior.

    def __len__(self):
        return len(self.itos)

    def vocab_rerank(self):
        """Rebuild stoi from the current itos ordering."""
        self.stoi = {word: i for i, word in enumerate(self.itos)}

    def extend(self, v, sort=False):
        """Append tokens from vocabulary *v* that are not already present.

        Arguments:
            v: another vocabulary whose ``itos`` is merged into this one.
            sort: when True, append the new tokens in alphabetical order.
        """
        words = sorted(v.itos) if sort else v.itos
        for w in words:
            if w not in self.stoi:
                self.itos.append(w)
                self.stoi[w] = len(self.itos) - 1
class Vocab(TorchVocab):
    """Base vocabulary with fixed index slots for the five special tokens."""

    def __init__(self, counter, max_size=None, min_freq=1):
        # The special-token ids are pinned to the first five slots, matching
        # the order of the ``specials`` list handed to the parent constructor.
        (self.pad_index, self.unk_index, self.eos_index,
         self.sos_index, self.mask_index) = range(5)
        super().__init__(
            counter,
            specials=["<pad>", "<unk>", "<eos>", "<sos>", "<mask>"],
            max_size=max_size,
            min_freq=min_freq,
        )

    def to_seq(self, sentece, seq_len, with_eos=False, with_sos=False) -> list:
        """Subclass hook: encode a sentence into a sequence of token ids."""
        pass

    def from_seq(self, seq, join=False, with_pad=False):
        """Subclass hook: decode a sequence of token ids back into tokens."""
        pass

    @staticmethod
    def load_vocab(vocab_path: str) -> 'Vocab':
        """Unpickle a vocabulary from ``vocab_path`` (trusted files only)."""
        with open(vocab_path, "rb") as handle:
            return pickle.load(handle)

    def save_vocab(self, vocab_path):
        """Pickle this vocabulary to ``vocab_path``."""
        with open(vocab_path, "wb") as handle:
            pickle.dump(self, handle)
# Building Vocab with text files
class WordVocab(Vocab):
    """Word-level vocabulary built from an iterable of sentences.

    Each element of ``texts`` is either a pre-tokenized list of words or a
    string that is split on whitespace (after stripping newlines/tabs).
    """

    def __init__(self, texts, max_size=None, min_freq=1):
        print("Building Vocab")
        counter = Counter()
        # tqdm wraps the iterable only for a progress bar; it does not change
        # the iteration itself.
        for line in tqdm.tqdm(texts):
            if isinstance(line, list):
                words = line
            else:
                words = line.replace("\n", "").replace("\t", "").split()
            for word in words:
                counter[word] += 1
        super().__init__(counter, max_size=max_size, min_freq=min_freq)

    def to_seq(self, sentence, seq_len=None, with_eos=False, with_sos=False, with_len=False):
        """Encode *sentence* into token ids, optionally padded/truncated.

        Unknown words map to ``unk_index``. When ``with_len`` is True the
        return value is ``(seq, original_length)`` where original_length is
        measured before padding/truncation.
        """
        if isinstance(sentence, str):
            sentence = sentence.split()
        seq = [self.stoi.get(word, self.unk_index) for word in sentence]
        if with_eos:
            seq += [self.eos_index]  # eos_index is 2 (set in Vocab.__init__)
        if with_sos:
            seq = [self.sos_index] + seq
        origin_seq_len = len(seq)
        # Pad or truncate only when an explicit seq_len was requested.
        if seq_len is None:
            pass
        elif len(seq) <= seq_len:
            seq += [self.pad_index for _ in range(seq_len - len(seq))]
        else:
            seq = seq[:seq_len]
        return (seq, origin_seq_len) if with_len else seq

    def from_seq(self, seq, join=False, with_pad=False):
        """Decode ids back to tokens; out-of-range ids render as "<id>".

        Padding ids are dropped unless ``with_pad`` is True; ``join`` returns
        a single space-joined string instead of a list.
        """
        words = [self.itos[idx]
                 if idx < len(self.itos)
                 else "<%d>" % idx
                 for idx in seq
                 if not with_pad or idx != self.pad_index]
        return " ".join(words) if join else words

    @staticmethod
    def load_vocab(vocab_path: str) -> 'WordVocab':
        # Same as Vocab.load_vocab, re-declared for the narrower return type.
        with open(vocab_path, "rb") as f:
            return pickle.load(f)
def build():
    """CLI entry point: build a WordVocab from a corpus file and pickle it."""
    import argparse

    cli = argparse.ArgumentParser()
    # (flags, options) pairs; the order matches the generated --help output.
    arg_table = [
        (("-c", "--corpus_path"), dict(required=True, type=str)),
        (("-o", "--output_path"), dict(required=True, type=str)),
        (("-s", "--vocab_size"), dict(type=int, default=None)),
        (("-e", "--encoding"), dict(type=str, default="utf-8")),
        (("-m", "--min_freq"), dict(type=int, default=1)),
    ]
    for flags, options in arg_table:
        cli.add_argument(*flags, **options)
    args = cli.parse_args()

    with open(args.corpus_path, "r", encoding=args.encoding) as corpus:
        vocab = WordVocab(corpus, max_size=args.vocab_size, min_freq=args.min_freq)

    print("VOCAB SIZE:", len(vocab))
    vocab.save_vocab(args.output_path)
| 36.274194 | 93 | 0.601304 | import pickle
import tqdm
from collections import Counter
class TorchVocab(object):
def __init__(self, counter, max_size=None, min_freq=1, specials=['<pad>', '<oov>'],
vectors=None, unk_init=None, vectors_cache=None):
self.freqs = counter
counter = counter.copy()
min_freq = max(min_freq, 1)
self.itos = list(specials)
for tok in specials:
del counter[tok]
max_size = None if max_size is None else max_size + len(self.itos)
words_and_frequencies = sorted(counter.items(), key=lambda tup: tup[0])
words_and_frequencies.sort(key=lambda tup: tup[1], reverse=True)
for word, freq in words_and_frequencies:
if freq < min_freq or len(self.itos) == max_size:
break
self.itos.append(word)
self.stoi = {tok: i for i, tok in enumerate(self.itos)}
self.vectors = None
if vectors is not None:
self.load_vectors(vectors, unk_init=unk_init, cache=vectors_cache)
else:
assert unk_init is None and vectors_cache is None
def __eq__(self, other):
if self.freqs != other.freqs:
return False
if self.stoi != other.stoi:
return False
if self.itos != other.itos:
return False
if self.vectors != other.vectors:
return False
return True
def __len__(self):
return len(self.itos)
def vocab_rerank(self):
self.stoi = {word: i for i, word in enumerate(self.itos)}
def extend(self, v, sort=False):
words = sorted(v.itos) if sort else v.itos
for w in words:
if w not in self.stoi:
self.itos.append(w)
self.stoi[w] = len(self.itos) - 1
class Vocab(TorchVocab):
def __init__(self, counter, max_size=None, min_freq=1):
self.pad_index = 0
self.unk_index = 1
self.eos_index = 2
self.sos_index = 3
self.mask_index = 4
super().__init__(counter, specials=["<pad>", "<unk>", "<eos>", "<sos>", "<mask>"],
max_size=max_size, min_freq=min_freq)
def to_seq(self, sentece, seq_len, with_eos=False, with_sos=False) -> list:
pass
def from_seq(self, seq, join=False, with_pad=False):
pass
@staticmethod
def load_vocab(vocab_path: str) -> 'Vocab':
with open(vocab_path, "rb") as f:
return pickle.load(f)
def save_vocab(self, vocab_path):
with open(vocab_path, "wb") as f:
pickle.dump(self, f)
class WordVocab(Vocab):
def __init__(self, texts, max_size=None, min_freq=1):
print("Building Vocab")
counter = Counter()
for line in tqdm.tqdm(texts):
if isinstance(line, list):
words = line
else:
words = line.replace("\n", "").replace("\t", "").split()
for word in words:
counter[word] += 1
super().__init__(counter, max_size=max_size, min_freq=min_freq)
def to_seq(self, sentence, seq_len=None, with_eos=False, with_sos=False, with_len=False):
if isinstance(sentence, str):
sentence = sentence.split()
seq = [self.stoi.get(word, self.unk_index) for word in sentence]
if with_eos:
seq += [self.eos_index]
if with_sos:
seq = [self.sos_index] + seq
origin_seq_len = len(seq)
if seq_len is None:
pass
elif len(seq) <= seq_len:
seq += [self.pad_index for _ in range(seq_len - len(seq))]
else:
seq = seq[:seq_len]
return (seq, origin_seq_len) if with_len else seq
def from_seq(self, seq, join=False, with_pad=False):
words = [self.itos[idx]
if idx < len(self.itos)
else "<%d>" % idx
for idx in seq
if not with_pad or idx != self.pad_index]
return " ".join(words) if join else words
@staticmethod
def load_vocab(vocab_path: str) -> 'WordVocab':
with open(vocab_path, "rb") as f:
return pickle.load(f)
def build():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--corpus_path", required=True, type=str)
parser.add_argument("-o", "--output_path", required=True, type=str)
parser.add_argument("-s", "--vocab_size", type=int, default=None)
parser.add_argument("-e", "--encoding", type=str, default="utf-8")
parser.add_argument("-m", "--min_freq", type=int, default=1)
args = parser.parse_args()
with open(args.corpus_path, "r", encoding=args.encoding) as f:
vocab = WordVocab(f, max_size=args.vocab_size, min_freq=args.min_freq)
print("VOCAB SIZE:", len(vocab))
vocab.save_vocab(args.output_path)
| true | true |
f7346b6921603b2a7215f45fff0ce7b1c912e958 | 486 | py | Python | 14_Lesson14/test_marking/test_example1.py | turovod/Otus | 57433c6944bca155177b07ff361139ff30f7f692 | [
"MIT"
] | null | null | null | 14_Lesson14/test_marking/test_example1.py | turovod/Otus | 57433c6944bca155177b07ff361139ff30f7f692 | [
"MIT"
] | null | null | null | 14_Lesson14/test_marking/test_example1.py | turovod/Otus | 57433c6944bca155177b07ff361139ff30f7f692 | [
"MIT"
] | null | null | null | import pytest
@pytest.mark.webtest
def test_send_http():
    # Example of a custom-marked test: select it with `pytest -m webtest`,
    # exclude it with `pytest -m "not webtest"`.
    print('========== Hello *********************************')
    assert True
def test_something_quick():
    """No-op test; exists as a target for name-based selection (-k)."""
    return None
def test_another():
    """No-op test; exists as a target for name-based selection (-k)."""
    return None
class TestClass:
    """Demonstrates that pytest also collects test methods on classes."""

    def test_method(self):
        """No-op test method."""
# Run marked tests
# pytest -v -m webtest
# pytest -v -m "not webtest"
# Using -k expr to select tests based on their name
# pytest -v -k http
# pytest -k "not send_http" -v
# pytest -k "http or quick" -v
#
| 15.677419 | 63 | 0.598765 | import pytest
@pytest.mark.webtest
def test_send_http():
print('========== Hello *********************************')
assert True
def test_something_quick():
pass
def test_another():
pass
class TestClass(object):
def test_method(self):
pass
| true | true |
f7346b86ad9084de9d0cfc75d3a59e51f223fb80 | 1,905 | py | Python | dashboard/documents/models.py | yakky/channeled-dashboard | b78df31e2c46e04e3cae002329bc0d82516e9dfc | [
"BSD-3-Clause"
] | 19 | 2018-02-16T11:36:12.000Z | 2020-04-08T21:04:43.000Z | dashboard/documents/models.py | yakky/channeled-dashboard | b78df31e2c46e04e3cae002329bc0d82516e9dfc | [
"BSD-3-Clause"
] | 2 | 2018-02-25T07:04:49.000Z | 2018-05-19T13:30:16.000Z | dashboard/documents/models.py | yakky/channeled-dashboard | b78df31e2c46e04e3cae002329bc0d82516e9dfc | [
"BSD-3-Clause"
] | 4 | 2018-04-20T12:45:13.000Z | 2018-12-29T00:30:49.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from ckeditor.fields import RichTextField
from django.conf import settings
from django.db import models
from django.db.models import ImageField
from django.urls import reverse
from django.utils.encoding import python_2_unicode_compatible
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from knocker.mixins import KnockerModel
from meta.models import ModelMeta
from model_utils.models import TimeStampedModel
@python_2_unicode_compatible  # NOTE(review): redundant — the class body uses f-strings, which are Python-3-only
class Document(KnockerModel, ModelMeta, TimeStampedModel):
    """Document model with real-time (knocker) notification hooks."""

    class Status(object):
        # Phase identifiers used to build channel-group / cache-key names.
        read = 'read'
        update = 'update'
        list = 'documents'

    title = models.CharField(_('title'), max_length=767)
    slug = models.SlugField(_('slug'), max_length=767, unique=True)
    image = ImageField(_('image'), null=True)
    abstract = RichTextField(_('abstract'), null=True)
    text = RichTextField(_('content'), null=True)
    # PROTECT: deleting a user with documents raises instead of cascading.
    author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('author'), null=True, on_delete=models.PROTECT)

    class Meta:
        verbose_name = _('document')
        verbose_name_plural = _('documents')

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return reverse('document-detail', kwargs={'slug': self.slug})

    def save(self, *args, **kwargs):
        # The slug is regenerated from the title on every save.
        # NOTE(review): renaming a document therefore changes its URL —
        # confirm that is intended before relying on permalink stability.
        self.slug = slugify(self.title)
        return super(Document, self).save(*args, **kwargs)

    @staticmethod
    def group(slug, phase):
        # Channels group name for a document in a given phase.
        return f'documents-{slug}-{phase}'

    @staticmethod
    def cache_key(slug, phase, **kwargs):
        # Mirrors group(); extra kwargs are accepted but unused here.
        return f'documents-{slug}-{phase}'

    def get_knocker_icon(self):
        # NOTE(review): `image` is nullable; accessing `.url` with no file
        # raises ValueError — confirm notifications only fire with an image.
        return self.image.url

    def get_knocker_language(self):
        # Notifications are always sent in English.
        return 'en'

    def get_knocker_message(self):
        return self.title
| 31.229508 | 119 | 0.707612 |
from __future__ import absolute_import, print_function, unicode_literals
from ckeditor.fields import RichTextField
from django.conf import settings
from django.db import models
from django.db.models import ImageField
from django.urls import reverse
from django.utils.encoding import python_2_unicode_compatible
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from knocker.mixins import KnockerModel
from meta.models import ModelMeta
from model_utils.models import TimeStampedModel
@python_2_unicode_compatible
class Document(KnockerModel, ModelMeta, TimeStampedModel):
class Status(object):
read = 'read'
update = 'update'
list = 'documents'
title = models.CharField(_('title'), max_length=767)
slug = models.SlugField(_('slug'), max_length=767, unique=True)
image = ImageField(_('image'), null=True)
abstract = RichTextField(_('abstract'), null=True)
text = RichTextField(_('content'), null=True)
author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('author'), null=True, on_delete=models.PROTECT)
class Meta:
verbose_name = _('document')
verbose_name_plural = _('documents')
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('document-detail', kwargs={'slug': self.slug})
def save(self, *args, **kwargs):
self.slug = slugify(self.title)
return super(Document, self).save(*args, **kwargs)
@staticmethod
def group(slug, phase):
return f'documents-{slug}-{phase}'
@staticmethod
def cache_key(slug, phase, **kwargs):
return f'documents-{slug}-{phase}'
def get_knocker_icon(self):
return self.image.url
def get_knocker_language(self):
return 'en'
def get_knocker_message(self):
return self.title
| true | true |
f7346c299948070a8b602bfe7035fed5598063f3 | 450 | py | Python | django_file_form/migrations/0007_auto_20210119_0104.py | kosior/django-file-form | bc9ae606f55b297254e945c856d4878e45408af0 | [
"Apache-2.0"
] | 133 | 2015-01-22T09:01:58.000Z | 2022-03-24T07:24:52.000Z | django_file_form/migrations/0007_auto_20210119_0104.py | kosior/django-file-form | bc9ae606f55b297254e945c856d4878e45408af0 | [
"Apache-2.0"
] | 371 | 2015-01-14T01:34:45.000Z | 2022-03-23T12:16:01.000Z | django_file_form/migrations/0007_auto_20210119_0104.py | kosior/django-file-form | bc9ae606f55b297254e945c856d4878e45408af0 | [
"Apache-2.0"
] | 42 | 2015-01-21T11:24:42.000Z | 2022-03-22T12:10:01.000Z | from django.db import migrations
class Migration(migrations.Migration):
    """Rename UploadedFile to TemporaryUploadedFile while keeping the
    original database table name, so no table rename is emitted."""

    dependencies = [
        ("django_file_form", "0006_auto_20200501_0908"),
    ]

    operations = [
        # Rename the model in Django's migration state.
        migrations.RenameModel(
            new_name="TemporaryUploadedFile",
            old_name="UploadedFile",
        ),
        # Pin the table back to its historical name for existing installs.
        migrations.AlterModelTable(
            name="TemporaryUploadedFile",
            table="django_file_form_uploadedfile",
        ),
    ]
| 23.684211 | 56 | 0.613333 | from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("django_file_form", "0006_auto_20200501_0908"),
]
operations = [
migrations.RenameModel(
new_name="TemporaryUploadedFile",
old_name="UploadedFile",
),
migrations.AlterModelTable(
name="TemporaryUploadedFile",
table="django_file_form_uploadedfile",
),
]
| true | true |
f7346c42994a39187ca1937fec6e475095c90ecc | 3,029 | py | Python | src/fewie/dataset_processors/roberta.py | DFKI-NLP/fewie | a47d2a02bc51cf951fc294de43bdb7557bc574b4 | [
"MIT"
] | null | null | null | src/fewie/dataset_processors/roberta.py | DFKI-NLP/fewie | a47d2a02bc51cf951fc294de43bdb7557bc574b4 | [
"MIT"
] | null | null | null | src/fewie/dataset_processors/roberta.py | DFKI-NLP/fewie | a47d2a02bc51cf951fc294de43bdb7557bc574b4 | [
"MIT"
] | null | null | null | import random
from typing import Dict, List, Union
from transformers import AutoTokenizer
import datasets
from fewie.dataset_processors.processor import DatasetProcessor
class RobertaProcessor(DatasetProcessor):
    """Tokenizes a token-classification dataset for RoBERTa-style models and
    aligns the word-level labels with the resulting sub-word tokens.
    """

    def __init__(
        self,
        tokenizer_name_or_path: str,
        text_column_name: str,
        label_column_name: str,
        label_to_id: Dict[str, int],
        max_length: int = 128,
        label_all_tokens: bool = False,
        padding: str = "max_length",
        add_prefix_space: bool = True,
    ) -> None:
        """
        Args:
            tokenizer_name_or_path: model id or local path for AutoTokenizer.
            text_column_name: dataset column holding the word lists.
            label_column_name: dataset column holding the per-word labels.
            label_to_id: mapping from label strings to integer ids.
            max_length: maximum sequence length after tokenization.
            label_all_tokens: if True, every sub-token of a word carries the
                word's label; otherwise only the first sub-token does.
            padding: padding strategy forwarded to the tokenizer.
            add_prefix_space: forwarded to the tokenizer; RoBERTa's BPE
                tokenizer requires True when the input is pre-split into words.
        """
        # Fix: `add_prefix_space` was previously ignored and the tokenizer was
        # always created with add_prefix_space=True. Honor the parameter; the
        # default value preserves the old behavior.
        self.tokenizer = AutoTokenizer.from_pretrained(
            tokenizer_name_or_path, add_prefix_space=add_prefix_space
        )
        self.text_column_name = text_column_name
        self.label_column_name = label_column_name
        self.label_to_id = label_to_id
        self.max_length = max_length
        self.label_all_tokens = label_all_tokens
        self.padding = padding
        self.prefix_space = add_prefix_space

    @property
    def feature_columns(self) -> List[str]:
        """Columns produced by tokenization that models consume."""
        return ["input_ids", "attention_mask", "labels"]

    def __call__(
        self, dataset: Union[datasets.Dataset, datasets.DatasetDict]
    ) -> Union[datasets.Dataset, datasets.DatasetDict]:
        """Apply tokenization + label alignment to every example in batches."""
        return dataset.map(self.tokenize_and_align_labels, batched=True)

    def tokenize_and_align_labels(self, examples):
        """Tokenize a batch and align word-level labels with sub-word tokens.

        Special tokens (and, depending on `label_all_tokens`, word
        continuation tokens) get label -100 so the loss function ignores them.
        """
        tokenized_inputs = self.tokenizer(
            examples[self.text_column_name],
            padding=self.padding,
            max_length=self.max_length,
            truncation=True,
            # Texts are lists of words (one label per word), not raw strings.
            is_split_into_words=True,
        )
        labels = []
        for i, label in enumerate(examples[self.label_column_name]):
            word_ids = tokenized_inputs.word_ids(batch_index=i)
            previous_word_idx = None
            label_ids = []
            for word_idx in word_ids:
                if word_idx is None:
                    # Special tokens have no word id; mask them out.
                    label_ids.append(-100)
                elif word_idx != previous_word_idx:
                    # First sub-token of a word gets the word's label.
                    label_ids.append(self.label_to_id[label[word_idx]])
                else:
                    # Remaining sub-tokens: propagate the label or mask them.
                    label_ids.append(
                        self.label_to_id[label[word_idx]]
                        if self.label_all_tokens
                        else -100
                    )
                previous_word_idx = word_idx
            labels.append(label_ids)
        tokenized_inputs["labels"] = labels
        return tokenized_inputs
| 37.8625 | 116 | 0.611753 | import random
from typing import Dict, List, Union
from transformers import AutoTokenizer
import datasets
from fewie.dataset_processors.processor import DatasetProcessor
class RobertaProcessor(DatasetProcessor):
def __init__(
self,
tokenizer_name_or_path: str,
text_column_name: str,
label_column_name: str,
label_to_id: Dict[str, int],
max_length: int = 128,
label_all_tokens: bool = False,
padding: str = "max_length",
add_prefix_space: bool = True,
) -> None:
self.tokenizer = AutoTokenizer.from_pretrained(
tokenizer_name_or_path, add_prefix_space=True
)
self.text_column_name = text_column_name
self.label_column_name = label_column_name
self.label_to_id = label_to_id
self.max_length = max_length
self.label_all_tokens = label_all_tokens
self.padding = padding
self.prefix_space = add_prefix_space
@property
def feature_columns(self) -> List[str]:
return ["input_ids", "attention_mask", "labels"]
def __call__(
self, dataset: Union[datasets.Dataset, datasets.DatasetDict]
) -> Union[datasets.Dataset, datasets.DatasetDict]:
return dataset.map(self.tokenize_and_align_labels, batched=True)
def tokenize_and_align_labels(self, examples):
tokenized_inputs = self.tokenizer(
examples[self.text_column_name],
padding=self.padding,
max_length=self.max_length,
truncation=True,
is_split_into_words=True,
)
labels = []
for i, label in enumerate(examples[self.label_column_name]):
word_ids = tokenized_inputs.word_ids(batch_index=i)
previous_word_idx = None
label_ids = []
for word_idx in word_ids:
if word_idx is None:
label_ids.append(-100)
elif word_idx != previous_word_idx:
label_ids.append(self.label_to_id[label[word_idx]])
else:
label_ids.append(
self.label_to_id[label[word_idx]]
if self.label_all_tokens
else -100
)
previous_word_idx = word_idx
labels.append(label_ids)
tokenized_inputs["labels"] = labels
return tokenized_inputs
| true | true |
f7346cbff5dcac90a09a02cd98fb63b2ddd48424 | 1,459 | py | Python | examples/src/python/bolt/half_ack_bolt.py | Munyola/incubator-heron | 4aa106c6eaef9c60ed2d692e41998adda8115e6f | [
"Apache-2.0"
] | 2 | 2016-07-04T07:10:31.000Z | 2018-03-28T16:59:02.000Z | examples/src/python/bolt/half_ack_bolt.py | Munyola/incubator-heron | 4aa106c6eaef9c60ed2d692e41998adda8115e6f | [
"Apache-2.0"
] | 1 | 2019-05-08T22:30:16.000Z | 2019-05-08T22:30:16.000Z | examples/src/python/bolt/half_ack_bolt.py | Munyola/incubator-heron | 4aa106c6eaef9c60ed2d692e41998adda8115e6f | [
"Apache-2.0"
] | 1 | 2017-06-05T17:55:45.000Z | 2017-06-05T17:55:45.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''half ack bolt'''
from heronpy.api.bolt.bolt import Bolt
class HalfAckBolt(Bolt):
  """Alternates between acking and failing incoming data tuples.

  Odd-numbered tuples (1st, 3rd, ...) are acked; even-numbered ones are
  failed, so exactly half of the stream reports failure upstream.
  """
  # pylint: disable=unused-argument
  def initialize(self, config, context):
    self.total = 0

  def process(self, tup):
    self.total += 1
    if self.total % 2:
      self.logger.debug("Acking a tuple: %s" % str(tup))
      self.ack(tup)
    else:
      self.logger.debug("Failing a tuple: %s" % str(tup))
      self.fail(tup)

  def process_tick(self, tup):
    self.log("Got tick tuple!")
    self.log("Total received: %d" % self.total)
| 34.738095 | 75 | 0.701165 |
from heronpy.api.bolt.bolt import Bolt
class HalfAckBolt(Bolt):
def initialize(self, config, context):
self.total = 0
def process(self, tup):
self.total += 1
if self.total % 2 == 0:
self.logger.debug("Failing a tuple: %s" % str(tup))
self.fail(tup)
else:
self.logger.debug("Acking a tuple: %s" % str(tup))
self.ack(tup)
def process_tick(self, tup):
self.log("Got tick tuple!")
self.log("Total received: %d" % self.total)
| true | true |
f7346e64245ed66025ffb65bc7e60868f6ab8e23 | 5,018 | py | Python | graphene_django/debug/sql/tracking.py | mebel-akvareli/graphene-django | 23008ad22094f3e7b8fb26b73811ce49b20cca25 | [
"MIT"
] | 4,038 | 2016-09-18T01:45:22.000Z | 2022-03-31T01:06:57.000Z | graphene_django/debug/sql/tracking.py | mebel-akvareli/graphene-django | 23008ad22094f3e7b8fb26b73811ce49b20cca25 | [
"MIT"
] | 1,104 | 2016-09-19T20:10:22.000Z | 2022-03-30T17:37:46.000Z | graphene_django/debug/sql/tracking.py | mebel-akvareli/graphene-django | 23008ad22094f3e7b8fb26b73811ce49b20cca25 | [
"MIT"
] | 791 | 2016-09-18T13:48:11.000Z | 2022-03-29T08:32:06.000Z | # Code obtained from django-debug-toolbar sql panel tracking
from __future__ import absolute_import, unicode_literals
import json
from threading import local
from time import time
from django.utils.encoding import force_str
from .types import DjangoDebugSQL
class SQLQueryTriggered(Exception):
    """Raised by ExceptionCursorWrapper when a query is attempted while
    recording is disabled (used by the templates panel to detect that
    rendering triggered a database query)."""
class ThreadLocalState(local):
    """Per-thread switch selecting which cursor wrapper gets installed.

    While ``enabled`` is true, wrapped connections hand out logging
    cursors; otherwise they hand out cursors that raise on any use.
    """
    def __init__(self):
        self.enabled = True
    @property
    def Wrapper(self):
        # Recording off -> any cursor attribute access raises SQLQueryTriggered.
        return NormalCursorWrapper if self.enabled else ExceptionCursorWrapper
    def recording(self, v):
        self.enabled = v
state = ThreadLocalState()
recording = state.recording  # module-level convenience alias
def wrap_cursor(connection, panel):
    """Replace ``connection.cursor`` with a factory producing tracked cursors.

    The original factory is stashed on the connection as
    ``_graphene_cursor`` so unwrap_cursor can restore it; wrapping an
    already-wrapped connection is a no-op (returns None).
    """
    if hasattr(connection, "_graphene_cursor"):
        return None
    connection._graphene_cursor = connection.cursor
    def tracked_cursor():
        raw = connection._graphene_cursor()
        return state.Wrapper(raw, connection, panel)
    connection.cursor = tracked_cursor
    return tracked_cursor
def unwrap_cursor(connection):
    """Undo wrap_cursor: restore the saved cursor factory, if any."""
    if not hasattr(connection, "_graphene_cursor"):
        return
    connection.cursor = connection._graphene_cursor
    del connection._graphene_cursor
class ExceptionCursorWrapper(object):
    """Cursor stand-in that refuses all use.

    Installed while the templates panel is active: touching any cursor
    attribute raises SQLQueryTriggered instead of reaching the database.
    """
    def __init__(self, cursor, db, logger):
        # Arguments are accepted only to match NormalCursorWrapper's signature.
        pass
    def __getattr__(self, name):
        raise SQLQueryTriggered()
class NormalCursorWrapper(object):
    """
    Wraps a cursor and logs queries.

    Every callproc/execute/executemany call is timed and appended to
    ``logger.object.sql`` as a DjangoDebugSQL record; all other cursor
    behaviour is delegated unchanged to the wrapped cursor.
    """
    def __init__(self, cursor, db, logger):
        self.cursor = cursor
        # Instance of a BaseDatabaseWrapper subclass
        self.db = db
        # logger must expose ``object.sql`` (a list) and, for PostgreSQL
        # connections, a ``get_transaction_id(alias)`` method (see _record).
        self.logger = logger
    def _quote_expr(self, element):
        # SQL-quote a single value for display; non-strings fall back to repr.
        if isinstance(element, str):
            return "'%s'" % force_str(element).replace("'", "''")
        else:
            return repr(element)
    def _quote_params(self, params):
        # Quote a dict or sequence of bind parameters for display purposes.
        if not params:
            return params
        if isinstance(params, dict):
            return dict((key, self._quote_expr(value)) for key, value in params.items())
        return list(map(self._quote_expr, params))
    def _decode(self, param):
        # Best-effort text coercion of a bind parameter for JSON display.
        try:
            return force_str(param, strings_only=True)
        except UnicodeDecodeError:
            return "(encoded string)"
    def _record(self, method, sql, params):
        """Run ``method(sql, params)``, timing it, and append a
        DjangoDebugSQL entry to ``self.logger.object.sql``.

        Recording happens in the ``finally`` block, so failed queries are
        logged too.
        """
        start_time = time()
        try:
            return method(sql, params)
        finally:
            stop_time = time()
            duration = stop_time - start_time
            _params = ""
            try:
                _params = json.dumps(list(map(self._decode, params)))
            except Exception:
                pass  # object not JSON serializable
            alias = getattr(self.db, "alias", "default")
            conn = self.db.connection
            vendor = getattr(conn, "vendor", "unknown")
            # `params` is rebound here: from this point on it holds the
            # DjangoDebugSQL constructor kwargs, not the bind parameters.
            params = {
                "vendor": vendor,
                "alias": alias,
                "sql": self.db.ops.last_executed_query(
                    self.cursor, sql, self._quote_params(params)
                ),
                "duration": duration,
                "raw_sql": sql,
                "params": _params,
                "start_time": start_time,
                "stop_time": stop_time,
                # time() deltas are seconds, so queries over 10s are "slow".
                "is_slow": duration > 10,
                "is_select": sql.lower().strip().startswith("select"),
            }
            if vendor == "postgresql":
                # If an erroneous query was ran on the connection, it might
                # be in a state where checking isolation_level raises an
                # exception.
                try:
                    iso_level = conn.isolation_level
                except conn.InternalError:
                    iso_level = "unknown"
                params.update(
                    {
                        "trans_id": self.logger.get_transaction_id(alias),
                        "trans_status": conn.get_transaction_status(),
                        "iso_level": iso_level,
                        "encoding": conn.encoding,
                    }
                )
            _sql = DjangoDebugSQL(**params)
            # We keep `sql` to maintain backwards compatibility
            self.logger.object.sql.append(_sql)
    def callproc(self, procname, params=None):
        return self._record(self.cursor.callproc, procname, params)
    def execute(self, sql, params=None):
        return self._record(self.cursor.execute, sql, params)
    def executemany(self, sql, param_list):
        return self._record(self.cursor.executemany, sql, param_list)
    def __getattr__(self, attr):
        # Anything not defined here is delegated to the real cursor.
        return getattr(self.cursor, attr)
    def __iter__(self):
        return iter(self.cursor)
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        self.close()
| 29.517647 | 88 | 0.585293 |
from __future__ import absolute_import, unicode_literals
import json
from threading import local
from time import time
from django.utils.encoding import force_str
from .types import DjangoDebugSQL
class SQLQueryTriggered(Exception):
class ThreadLocalState(local):
def __init__(self):
self.enabled = True
@property
def Wrapper(self):
if self.enabled:
return NormalCursorWrapper
return ExceptionCursorWrapper
def recording(self, v):
self.enabled = v
state = ThreadLocalState()
recording = state.recording
def wrap_cursor(connection, panel):
if not hasattr(connection, "_graphene_cursor"):
connection._graphene_cursor = connection.cursor
def cursor():
return state.Wrapper(connection._graphene_cursor(), connection, panel)
connection.cursor = cursor
return cursor
def unwrap_cursor(connection):
if hasattr(connection, "_graphene_cursor"):
previous_cursor = connection._graphene_cursor
connection.cursor = previous_cursor
del connection._graphene_cursor
class ExceptionCursorWrapper(object):
def __init__(self, cursor, db, logger):
pass
def __getattr__(self, attr):
raise SQLQueryTriggered()
class NormalCursorWrapper(object):
def __init__(self, cursor, db, logger):
self.cursor = cursor
self.db = db
self.logger = logger
def _quote_expr(self, element):
if isinstance(element, str):
return "'%s'" % force_str(element).replace("'", "''")
else:
return repr(element)
def _quote_params(self, params):
if not params:
return params
if isinstance(params, dict):
return dict((key, self._quote_expr(value)) for key, value in params.items())
return list(map(self._quote_expr, params))
def _decode(self, param):
try:
return force_str(param, strings_only=True)
except UnicodeDecodeError:
return "(encoded string)"
def _record(self, method, sql, params):
start_time = time()
try:
return method(sql, params)
finally:
stop_time = time()
duration = stop_time - start_time
_params = ""
try:
_params = json.dumps(list(map(self._decode, params)))
except Exception:
pass # object not JSON serializable
alias = getattr(self.db, "alias", "default")
conn = self.db.connection
vendor = getattr(conn, "vendor", "unknown")
params = {
"vendor": vendor,
"alias": alias,
"sql": self.db.ops.last_executed_query(
self.cursor, sql, self._quote_params(params)
),
"duration": duration,
"raw_sql": sql,
"params": _params,
"start_time": start_time,
"stop_time": stop_time,
"is_slow": duration > 10,
"is_select": sql.lower().strip().startswith("select"),
}
if vendor == "postgresql":
# If an erroneous query was ran on the connection, it might
# be in a state where checking isolation_level raises an
# exception.
try:
iso_level = conn.isolation_level
except conn.InternalError:
iso_level = "unknown"
params.update(
{
"trans_id": self.logger.get_transaction_id(alias),
"trans_status": conn.get_transaction_status(),
"iso_level": iso_level,
"encoding": conn.encoding,
}
)
_sql = DjangoDebugSQL(**params)
# We keep `sql` to maintain backwards compatibility
self.logger.object.sql.append(_sql)
def callproc(self, procname, params=None):
return self._record(self.cursor.callproc, procname, params)
def execute(self, sql, params=None):
return self._record(self.cursor.execute, sql, params)
def executemany(self, sql, param_list):
return self._record(self.cursor.executemany, sql, param_list)
def __getattr__(self, attr):
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
| true | true |
f7346ed754373bd37ef39e959ebaa2ff7c5c5a13 | 16,988 | py | Python | spire/mesh/controllers.py | siq/spire | 6365590277e9a6bfb6e4e0df5b2b47dba0f71711 | [
"Linux-OpenIB"
] | null | null | null | spire/mesh/controllers.py | siq/spire | 6365590277e9a6bfb6e4e0df5b2b47dba0f71711 | [
"Linux-OpenIB"
] | 1 | 2016-09-15T16:19:27.000Z | 2016-09-15T16:20:06.000Z | spire/mesh/controllers.py | siq/spire | 6365590277e9a6bfb6e4e0df5b2b47dba0f71711 | [
"Linux-OpenIB"
] | null | null | null | import re
from mesh.constants import OK, RETURNING
from mesh.exceptions import GoneError, NotFoundError
from mesh.standard import Controller
from sqlalchemy.sql import asc, column, desc, func, literal_column, not_, select
from spire.core import Configurable, Unit
from spire.schema import NoResultFound
__all__ = ('ModelController', 'ProxyController', 'UnitController', 'support_returning')
EMPTY = []  # NOTE(review): module-level empty default; not referenced in this chunk -- confirm external users before removing
class FieldFilter(object):
    """Resolves which resource fields a request wants returned.

    Precedence: falsy ``data`` -> the controller's default field set
    (shared, not copied); an explicit 'fields' entry -> exactly that set;
    otherwise the defaults adjusted by 'include' and then 'exclude'.
    """
    def __init__(self, controller, data):
        if not data:
            self.fields = controller.default_fields
        elif 'fields' in data:
            self.fields = set(data['fields'])
        else:
            selected = controller.default_fields.copy()
            if 'include' in data:
                selected.update(data['include'])
            if 'exclude' in data:
                selected.difference_update(data['exclude'])
            self.fields = selected
    def __contains__(self, field):
        return field in self.fields
class FilterOperators(object):
    """Translates ``field__operator`` query filters into SQLAlchemy clauses.

    Each ``<name>_op`` method takes a query, a mapped column and the raw
    filter value, and returns the query with the clause applied.
    """
    WILDCARD_EXPR = re.compile(r'([%_])')
    def _escape(self, fragment):
        # Backslash-escape LIKE wildcards so user input matches literally.
        return self.WILDCARD_EXPR.sub(r'\\\1', fragment)
    def equal_op(self, query, column, value):
        return query.filter(column == value)
    def iequal_op(self, query, column, value):
        return query.filter(func.lower(column) == value)
    def not_op(self, query, column, value):
        return query.filter(column != value)
    def inot_op(self, query, column, value):
        return query.filter(func.lower(column) != value)
    def prefix_op(self, query, column, value):
        return query.filter(column.like(self._escape(value) + '%'))
    def iprefix_op(self, query, column, value):
        return query.filter(column.ilike(self._escape(value) + '%'))
    def suffix_op(self, query, column, value):
        return query.filter(column.like('%' + self._escape(value)))
    def isuffix_op(self, query, column, value):
        return query.filter(column.ilike('%' + self._escape(value)))
    def contains_op(self, query, column, value):
        return query.filter(column.like('%' + self._escape(value) + '%'))
    def icontains_op(self, query, column, value):
        return query.filter(column.ilike('%' + self._escape(value) + '%'))
    def gt_op(self, query, column, value):
        return query.filter(column > value)
    def gte_op(self, query, column, value):
        return query.filter(column >= value)
    def lt_op(self, query, column, value):
        return query.filter(column < value)
    def lte_op(self, query, column, value):
        return query.filter(column <= value)
    def null_op(self, query, column, value):
        # Truthy value -> IS NULL; falsy -> IS NOT NULL.
        condition = (column == None) if value else (column != None)
        return query.filter(condition)
    def in_op(self, query, column, value):
        return query.filter(column.in_(value))
    def notin_op(self, query, column, value):
        return query.filter(not_(column.in_(value)))
def parse_attr_mapping(mapping):
    """Normalize a field->attribute mapping spec into a dict.

    Accepts a space-delimited string ('a b'), a sequence of names and/or
    (field, attr) pairs, or an already-built dict (returned as-is). Bare
    names map to themselves.
    """
    if isinstance(mapping, basestring):
        mapping = mapping.split(' ')
    if not isinstance(mapping, (list, tuple)):
        return mapping
    normalized = {}
    for entry in mapping:
        if isinstance(entry, (list, tuple)):
            key, attr = entry[0], entry[1]
        else:
            key = attr = entry
        normalized[key] = attr
    return normalized
class ModelController(Unit, Controller):
    """A mesh controller for spire.schema models.

    Bridges mesh resource requests (create/get/put/query/...) onto a
    SQLAlchemy model via a field-name -> model-attribute mapping.
    """
    # Field names returned when a request does not narrow the field set;
    # populated in __construct__ from the resource schema.
    default_fields = None
    schema = None
    mapping = None
    model = None
    # Optional {polymorphic identity: field->attr mapping} table.
    polymorphic_mapping = None
    # Field/attr pair naming the polymorphic discriminator, if any.
    polymorphic_on = None
    operators = FilterOperators()
    @classmethod
    def __construct__(cls):
        # Derive per-resource metadata (id field, default field set, and the
        # field->attribute mappings) once, at class-construction time.
        Controller.__construct__()
        if cls.resource:
            cls._composite_key = cls.resource.composite_key
            cls._id_field = cls.resource.id_field.name
            cls.default_fields = set()
            for name, field in cls.resource.schema.iteritems():
                # Identifiers are always returned; deferred fields are opt-in.
                if field.is_identifier or not field.deferred:
                    cls.default_fields.add(name)
            attr = cls.polymorphic_on
            if attr and not isinstance(attr, tuple):
                # Normalize to a (resource field, model attribute) pair.
                cls.polymorphic_on = (attr, attr)
            mapping = cls.polymorphic_mapping
            if mapping:
                for identity, submapping in mapping.items():
                    mapping[identity] = parse_attr_mapping(submapping)
            else:
                mapping = cls.mapping
                if mapping is None:
                    mapping = cls.resource.filter_schema().keys()
                cls.mapping = parse_attr_mapping(mapping)
    def acquire(self, subject):
        """Fetch the model instance identified by ``subject``, or None.

        Composite keys arrive as a single ';'-joined string (see
        _get_id_value) and are split back into their parts here.
        """
        if self._composite_key:
            try:
                subject = subject.split(';')
            except Exception:
                return None
        try:
            return self.schema.session.query(self.model).get(subject)
        except NoResultFound:
            # NOTE(review): Query.get normally returns None instead of
            # raising NoResultFound; this guard looks defensive -- confirm.
            return None
    def create(self, request, response, subject, data):
        """Create a model instance from ``data`` and respond with its id
        plus any explicitly requested returning fields."""
        returning = data.pop(RETURNING, None)
        instance = self.model.polymorphic_create(self._construct_model(data))
        self._annotate_model(request, instance, data)
        self.schema.session.add(instance)
        self.schema.session.commit()
        response(self._construct_returning(instance, returning))
    def delete(self, request, response, subject, data):
        """Delete ``subject`` and respond with its id."""
        subject.session.delete(subject)
        subject.session.commit()
        response({'id': self._get_id_value(subject)})
    def get(self, request, response, subject, data):
        """Respond with a single resource dict built from ``subject``."""
        resource = self._construct_resource(request, subject, data)
        self._annotate_resources(request, [resource], data)
        response(resource)
    def load(self, request, response, subject, data):
        """Bulk-fetch resources for ``data['identifiers']``, preserving the
        requested order and yielding None for ids that were not found."""
        candidates = data['identifiers']
        if not candidates:
            return response([])
        identifiers = []
        for i, identifier in enumerate(candidates):
            identifiers.append("(%d, '%s')" % (i, str(identifier)))
        # Join against a VALUES list so the database returns rows in the
        # same order the identifiers were requested in.
        expr = select([column('rank'), column('id')],
            from_obj="(values %s) as subset(rank, id)" % ', '.join(identifiers))
        query = self._annotate_query(
            request, self.schema.session.query(self.model), data)
        query = (query
            .join(expr.cte('__subset__'),
                literal_column('__subset__.id')==self.model.id)
            .order_by(literal_column('__subset__.rank')))
        resources = []
        instances = list(query.all())
        instance = (instances.pop(0) if instances else None)
        # Walk the requested ids and the ordered results in lockstep,
        # emitting None for any id that produced no row.
        for id in candidates:
            if instance:
                if instance.id == id:
                    resources.append(self._construct_resource(request, instance, data))
                    if instances:
                        instance = instances.pop(0)
                    else:
                        instance = None
                else:
                    resources.append(None)
            else:
                resources.append(None)
        self._annotate_resources(request, resources, data)
        response(resources)
    def put(self, request, response, subject, data):
        """Update ``subject`` if it exists, else create it under the
        request's id."""
        if subject:
            self.update(request, response, subject, data)
        else:
            data['id'] = request.subject
            self.create(request, response, subject, data)
    def query(self, request, response, subject, data):
        """Run a filtered/sorted/paginated query; respond with
        ``{'total': ..., 'resources': [...]}`` (or just the total)."""
        data = data or {}
        query = self.schema.session.query(self.model)
        filters = data.get('query')
        if filters:
            query = self._construct_filters(query, filters)
        query = self._annotate_query(request, query, data)
        total = query.count()
        if data.get('total'):
            # Caller only wanted the count; skip fetching rows.
            return response({'total': total})
        if 'sort' in data:
            query = self._construct_sorting(query, data['sort'])
        if 'limit' in data:
            query = query.limit(data['limit'])
        if 'offset' in data:
            query = query.offset(data['offset'])
        resources = []
        for instance in query.all():
            resources.append(self._construct_resource(request, instance, data))
        self._annotate_resources(request, resources, data)
        response({'total': total, 'resources': resources})
    def update(self, request, response, subject, data):
        """Apply ``data`` to ``subject`` and respond with its id plus any
        requested returning fields; empty data is a no-op read."""
        returning = data.pop(RETURNING, None)
        if data:
            subject.update_with_mapping(self._construct_model(data))
            self._annotate_model(request, subject, data)
            subject.session.commit()
        response(self._construct_returning(subject, returning))
    # --- extension hooks: subclasses override these to customize behaviour ---
    def _annotate_filter(self, query, column, value):
        """Hook: return an amended query to handle a filter specially, or a
        falsy value to fall through to the standard operators."""
        pass
    def _annotate_model(self, request, model, data):
        """Hook: adjust ``model`` before it is committed."""
        pass
    def _annotate_query(self, request, query, data):
        """Hook: amend every query before execution."""
        return query
    def _annotate_resource(self, request, model, resource, data):
        """Hook: add computed fields to an outgoing resource dict."""
        pass
    def _annotate_resources(self, request, resources, data):
        """Hook: post-process the full resource list before responding."""
        pass
    def _construct_filters(self, query, filters):
        """Translate mesh ``field__op`` filters into SQLAlchemy clauses via
        self.operators."""
        model = self.model
        mapping = self._get_mapping(model)
        operators = self.operators
        for filter, value in filters.iteritems():
            # Give subclasses first crack at each filter.
            annotation = self._annotate_filter(query, filter, value)
            if annotation:
                query = annotation
                continue
            # Default operator is equality when no __suffix is present.
            attr, operator = filter, 'equal'
            if '__' in filter:
                attr, operator = filter.rsplit('__', 1)
            column = getattr(model, mapping[attr])
            if not column:
                continue
            constructor = getattr(operators, operator + '_op')
            query = constructor(query, column, value)
        return query
    def _construct_model(self, data):
        """Map resource field values in ``data`` onto model attribute names."""
        mapping = self.mapping
        if self.polymorphic_on:
            mapping = self.polymorphic_mapping[data[self.polymorphic_on[0]]]
        model = {}
        for name, attr in mapping.iteritems():
            if name in data:
                model[attr] = data[name]
        if self._composite_key and 'id' in model:
            # The synthetic ';'-joined id is never stored on the model.
            del model['id']
        return model
    def _construct_resource(self, request, model, data, **resource):
        """Build the outgoing resource dict for ``model``, honoring any
        field selection present in ``data``."""
        mapping = self._get_mapping(model)
        fields = FieldFilter(self, data)
        resource['id'] = self._get_id_value(model)
        for name, attr in mapping.iteritems():
            if name in fields and name != 'id':
                try:
                    resource[name] = getattr(model, attr)
                except AttributeError:
                    pass
        self._annotate_resource(request, model, resource, data)
        return resource
    def _construct_returning(self, model, returning, response=None):
        """Assemble a response body: the id plus any ``returning`` fields
        that exist on the model and are not already set."""
        if response is None:
            response = {}
        if 'id' not in response:
            response['id'] = self._get_id_value(model)
        if returning:
            mapping = self._get_mapping(model)
            for name in returning:
                if name in mapping and name not in response:
                    try:
                        response[name] = getattr(model, mapping[name])
                    except AttributeError:
                        pass
        return response
    def _construct_sorting(self, query, sorting):
        """Apply sort specs of the form 'field', 'field+' (asc) or
        'field-' (desc)."""
        model = self.model
        mapping = self._get_mapping(model)
        columns = []
        for attr in sorting:
            direction = asc
            if attr[-1] == '+':
                attr = attr[:-1]
            elif attr[-1] == '-':
                attr = attr[:-1]
                direction = desc
            column = getattr(model, mapping[attr])
            if not column:
                continue
            columns.append(direction(column))
        return query.order_by(*columns)
    def _get_id_value(self, model):
        """Return the resource id; composite keys become ';'-joined strings."""
        mapping = self._get_mapping(model)
        if self._composite_key:
            values = []
            for key in self._composite_key:
                values.append(getattr(model, mapping[key]))
            return ';'.join(values)
        else:
            return getattr(model, mapping['id'])
    def _get_mapping(self, model):
        """Select the field->attribute mapping, honoring polymorphism."""
        if self.polymorphic_on:
            identity = getattr(model, self.polymorphic_on[1])
            return self.polymorphic_mapping[identity]
        else:
            return self.mapping
def support_returning(method):
    """Decorator for controller actions.

    Pops the RETURNING entry out of ``data``, runs the wrapped action to
    obtain a model, and responds with the model serialized through the
    controller's ``_construct_returning``.
    """
    def wrapper(self, request, response, subject, data):
        requested = data.pop(RETURNING, None)
        model = method(self, request, response, subject, data)
        response(self._construct_returning(model, requested))
    return wrapper
class ProxyController(Unit, Controller):
    """A mesh controller for mesh proxy models.

    Forwards resource requests onto a remote mesh model
    (``proxy_model``), translating field names through ``mapping`` in
    both directions.
    """
    proxy_model = None
    mapping = None
    @classmethod
    def __construct__(cls):
        # Resolve the field->proxy-attribute mapping once per class.
        Controller.__construct__()
        if cls.resource:
            mapping = cls.mapping
            if mapping is None:
                # NOTE(review): ModelController calls resource.filter_schema()
                # while this accesses it as an attribute -- confirm which is
                # correct for this mesh version.
                mapping = cls.resource.filter_schema.keys()
            cls.mapping = parse_attr_mapping(mapping)
            cls.id_field = cls.resource.id_field
    def acquire(self, subject):
        """Fetch the proxied instance for ``subject``; None when gone."""
        try:
            return self.proxy_model.get(subject)
        except GoneError:
            return None
    def create(self, request, response, subject, data):
        """Create a proxied instance from ``data``; respond with its id."""
        proxy_model = self._construct_proxy_model(data)
        self._annotate_proxy_model(request, proxy_model, data)
        subject = self.proxy_model.create(proxy_model)
        id_field = self.id_field
        response({id_field: self._get_proxy_model_value(subject, id_field)})
    def delete(self, request, response, subject, data):
        """Destroy the proxied instance; respond with its id."""
        subject.destroy()
        id_field = self.id_field
        response({id_field: self._get_proxy_model_value(subject, id_field)})
    def get(self, request, response, subject, data):
        """Respond with a single resource dict built from ``subject``."""
        resource = self._construct_resource(request, subject, data)
        self._annotate_resource(request, resource, subject, data)
        # _prune_resource is inherited -- presumably from Controller; confirm.
        response(self._prune_resource(resource, data))
    def put(self, request, response, subject, data):
        """Update ``subject`` if it exists, else create it under the
        request's id."""
        if subject:
            self.update(request, response, subject, data)
        else:
            data[self.id_field] = request.subject
            self.create(request, response, subject, data)
    def query(self, request, response, subject, data):
        """Forward a query to the proxied model and respond with
        ``{'resources': [...], 'total': ...}``; a NotFoundError from the
        remote side is treated as an empty result set."""
        data = data or {}
        if 'query' in data:
            data['query'] = self._construct_filter(data['query'])
        try:
            query_results = self.proxy_model.query(**data).all()
        except NotFoundError:
            query_results = []
            total = 0
            status = OK
        else:
            total = query_results.total
            status = query_results.status
        resources = []
        for result in query_results:
            resource = self._construct_resource(request, result, data)
            self._annotate_resource(request, resource, result, data)
            resources.append(self._prune_resource(resource, data))
        response(status=status,
                 content={'resources': resources, 'total': total})
    def update(self, request, response, subject, data):
        """Apply ``data`` to the proxied instance (no-op when empty) and
        respond with its id."""
        if data:
            proxy_data = self._construct_proxy_model(data)
            self._annotate_proxy_model(request, proxy_data, data)
            subject.update(proxy_data)
        id_field = self.id_field
        response({id_field: self._get_proxy_model_value(subject, id_field)})
    def _construct_filter(self, filters):
        """Rewrite ``field__op`` filter keys onto proxy attribute names,
        preserving the operator suffix."""
        mapping = self.mapping
        subject_filters = {}
        for filter_operand, value in filters.iteritems():
            filter_operands = filter_operand.rsplit('__', 1)
            filter_operands[0] = mapping[filter_operands[0]]
            subject_filters['__'.join(filter_operands)] = value
        return subject_filters
    def _construct_resource(self, request, subject, data):
        """Build a resource dict from the proxied instance, skipping
        attributes the instance does not carry."""
        resource = {}
        for res_field, proxy_field in self.mapping.iteritems():
            try:
                resource[res_field] = getattr(subject, proxy_field)
            except AttributeError:
                continue
        return resource
    def _construct_proxy_model(self, data):
        """Map resource field values in ``data`` onto proxy attribute names."""
        subject = {}
        mapping = self.mapping
        for field, value in data.iteritems():
            subject_field = mapping[field]
            subject[subject_field] = value
        return subject
    def _get_proxy_model_value(self, subject, field):
        """Read the proxy attribute corresponding to resource ``field``."""
        model_field = self.mapping[field]
        return getattr(subject, model_field)
    def _annotate_resource(self, request, resource, proxy_model, data):
        """Hook: add computed fields to an outgoing resource dict."""
        pass
    def _annotate_proxy_model(self, request, proxy_data, data):
        """Hook: adjust proxy data before it is sent to the remote model."""
        pass
class UnitController(Unit, Controller, Configurable):
    """A generic unit controller.

    Empty base combining Unit, Controller and Configurable; defines no
    behaviour of its own.
    """
| 33.050584 | 87 | 0.597598 | import re
from mesh.constants import OK, RETURNING
from mesh.exceptions import GoneError, NotFoundError
from mesh.standard import Controller
from sqlalchemy.sql import asc, column, desc, func, literal_column, not_, select
from spire.core import Configurable, Unit
from spire.schema import NoResultFound
__all__ = ('ModelController', 'ProxyController', 'UnitController', 'support_returning')
EMPTY = []
class FieldFilter(object):
def __init__(self, controller, data):
if not data:
self.fields = controller.default_fields
return
if 'fields' in data:
self.fields = set(data['fields'])
return
self.fields = controller.default_fields.copy()
if 'include' in data:
self.fields.update(data['include'])
if 'exclude' in data:
self.fields.difference_update(data['exclude'])
def __contains__(self, field):
return (field in self.fields)
class FilterOperators(object):
WILDCARD_EXPR = re.compile(r'([%_])')
def equal_op(self, query, column, value):
return query.filter(column == value)
def iequal_op(self, query, column, value):
return query.filter(func.lower(column) == value)
def not_op(self, query, column, value):
return query.filter(column != value)
def inot_op(self, query, column, value):
return query.filter(func.lower(column) != value)
def prefix_op(self, query, column, value):
value = self.WILDCARD_EXPR.sub(r'\\\1', value)
return query.filter(column.like(value + '%'))
def iprefix_op(self, query, column, value):
value = self.WILDCARD_EXPR.sub(r'\\\1', value)
return query.filter(column.ilike(value + '%'))
def suffix_op(self, query, column, value):
value = self.WILDCARD_EXPR.sub(r'\\\1', value)
return query.filter(column.like('%' + value))
def isuffix_op(self, query, column, value):
value = self.WILDCARD_EXPR.sub(r'\\\1', value)
return query.filter(column.ilike('%' + value))
def contains_op(self, query, column, value):
value = self.WILDCARD_EXPR.sub(r'\\\1', value)
return query.filter(column.like('%' + value + '%'))
def icontains_op(self, query, column, value):
value = self.WILDCARD_EXPR.sub(r'\\\1', value)
return query.filter(column.ilike('%' + value + '%'))
def gt_op(self, query, column, value):
return query.filter(column > value)
def gte_op(self, query, column, value):
return query.filter(column >= value)
def lt_op(self, query, column, value):
return query.filter(column < value)
def lte_op(self, query, column, value):
return query.filter(column <= value)
def null_op(self, query, column, value):
if value:
return query.filter(column == None)
else:
return query.filter(column != None)
def in_op(self, query, column, value):
return query.filter(column.in_(value))
def notin_op(self, query, column, value):
return query.filter(not_(column.in_(value)))
def parse_attr_mapping(mapping):
if isinstance(mapping, basestring):
mapping = mapping.split(' ')
if isinstance(mapping, (list, tuple)):
pairs = {}
for pair in mapping:
if isinstance(pair, (list, tuple)):
pairs[pair[0]] = pair[1]
else:
pairs[pair] = pair
mapping = pairs
return mapping
class ModelController(Unit, Controller):
default_fields = None
schema = None
mapping = None
model = None
polymorphic_mapping = None
polymorphic_on = None
operators = FilterOperators()
@classmethod
def __construct__(cls):
Controller.__construct__()
if cls.resource:
cls._composite_key = cls.resource.composite_key
cls._id_field = cls.resource.id_field.name
cls.default_fields = set()
for name, field in cls.resource.schema.iteritems():
if field.is_identifier or not field.deferred:
cls.default_fields.add(name)
attr = cls.polymorphic_on
if attr and not isinstance(attr, tuple):
cls.polymorphic_on = (attr, attr)
mapping = cls.polymorphic_mapping
if mapping:
for identity, submapping in mapping.items():
mapping[identity] = parse_attr_mapping(submapping)
else:
mapping = cls.mapping
if mapping is None:
mapping = cls.resource.filter_schema().keys()
cls.mapping = parse_attr_mapping(mapping)
def acquire(self, subject):
if self._composite_key:
try:
subject = subject.split(';')
except Exception:
return None
try:
return self.schema.session.query(self.model).get(subject)
except NoResultFound:
return None
def create(self, request, response, subject, data):
returning = data.pop(RETURNING, None)
instance = self.model.polymorphic_create(self._construct_model(data))
self._annotate_model(request, instance, data)
self.schema.session.add(instance)
self.schema.session.commit()
response(self._construct_returning(instance, returning))
def delete(self, request, response, subject, data):
subject.session.delete(subject)
subject.session.commit()
response({'id': self._get_id_value(subject)})
def get(self, request, response, subject, data):
resource = self._construct_resource(request, subject, data)
self._annotate_resources(request, [resource], data)
response(resource)
def load(self, request, response, subject, data):
candidates = data['identifiers']
if not candidates:
return response([])
identifiers = []
for i, identifier in enumerate(candidates):
identifiers.append("(%d, '%s')" % (i, str(identifier)))
expr = select([column('rank'), column('id')],
from_obj="(values %s) as subset(rank, id)" % ', '.join(identifiers))
query = self._annotate_query(
request, self.schema.session.query(self.model), data)
query = (query
.join(expr.cte('__subset__'),
literal_column('__subset__.id')==self.model.id)
.order_by(literal_column('__subset__.rank')))
resources = []
instances = list(query.all())
instance = (instances.pop(0) if instances else None)
for id in candidates:
if instance:
if instance.id == id:
resources.append(self._construct_resource(request, instance, data))
if instances:
instance = instances.pop(0)
else:
instance = None
else:
resources.append(None)
else:
resources.append(None)
self._annotate_resources(request, resources, data)
response(resources)
def put(self, request, response, subject, data):
if subject:
self.update(request, response, subject, data)
else:
data['id'] = request.subject
self.create(request, response, subject, data)
def query(self, request, response, subject, data):
data = data or {}
query = self.schema.session.query(self.model)
filters = data.get('query')
if filters:
query = self._construct_filters(query, filters)
query = self._annotate_query(request, query, data)
total = query.count()
if data.get('total'):
return response({'total': total})
if 'sort' in data:
query = self._construct_sorting(query, data['sort'])
if 'limit' in data:
query = query.limit(data['limit'])
if 'offset' in data:
query = query.offset(data['offset'])
resources = []
for instance in query.all():
resources.append(self._construct_resource(request, instance, data))
self._annotate_resources(request, resources, data)
response({'total': total, 'resources': resources})
def update(self, request, response, subject, data):
returning = data.pop(RETURNING, None)
if data:
subject.update_with_mapping(self._construct_model(data))
self._annotate_model(request, subject, data)
subject.session.commit()
response(self._construct_returning(subject, returning))
def _annotate_filter(self, query, filter, value):
pass
def _annotate_model(self, request, model, data):
pass
def _annotate_query(self, request, query, data):
return query
def _annotate_resource(self, request, model, resource, data):
pass
def _annotate_resources(self, request, resources, data):
pass
def _construct_filters(self, query, filters):
model = self.model
mapping = self._get_mapping(model)
operators = self.operators
for filter, value in filters.iteritems():
annotation = self._annotate_filter(query, filter, value)
if annotation:
query = annotation
continue
attr, operator = filter, 'equal'
if '__' in filter:
attr, operator = filter.rsplit('__', 1)
column = getattr(model, mapping[attr])
if not column:
continue
constructor = getattr(operators, operator + '_op')
query = constructor(query, column, value)
return query
def _construct_model(self, data):
mapping = self.mapping
if self.polymorphic_on:
mapping = self.polymorphic_mapping[data[self.polymorphic_on[0]]]
model = {}
for name, attr in mapping.iteritems():
if name in data:
model[attr] = data[name]
if self._composite_key and 'id' in model:
del model['id']
return model
def _construct_resource(self, request, model, data, **resource):
mapping = self._get_mapping(model)
fields = FieldFilter(self, data)
resource['id'] = self._get_id_value(model)
for name, attr in mapping.iteritems():
if name in fields and name != 'id':
try:
resource[name] = getattr(model, attr)
except AttributeError:
pass
self._annotate_resource(request, model, resource, data)
return resource
def _construct_returning(self, model, returning, response=None):
if response is None:
response = {}
if 'id' not in response:
response['id'] = self._get_id_value(model)
if returning:
mapping = self._get_mapping(model)
for name in returning:
if name in mapping and name not in response:
try:
response[name] = getattr(model, mapping[name])
except AttributeError:
pass
return response
def _construct_sorting(self, query, sorting):
model = self.model
mapping = self._get_mapping(model)
columns = []
for attr in sorting:
direction = asc
if attr[-1] == '+':
attr = attr[:-1]
elif attr[-1] == '-':
attr = attr[:-1]
direction = desc
column = getattr(model, mapping[attr])
if not column:
continue
columns.append(direction(column))
return query.order_by(*columns)
def _get_id_value(self, model):
mapping = self._get_mapping(model)
if self._composite_key:
values = []
for key in self._composite_key:
values.append(getattr(model, mapping[key]))
return ';'.join(values)
else:
return getattr(model, mapping['id'])
def _get_mapping(self, model):
if self.polymorphic_on:
identity = getattr(model, self.polymorphic_on[1])
return self.polymorphic_mapping[identity]
else:
return self.mapping
def support_returning(method):
    """Decorate a controller mutator so the client can request fields back.

    Pops the RETURNING list from the request *data*, invokes the wrapped
    method (which must return the affected model), and responds with the
    id plus any requested fields.

    Fix: the wrapper now preserves the wrapped method's metadata via
    functools.wraps, so introspection/debugging shows the real method
    name instead of 'wrapper'.
    """
    from functools import wraps

    @wraps(method)
    def wrapper(self, request, response, subject, data):
        returning = data.pop(RETURNING, None)
        model = method(self, request, response, subject, data)
        response(self._construct_returning(model, returning))
    return wrapper
class ProxyController(Unit, Controller):
    """Controller that fronts a remote/proxy model instead of a local table.

    Resource field names are translated to proxy-model field names through
    ``mapping``; the ``_annotate_resource`` / ``_annotate_proxy_model``
    hooks let subclasses inject extra data in either direction.
    """
    # the proxy model class all CRUD calls are delegated to
    proxy_model = None
    # resource-field -> proxy-field translation; when left as None it is
    # derived from the resource's filter schema in __construct__
    mapping = None
    @classmethod
    def __construct__(cls):
        Controller.__construct__()
        if cls.resource:
            mapping = cls.mapping
            if mapping is None:
                mapping = cls.resource.filter_schema.keys()
            cls.mapping = parse_attr_mapping(mapping)
            cls.id_field = cls.resource.id_field
    def acquire(self, subject):
        """Fetch the proxied subject, returning None when it is gone."""
        try:
            return self.proxy_model.get(subject)
        except GoneError:
            return None
    def create(self, request, response, subject, data):
        """Create a proxied model from *data* and respond with its id."""
        proxy_model = self._construct_proxy_model(data)
        self._annotate_proxy_model(request, proxy_model, data)
        subject = self.proxy_model.create(proxy_model)
        id_field = self.id_field
        response({id_field: self._get_proxy_model_value(subject, id_field)})
    def delete(self, request, response, subject, data):
        """Destroy the proxied subject and respond with its id."""
        subject.destroy()
        id_field = self.id_field
        response({id_field: self._get_proxy_model_value(subject, id_field)})
    def get(self, request, response, subject, data):
        """Serialize the proxied subject and respond with the resource."""
        resource = self._construct_resource(request, subject, data)
        self._annotate_resource(request, resource, subject, data)
        response(self._prune_resource(resource, data))
    def put(self, request, response, subject, data):
        """Upsert: update when the subject exists, otherwise create it
        under the id given in the request."""
        if subject:
            self.update(request, response, subject, data)
        else:
            data[self.id_field] = request.subject
            self.create(request, response, subject, data)
    def query(self, request, response, subject, data):
        """List proxied resources matching the (translated) query filters."""
        data = data or {}
        if 'query' in data:
            data['query'] = self._construct_filter(data['query'])
        try:
            query_results = self.proxy_model.query(**data).all()
        except NotFoundError:
            # an empty result set is still a successful response
            query_results = []
            total = 0
            status = OK
        else:
            total = query_results.total
            status = query_results.status
        resources = []
        for result in query_results:
            resource = self._construct_resource(request, result, data)
            self._annotate_resource(request, resource, result, data)
            resources.append(self._prune_resource(resource, data))
        response(status=status,
                 content={'resources': resources, 'total': total})
    def update(self, request, response, subject, data):
        """Apply *data* to the proxied subject (no-op when empty) and
        respond with its id."""
        if data:
            proxy_data = self._construct_proxy_model(data)
            self._annotate_proxy_model(request, proxy_data, data)
            subject.update(proxy_data)
        id_field = self.id_field
        response({id_field: self._get_proxy_model_value(subject, id_field)})
    def _construct_filter(self, filters):
        """Rewrite 'field__operator' filter keys onto proxy field names."""
        mapping = self.mapping
        subject_filters = {}
        for filter_operand, value in filters.iteritems():
            filter_operands = filter_operand.rsplit('__', 1)
            filter_operands[0] = mapping[filter_operands[0]]
            subject_filters['__'.join(filter_operands)] = value
        return subject_filters
    def _construct_resource(self, request, subject, data):
        """Copy every mapped attribute present on *subject* into a dict."""
        resource = {}
        for res_field, proxy_field in self.mapping.iteritems():
            try:
                resource[res_field] = getattr(subject, proxy_field)
            except AttributeError:
                continue
        return resource
    def _construct_proxy_model(self, data):
        """Translate a resource payload into proxy-model field values."""
        subject = {}
        mapping = self.mapping
        for field, value in data.iteritems():
            subject_field = mapping[field]
            subject[subject_field] = value
        return subject
    def _get_proxy_model_value(self, subject, field):
        """Read the proxy-model attribute backing resource *field*."""
        model_field = self.mapping[field]
        return getattr(subject, model_field)
    def _annotate_resource(self, request, resource, proxy_model, data):
        # hook for subclasses: enrich the outgoing resource
        pass
    def _annotate_proxy_model(self, request, proxy_data, data):
        # hook for subclasses: enrich the outgoing proxy payload
        pass
class UnitController(Unit, Controller, Configurable):
| true | true |
f7346f85460093720e9ba594db667860b2ae00a2 | 5,121 | py | Python | databricks_cicd/conf/conf.py | man40/databricks-cicd | f2adade3a4b815cade74b961fa77bdfad42f3ec8 | [
"Apache-2.0"
] | 1 | 2021-12-11T17:11:34.000Z | 2021-12-11T17:11:34.000Z | databricks_cicd/conf/conf.py | man40/databricks-cicd | f2adade3a4b815cade74b961fa77bdfad42f3ec8 | [
"Apache-2.0"
] | null | null | null | databricks_cicd/conf/conf.py | man40/databricks-cicd | f2adade3a4b815cade74b961fa77bdfad42f3ec8 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) databricks-cicd 2021 man40 (man40dev@gmail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from os import path as op
from configparser import ConfigParser
from textwrap import indent
_log = logging.getLogger(__name__)
class ConfBase:
    """Common parsing/rendering helpers shared by all config sections."""

    def __repr__(self):
        # render only public attributes; nested sections are indented
        public = ((k, v) for k, v in self.__dict__.items() if not k.startswith('_'))
        return '\n'.join(f'{key}: {self._indent(value)}' for key, value in public)

    @staticmethod
    def _parse_int(value) -> int:
        # NOTE(review): eval() lets config values be arithmetic expressions
        # (e.g. "1024*1024") but will execute arbitrary code from the config
        # file -- acceptable only for trusted configs
        if isinstance(value, str):
            return eval(value)
        return int(value)

    @staticmethod
    def _parse_list(value) -> list:
        # config lists are newline-delimited; empty entries are dropped
        if value is None:
            return []
        return [item for item in value.split('\n') if item]

    @staticmethod
    def _indent(obj):
        # nested ConfBase sections render on their own indented lines
        if isinstance(obj, ConfBase):
            return f'\n{indent(str(obj), "  ")}'
        return obj
class Conf(ConfBase):
    """Top-level CICD configuration.

    Values are layered in increasing priority: the packaged default.ini,
    then the optional user config file, then command-line arguments.
    """
    def __init__(self, cmd_args: dict, config_file: str):
        default_config_file = op.join(op.dirname(__file__), 'default.ini')
        parser = ConfigParser()
        parser.read(default_config_file)
        override_config_file = config_file
        if override_config_file:
            assert op.isfile(override_config_file), f'Config file was not found in: {override_config_file}'
            parser.read(override_config_file)
        # command-line arguments win over both config files
        parser.read_dict(cmd_args)
        self._section = 'global'
        self.workspace_host = parser[self._section].get('workspace_host')
        self.deploying_user_name = parser[self._section].get('deploying_user_name')
        # not a config value; starts unset and is filled in later at runtime
        self.deploying_user_id = None
        self.local_path = parser[self._section].get('local_path')
        self.dry_run = parser[self._section].getboolean('dry_run')
        self.name_prefix = parser[self._section].get('name_prefix')
        self.deploy_safety_limit = self._parse_int(parser[self._section].get('deploy_safety_limit'))
        self.rate_limit_timeout = self._parse_int(parser[self._section].get('rate_limit_timeout'))
        self.rate_limit_attempts = self._parse_int(parser[self._section].get('rate_limit_attempts'))
        # per-artifact sub-sections
        self.workspace = ConfWorkspace(parser)
        self.instance_pools = ConfInstancePools(parser)
        self.clusters = ConfClusters(parser)
        self.jobs = ConfJobs(parser)
        self.dbfs = ConfDBFS(parser)
class ConfWorkspace(ConfBase):
    """Settings for the 'workspace' deployment target."""

    def __init__(self, parser: ConfigParser):
        self._section = 'workspace'
        section = parser[self._section]
        self.deploy = section.getboolean('deploy')
        self.local_sub_dir = section.get('local_sub_dir')
        self.target_path = section.get('target_path')
        # guard against deploying straight into the workspace root
        assert self.target_path != '/', 'Cannot deploy in the workspace root folder!'
class ConfInstancePools(ConfBase):
    """Settings for deploying instance pools."""

    def __init__(self, parser: ConfigParser):
        self._section = 'instance_pools'
        section = parser[self._section]
        self.deploy = section.getboolean('deploy')
        self.local_sub_dir = section.get('local_sub_dir')
        self.ignore_attributes = self._parse_list(section.get('ignore_attributes'))
        self.strip_attributes = self._parse_list(section.get('strip_attributes'))
class ConfClusters(ConfBase):
    """Settings for deploying cluster definitions."""

    def __init__(self, parser: ConfigParser):
        self._section = 'clusters'
        section = parser[self._section]
        self.deploy = section.getboolean('deploy')
        self.local_sub_dir = section.get('local_sub_dir')
        self.ignore_attributes = self._parse_list(section.get('ignore_attributes'))
        # attributes ignored only for clusters backed by an instance pool
        self.ignore_attributes_with_instance_pool = self._parse_list(
            section.get('ignore_attributes_with_instance_pool'))
        self.strip_attributes = self._parse_list(section.get('strip_attributes'))
class ConfJobs(ConfBase):
    """Settings for deploying job definitions."""

    def __init__(self, parser: ConfigParser):
        self._section = 'jobs'
        section = parser[self._section]
        self.deploy = section.getboolean('deploy')
        self.local_sub_dir = section.get('local_sub_dir')
        self.strip_attributes = self._parse_list(section.get('strip_attributes'))
class ConfDBFS(ConfBase):
    """Settings for deploying files to DBFS."""

    def __init__(self, parser: ConfigParser):
        self._section = 'dbfs'
        section = parser[self._section]
        self.deploy = section.getboolean('deploy')
        self.local_sub_dir = section.get('local_sub_dir')
        self.compare_contents = section.getboolean('compare_contents')
        self.target_path = section.get('target_path')
        # eval() allows expressions such as "1024*1024" in the config value;
        # only safe because the config file is trusted
        self.transfer_block_size = eval(section.get('transfer_block_size'))
        # guard against deploying straight into the DBFS root
        assert self.target_path != '/', 'Cannot deploy in the dbfs root folder!'
| 43.033613 | 110 | 0.702988 |
import logging
from os import path as op
from configparser import ConfigParser
from textwrap import indent
_log = logging.getLogger(__name__)
class ConfBase:
def __repr__(self):
return '\n'.join(f'{k}: {self._indent(v)}' for k, v in self.__dict__.items() if not k.startswith('_'))
@staticmethod
def _parse_int(value) -> int:
return eval(value) if isinstance(value, str) else int(value)
@staticmethod
def _parse_list(value) -> list:
return [] if value is None else [v for v in value.split('\n') if v]
@staticmethod
def _indent(obj):
if isinstance(obj, ConfBase):
return f'\n{indent(str(obj), " ")}'
return obj
class Conf(ConfBase):
def __init__(self, cmd_args: dict, config_file: str):
default_config_file = op.join(op.dirname(__file__), 'default.ini')
parser = ConfigParser()
parser.read(default_config_file)
override_config_file = config_file
if override_config_file:
assert op.isfile(override_config_file), f'Config file was not found in: {override_config_file}'
parser.read(override_config_file)
parser.read_dict(cmd_args)
self._section = 'global'
self.workspace_host = parser[self._section].get('workspace_host')
self.deploying_user_name = parser[self._section].get('deploying_user_name')
self.deploying_user_id = None
self.local_path = parser[self._section].get('local_path')
self.dry_run = parser[self._section].getboolean('dry_run')
self.name_prefix = parser[self._section].get('name_prefix')
self.deploy_safety_limit = self._parse_int(parser[self._section].get('deploy_safety_limit'))
self.rate_limit_timeout = self._parse_int(parser[self._section].get('rate_limit_timeout'))
self.rate_limit_attempts = self._parse_int(parser[self._section].get('rate_limit_attempts'))
self.workspace = ConfWorkspace(parser)
self.instance_pools = ConfInstancePools(parser)
self.clusters = ConfClusters(parser)
self.jobs = ConfJobs(parser)
self.dbfs = ConfDBFS(parser)
class ConfWorkspace(ConfBase):
def __init__(self, parser: ConfigParser):
self._section = 'workspace'
self.deploy = parser[self._section].getboolean('deploy')
self.local_sub_dir = parser[self._section].get('local_sub_dir')
self.target_path = parser[self._section].get('target_path')
assert self.target_path != '/', 'Cannot deploy in the workspace root folder!'
class ConfInstancePools(ConfBase):
def __init__(self, parser: ConfigParser):
self._section = 'instance_pools'
self.deploy = parser[self._section].getboolean('deploy')
self.local_sub_dir = parser[self._section].get('local_sub_dir')
self.ignore_attributes = self._parse_list(parser[self._section].get('ignore_attributes'))
self.strip_attributes = self._parse_list(parser[self._section].get('strip_attributes'))
class ConfClusters(ConfBase):
def __init__(self, parser: ConfigParser):
self._section = 'clusters'
self.deploy = parser[self._section].getboolean('deploy')
self.local_sub_dir = parser[self._section].get('local_sub_dir')
self.ignore_attributes = self._parse_list(parser[self._section].get('ignore_attributes'))
self.ignore_attributes_with_instance_pool = self._parse_list(
parser[self._section].get('ignore_attributes_with_instance_pool'))
self.strip_attributes = self._parse_list(parser[self._section].get('strip_attributes'))
class ConfJobs(ConfBase):
def __init__(self, parser: ConfigParser):
self._section = 'jobs'
self.deploy = parser[self._section].getboolean('deploy')
self.local_sub_dir = parser[self._section].get('local_sub_dir')
self.strip_attributes = self._parse_list(parser[self._section].get('strip_attributes'))
class ConfDBFS(ConfBase):
def __init__(self, parser: ConfigParser):
self._section = 'dbfs'
self.deploy = parser[self._section].getboolean('deploy')
self.local_sub_dir = parser[self._section].get('local_sub_dir')
self.compare_contents = parser[self._section].getboolean('compare_contents')
self.target_path = parser[self._section].get('target_path')
self.transfer_block_size = eval(parser[self._section].get('transfer_block_size'))
assert self.target_path != '/', 'Cannot deploy in the dbfs root folder!'
| true | true |
f7346ffb2493e91ef38c97544db2cda7417b56a5 | 538 | py | Python | django_runner/management/commands/runner.py | eagletmt/sentry-runner | a764d439356e601bf224235d48d88316fd2bc8e4 | [
"MIT"
] | null | null | null | django_runner/management/commands/runner.py | eagletmt/sentry-runner | a764d439356e601bf224235d48d88316fd2bc8e4 | [
"MIT"
] | null | null | null | django_runner/management/commands/runner.py | eagletmt/sentry-runner | a764d439356e601bf224235d48d88316fd2bc8e4 | [
"MIT"
] | null | null | null | from __future__ import print_function
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = 'Run arbitrary script within Django environment'
option_list = BaseCommand.option_list + (
make_option('--file', dest='file', help='Path to Python script'),
)
def handle(self, **options):
file = options['file']
if file is None:
raise CommandError('--file option is required')
execfile(options['file'])
| 29.888889 | 73 | 0.685874 | from __future__ import print_function
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = 'Run arbitrary script within Django environment'
option_list = BaseCommand.option_list + (
make_option('--file', dest='file', help='Path to Python script'),
)
def handle(self, **options):
file = options['file']
if file is None:
raise CommandError('--file option is required')
execfile(options['file'])
| true | true |
f73470c7ecfe1e2c2e64e9938f9d472191f701fd | 2,465 | py | Python | modules/utils/constants.py | eugenet12/covidresearchviewer | 1e86ba385efe35b1e4d406ef516158b34726bea5 | [
"MIT"
] | null | null | null | modules/utils/constants.py | eugenet12/covidresearchviewer | 1e86ba385efe35b1e4d406ef516158b34726bea5 | [
"MIT"
] | 1 | 2022-02-27T09:48:05.000Z | 2022-02-27T09:48:05.000Z | modules/utils/constants.py | eugenet12/covidresearchviewer | 1e86ba385efe35b1e4d406ef516158b34726bea5 | [
"MIT"
] | null | null | null | import os
DATA_DIR = os.environ.get(
"COVID_WEBAPP_DATA_DIR", "/home/ubuntu/efs-mnt/latest_new/"
)
# elasticsearch index
ES_COL_TO_TYPE = {
"cord_uid": {
"type": "keyword"
},
"title":{
"type": "text",
},
"abstract": {
"type": "text",
},
"text": {
"type": "text"
},
"authors": {
"type": "keyword"
},
"journal": {
"type": "keyword"
},
"url": {
"type": "text"
},
"publish_date": {
"type": "date"
},
"publish_date_for_web": {
"type": "keyword"
},
"embedding": {
"type": "float"
},
"language": {
"type": "keyword"
},
"topic_diagnosis": {
"type": "boolean"
},
"topic_epidemiology": {
"type": "boolean"
},
"topic_prevention": {
"type": "boolean"
},
"topic_transmission": {
"type": "boolean"
},
"topic_treatment": {
"type": "boolean"
},
"topic_vaccine": {
"type": "boolean"
},
"topics": {
"type": "keyword"
},
"scibert_summary_short": {
"type": "text"
},
"scibert_summary_short_cleaned": {
"type": "text"
},
"top_keywords": {
"type": "keyword"
},
"is_peer_reviewed": {
"type": "boolean"
},
"summary_length": {
"type": "long"
},
"is_clinical_paper": {
"type": "boolean"
},
}
ES_INDEX = "cord19-docs"
ES_URL = "https://search-es-covid-research-n6etnstkyvx6k2oxrlavq66sia.us-east-2.es.amazonaws.com/"
TREATMENT_ES_COL_TO_TYPE = {
"clinical_trial_id": {
"type": "keyword"
},
"date_last_updated": {
"type": "date"
},
"developer": {
"type": "keyword"
},
"fda_approval": {
"type": "text"
},
"funder": {
"type": "keyword"
},
"has_emerg_use_auth": {
"type": "keyword"
},
"name": {
"type": "keyword"
},
"next_steps": {
"type": "text"
},
"phase": {
"type": "keyword",
},
"product_category": {
"type": "keyword",
},
"product_description": {
"type": "text"
},
"published_results": {
"type": "text"
},
"stage": {
"type": "keyword"
},
"aliases": {
"type": "keyword"
},
"num_paper_mentions": {
"type": "long"
},
}
TREATMENT_ES_INDEX = "cord19-treatments" | 18.674242 | 98 | 0.457606 | import os
DATA_DIR = os.environ.get(
"COVID_WEBAPP_DATA_DIR", "/home/ubuntu/efs-mnt/latest_new/"
)
ES_COL_TO_TYPE = {
"cord_uid": {
"type": "keyword"
},
"title":{
"type": "text",
},
"abstract": {
"type": "text",
},
"text": {
"type": "text"
},
"authors": {
"type": "keyword"
},
"journal": {
"type": "keyword"
},
"url": {
"type": "text"
},
"publish_date": {
"type": "date"
},
"publish_date_for_web": {
"type": "keyword"
},
"embedding": {
"type": "float"
},
"language": {
"type": "keyword"
},
"topic_diagnosis": {
"type": "boolean"
},
"topic_epidemiology": {
"type": "boolean"
},
"topic_prevention": {
"type": "boolean"
},
"topic_transmission": {
"type": "boolean"
},
"topic_treatment": {
"type": "boolean"
},
"topic_vaccine": {
"type": "boolean"
},
"topics": {
"type": "keyword"
},
"scibert_summary_short": {
"type": "text"
},
"scibert_summary_short_cleaned": {
"type": "text"
},
"top_keywords": {
"type": "keyword"
},
"is_peer_reviewed": {
"type": "boolean"
},
"summary_length": {
"type": "long"
},
"is_clinical_paper": {
"type": "boolean"
},
}
ES_INDEX = "cord19-docs"
ES_URL = "https://search-es-covid-research-n6etnstkyvx6k2oxrlavq66sia.us-east-2.es.amazonaws.com/"
TREATMENT_ES_COL_TO_TYPE = {
"clinical_trial_id": {
"type": "keyword"
},
"date_last_updated": {
"type": "date"
},
"developer": {
"type": "keyword"
},
"fda_approval": {
"type": "text"
},
"funder": {
"type": "keyword"
},
"has_emerg_use_auth": {
"type": "keyword"
},
"name": {
"type": "keyword"
},
"next_steps": {
"type": "text"
},
"phase": {
"type": "keyword",
},
"product_category": {
"type": "keyword",
},
"product_description": {
"type": "text"
},
"published_results": {
"type": "text"
},
"stage": {
"type": "keyword"
},
"aliases": {
"type": "keyword"
},
"num_paper_mentions": {
"type": "long"
},
}
TREATMENT_ES_INDEX = "cord19-treatments" | true | true |
f73471baedfbfff986fac762f9e29473bd174fd7 | 1,381 | py | Python | DeckOfCards.py | crippe-90/DeckOfCards | fb4ec62f6f49ce70fdfa9fda705b42c4884028d4 | [
"MIT"
] | null | null | null | DeckOfCards.py | crippe-90/DeckOfCards | fb4ec62f6f49ce70fdfa9fda705b42c4884028d4 | [
"MIT"
] | null | null | null | DeckOfCards.py | crippe-90/DeckOfCards | fb4ec62f6f49ce70fdfa9fda705b42c4884028d4 | [
"MIT"
] | null | null | null | #author: Christoffer Norell
#contact: christoffernorell@yahoo.se
#This is a simple simulator of a deck of cards I made for fun.
#The values in the dictionaries are there for better comparison during games.
import random
#Using dictionaries to represent values.
#The color-values was taken from bridge-order:
#http://pokerterms.com/bridge-order.html
# Suits ranked per the bridge order noted above; each entry is a
# single-key dict mapping suit name -> rank used for comparisons.
colors = [{suit: rank} for rank, suit in enumerate(
    ['Hearts', 'Diamonds', 'Clubs', 'Spades'])]
# Face values Two..Ace mapped to 2..14 so cards compare numerically.
values = [{face: rank} for rank, face in enumerate(
    ['Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine',
     'Ten', 'Jack', 'Queen', 'King', 'Ace'], start=2)]
class Card:
    """A single playing card holding its face value and suit (color)."""

    def __init__(self, value, color):
        self.value = value
        self.color = color

    def show(self):
        """Return the card as a (color, value) tuple."""
        return (self.color, self.value)
class Deck():
    """A 52-card deck built from the module-level ``colors`` and ``values``.

    Fix: ``Card`` takes ``(value, color)`` but the deck was constructed
    with ``Card(colors[x], values[y])``, leaving every card with its suit
    stored in ``.value`` and its value in ``.color``. Arguments are now
    passed in the declared order.
    """

    def __init__(self):
        self.deck = []
        for x in range(len(colors)):
            for y in range(len(values)):
                # pass value first, suit second, matching Card.__init__
                self.deck.append(Card(values[y], colors[x]))

    def shuffle(self):
        """Shuffle the remaining cards in place."""
        random.shuffle(self.deck)

    def hand_card(self):
        """Deal (remove and return) one card from the end of the deck."""
        card = self.deck.pop()
        return card

    def hand_cards(self, amount):
        """Deal *amount* cards, or return None when not enough remain."""
        tmp = []
        if amount <= len(self.deck):
            for x in range(amount):
                tmp.append(self.hand_card())
            return tmp
        else:
            print("out of cards")
            return None
| 27.078431 | 160 | 0.572049 |
import random
colors = [{'Hearts': 0 },{'Diamonds': 1},{'Clubs': 2},{'Spades':3}]
values = [{'Two':2},{'Three': 3},{'Four':4},{'Five':5},{'Six': 6},{'Seven': 7}, {'Eight': 8}, {'Nine': 9} , {'Ten': 10},{'Jack': 11} , {'Queen':12}, {'King':13}
, {'Ace':14}]
class Card():
def __init__(self,value,color):
self.color = color
self.value = value
def show(self):
return (self.color, self.value)
class Deck():
def __init__(self):
self.deck = []
for x in range(len(colors)):
for y in range(len(values)):
self.deck.append(Card(colors[x],values[y]))
def shuffle(self):
random.shuffle(self.deck)
def hand_card(self):
card = self.deck.pop()
return card
def hand_cards(self, amount):
tmp = []
if amount <= len(self.deck):
for x in range(amount):
tmp.append(self.hand_card())
return tmp
else:
print("out of cards")
return None
| true | true |
f73471efadcfb128c68e6e8891fc124ff271a658 | 41,230 | py | Python | python/ccxt/idex.py | z-brain/ccxt | dde32cfb5e0e2e2889ead60687d6fd0fdf5e3f02 | [
"MIT"
] | 4 | 2021-01-10T09:14:17.000Z | 2022-02-15T19:09:52.000Z | python/ccxt/idex.py | z-brain/ccxt | dde32cfb5e0e2e2889ead60687d6fd0fdf5e3f02 | [
"MIT"
] | 1 | 2020-05-08T09:19:46.000Z | 2020-09-12T14:55:58.000Z | python/ccxt/idex.py | z-brain/ccxt | dde32cfb5e0e2e2889ead60687d6fd0fdf5e3f02 | [
"MIT"
] | 4 | 2021-06-02T16:40:35.000Z | 2022-03-14T04:50:31.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
class idex(Exchange):
    def describe(self):
        """Return the exchange metadata (capabilities, URLs, endpoints, fees).

        IDEX is a decentralized exchange: authentication uses an Ethereum
        wallet address plus private key instead of an apiKey/secret pair
        (see 'requiredCredentials' below).
        """
        return self.deep_extend(super(idex, self).describe(), {
            'id': 'idex',
            'name': 'IDEX',
            'countries': ['US'],
            'rateLimit': 1500,
            'certified': True,
            'requiresWeb3': True,
            'has': {
                'fetchOrderBook': True,
                'fetchTicker': True,
                'fetchTickers': True,
                'fetchMarkets': True,
                'fetchBalance': True,
                'createOrder': True,
                'cancelOrder': True,
                'fetchOpenOrders': True,
                'fetchTransactions': True,
                'fetchTrades': True,
                'fetchMyTrades': True,
                'withdraw': True,
                'fetchOHLCV': False,
            },
            'timeframes': {
                '1m': 'M1',
                '3m': 'M3',
                '5m': 'M5',
                '15m': 'M15',
                '30m': 'M30',  # default
                '1h': 'H1',
                '4h': 'H4',
                '1d': 'D1',
                '1w': 'D7',
                '1M': '1M',
            },
            'urls': {
                'test': 'https://api.idex.market',
                'logo': 'https://user-images.githubusercontent.com/1294454/63693236-3415e380-c81c-11e9-8600-ba1634f1407d.jpg',
                'api': 'https://api.idex.market',
                'www': 'https://idex.market',
                'doc': [
                    'https://docs.idex.market/',
                ],
            },
            'api': {
                'public': {
                    'post': [
                        'returnTicker',
                        'returnCurrenciesWithPairs',  # undocumented
                        'returnCurrencies',
                        'return24Volume',
                        'returnBalances',
                        'returnCompleteBalances',  # shows amount in orders as well as total
                        'returnDepositsWithdrawals',
                        'returnOpenOrders',
                        'returnOrderBook',
                        'returnOrderStatus',
                        'returnOrderTrades',
                        'returnTradeHistory',
                        'returnTradeHistoryMeta',  # not documented
                        'returnContractAddress',
                        'returnNextNonce',
                    ],
                },
                'private': {
                    'post': [
                        'order',
                        'cancel',
                        'trade',
                        'withdraw',
                    ],
                },
            },
            'options': {
                # resolved lazily via get_contract_address(), e.g.
                # 0x2a0c0DBEcC7E4D658f48E01e3fA353F44050c208
                'contractAddress': None,
                # local nonce counter; when None the next nonce is fetched
                # from the exchange (see get_nonce)
                'orderNonce': None,
            },
            'exceptions': {
                'Invalid order signature. Please try again.': AuthenticationError,
                'You have insufficient funds to match self order. If you believe self is a mistake please refresh and try again.': InsufficientFunds,
                'Order no longer available.': InvalidOrder,
            },
            'requiredCredentials': {
                'walletAddress': True,
                'privateKey': True,
                'apiKey': False,
                'secret': False,
            },
            'commonCurrencies': {
                'FT': 'Fabric Token',
                'MT': 'Monarch',
                'ONE': 'Menlo One',
                'PLA': 'PlayChip',
                'WAX': 'WAXP',
            },
        })
def fetch_markets(self, params={}):
# idex does not have an endpoint for markets
# instead we generate the markets from the endpoint for currencies
request = {
'includeDelisted': True,
}
markets = self.publicPostReturnCurrenciesWithPairs(self.extend(request, params))
currenciesById = {}
currencies = markets['tokens']
for i in range(0, len(currencies)):
currency = currencies[i]
currenciesById[currency['symbol']] = currency
result = []
limits = {
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}
quotes = markets['pairs']
keys = list(quotes.keys())
for i in range(0, len(keys)):
quoteId = keys[i]
bases = quotes[quoteId]
quote = self.safe_currency_code(quoteId)
quoteCurrency = currenciesById[quoteId]
for j in range(0, len(bases)):
baseId = bases[j]
id = quoteId + '_' + baseId
base = self.safe_currency_code(baseId)
symbol = base + '/' + quote
baseCurrency = currenciesById[baseId]
baseAddress = baseCurrency['address']
quoteAddress = quoteCurrency['address']
precision = {
'price': self.safe_integer(quoteCurrency, 'decimals'),
'amount': self.safe_integer(baseCurrency, 'decimals'),
}
result.append({
'symbol': symbol,
'precision': precision,
'base': base,
'quote': quote,
'baseId': baseAddress,
'quoteId': quoteAddress,
'limits': limits,
'id': id,
'info': baseCurrency,
'tierBased': False,
})
return result
    def parse_ticker(self, ticker, market=None):
        """Convert a raw IDEX ticker into the unified ccxt ticker structure.

        Raw format (sample):
            {"last": "0.0016550916", "high": "N/A", "low": "N/A",
             "lowestAsk": "0.0016743368", "highestBid": "0.00116372...",
             "percentChange": "0", "baseVolume": "0", "quoteVolume": "0"}

        'high'/'low' may be the literal string "N/A".
        """
        symbol = None
        if market:
            symbol = market['symbol']
        baseVolume = self.safe_float(ticker, 'baseVolume')
        quoteVolume = self.safe_float(ticker, 'quoteVolume')
        last = self.safe_float(ticker, 'last')
        percentage = self.safe_float(ticker, 'percentChange')
        return {
            'symbol': symbol,
            'timestamp': None,  # the endpoint carries no timestamp
            'datetime': None,
            'high': self.safe_float(ticker, 'high'),
            'low': self.safe_float(ticker, 'low'),
            'bid': self.safe_float(ticker, 'highestBid'),
            'bidVolume': None,
            'ask': self.safe_float(ticker, 'lowestAsk'),
            'askVolume': None,
            'vwap': None,
            'open': None,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': None,
            'percentage': percentage,
            'average': None,
            # NOTE(review): base/quote volumes are intentionally(?) swapped
            # below -- the exchange appears to report them inverted relative
            # to the ccxt convention; confirm against the IDEX API docs
            'baseVolume': quoteVolume,
            'quoteVolume': baseVolume,
            'info': ticker,
        }
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicPostReturnTicker(params)
# {ETH_BOUNCY:
# {last: '0.000000004000088005',
# high: 'N/A',
# low: 'N/A',
# lowestAsk: '0.00000000599885995',
# highestBid: '0.000000001400500103',
# percentChange: '0',
# baseVolume: '0',
# quoteVolume: '0'},
# ETH_NBAI:
# {last: '0.0000032',
# high: 'N/A',
# low: 'N/A',
# lowestAsk: '0.000004000199999502',
# highestBid: '0.0000016002',
# percentChange: '0',
# baseVolume: '0',
# quoteVolume: '0'},}
ids = list(response.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
symbol = None
market = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
else:
quoteId, baseId = id.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
market = {'symbol': symbol}
ticker = response[id]
result[symbol] = self.parse_ticker(ticker, market)
return self.filter_by_array(result, 'symbol', symbols)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = self.publicPostReturnTicker(self.extend(request, params))
# {last: '0.0016550916',
# high: 'N/A',
# low: 'N/A',
# lowestAsk: '0.0016743368',
# highestBid: '0.001163726270773897',
# percentChange: '0',
# baseVolume: '0',
# quoteVolume: '0'}
return self.parse_ticker(response, market)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
id = market['quote'] + '_' + market['base']
request = {
'market': id,
'count': 100, # the default will only return one trade
}
if limit is not None:
request['count'] = limit
response = self.publicPostReturnOrderBook(self.extend(request, params))
#
# {
# "asks": [
# {
# "price": "0.001675282799999",
# "amount": "206.163978911921061732",
# "total": "0.345382967850497906",
# "orderHash": "0xfdf12c124a6a7fa4a8e1866b324da888c8e1b3ad209f5050d3a23df3397a5cb7",
# "params": {
# "tokenBuy": "0x0000000000000000000000000000000000000000",
# "buySymbol": "ETH",
# "buyPrecision": 18,
# "amountBuy": "345382967850497906",
# "tokenSell": "0xb98d4c97425d9908e66e53a6fdf673acca0be986",
# "sellSymbol": "ABT",
# "sellPrecision": 18,
# "amountSell": "206163978911921061732",
# "expires": 10000,
# "nonce": 13489307413,
# "user": "0x9e8ef79316a4a79bbf55a5f9c16b3e068fff65c6"
# }
# }
# ],
# "bids": [
# {
# "price": "0.001161865193232242",
# "amount": "854.393661648355",
# "total": "0.992690256787469029",
# "orderHash": "0x2f2baaf982085e4096f9e23e376214885fa74b2939497968e92222716fc2c86d",
# "params": {
# "tokenBuy": "0xb98d4c97425d9908e66e53a6fdf673acca0be986",
# "buySymbol": "ABT",
# "buyPrecision": 18,
# "amountBuy": "854393661648355000000",
# "tokenSell": "0x0000000000000000000000000000000000000000",
# "sellSymbol": "ETH",
# "sellPrecision": 18,
# "amountSell": "992690256787469029",
# "expires": 10000,
# "nonce": 18155189676,
# "user": "0xb631284dd7b74a846af5b37766ceb1f85d53eca4"
# }
# }
# ]
# }
#
return self.parse_order_book(response, None, 'bids', 'asks', 'price', 'amount')
def parse_bid_ask(self, bidAsk, priceKey=0, amountKey=1):
price = self.safe_float(bidAsk, priceKey)
amount = self.safe_float(bidAsk, amountKey)
info = bidAsk
return [price, amount, info]
def fetch_balance(self, params={}):
request = {
'address': self.walletAddress,
}
response = self.publicPostReturnCompleteBalances(self.extend(request, params))
#
# {
# ETH: {available: '0.0167', onOrders: '0.1533'}
# }
#
result = {
'info': response,
}
keys = list(response.keys())
for i in range(0, len(keys)):
currency = keys[i]
balance = response[currency]
code = self.safe_currency_code(currency)
result[code] = {
'free': self.safe_float(balance, 'available'),
'used': self.safe_float(balance, 'onOrders'),
}
return self.parse_balance(result)
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Create a limit order (hashed and signed locally with the wallet's
        private key) or fill an existing signed order as a "market" order.

        Market orders do not go through a matching engine: the caller must
        pass the counterparty's signed order structure in *params* (taken
        from fetchOrderBook()['bids'][0][2], fetchOrder()['info'], or
        fetchOpenOrders()[0]['info']), and this method signs a trade
        against that exact order.
        """
        self.check_required_dependencies()
        self.load_markets()
        market = self.market(symbol)
        if type == 'limit':
            expires = 100000
            contractAddress = self.get_contract_address()
            tokenBuy = None
            tokenSell = None
            amountBuy = None
            amountSell = None
            quoteAmount = float(price) * float(amount)
            if side == 'buy':
                tokenBuy = market['baseId']
                tokenSell = market['quoteId']
                amountBuy = self.to_wei(amount, market['precision']['amount'])
                # quote amounts converted with 18 decimals -- assumes an
                # 18-decimal (ETH-like) quote token; TODO confirm
                amountSell = self.to_wei(quoteAmount, 18)
            else:
                tokenBuy = market['quoteId']
                tokenSell = market['baseId']
                amountBuy = self.to_wei(quoteAmount, 18)
                amountSell = self.to_wei(amount, market['precision']['amount'])
            nonce = self.get_nonce()
            orderToHash = {
                'contractAddress': contractAddress,
                'tokenBuy': tokenBuy,
                'amountBuy': amountBuy,
                'tokenSell': tokenSell,
                'amountSell': amountSell,
                'expires': expires,
                'nonce': nonce,
                'address': self.walletAddress,
            }
            orderHash = self.get_idex_create_order_hash(orderToHash)
            signature = self.sign_message(orderHash, self.privateKey)
            request = {
                'tokenBuy': tokenBuy,
                'amountBuy': amountBuy,
                'tokenSell': tokenSell,
                'amountSell': amountSell,
                'address': self.walletAddress,
                'nonce': nonce,
                'expires': expires,
            }
            response = self.privatePostOrder(self.extend(request, signature))  # self.extend(request, params) will cause invalid signature
            # response example:
            # {"orderNumber": 1562323021, "orderHash": "0x31c4...beb4",
            #  "timestamp": 1564041428, "price": "0.00073", "amount": "210",
            #  "total": "0.1533", "type": "buy",
            #  "params": {"tokenBuy": ..., "amountBuy": ..., "tokenSell": ...,
            #             "amountSell": ..., "expires": 100000, "nonce": 1,
            #             "user": "0x0ab9..."}}
            return self.parse_order(response, market)
        elif type == 'market':
            if not ('orderHash' in params):
                raise ArgumentsRequired(self.id + ' market order requires an order structure such as that in fetchOrderBook()[\'bids\'][0][2], fetchOrder()[\'info\'], or fetchOpenOrders()[0][\'info\']')
            # the caller-supplied structure is a signed counterparty order:
            # {"price": ..., "amount": ..., "total": ...,
            #  "orderHash": "0x5fb3...4749",
            #  "params": {"tokenBuy": ..., "buySymbol": ..., "amountBuy": ...,
            #             "tokenSell": ..., "sellSymbol": ..., "amountSell": ...,
            #             "expires": ..., "nonce": ..., "user": ...}}
            orderToSign = {
                'orderHash': params['orderHash'],
                'amount': params['params']['amountBuy'],
                'address': params['params']['user'],
                'nonce': params['params']['nonce'],
            }
            orderHash = self.get_idex_market_order_hash(orderToSign)
            signature = self.sign_message(orderHash, self.privateKey)
            signedOrder = self.extend(orderToSign, signature)
            # replace the counterparty fields with our own wallet and nonce
            signedOrder['address'] = self.walletAddress
            signedOrder['nonce'] = self.get_nonce()
            # response: a list of resulting trades, e.g.
            # [{"amount": "0.07", "date": "2017-10-13 16:25:36",
            #   "total": "0.49", "market": "ETH_DVIP", "type": "buy",
            #   "price": "7", "orderHash": "0xcfe4...dc52",
            #   "uuid": "250d51a0-b033-11e7-9984-a9ab79bb8f35"}]
            response = self.privatePostTrade(signedOrder)
            return self.parse_orders(response, market)
def get_nonce(self):
if self.options['orderNonce'] is None:
response = self.publicPostReturnNextNonce({
'address': self.walletAddress,
})
return self.safe_integer(response, 'nonce')
else:
result = self.options['orderNonce']
self.options['orderNonce'] = self.sum(self.options['orderNonce'], 1)
return result
def get_contract_address(self):
if self.options['contractAddress'] is not None:
return self.options['contractAddress']
response = self.publicPostReturnContractAddress()
self.options['contractAddress'] = self.safe_string(response, 'address')
return self.options['contractAddress']
def cancel_order(self, orderId, symbol=None, params={}):
nonce = self.get_nonce()
orderToHash = {
'orderHash': orderId,
'nonce': nonce,
}
orderHash = self.get_idex_cancel_order_hash(orderToHash)
signature = self.sign_message(orderHash, self.privateKey)
request = {
'orderHash': orderId,
'address': self.walletAddress,
'nonce': nonce,
}
response = self.privatePostCancel(self.extend(request, signature))
# {success: 1}
if 'success' in response:
return {
'info': response,
}
else:
raise ExchangeError(self.id + ' cancel order failed ' + self.json(response))
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'address': self.walletAddress,
}
if since is not None:
request['start'] = int(int(math.floor(since / 1000)))
response = self.publicPostReturnDepositsWithdrawals(self.extend(request, params))
# {deposits:
# [{currency: 'ETH',
# amount: '0.05',
# timestamp: 1563953513,
# transactionHash:
# '0xd6eefd81c7efc9beeb35b924d6db3c93a78bf7eac082ba87e107ad4e94bccdcf',
# depositNumber: 1586430},
# {currency: 'ETH',
# amount: '0.12',
# timestamp: 1564040359,
# transactionHash:
# '0x2ecbb3ab72b6f79fc7a9058c39dce28f913152748c1507d13ab1759e965da3ca',
# depositNumber: 1587341}],
# withdrawals:
# [{currency: 'ETH',
# amount: '0.149',
# timestamp: 1564060001,
# transactionHash:
# '0xab555fc301779dd92fd41ccd143b1d72776ae7b5acfc59ca44a1d376f68fda15',
# withdrawalNumber: 1444070,
# status: 'COMPLETE'}]}
deposits = self.parse_transactions(response['deposits'], currency, since, limit)
withdrawals = self.parse_transactions(response['withdrawals'], currency, since, limit)
return self.array_concat(deposits, withdrawals)
def parse_transaction(self, item, currency=None):
# {currency: 'ETH',
# amount: '0.05',
# timestamp: 1563953513,
# transactionHash:
# '0xd6eefd81c7efc9beeb35b924d6db3c93a78bf7eac082ba87e107ad4e94bccdcf',
# depositNumber: 1586430}
amount = self.safe_float(item, 'amount')
timestamp = self.safe_timestamp(item, 'timestamp')
txhash = self.safe_string(item, 'transactionHash')
id = None
type = None
status = None
addressFrom = None
addressTo = None
if 'depositNumber' in item:
id = self.safe_string(item, 'depositNumber')
type = 'deposit'
addressFrom = self.walletAddress
addressTo = self.options['contractAddress']
elif 'withdrawalNumber' in item:
id = self.safe_string(item, 'withdrawalNumber')
type = 'withdrawal'
status = self.parse_transaction_status(self.safe_string(item, 'status'))
addressFrom = self.options['contractAddress']
addressTo = self.walletAddress
code = self.safe_currency_code(self.safe_string(item, 'currency'))
return {
'info': item,
'id': id,
'txid': txhash,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'currency': code,
'amount': amount,
'status': status,
'type': type,
'updated': None,
'comment': None,
'addressFrom': addressFrom,
'tagFrom': None,
'addressTo': addressTo,
'tagTo': None,
'fee': {
'currency': code,
'cost': None,
'rate': None,
},
}
def parse_transaction_status(self, status):
statuses = {
'COMPLETE': 'ok',
}
return self.safe_string(statuses, status)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
if self.walletAddress is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders requires a walletAddress')
self.load_markets()
request = {
'address': self.walletAddress,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = self.publicPostReturnOpenOrders(self.extend(request, params))
# [{timestamp: 1564041428,
# orderHash:
# '0x31c42154a8421425a18d076df400d9ec1ef64d5251285384a71ba3c0ab31beb4',
# orderNumber: 1562323021,
# market: 'ETH_LIT',
# type: 'buy',
# params:
# {tokenBuy: '0x763fa6806e1acf68130d2d0f0df754c93cc546b2',
# buySymbol: 'LIT',
# buyPrecision: 18,
# amountBuy: '210000000000000000000',
# tokenSell: '0x0000000000000000000000000000000000000000',
# sellSymbol: 'ETH',
# sellPrecision: 18,
# amountSell: '153300000000000000',
# expires: 100000,
# nonce: 1,
# user: '0x0ab991497116f7f5532a4c2f4f7b1784488628e1'},
# price: '0.00073',
# amount: '210',
# status: 'open',
# total: '0.1533'}]
return self.parse_orders(response, market, since, limit)
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'orderHash': id,
}
response = self.publicPostReturnOrderStatus(self.extend(request, params))
# {filled: '0',
# initialAmount: '210',
# timestamp: 1564041428,
# orderHash:
# '0x31c42154a8421425a18d076df400d9ec1ef64d5251285384a71ba3c0ab31beb4',
# orderNumber: 1562323021,
# market: 'ETH_LIT',
# type: 'buy',
# params:
# {tokenBuy: '0x763fa6806e1acf68130d2d0f0df754c93cc546b2',
# buySymbol: 'LIT',
# buyPrecision: 18,
# amountBuy: '210000000000000000000',
# tokenSell: '0x0000000000000000000000000000000000000000',
# sellSymbol: 'ETH',
# sellPrecision: 18,
# amountSell: '153300000000000000',
# expires: 100000,
# nonce: 1,
# user: '0x0ab991497116f7f5532a4c2f4f7b1784488628e1'},
# price: '0.00073',
# amount: '210',
# status: 'open',
# total: '0.1533'}
return self.parse_order(response, market)
def parse_order(self, order, market=None):
# {filled: '0',
# initialAmount: '210',
# timestamp: 1564041428,
# orderHash:
# '0x31c42154a8421425a18d076df400d9ec1ef64d5251285384a71ba3c0ab31beb4',
# orderNumber: 1562323021,
# market: 'ETH_LIT',
# type: 'buy',
# params:
# {tokenBuy: '0x763fa6806e1acf68130d2d0f0df754c93cc546b2',
# buySymbol: 'LIT',
# buyPrecision: 18,
# amountBuy: '210000000000000000000',
# tokenSell: '0x0000000000000000000000000000000000000000',
# sellSymbol: 'ETH',
# sellPrecision: 18,
# amountSell: '153300000000000000',
# expires: 100000,
# nonce: 1,
# user: '0x0ab991497116f7f5532a4c2f4f7b1784488628e1'},
# price: '0.00073',
# amount: '210',
# status: 'open',
# total: '0.1533'}
timestamp = self.safe_timestamp(order, 'timestamp')
side = self.safe_string(order, 'type')
symbol = None
amount = None
remaining = None
if 'initialAmount' in order:
amount = self.safe_float(order, 'initialAmount')
remaining = self.safe_float(order, 'amount')
else:
amount = self.safe_float(order, 'amount')
filled = self.safe_float(order, 'filled')
price = self.safe_float(order, 'price')
cost = self.safe_float(order, 'total')
if (cost is not None) and (filled is not None) and not cost:
cost = filled * price
if 'market' in order:
marketId = order['market']
symbol = self.markets_by_id[marketId]['symbol']
elif (side is not None) and ('params' in order):
params = order['params']
buy = self.safe_currency_code(self.safe_string(params, 'tokenBuy'))
sell = self.safe_currency_code(self.safe_string(params, 'tokenSell'))
if buy is not None and sell is not None:
symbol = (buy + '/' + sell) if (side == 'buy') else (sell + '/' + buy)
if symbol is None and market is not None:
symbol = market['symbol']
id = self.safe_string(order, 'orderHash')
status = self.parse_order_status(self.safe_string(order, 'status'))
return {
'info': order,
'id': id,
'clientOrderId': None,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'side': side,
'amount': amount,
'price': price,
'type': 'limit',
'filled': filled,
'remaining': remaining,
'cost': cost,
'status': status,
'lastTradeTimestamp': None,
'average': None,
'trades': None,
'fee': None,
}
def parse_order_status(self, status):
statuses = {
'open': 'open',
}
return self.safe_string(statuses, status, status)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if self.walletAddress is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders requires a walletAddress')
self.load_markets()
request = {
'address': self.walletAddress,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
if limit is not None:
request['start'] = int(int(math.floor(limit)))
response = self.publicPostReturnTradeHistory(self.extend(request, params))
# {ETH_IDEX:
# [{type: 'buy',
# date: '2019-07-25 11:24:41',
# amount: '347.833140025692348611',
# total: '0.050998794333719943',
# uuid: 'cbdff960-aece-11e9-b566-c5d69c3be671',
# tid: 4320867,
# timestamp: 1564053881,
# price: '0.000146618560640751',
# taker: '0x0ab991497116f7f5532a4c2f4f7b1784488628e1',
# maker: '0x1a961bc2e0d619d101f5f92a6be752132d7606e6',
# orderHash:
# '0xbec6485613a15be619c04c1425e8e821ebae42b88fa95ac4dfe8ba2beb363ee4',
# transactionHash:
# '0xf094e07b329ac8046e8f34db358415863c41daa36765c05516f4cf4f5b403ad1',
# tokenBuy: '0x0000000000000000000000000000000000000000',
# buyerFee: '0.695666280051384697',
# gasFee: '28.986780264563232993',
# sellerFee: '0.00005099879433372',
# tokenSell: '0xb705268213d593b8fd88d3fdeff93aff5cbdcfae',
# usdValue: '11.336926687304238214'}]}
#
# if a symbol is specified in the request:
#
# [{type: 'buy',
# date: '2019-07-25 11:24:41',
# amount: '347.833140025692348611',
# total: '0.050998794333719943',
# uuid: 'cbdff960-aece-11e9-b566-c5d69c3be671',
# tid: 4320867,
# timestamp: 1564053881,
# price: '0.000146618560640751',
# taker: '0x0ab991497116f7f5532a4c2f4f7b1784488628e1',
# maker: '0x1a961bc2e0d619d101f5f92a6be752132d7606e6',
# orderHash:
# '0xbec6485613a15be619c04c1425e8e821ebae42b88fa95ac4dfe8ba2beb363ee4',
# transactionHash:
# '0xf094e07b329ac8046e8f34db358415863c41daa36765c05516f4cf4f5b403ad1',
# tokenBuy: '0x0000000000000000000000000000000000000000',
# buyerFee: '0.695666280051384697',
# gasFee: '28.986780264563232993',
# sellerFee: '0.00005099879433372',
# tokenSell: '0xb705268213d593b8fd88d3fdeff93aff5cbdcfae',
# usdValue: '11.336926687304238214'}]
if isinstance(response, list):
return self.parse_trades(response, market, since, limit)
else:
result = []
marketIds = list(response.keys())
for i in range(0, len(marketIds)):
marketId = marketIds[i]
trades = response[marketId]
parsed = self.parse_trades(trades, market, since, limit)
result = self.array_concat(result, parsed)
return result
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
if limit is not None:
request['start'] = int(int(math.floor(limit)))
response = self.publicPostReturnTradeHistory(self.extend(request, params))
# [{type: 'buy',
# date: '2019-07-25 11:24:41',
# amount: '347.833140025692348611',
# total: '0.050998794333719943',
# uuid: 'cbdff960-aece-11e9-b566-c5d69c3be671',
# tid: 4320867,
# timestamp: 1564053881,
# price: '0.000146618560640751',
# taker: '0x0ab991497116f7f5532a4c2f4f7b1784488628e1',
# maker: '0x1a961bc2e0d619d101f5f92a6be752132d7606e6',
# orderHash:
# '0xbec6485613a15be619c04c1425e8e821ebae42b88fa95ac4dfe8ba2beb363ee4',
# transactionHash:
# '0xf094e07b329ac8046e8f34db358415863c41daa36765c05516f4cf4f5b403ad1',
# tokenBuy: '0x0000000000000000000000000000000000000000',
# buyerFee: '0.695666280051384697',
# gasFee: '28.986780264563232993',
# sellerFee: '0.00005099879433372',
# tokenSell: '0xb705268213d593b8fd88d3fdeff93aff5cbdcfae',
# usdValue: '11.336926687304238214'}]
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
# {type: 'buy',
# date: '2019-07-25 11:24:41',
# amount: '347.833140025692348611',
# total: '0.050998794333719943',
# uuid: 'cbdff960-aece-11e9-b566-c5d69c3be671',
# tid: 4320867,
# timestamp: 1564053881,
# price: '0.000146618560640751',
# taker: '0x0ab991497116f7f5532a4c2f4f7b1784488628e1',
# maker: '0x1a961bc2e0d619d101f5f92a6be752132d7606e6',
# orderHash:
# '0xbec6485613a15be619c04c1425e8e821ebae42b88fa95ac4dfe8ba2beb363ee4',
# transactionHash:
# '0xf094e07b329ac8046e8f34db358415863c41daa36765c05516f4cf4f5b403ad1',
# tokenBuy: '0x0000000000000000000000000000000000000000',
# buyerFee: '0.695666280051384697',
# gasFee: '28.986780264563232993',
# sellerFee: '0.00005099879433372',
# tokenSell: '0xb705268213d593b8fd88d3fdeff93aff5cbdcfae',
# usdValue: '11.336926687304238214'}
side = self.safe_string(trade, 'type')
feeCurrency = None
symbol = None
maker = self.safe_string(trade, 'maker')
takerOrMaker = None
if maker is not None and self.walletAddress is not None:
if maker.lower() == self.walletAddress.lower():
takerOrMaker = 'maker'
else:
takerOrMaker = 'taker'
buy = self.safe_currency_code(self.safe_string(trade, 'tokenBuy'))
sell = self.safe_currency_code(self.safe_string(trade, 'tokenSell'))
# get ready to be mind-boggled
feeSide = None
if buy is not None and sell is not None:
if side == 'buy':
feeSide = 'buyerFee'
if takerOrMaker == 'maker':
symbol = buy + '/' + sell
feeCurrency = buy
else:
symbol = sell + '/' + buy
feeCurrency = sell
else:
feeSide = 'sellerFee'
if takerOrMaker == 'maker':
symbol = sell + '/' + buy
feeCurrency = buy
else:
symbol = buy + '/' + sell
feeCurrency = sell
if symbol is None and market is not None:
symbol = market['symbol']
timestamp = self.safe_timestamp(trade, 'timestamp')
id = self.safe_string(trade, 'tid')
amount = self.safe_float(trade, 'amount')
price = self.safe_float(trade, 'price')
cost = self.safe_float(trade, 'total')
feeCost = self.safe_float(trade, feeSide)
if feeCost < 0:
gasFee = self.safe_float(trade, 'gasFee')
feeCost = self.sum(gasFee, feeCost)
fee = {
'currency': feeCurrency,
'cost': feeCost,
}
if feeCost is not None and amount is not None:
feeCurrencyAmount = cost if (feeCurrency == 'ETH') else amount
fee['rate'] = feeCost / feeCurrencyAmount
orderId = self.safe_string(trade, 'orderHash')
return {
'info': trade,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
'price': price,
'amount': amount,
'cost': cost,
'takerOrMaker': takerOrMaker,
'side': side,
'order': orderId,
'symbol': symbol,
'type': 'limit',
}
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_required_dependencies()
self.check_address(address)
self.load_markets()
currency = self.currency(code)
tokenAddress = currency['id']
nonce = self.get_nonce()
amount = self.to_wei(amount, currency['precision'])
requestToHash = {
'contractAddress': self.get_contract_address(),
'token': tokenAddress,
'amount': amount,
'address': address,
'nonce': nonce,
}
hash = self.get_idex_withdraw_hash(requestToHash)
signature = self.sign_message(hash, self.privateKey)
request = {
'address': address,
'amount': amount,
'token': tokenAddress,
'nonce': nonce,
}
response = self.privatePostWithdraw(self.extend(request, signature))
# {amount: '0'}
return {
'info': response,
'id': None,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
body = self.json(params) # all methods are POST
url = self.urls['api'] + '/' + path
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
}
if api == 'private':
self.check_required_credentials()
headers['API-Key'] = self.apiKey
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def get_idex_create_order_hash(self, order):
return self.solidity_sha3([
order['contractAddress'], # address
order['tokenBuy'], # address
order['amountBuy'], # uint256
order['tokenSell'], # address
order['amountSell'], # uint256
order['expires'], # uint256
order['nonce'], # uint256
order['address'], # address
])
def get_idex_cancel_order_hash(self, order):
return self.solidity_sha3([
order['orderHash'], # address
order['nonce'], # uint256
])
def get_idex_market_order_hash(self, order):
return self.solidity_sha3([
order['orderHash'], # address
order['amount'], # uint256
order['address'], # address
order['nonce'], # uint256
])
def get_idex_withdraw_hash(self, request):
return self.solidity_sha3([
request['contractAddress'], # address
request['token'], # uint256
request['amount'], # uint256
request['address'], # address
request['nonce'], # uint256
])
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
if 'error' in response:
if response['error'] in self.exceptions:
raise self.exceptions[response['error']](self.id + ' ' + response['error'])
raise ExchangeError(self.id + ' ' + body)
| 40.263672 | 202 | 0.512976 |
ge import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
class idex(Exchange):
    def describe(self):
        """Return the exchange metadata: id, capabilities, URLs, API endpoints,
        required credentials and exchange-specific options, merged over the
        base Exchange description."""
        return self.deep_extend(super(idex, self).describe(), {
            'id': 'idex',
            'name': 'IDEX',
            'countries': ['US'],
            'rateLimit': 1500,
            'certified': True,
            'requiresWeb3': True,  # orders/withdrawals are signed locally with web3
            'has': {
                'fetchOrderBook': True,
                'fetchTicker': True,
                'fetchTickers': True,
                'fetchMarkets': True,
                'fetchBalance': True,
                'createOrder': True,
                'cancelOrder': True,
                'fetchOpenOrders': True,
                'fetchTransactions': True,
                'fetchTrades': True,
                'fetchMyTrades': True,
                'withdraw': True,
                'fetchOHLCV': False,
            },
            'timeframes': {
                '1m': 'M1',
                '3m': 'M3',
                '5m': 'M5',
                '15m': 'M15',
                '30m': 'M30',
                '1h': 'H1',
                '4h': 'H4',
                '1d': 'D1',
                '1w': 'D7',
                '1M': '1M',
            },
            'urls': {
                'test': 'https://api.idex.market',
                'logo': 'https://user-images.githubusercontent.com/1294454/63693236-3415e380-c81c-11e9-8600-ba1634f1407d.jpg',
                'api': 'https://api.idex.market',
                'www': 'https://idex.market',
                'doc': [
                    'https://docs.idex.market/',
                ],
            },
            # every endpoint is an HTTP POST(see self.sign)
            'api': {
                'public': {
                    'post': [
                        'returnTicker',
                        'returnCurrenciesWithPairs',
                        'returnCurrencies',
                        'return24Volume',
                        'returnBalances',
                        'returnCompleteBalances',
                        'returnDepositsWithdrawals',
                        'returnOpenOrders',
                        'returnOrderBook',
                        'returnOrderStatus',
                        'returnOrderTrades',
                        'returnTradeHistory',
                        'returnTradeHistoryMeta',
                        'returnContractAddress',
                        'returnNextNonce',
                    ],
                },
                'private': {
                    'post': [
                        'order',
                        'cancel',
                        'trade',
                        'withdraw',
                    ],
                },
            },
            # both options are resolved lazily(see get_contract_address / get_nonce)
            'options': {
                'contractAddress': None,
                'orderNonce': None,
            },
            # exact error strings returned by the API, mapped to exception classes
            'exceptions': {
                'Invalid order signature. Please try again.': AuthenticationError,
                'You have insufficient funds to match self order. If you believe self is a mistake please refresh and try again.': InsufficientFunds,
                'Order no longer available.': InvalidOrder,
            },
            'requiredCredentials': {
                'walletAddress': True,
                'privateKey': True,
                'apiKey': False,
                'secret': False,
            },
            'commonCurrencies': {
                'FT': 'Fabric Token',
                'MT': 'Monarch',
                'ONE': 'Menlo One',
                'PLA': 'PlayChip',
                'WAX': 'WAXP',
            },
        })
    def fetch_markets(self, params={}):
        """Fetch and parse all markets(including delisted ones).

        IDEX market ids are quote-first('ETH_LIT'), hence the
        id = quoteId + '_' + baseId construction below; baseId/quoteId in the
        result are token contract addresses, not currency symbols.
        """
        request = {
            'includeDelisted': True,
        }
        markets = self.publicPostReturnCurrenciesWithPairs(self.extend(request, params))
        # index tokens by symbol for address/decimals lookup below
        currenciesById = {}
        currencies = markets['tokens']
        for i in range(0, len(currencies)):
            currency = currencies[i]
            currenciesById[currency['symbol']] = currency
        result = []
        # the API exposes no trading limits; all markets share this placeholder
        limits = {
            'amount': {
                'min': None,
                'max': None,
            },
            'price': {
                'min': None,
                'max': None,
            },
            'cost': {
                'min': None,
                'max': None,
            },
        }
        # 'pairs' maps each quote id to the list of base ids traded against it
        quotes = markets['pairs']
        keys = list(quotes.keys())
        for i in range(0, len(keys)):
            quoteId = keys[i]
            bases = quotes[quoteId]
            quote = self.safe_currency_code(quoteId)
            quoteCurrency = currenciesById[quoteId]
            for j in range(0, len(bases)):
                baseId = bases[j]
                id = quoteId + '_' + baseId
                base = self.safe_currency_code(baseId)
                symbol = base + '/' + quote
                baseCurrency = currenciesById[baseId]
                baseAddress = baseCurrency['address']
                quoteAddress = quoteCurrency['address']
                precision = {
                    'price': self.safe_integer(quoteCurrency, 'decimals'),
                    'amount': self.safe_integer(baseCurrency, 'decimals'),
                }
                result.append({
                    'symbol': symbol,
                    'precision': precision,
                    'base': base,
                    'quote': quote,
                    'baseId': baseAddress,  # token contract address
                    'quoteId': quoteAddress,  # token contract address
                    'limits': limits,
                    'id': id,
                    'info': baseCurrency,
                    'tierBased': False,
                })
        return result
    def parse_ticker(self, ticker, market=None):
        """Convert a raw ticker structure into a unified ticker."""
        symbol = None
        if market:
            symbol = market['symbol']
        baseVolume = self.safe_float(ticker, 'baseVolume')
        quoteVolume = self.safe_float(ticker, 'quoteVolume')
        last = self.safe_float(ticker, 'last')
        percentage = self.safe_float(ticker, 'percentChange')
        return {
            'symbol': symbol,
            'timestamp': None,  # the API reports no ticker timestamp
            'datetime': None,
            'high': self.safe_float(ticker, 'high'),
            'low': self.safe_float(ticker, 'low'),
            'bid': self.safe_float(ticker, 'highestBid'),
            'bidVolume': None,
            'ask': self.safe_float(ticker, 'lowestAsk'),
            'askVolume': None,
            'vwap': None,
            'open': None,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': None,
            'percentage': percentage,
            'average': None,
            # NOTE(review): base/quote volumes are deliberately swapped here,
            # presumably because IDEX market ids are quote-first — confirm
            # against the exchange's returnTicker response
            'baseVolume': quoteVolume,
            'quoteVolume': baseVolume,
            'info': ticker,
        }
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicPostReturnTicker(params)
ids = list(response.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
symbol = None
market = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
else:
quoteId, baseId = id.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
market = {'symbol': symbol}
ticker = response[id]
result[symbol] = self.parse_ticker(ticker, market)
return self.filter_by_array(result, 'symbol', symbols)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = self.publicPostReturnTicker(self.extend(request, params))
return self.parse_ticker(response, market)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
id = market['quote'] + '_' + market['base']
request = {
'market': id,
'count': 100,
}
if limit is not None:
request['count'] = limit
response = self.publicPostReturnOrderBook(self.extend(request, params))
return self.parse_order_book(response, None, 'bids', 'asks', 'price', 'amount')
def parse_bid_ask(self, bidAsk, priceKey=0, amountKey=1):
price = self.safe_float(bidAsk, priceKey)
amount = self.safe_float(bidAsk, amountKey)
info = bidAsk
return [price, amount, info]
def fetch_balance(self, params={}):
request = {
'address': self.walletAddress,
}
response = self.publicPostReturnCompleteBalances(self.extend(request, params))
result = {
'info': response,
}
keys = list(response.keys())
for i in range(0, len(keys)):
currency = keys[i]
balance = response[currency]
code = self.safe_currency_code(currency)
result[code] = {
'free': self.safe_float(balance, 'available'),
'used': self.safe_float(balance, 'onOrders'),
}
return self.parse_balance(result)
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Create a 'limit' or 'market' order.

        Limit orders are hashed and signed locally with the wallet's private
        key before being posted.  Market orders require a raw order structure
        in params['orderHash'] / params['params'](taken from the order book or
        from fetchOrder/fetchOpenOrders) to trade against.
        """
        self.check_required_dependencies()
        self.load_markets()
        market = self.market(symbol)
        if type == 'limit':
            expires = 100000
            contractAddress = self.get_contract_address()
            tokenBuy = None
            tokenSell = None
            amountBuy = None
            amountSell = None
            # price is required for limit orders(float(None) would raise here)
            quoteAmount = float(price) * float(amount)
            if side == 'buy':
                tokenBuy = market['baseId']
                tokenSell = market['quoteId']
                amountBuy = self.to_wei(amount, market['precision']['amount'])
                # the quote side is expressed in 18-decimal(wei) units
                amountSell = self.to_wei(quoteAmount, 18)
            else:
                tokenBuy = market['quoteId']
                tokenSell = market['baseId']
                amountBuy = self.to_wei(quoteAmount, 18)
                amountSell = self.to_wei(amount, market['precision']['amount'])
            nonce = self.get_nonce()
            # field order matters for the solidity hash
            orderToHash = {
                'contractAddress': contractAddress,
                'tokenBuy': tokenBuy,
                'amountBuy': amountBuy,
                'tokenSell': tokenSell,
                'amountSell': amountSell,
                'expires': expires,
                'nonce': nonce,
                'address': self.walletAddress,
            }
            orderHash = self.get_idex_create_order_hash(orderToHash)
            signature = self.sign_message(orderHash, self.privateKey)
            request = {
                'tokenBuy': tokenBuy,
                'amountBuy': amountBuy,
                'tokenSell': tokenSell,
                'amountSell': amountSell,
                'address': self.walletAddress,
                'nonce': nonce,
                'expires': expires,
            }
            response = self.privatePostOrder(self.extend(request, signature))
            return self.parse_order(response, market)
        elif type == 'market':
            if not ('orderHash' in params):
                raise ArgumentsRequired(self.id + ' market order requires an order structure such as that in fetchOrderBook()[\'bids\'][0][2], fetchOrder()[\'info\'], or fetchOpenOrders()[0][\'info\']')
            # a market order trades against an existing signed order
            orderToSign = {
                'orderHash': params['orderHash'],
                'amount': params['params']['amountBuy'],
                'address': params['params']['user'],
                'nonce': params['params']['nonce'],
            }
            orderHash = self.get_idex_market_order_hash(orderToSign)
            signature = self.sign_message(orderHash, self.privateKey)
            signedOrder = self.extend(orderToSign, signature)
            # overwrite with our own wallet address and nonce before posting
            signedOrder['address'] = self.walletAddress
            signedOrder['nonce'] = self.get_nonce()
            response = self.privatePostTrade(signedOrder)
            return self.parse_orders(response, market)
def get_nonce(self):
if self.options['orderNonce'] is None:
response = self.publicPostReturnNextNonce({
'address': self.walletAddress,
})
return self.safe_integer(response, 'nonce')
else:
result = self.options['orderNonce']
self.options['orderNonce'] = self.sum(self.options['orderNonce'], 1)
return result
def get_contract_address(self):
if self.options['contractAddress'] is not None:
return self.options['contractAddress']
response = self.publicPostReturnContractAddress()
self.options['contractAddress'] = self.safe_string(response, 'address')
return self.options['contractAddress']
def cancel_order(self, orderId, symbol=None, params={}):
nonce = self.get_nonce()
orderToHash = {
'orderHash': orderId,
'nonce': nonce,
}
orderHash = self.get_idex_cancel_order_hash(orderToHash)
signature = self.sign_message(orderHash, self.privateKey)
request = {
'orderHash': orderId,
'address': self.walletAddress,
'nonce': nonce,
}
response = self.privatePostCancel(self.extend(request, signature))
if 'success' in response:
return {
'info': response,
}
else:
raise ExchangeError(self.id + ' cancel order failed ' + self.json(response))
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'address': self.walletAddress,
}
if since is not None:
request['start'] = int(int(math.floor(since / 1000)))
response = self.publicPostReturnDepositsWithdrawals(self.extend(request, params))
deposits = self.parse_transactions(response['deposits'], currency, since, limit)
withdrawals = self.parse_transactions(response['withdrawals'], currency, since, limit)
return self.array_concat(deposits, withdrawals)
def parse_transaction(self, item, currency=None):
amount = self.safe_float(item, 'amount')
timestamp = self.safe_timestamp(item, 'timestamp')
txhash = self.safe_string(item, 'transactionHash')
id = None
type = None
status = None
addressFrom = None
addressTo = None
if 'depositNumber' in item:
id = self.safe_string(item, 'depositNumber')
type = 'deposit'
addressFrom = self.walletAddress
addressTo = self.options['contractAddress']
elif 'withdrawalNumber' in item:
id = self.safe_string(item, 'withdrawalNumber')
type = 'withdrawal'
status = self.parse_transaction_status(self.safe_string(item, 'status'))
addressFrom = self.options['contractAddress']
addressTo = self.walletAddress
code = self.safe_currency_code(self.safe_string(item, 'currency'))
return {
'info': item,
'id': id,
'txid': txhash,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'currency': code,
'amount': amount,
'status': status,
'type': type,
'updated': None,
'comment': None,
'addressFrom': addressFrom,
'tagFrom': None,
'addressTo': addressTo,
'tagTo': None,
'fee': {
'currency': code,
'cost': None,
'rate': None,
},
}
def parse_transaction_status(self, status):
statuses = {
'COMPLETE': 'ok',
}
return self.safe_string(statuses, status)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
if self.walletAddress is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders requires a walletAddress')
self.load_markets()
request = {
'address': self.walletAddress,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = self.publicPostReturnOpenOrders(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'orderHash': id,
}
response = self.publicPostReturnOrderStatus(self.extend(request, params))
return self.parse_order(response, market)
def parse_order(self, order, market=None):
timestamp = self.safe_timestamp(order, 'timestamp')
side = self.safe_string(order, 'type')
symbol = None
amount = None
remaining = None
if 'initialAmount' in order:
amount = self.safe_float(order, 'initialAmount')
remaining = self.safe_float(order, 'amount')
else:
amount = self.safe_float(order, 'amount')
filled = self.safe_float(order, 'filled')
price = self.safe_float(order, 'price')
cost = self.safe_float(order, 'total')
if (cost is not None) and (filled is not None) and not cost:
cost = filled * price
if 'market' in order:
marketId = order['market']
symbol = self.markets_by_id[marketId]['symbol']
elif (side is not None) and ('params' in order):
params = order['params']
buy = self.safe_currency_code(self.safe_string(params, 'tokenBuy'))
sell = self.safe_currency_code(self.safe_string(params, 'tokenSell'))
if buy is not None and sell is not None:
symbol = (buy + '/' + sell) if (side == 'buy') else (sell + '/' + buy)
if symbol is None and market is not None:
symbol = market['symbol']
id = self.safe_string(order, 'orderHash')
status = self.parse_order_status(self.safe_string(order, 'status'))
return {
'info': order,
'id': id,
'clientOrderId': None,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'side': side,
'amount': amount,
'price': price,
'type': 'limit',
'filled': filled,
'remaining': remaining,
'cost': cost,
'status': status,
'lastTradeTimestamp': None,
'average': None,
'trades': None,
'fee': None,
}
def parse_order_status(self, status):
statuses = {
'open': 'open',
}
return self.safe_string(statuses, status, status)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if self.walletAddress is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders requires a walletAddress')
self.load_markets()
request = {
'address': self.walletAddress,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
if limit is not None:
request['start'] = int(int(math.floor(limit)))
response = self.publicPostReturnTradeHistory(self.extend(request, params))
if isinstance(response, list):
return self.parse_trades(response, market, since, limit)
else:
result = []
marketIds = list(response.keys())
for i in range(0, len(marketIds)):
marketId = marketIds[i]
trades = response[marketId]
parsed = self.parse_trades(trades, market, since, limit)
result = self.array_concat(result, parsed)
return result
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
if limit is not None:
request['start'] = int(int(math.floor(limit)))
response = self.publicPostReturnTradeHistory(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
side = self.safe_string(trade, 'type')
feeCurrency = None
symbol = None
maker = self.safe_string(trade, 'maker')
takerOrMaker = None
if maker is not None and self.walletAddress is not None:
if maker.lower() == self.walletAddress.lower():
takerOrMaker = 'maker'
else:
takerOrMaker = 'taker'
buy = self.safe_currency_code(self.safe_string(trade, 'tokenBuy'))
sell = self.safe_currency_code(self.safe_string(trade, 'tokenSell'))
feeSide = None
if buy is not None and sell is not None:
if side == 'buy':
feeSide = 'buyerFee'
if takerOrMaker == 'maker':
symbol = buy + '/' + sell
feeCurrency = buy
else:
symbol = sell + '/' + buy
feeCurrency = sell
else:
feeSide = 'sellerFee'
if takerOrMaker == 'maker':
symbol = sell + '/' + buy
feeCurrency = buy
else:
symbol = buy + '/' + sell
feeCurrency = sell
if symbol is None and market is not None:
symbol = market['symbol']
timestamp = self.safe_timestamp(trade, 'timestamp')
id = self.safe_string(trade, 'tid')
amount = self.safe_float(trade, 'amount')
price = self.safe_float(trade, 'price')
cost = self.safe_float(trade, 'total')
feeCost = self.safe_float(trade, feeSide)
if feeCost < 0:
gasFee = self.safe_float(trade, 'gasFee')
feeCost = self.sum(gasFee, feeCost)
fee = {
'currency': feeCurrency,
'cost': feeCost,
}
if feeCost is not None and amount is not None:
feeCurrencyAmount = cost if (feeCurrency == 'ETH') else amount
fee['rate'] = feeCost / feeCurrencyAmount
orderId = self.safe_string(trade, 'orderHash')
return {
'info': trade,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
'price': price,
'amount': amount,
'cost': cost,
'takerOrMaker': takerOrMaker,
'side': side,
'order': orderId,
'symbol': symbol,
'type': 'limit',
}
    def withdraw(self, code, amount, address, tag=None, params={}):
        """Request an on-chain withdrawal of *amount* units of *code* to *address*.

        The withdrawal payload is hashed and signed locally with the
        configured private key before submission, as IDEX requires.
        """
        self.check_required_dependencies()
        self.check_address(address)
        self.load_markets()
        currency = self.currency(code)
        tokenAddress = currency['id']
        nonce = self.get_nonce()
        # amounts are submitted in the token's smallest unit (wei-style integer)
        amount = self.to_wei(amount, currency['precision'])
        requestToHash = {
            'contractAddress': self.get_contract_address(),
            'token': tokenAddress,
            'amount': amount,
            'address': address,
            'nonce': nonce,
        }
        hash = self.get_idex_withdraw_hash(requestToHash)
        signature = self.sign_message(hash, self.privateKey)
        request = {
            'address': address,
            'amount': amount,
            'token': tokenAddress,
            'nonce': nonce,
        }
        # the signature components are merged into the request body
        response = self.privatePostWithdraw(self.extend(request, signature))
        # the endpoint does not return a transaction id
        return {
            'info': response,
            'id': None,
        }
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
body = self.json(params)
url = self.urls['api'] + '/' + path
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
}
if api == 'private':
self.check_required_credentials()
headers['API-Key'] = self.apiKey
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def get_idex_create_order_hash(self, order):
return self.solidity_sha3([
order['contractAddress'],
order['tokenBuy'],
order['amountBuy'],
order['tokenSell'],
order['amountSell'],
order['expires'],
order['nonce'],
order['address'],
])
def get_idex_cancel_order_hash(self, order):
return self.solidity_sha3([
order['orderHash'],
order['nonce'],
])
def get_idex_market_order_hash(self, order):
return self.solidity_sha3([
order['orderHash'],
order['amount'],
order['address'],
order['nonce'],
])
def get_idex_withdraw_hash(self, request):
return self.solidity_sha3([
request['contractAddress'],
request['token'],
request['amount'],
request['address'],
request['nonce'],
])
    def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Map API error payloads onto ccxt exception classes.

        Raises a specific exception when the error string is registered in
        self.exceptions, otherwise a generic ExchangeError with the raw body.
        """
        if response is None:
            return
        if 'error' in response:
            if response['error'] in self.exceptions:
                raise self.exceptions[response['error']](self.id + ' ' + response['error'])
            raise ExchangeError(self.id + ' ' + body)
| true | true |
f734731852fc4bff361b7a445260efff13f13871 | 3,019 | py | Python | MagTag_Project_Selector/code.py | dastels/Adafruit_Learning_System_Guides | 9c59432a546c55ac9e40085c5f816cd09096fda2 | [
"MIT"
] | 1 | 2021-01-05T02:08:27.000Z | 2021-01-05T02:08:27.000Z | MagTag_Project_Selector/code.py | dastels/Adafruit_Learning_System_Guides | 9c59432a546c55ac9e40085c5f816cd09096fda2 | [
"MIT"
] | 1 | 2020-10-16T15:30:22.000Z | 2020-10-16T15:30:22.000Z | MagTag_Project_Selector/code.py | dastels/Adafruit_Learning_System_Guides | 9c59432a546c55ac9e40085c5f816cd09096fda2 | [
"MIT"
] | 1 | 2020-10-16T15:23:04.000Z | 2020-10-16T15:23:04.000Z | # Based on code written by @DavidGlaude on Twitter
# https://twitter.com/DavidGlaude/status/1340365817138044933
# https://gist.github.com/dglaude/4bf8d0a13c9c8ca8b05d6c0e9176bd20
import time
import alarm
import displayio
import board
import adafruit_imageload
from adafruit_display_shapes.rect import Rect
from adafruit_magtag.magtag import Graphics
from digitalio import DigitalInOut, Direction, Pull
projects = [
"weather",
"spacex",
"covid",
"showerthoughts",
"tides",
"year",
"showtimes",
"slideshow",
]
btnA = DigitalInOut(board.D15)
btnA.direction = Direction.INPUT
btnA.pull = Pull.UP
btnB = DigitalInOut(board.D14)
btnB.direction = Direction.INPUT
btnB.pull = Pull.UP
btnC = DigitalInOut(board.D12)
btnC.direction = Direction.INPUT
btnC.pull = Pull.UP
btnD = DigitalInOut(board.D11)
btnD.direction = Direction.INPUT
btnD.pull = Pull.UP
graphics = Graphics(auto_refresh=False)
display = graphics.display
group = displayio.Group(max_size=14)
selector = False
if not btnA.value or not btnB.value or not btnC.value or not btnD.value:
selector = True
if selector:
background = Rect(0, 0, 296, 128, fill=0xFFFFFF)
group.append(background)
for i in range(8):
sprite_sheet, palette = adafruit_imageload.load(
f"/bmps/{projects[i]}.bmp",
bitmap=displayio.Bitmap,
palette=displayio.Palette,
)
sprite = displayio.TileGrid(
sprite_sheet,
pixel_shader=palette,
width=1,
height=1,
tile_width=62,
tile_height=54,
x=6 + 74 * (i % 4),
y=6 + 62 * (i // 4),
)
group.append(sprite)
rect = Rect(4, 4, 66, 58, outline=0x000000, stroke=2)
group.append(rect)
display.show(group)
display.refresh()
time.sleep(5)
print("Ready")
selected = 0
while True:
if not btnA.value and not btnD.value:
alarm.sleep_memory[0] = selected
break
if not btnA.value and selected != 0 and selected != 4:
selected -= 1
rect.x -= 74
display.refresh()
print("left")
time.sleep(5)
continue
if not btnB.value and selected > 3:
selected -= 4
rect.y -= 62
display.refresh()
print("up")
time.sleep(5)
continue
if not btnC.value and selected < 4:
selected += 4
rect.y += 62
display.refresh()
print("down")
time.sleep(5)
continue
if not btnD.value and selected != 3 and selected != 7:
selected += 1
rect.x += 74
display.refresh()
print("right")
time.sleep(5)
continue
btnA.deinit()
btnB.deinit()
btnC.deinit()
btnD.deinit()
print("Starting ", projects[int(alarm.sleep_memory[0])])
__import__("/projects/" + projects[int(alarm.sleep_memory[0])])
| 24.745902 | 72 | 0.595561 |
import time
import alarm
import displayio
import board
import adafruit_imageload
from adafruit_display_shapes.rect import Rect
from adafruit_magtag.magtag import Graphics
from digitalio import DigitalInOut, Direction, Pull
projects = [
"weather",
"spacex",
"covid",
"showerthoughts",
"tides",
"year",
"showtimes",
"slideshow",
]
btnA = DigitalInOut(board.D15)
btnA.direction = Direction.INPUT
btnA.pull = Pull.UP
btnB = DigitalInOut(board.D14)
btnB.direction = Direction.INPUT
btnB.pull = Pull.UP
btnC = DigitalInOut(board.D12)
btnC.direction = Direction.INPUT
btnC.pull = Pull.UP
btnD = DigitalInOut(board.D11)
btnD.direction = Direction.INPUT
btnD.pull = Pull.UP
graphics = Graphics(auto_refresh=False)
display = graphics.display
group = displayio.Group(max_size=14)
selector = False
if not btnA.value or not btnB.value or not btnC.value or not btnD.value:
selector = True
if selector:
background = Rect(0, 0, 296, 128, fill=0xFFFFFF)
group.append(background)
for i in range(8):
sprite_sheet, palette = adafruit_imageload.load(
f"/bmps/{projects[i]}.bmp",
bitmap=displayio.Bitmap,
palette=displayio.Palette,
)
sprite = displayio.TileGrid(
sprite_sheet,
pixel_shader=palette,
width=1,
height=1,
tile_width=62,
tile_height=54,
x=6 + 74 * (i % 4),
y=6 + 62 * (i // 4),
)
group.append(sprite)
rect = Rect(4, 4, 66, 58, outline=0x000000, stroke=2)
group.append(rect)
display.show(group)
display.refresh()
time.sleep(5)
print("Ready")
selected = 0
while True:
if not btnA.value and not btnD.value:
alarm.sleep_memory[0] = selected
break
if not btnA.value and selected != 0 and selected != 4:
selected -= 1
rect.x -= 74
display.refresh()
print("left")
time.sleep(5)
continue
if not btnB.value and selected > 3:
selected -= 4
rect.y -= 62
display.refresh()
print("up")
time.sleep(5)
continue
if not btnC.value and selected < 4:
selected += 4
rect.y += 62
display.refresh()
print("down")
time.sleep(5)
continue
if not btnD.value and selected != 3 and selected != 7:
selected += 1
rect.x += 74
display.refresh()
print("right")
time.sleep(5)
continue
btnA.deinit()
btnB.deinit()
btnC.deinit()
btnD.deinit()
print("Starting ", projects[int(alarm.sleep_memory[0])])
__import__("/projects/" + projects[int(alarm.sleep_memory[0])])
| true | true |
f734735280ad987f4230935eb5071713885ad7f4 | 207 | py | Python | src/app/beer_garden/api/http/schemas/v1/role.py | scott-taubman/beer-garden | bac825849f7791e14064942566fbec63a83e6f87 | [
"MIT"
] | null | null | null | src/app/beer_garden/api/http/schemas/v1/role.py | scott-taubman/beer-garden | bac825849f7791e14064942566fbec63a83e6f87 | [
"MIT"
] | null | null | null | src/app/beer_garden/api/http/schemas/v1/role.py | scott-taubman/beer-garden | bac825849f7791e14064942566fbec63a83e6f87 | [
"MIT"
] | null | null | null | from brewtils.schemas import RoleSchema
from marshmallow import Schema, fields
class RoleListSchema(Schema):
    """Schema for listing multiple roles."""
    # each entry in "roles" is serialized via the brewtils RoleSchema
    roles = fields.List(fields.Nested(RoleSchema))
| 23 | 50 | 0.768116 | from brewtils.schemas import RoleSchema
from marshmallow import Schema, fields
class RoleListSchema(Schema):
roles = fields.List(fields.Nested(RoleSchema))
| true | true |
f734735728c02e2f8721f9bfa11ee29045230912 | 9,456 | py | Python | accelbyte_py_sdk/api/platform/operations/item/update_app.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/platform/operations/item/update_app.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/platform/operations/item/update_app.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | # Auto-generated at 2021-09-27T17:01:29.359679+08:00
# from: Justice Platform Service (3.24.0)
# Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HttpResponse
from ...models import AppUpdate
from ...models import ErrorEntity
from ...models import FullAppInfo
from ...models import ValidationErrorEntity
class UpdateApp(Operation):
    """Update an app (updateApp)

    Properties:
        url: /platform/admin/namespaces/{namespace}/items/{itemId}/app
        method: PUT
        tags: Item
        consumes: ["application/json"]
        produces: ["application/json"]
        security: bearer
        body: (body) OPTIONAL AppUpdate in body
        namespace: (namespace) REQUIRED str in path
        item_id: (itemId) REQUIRED str in path
        store_id: (storeId) REQUIRED str in query
    Responses:
        200: OK - FullAppInfo (successful operation)
        404: Not Found - ErrorEntity (ErrorMessage: 30341 | Description: Item [{itemId}] does not exist in namespace [{namespace}])
        409: Conflict - ErrorEntity (ErrorCode: 30173 | ErrorMessage: Published store can't modify content)
        422: Unprocessable Entity - ValidationErrorEntity (ErrorCode: 20002 | ErrorMessage: validation error)
    """
    # region fields
    _url: str = "/platform/admin/namespaces/{namespace}/items/{itemId}/app"
    _method: str = "PUT"
    _consumes: List[str] = ["application/json"]
    _produces: List[str] = ["application/json"]
    _security: Optional[str] = "bearer"
    _location_query: str = None
    body: AppUpdate # OPTIONAL in [body]
    namespace: str # REQUIRED in [path]
    item_id: str # REQUIRED in [path]
    store_id: str # REQUIRED in [query]
    # endregion fields
    # region properties
    @property
    def url(self) -> str:
        return self._url
    @property
    def method(self) -> str:
        return self._method
    @property
    def consumes(self) -> List[str]:
        return self._consumes
    @property
    def produces(self) -> List[str]:
        return self._produces
    @property
    def security(self) -> Optional[str]:
        return self._security
    @property
    def location_query(self) -> str:
        return self._location_query
    # endregion properties
    # region get methods
    def get_full_url(self, base_url: Union[None, str] = None) -> str:
        # builds the absolute request url by substituting path params and
        # appending the query string
        result = base_url if base_url is not None else ""
        # path params
        url = self.url
        for k, v in self.get_path_params().items():
            url = url.replace(f"{{{k}}}", v)
        result += url
        # query params
        result += "?" + "&".join([f"{k}={v}" for k, v in self.get_query_params().items()])
        return result
    # noinspection PyMethodMayBeStatic
    def get_all_required_fields(self) -> List[str]:
        return [
            "namespace",
            "item_id",
            "store_id",
        ]
    # endregion get methods
    # region get_x_params methods
    def get_all_params(self) -> dict:
        return {
            "body": self.get_body_params(),
            "path": self.get_path_params(),
            "query": self.get_query_params(),
        }
    def get_body_params(self) -> Any:
        return self.body.to_dict()
    def get_path_params(self) -> dict:
        result = {}
        if hasattr(self, "namespace"):
            result["namespace"] = self.namespace
        if hasattr(self, "item_id"):
            result["itemId"] = self.item_id
        return result
    def get_query_params(self) -> dict:
        result = {}
        if hasattr(self, "store_id"):
            result["storeId"] = self.store_id
        return result
    # endregion get_x_params methods
    # region is/has methods
    def is_valid(self) -> bool:
        # True only when every required field has been set
        if not hasattr(self, "namespace") or self.namespace is None:
            return False
        if not hasattr(self, "item_id") or self.item_id is None:
            return False
        if not hasattr(self, "store_id") or self.store_id is None:
            return False
        return True
    # endregion is/has methods
    # region with_x methods
    def with_body(self, value: AppUpdate) -> UpdateApp:
        self.body = value
        return self
    def with_namespace(self, value: str) -> UpdateApp:
        self.namespace = value
        return self
    def with_item_id(self, value: str) -> UpdateApp:
        self.item_id = value
        return self
    def with_store_id(self, value: str) -> UpdateApp:
        self.store_id = value
        return self
    # endregion with_x methods
    # region to methods
    def to_dict(self, include_empty: bool = False) -> dict:
        result = {}
        if hasattr(self, "body") and self.body:
            result["body"] = self.body.to_dict(include_empty=include_empty)
        elif include_empty:
            result["body"] = AppUpdate()
        if hasattr(self, "namespace") and self.namespace:
            result["namespace"] = str(self.namespace)
        elif include_empty:
            result["namespace"] = str()
        if hasattr(self, "item_id") and self.item_id:
            result["itemId"] = str(self.item_id)
        elif include_empty:
            result["itemId"] = str()
        if hasattr(self, "store_id") and self.store_id:
            result["storeId"] = str(self.store_id)
        elif include_empty:
            result["storeId"] = str()
        return result
    # endregion to methods
    # region response methods
    # noinspection PyMethodMayBeStatic
    def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, FullAppInfo], Union[None, ErrorEntity, ValidationErrorEntity]]:
        """Parse the given response.

        Returns a (result, error) pair; exactly one of the two is non-None.

        200: OK - FullAppInfo (successful operation)
        404: Not Found - ErrorEntity (ErrorMessage: 30341 | Description: Item [{itemId}] does not exist in namespace [{namespace}])
        409: Conflict - ErrorEntity (ErrorCode: 30173 | ErrorMessage: Published store can't modify content)
        422: Unprocessable Entity - ValidationErrorEntity (ErrorCode: 20002 | ErrorMessage: validation error)
        """
        if code == 200:
            return FullAppInfo.create_from_dict(content), None
        if code == 404:
            return None, ErrorEntity.create_from_dict(content)
        if code == 409:
            return None, ErrorEntity.create_from_dict(content)
        if code == 422:
            return None, ValidationErrorEntity.create_from_dict(content)
        was_handled, undocumented_response = HttpResponse.try_create_undocumented_response(code, content)
        if was_handled:
            return None, undocumented_response
        return None, HttpResponse.create_unhandled_error()
    # endregion response methods
    # region static methods
    @classmethod
    def create(
        cls,
        namespace: str,
        item_id: str,
        store_id: str,
        body: Optional[AppUpdate] = None,
    ) -> UpdateApp:
        # convenience constructor for a fully-populated operation
        instance = cls()
        instance.namespace = namespace
        instance.item_id = item_id
        instance.store_id = store_id
        if body is not None:
            instance.body = body
        return instance
    @classmethod
    def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> UpdateApp:
        instance = cls()
        if "body" in dict_ and dict_["body"] is not None:
            instance.body = AppUpdate.create_from_dict(dict_["body"], include_empty=include_empty)
        elif include_empty:
            instance.body = AppUpdate()
        if "namespace" in dict_ and dict_["namespace"] is not None:
            instance.namespace = str(dict_["namespace"])
        elif include_empty:
            instance.namespace = str()
        if "itemId" in dict_ and dict_["itemId"] is not None:
            instance.item_id = str(dict_["itemId"])
        elif include_empty:
            instance.item_id = str()
        if "storeId" in dict_ and dict_["storeId"] is not None:
            instance.store_id = str(dict_["storeId"])
        elif include_empty:
            instance.store_id = str()
        return instance
    @staticmethod
    def get_field_info() -> Dict[str, str]:
        # maps wire field names to python attribute names
        return {
            "body": "body",
            "namespace": "namespace",
            "itemId": "item_id",
            "storeId": "store_id",
        }
    # endregion static methods
| 31.415282 | 157 | 0.605436 |
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HttpResponse
from ...models import AppUpdate
from ...models import ErrorEntity
from ...models import FullAppInfo
from ...models import ValidationErrorEntity
class UpdateApp(Operation):
_url: str = "/platform/admin/namespaces/{namespace}/items/{itemId}/app"
_method: str = "PUT"
_consumes: List[str] = ["application/json"]
_produces: List[str] = ["application/json"]
_security: Optional[str] = "bearer"
_location_query: str = None
body: AppUpdate
namespace: str
item_id: str
store_id: str
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def security(self) -> Optional[str]:
return self._security
@property
def location_query(self) -> str:
return self._location_query
def get_full_url(self, base_url: Union[None, str] = None) -> str:
result = base_url if base_url is not None else ""
url = self.url
for k, v in self.get_path_params().items():
url = url.replace(f"{{{k}}}", v)
result += url
result += "?" + "&".join([f"{k}={v}" for k, v in self.get_query_params().items()])
return result
def get_all_required_fields(self) -> List[str]:
return [
"namespace",
"item_id",
"store_id",
]
def get_all_params(self) -> dict:
return {
"body": self.get_body_params(),
"path": self.get_path_params(),
"query": self.get_query_params(),
}
def get_body_params(self) -> Any:
return self.body.to_dict()
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
if hasattr(self, "item_id"):
result["itemId"] = self.item_id
return result
def get_query_params(self) -> dict:
result = {}
if hasattr(self, "store_id"):
result["storeId"] = self.store_id
return result
def is_valid(self) -> bool:
if not hasattr(self, "namespace") or self.namespace is None:
return False
if not hasattr(self, "item_id") or self.item_id is None:
return False
if not hasattr(self, "store_id") or self.store_id is None:
return False
return True
def with_body(self, value: AppUpdate) -> UpdateApp:
self.body = value
return self
def with_namespace(self, value: str) -> UpdateApp:
self.namespace = value
return self
def with_item_id(self, value: str) -> UpdateApp:
self.item_id = value
return self
def with_store_id(self, value: str) -> UpdateApp:
self.store_id = value
return self
def to_dict(self, include_empty: bool = False) -> dict:
result = {}
if hasattr(self, "body") and self.body:
result["body"] = self.body.to_dict(include_empty=include_empty)
elif include_empty:
result["body"] = AppUpdate()
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = str()
if hasattr(self, "item_id") and self.item_id:
result["itemId"] = str(self.item_id)
elif include_empty:
result["itemId"] = str()
if hasattr(self, "store_id") and self.store_id:
result["storeId"] = str(self.store_id)
elif include_empty:
result["storeId"] = str()
return result
def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, FullAppInfo], Union[None, ErrorEntity, ValidationErrorEntity]]:
if code == 200:
return FullAppInfo.create_from_dict(content), None
if code == 404:
return None, ErrorEntity.create_from_dict(content)
if code == 409:
return None, ErrorEntity.create_from_dict(content)
if code == 422:
return None, ValidationErrorEntity.create_from_dict(content)
was_handled, undocumented_response = HttpResponse.try_create_undocumented_response(code, content)
if was_handled:
return None, undocumented_response
return None, HttpResponse.create_unhandled_error()
@classmethod
def create(
cls,
namespace: str,
item_id: str,
store_id: str,
body: Optional[AppUpdate] = None,
) -> UpdateApp:
instance = cls()
instance.namespace = namespace
instance.item_id = item_id
instance.store_id = store_id
if body is not None:
instance.body = body
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> UpdateApp:
instance = cls()
if "body" in dict_ and dict_["body"] is not None:
instance.body = AppUpdate.create_from_dict(dict_["body"], include_empty=include_empty)
elif include_empty:
instance.body = AppUpdate()
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = str()
if "itemId" in dict_ and dict_["itemId"] is not None:
instance.item_id = str(dict_["itemId"])
elif include_empty:
instance.item_id = str()
if "storeId" in dict_ and dict_["storeId"] is not None:
instance.store_id = str(dict_["storeId"])
elif include_empty:
instance.store_id = str()
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"body": "body",
"namespace": "namespace",
"itemId": "item_id",
"storeId": "store_id",
}
| true | true |
f734740036bba3f47f055d090d680ae439c77013 | 5,376 | py | Python | atores.py | rafarios20/pythonbirds | 520642c6e9f50dfe94b95255fb6690c987c6397f | [
"MIT"
] | 1 | 2022-03-22T14:39:23.000Z | 2022-03-22T14:39:23.000Z | atores.py | rafarios20/pythonbirds | 520642c6e9f50dfe94b95255fb6690c987c6397f | [
"MIT"
] | null | null | null | atores.py | rafarios20/pythonbirds | 520642c6e9f50dfe94b95255fb6690c987c6397f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import math
DESTRUIDO = 'Destruido'
ATIVO = 'Ativo'
GRAVIDADE = 10  # m/s^2


class Ator:
    """An actor: a cartesian point on the game screen."""
    _caracter_ativo = 'A'
    _caracter_destruido = ' '

    def __init__(self, x=0, y=0):
        """Create the actor at horizontal position *x*, vertical *y*, active."""
        self.x = x
        self.y = y
        self.status = ATIVO

    def caracter(self):
        """Return the character used to draw this actor on screen."""
        if self.status == ATIVO:
            return self._caracter_ativo
        return self._caracter_destruido

    def calcular_posicao(self, tempo):
        """Return the actor's (x, y) position at game time *tempo*.

        A plain actor never moves, so the current position is returned.
        """
        return self.x, self.y

    def colidir(self, outro_ator, intervalo=1):
        """Run collision logic between this actor and *outro_ator*.

        A collision only happens when both actors are active and lie inside
        the same square of side *intervalo* centered on each other; in that
        case both are marked destroyed.
        """
        if self.status != ATIVO or outro_ator.status != ATIVO:
            return
        perto_horizontal = abs(self.x - outro_ator.x) <= intervalo
        perto_vertical = abs(self.y - outro_ator.y) <= intervalo
        if perto_horizontal and perto_vertical:
            self.status = DESTRUIDO
            outro_ator.status = DESTRUIDO
class Obstaculo(Ator):
    """An obstacle; drawn as 'O' while active."""
    _caracter_ativo = 'O'
class Porco(Ator):
    """A pig; drawn as '@' while active and '+' once destroyed."""
    _caracter_ativo = '@'
    _caracter_destruido = '+'
class DuploLancamentoExcecao(Exception):
    """Raised when an attempt is made to launch a bird twice."""
    pass
class Passaro(Ator):
    """A launchable bird that follows projectile motion after launch."""
    velocidade_escalar = 10

    def __init__(self, x=0, y=0):
        """Initialize the bird, remembering its starting point for physics.

        The launch time and launch angle are unset until lancar() is called.
        """
        super().__init__(x, y)
        self._x_inicial = x
        self._y_inicial = y
        self._tempo_de_lancamento = None
        self._angulo_de_lancamento = None  # radians

    def foi_lancado(self):
        """Return True once the bird has been launched, False otherwise."""
        return self._tempo_de_lancamento is not None

    def colidir_com_chao(self):
        """Mark the bird destroyed whenever it touches the ground (y <= 0)."""
        if self.y <= 0:
            self.status = DESTRUIDO

    def calcular_posicao(self, tempo):
        """Return the bird's (x, y) position at game time *tempo*.

        Before launch the initial position is returned.  While flying the
        position follows projectile motion from the initial point, scalar
        speed, launch angle and gravity.  After destruction the last
        computed position is returned unchanged.
        """
        if self._esta_voando():
            delta_t = tempo - self._tempo_de_lancamento
            self._calcular_posicao_vertical(delta_t)
            self._calcular_posicao_horizontal(delta_t)
        return super().calcular_posicao(tempo)

    def lancar(self, angulo, tempo_de_lancamento):
        """Launch the bird.

        :param angulo: launch angle in degrees (stored in radians)
        :param tempo_de_lancamento: game time of the launch
        """
        self._angulo_de_lancamento = math.radians(angulo)
        self._tempo_de_lancamento = tempo_de_lancamento

    def _calcular_posicao_vertical(self, delta_t):
        # y(t) = y0 + v*sin(a)*t - g*t^2/2
        seno = math.sin(self._angulo_de_lancamento)
        nova_altura = self._y_inicial + self.velocidade_escalar * delta_t * seno
        nova_altura -= (GRAVIDADE * (delta_t ** 2)) / 2
        self.y = nova_altura

    def _calcular_posicao_horizontal(self, delta_t):
        # x(t) = x0 + v*cos(a)*t
        cosseno = math.cos(self._angulo_de_lancamento)
        self.x = self._x_inicial + self.velocidade_escalar * delta_t * cosseno

    def _esta_voando(self):
        # flying means launched and not yet destroyed
        return self.foi_lancado() and self.status == ATIVO
class PassaroAmarelo(Passaro):
    """The yellow bird: faster than the red one (scalar speed 30)."""
    _caracter_ativo = 'A'
    _caracter_destruido = 'a'
    velocidade_escalar = 30
class PassaroVermelho(Passaro):
    """The red bird: scalar launch speed 20."""
    _caracter_ativo = 'V'
    _caracter_destruido = 'v'
    velocidade_escalar = 20
from __future__ import unicode_literals
import math
DESTRUIDO = 'Destruido'
ATIVO = 'Ativo'
GRAVIDADE = 10
class Ator:
_caracter_ativo = 'A'
_caracter_destruido = ' '
def __init__(self, x=0, y=0):
self.y = y
self.x = x
self.status = ATIVO
def caracter(self):
return self._caracter_ativo if self.status == ATIVO else self._caracter_destruido
def calcular_posicao(self, tempo):
return self.x, self.y
def colidir(self, outro_ator, intervalo=1):
if self.status==ATIVO and outro_ator.status==ATIVO:
delta_x=abs(self.x - outro_ator.x)
delta_y=abs(self.y - outro_ator.y)
if delta_x <= intervalo and delta_y <= intervalo:
self.status=outro_ator.status=DESTRUIDO
class Obstaculo(Ator):
_caracter_ativo = 'O'
class Porco(Ator):
_caracter_ativo = '@'
_caracter_destruido = '+'
class DuploLancamentoExcecao(Exception):
pass
class Passaro(Ator):
velocidade_escalar = 10
def __init__(self, x=0, y=0):
super().__init__(x, y)
self._x_inicial = x
self._y_inicial = y
self._tempo_de_lancamento = None
self._angulo_de_lancamento = None
def foi_lancado(self):
return not self._tempo_de_lancamento is None
def colidir_com_chao(self):
if self.y <=0:
self.status = DESTRUIDO
def calcular_posicao(self, tempo):
if self._esta_voando():
delta_t = tempo - self._tempo_de_lancamento
self._calcular_posicao_vertical(delta_t)
self._calcular_posicao_horizontal(delta_t)
return super().calcular_posicao(tempo)
def lancar(self, angulo, tempo_de_lancamento):
self._angulo_de_lancamento = math.radians(angulo)
self._tempo_de_lancamento = tempo_de_lancamento
def _calcular_posicao_vertical(self, delta_t):
y_atual = self._y_inicial
angulo_radianos=self._angulo_de_lancamento
y_atual += self.velocidade_escalar * delta_t * math.sin(angulo_radianos)
y_atual -= (GRAVIDADE * (delta_t**2)) / 2
self.y = y_atual
def _calcular_posicao_horizontal(self, delta_t):
x_atual = self._x_inicial
angulo_radianos=self._angulo_de_lancamento
x_atual += self.velocidade_escalar * delta_t * math.cos(angulo_radianos)
self.x = x_atual
def _esta_voando(self):
return self.foi_lancado() and self.status == ATIVO
class PassaroAmarelo(Passaro):
_caracter_ativo = 'A'
_caracter_destruido = 'a'
velocidade_escalar = 30
class PassaroVermelho(Passaro):
_caracter_ativo = 'V'
_caracter_destruido = 'v'
velocidade_escalar = 20 | true | true |
f734740a655264107a15bef1d806f6c58a8663e9 | 8,313 | py | Python | lib/assimp/test/regression/gen_db.py | Extrosoph/CITS3003-project | 2f0470b64eafc98864bfc14f5e7a9ad7e6fa837b | [
"MIT"
] | null | null | null | lib/assimp/test/regression/gen_db.py | Extrosoph/CITS3003-project | 2f0470b64eafc98864bfc14f5e7a9ad7e6fa837b | [
"MIT"
] | null | null | null | lib/assimp/test/regression/gen_db.py | Extrosoph/CITS3003-project | 2f0470b64eafc98864bfc14f5e7a9ad7e6fa837b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- Coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# Open Asset Import Library (ASSIMP)
# ---------------------------------------------------------------------------
#
# Copyright (c) 2006-2020, ASSIMP Development Team
#
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# * Neither the name of the ASSIMP team, nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior
# written permission of the ASSIMP Development Team.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ---------------------------------------------------------------------------
"""
Generate the regression database db.zip from the files in the <root>/test/models
directory. Older databases are overwritten with no prompt but can be restored
using Git as needed.
Use --help for usage.
On Windows, use ``py run.py <arguments>`` to make sure command line parameters
are forwarded to the script.
"""
import sys
import os
import subprocess
import zipfile
import settings
import utils
# Help text printed for -h/--help. User-facing; keep the wording in sync
# with the flag handling in the __main__ block below.
usage = """gen_db [assimp_binary] [-i=...] [-e=...] [-p] [-n]
The assimp_cmd (or assimp) binary to use is specified by the first
command line argument and defaults to ``assimp``.
To build, set ``ASSIMP_BUILD_ASSIMP_TOOLS=ON`` in CMake. If generating
configs for an IDE, make sure to build the assimp_cmd project.
-i,--include: List of file extensions to update dumps for. If omitted,
         all file extensions are updated except those in `exclude`.
         Example: -ixyz,abc
           -i.xyz,.abc
           --include=xyz,abc
-e,--exclude: Merged with settings.exclude_extensions to produce a
         list of all file extensions to ignore. If dumps exist,
         they are not altered. If not, they are not created.
-p,--preview: Preview list of file extensions touched by the update.
         Don't change anything.
-n,--nozip: Don't pack to ZIP archive. Keep all dumps in individual files.
"""
# -------------------------------------------------------------------------------
def process_dir(d, outfile, file_filter):
    """Recursively create dump records for all matching files under *d*.

    One dump is produced per (file, post-processing config) pair; the
    assimp binary's stdout/stderr is appended to *outfile*. Returns the
    number of dumps attempted.
    """
    print("Processing directory " + d)
    count = 0
    for entry in os.listdir(d):
        full_path = os.path.join(d, entry)
        # Recurse into subdirectories, skipping SVN metadata folders.
        if not entry == ".svn" and os.path.isdir(full_path):
            count += process_dir(full_path, outfile, file_filter)
            continue
        if not file_filter(entry):
            continue
        for pp_config in settings.pp_configs_to_test:
            count += 1
            print("DUMP " + full_path + "\n post-processing: " + pp_config)
            dump_path = os.path.join(os.getcwd(), settings.database_name,
                                     utils.hashing(full_path, pp_config))
            command = [assimp_bin_path, "dump", full_path, dump_path,
                       "-b", "-s", "-l"] + pp_config.split()
            outfile.write("assimp dump " + "-" * 80 + "\n")
            outfile.flush()
            if subprocess.call(command, stdout=outfile, stderr=outfile, shell=False):
                print("Failure processing " + full_path)
                # Leave an empty dump file behind to mark this failure
                # as expected by the regression suite.
                with open(dump_path, 'wb'):
                    pass
    return count
# -------------------------------------------------------------------------------
def make_zip():
    """Zip the contents of ./<settings.database_name>
    to <settings.database_name>.zip using DEFLATE
    compression to minimize the file size.

    The archive is verified with ``testzip``; when ``settings.remove_old``
    is set, the individual dump files and their directory are removed as
    the archive is built.
    """
    num = 0
    zipout = zipfile.ZipFile(settings.database_name + ".zip", "w", zipfile.ZIP_DEFLATED)
    try:
        for f in os.listdir(settings.database_name):
            p = os.path.join(settings.database_name, f)
            # Store each dump flat in the archive (arcname=f), not under
            # the database directory prefix.
            zipout.write(p, f)
            if settings.remove_old:
                os.remove(p)
            num += 1
        if settings.remove_old:
            os.rmdir(settings.database_name)
        # testzip() returns the name of the first bad entry, or None.
        bad = zipout.testzip()
        assert bad is None
    finally:
        # Explicitly close so the central directory is flushed to disk even
        # on error (the original relied on interpreter shutdown to do this).
        zipout.close()
    print("="*60)
    print("Database contains {0} entries".format(num))
# -------------------------------------------------------------------------------
def extract_zip():
    """Unpack <settings.database_name>.zip into ./<settings.database_name>.

    A missing or unreadable archive is not fatal: the error is printed and
    the database is regenerated from scratch by the caller.
    """
    archive_name = settings.database_name + ".zip"
    try:
        archive = zipfile.ZipFile(archive_name, "r", 0)
        archive.extractall(path=settings.database_name)
    except (RuntimeError, IOError) as err:
        print(err)
        print("failed to extract previous ZIP contents. "
              "DB is generated from scratch.")
# -------------------------------------------------------------------------------
def gen_db(ext_list, outfile):
    """Generate the crash dump database in ./<settings.database_name>.

    Only files whose (lower-cased) extension is in *ext_list* and which are
    not listed in ``settings.files_to_ignore`` are processed.
    """
    try:
        os.mkdir(settings.database_name)
    except OSError:
        # Directory already exists (e.g. restored by extract_zip) -- fine.
        pass

    def _wanted(name):
        # Filter passed down to process_dir for each candidate file name.
        ext = os.path.splitext(name)[1].lower()
        return ext in ext_list and name not in settings.files_to_ignore

    total = 0
    for model_dir in settings.model_directories:
        total += process_dir(model_dir, outfile, _wanted)
    print("=" * 60)
    print("Updated {0} entries".format(total))
# -------------------------------------------------------------------------------
if __name__ == "__main__":
    def clean(f):
        """Normalize one extension token to the canonical '.ext' form."""
        f = f.strip("* \'")
        return "."+f if f[:1] != '.' else f
    if len(sys.argv) <= 1 or sys.argv[1] == "--help" or sys.argv[1] == "-h":
        print(usage)
        sys.exit(0)
    assimp_bin_path = sys.argv[1]
    ext_list, preview, nozip = None, False, False
    for m in sys.argv[2:]:
        if m[:10]=="--exclude=":
            settings.exclude_extensions += map(clean, m[10:].split(","))
        elif m[:2]=="-e":
            settings.exclude_extensions += map(clean, m[2:].split(","))
        elif m[:10]=="--include=":
            ext_list = m[10:].split(",")
        elif m[:2]=="-i":
            ext_list = m[2:].split(",")
        elif m=="-p" or m == "--preview":
            preview = True
        elif m=="-n" or m == "--nozip":
            nozip = True
        else:
            print("Unrecognized parameter: " + m)
            sys.exit(-1)
    outfile = open(os.path.join("..", "results", "gen_regression_db_output.txt"), "w")
    if ext_list is None:
        # Ask the assimp binary itself which extensions it supports;
        # 'listext' prints a ';'-separated list on stdout.
        (ext_list, err) = subprocess.Popen([assimp_bin_path, "listext"],
            stdout=subprocess.PIPE).communicate()
        # BUGFIX: communicate() returns bytes on Python 3; str(bytes) would
        # embed the "b'...'" repr into the extension list. Decode instead.
        ext_list = ext_list.decode("utf-8", "replace").strip().lower().split(";")
    # todo: Fix for multi dot extensions like .skeleton.xml
    ext_list = list(filter(lambda f: not f in settings.exclude_extensions,
        map(clean, ext_list)))
    print('File extensions processed: ' + ', '.join(ext_list))
    if preview:
        sys.exit(1)
    extract_zip()
    gen_db(ext_list,outfile)
    # BUGFIX: honor the documented -n/--nozip flag, which was parsed but
    # previously ignored; keep the individual dump files when it is set.
    if not nozip:
        make_zip()
    outfile.close()
    print("="*60)
    input("Press any key to continue")
    sys.exit(0)
# vim: ai ts=4 sts=4 et sw=4
| 36.783186 | 104 | 0.563094 |
import sys
import os
import subprocess
import zipfile
import settings
import utils
usage = """gen_db [assimp_binary] [-i=...] [-e=...] [-p] [-n]
The assimp_cmd (or assimp) binary to use is specified by the first
command line argument and defaults to ``assimp``.
To build, set ``ASSIMP_BUILD_ASSIMP_TOOLS=ON`` in CMake. If generating
configs for an IDE, make sure to build the assimp_cmd project.
-i,--include: List of file extensions to update dumps for. If omitted,
all file extensions are updated except those in `exclude`.
Example: -ixyz,abc
-i.xyz,.abc
--include=xyz,abc
-e,--exclude: Merged with settings.exclude_extensions to produce a
list of all file extensions to ignore. If dumps exist,
they are not altered. If not, theu are not created.
-p,--preview: Preview list of file extensions touched by the update.
Dont' change anything.
-n,--nozip: Don't pack to ZIP archive. Keep all dumps in individual files.
"""
def process_dir(d, outfile, file_filter):
print("Processing directory " + d)
num = 0
for f in os.listdir(d):
fullp = os.path.join(d, f)
if os.path.isdir(fullp) and not f == ".svn":
num += process_dir(fullp, outfile, file_filter)
continue
if file_filter(f):
for pp in settings.pp_configs_to_test:
num += 1
print("DUMP " + fullp + "\n post-processing: " + pp)
outf = os.path.join(os.getcwd(), settings.database_name,
utils.hashing(fullp, pp))
cmd = [ assimp_bin_path, "dump", fullp, outf, "-b", "-s", "-l" ] + pp.split()
outfile.write("assimp dump "+"-"*80+"\n")
outfile.flush()
if subprocess.call(cmd, stdout=outfile, stderr=outfile, shell=False):
print("Failure processing " + fullp)
with open(outf,'wb') as f:
pass
return num
def make_zip():
num = 0
zipout = zipfile.ZipFile(settings.database_name + ".zip", "w", zipfile.ZIP_DEFLATED)
for f in os.listdir(settings.database_name):
p = os.path.join(settings.database_name, f)
zipout.write(p, f)
if settings.remove_old:
os.remove(p)
num += 1
if settings.remove_old:
os.rmdir(settings.database_name)
bad = zipout.testzip()
assert bad is None
print("="*60)
print("Database contains {0} entries".format(num))
def extract_zip():
try:
zipout = zipfile.ZipFile(settings.database_name + ".zip", "r", 0)
zipout.extractall(path=settings.database_name)
except (RuntimeError,IOError) as r:
print(r)
print("failed to extract previous ZIP contents. "\
"DB is generated from scratch.")
def gen_db(ext_list,outfile):
try:
os.mkdir(settings.database_name)
except OSError:
pass
num = 0
for tp in settings.model_directories:
num += process_dir(tp, outfile,
lambda x: os.path.splitext(x)[1].lower() in ext_list and not x in settings.files_to_ignore)
print("="*60)
print("Updated {0} entries".format(num))
if __name__ == "__main__":
def clean(f):
f = f.strip("* \'")
return "."+f if f[:1] != '.' else f
if len(sys.argv) <= 1 or sys.argv[1] == "--help" or sys.argv[1] == "-h":
print(usage)
sys.exit(0)
assimp_bin_path = sys.argv[1]
ext_list, preview, nozip = None, False, False
for m in sys.argv[2:]:
if m[:10]=="--exclude=":
settings.exclude_extensions += map(clean, m[10:].split(","))
elif m[:2]=="-e":
settings.exclude_extensions += map(clean, m[2:].split(","))
elif m[:10]=="--include=":
ext_list = m[10:].split(",")
elif m[:2]=="-i":
ext_list = m[2:].split(",")
elif m=="-p" or m == "--preview":
preview = True
elif m=="-n" or m == "--nozip":
nozip = True
else:
print("Unrecognized parameter: " + m)
sys.exit(-1)
outfile = open(os.path.join("..", "results", "gen_regression_db_output.txt"), "w")
if ext_list is None:
(ext_list, err) = subprocess.Popen([assimp_bin_path, "listext"],
stdout=subprocess.PIPE).communicate()
ext_list = str(ext_list.strip()).lower().split(";")
# todo: Fix for multi dot extensions like .skeleton.xml
ext_list = list(filter(lambda f: not f in settings.exclude_extensions,
map(clean, ext_list)))
print('File extensions processed: ' + ', '.join(ext_list))
if preview:
sys.exit(1)
extract_zip()
gen_db(ext_list,outfile)
make_zip()
print("="*60)
input("Press any key to continue")
sys.exit(0)
# vim: ai ts=4 sts=4 et sw=4
| true | true |
f734747084533080ace588764d0f46e026291f6a | 5,346 | py | Python | sdk/cognitiveservices/azure-cognitiveservices-knowledge-qnamaker/azure/cognitiveservices/knowledge/qnamaker/operations/endpoint_settings_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/cognitiveservices/azure-cognitiveservices-knowledge-qnamaker/azure/cognitiveservices/knowledge/qnamaker/operations/endpoint_settings_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/cognitiveservices/azure-cognitiveservices-knowledge-qnamaker/azure/cognitiveservices/knowledge/qnamaker/operations/endpoint_settings_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class EndpointSettingsOperations(object):
    """EndpointSettingsOperations operations.

    NOTE: This class is generated by Microsoft AutoRest; manual edits will
    be lost on regeneration.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # Reference to the generated models module, exposed for callers.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.config = config
    def get_settings(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets endpoint settings for an endpoint.

        Issues ``GET /endpointSettings`` against the configured endpoint.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: EndpointSettingsDTO or ClientRawResponse if raw=true
        :rtype:
         ~azure.cognitiveservices.knowledge.qnamaker.models.EndpointSettingsDTO
         or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.cognitiveservices.knowledge.qnamaker.models.ErrorResponseException>`
        """
        # Construct URL
        url = self.get_settings.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        # Any status other than 200 is surfaced as a service error.
        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('EndpointSettingsDTO', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get_settings.metadata = {'url': '/endpointSettings'}
    def update_settings(
            self, active_learning=None, custom_headers=None, raw=False, **operation_config):
        """Updates endpoint settings for an endpoint.

        Issues ``PATCH /endpointSettings``; the service replies 204 No
        Content on success, so nothing is deserialized.

        :param active_learning: Active Learning settings of the endpoint.
        :type active_learning:
         ~azure.cognitiveservices.knowledge.qnamaker.models.EndpointSettingsDTOActiveLearning
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.cognitiveservices.knowledge.qnamaker.models.ErrorResponseException>`
        """
        # Wrap the flat argument into the DTO the service expects.
        endpoint_settings_payload = models.EndpointSettingsDTO(active_learning=active_learning)
        # Construct URL
        url = self.update_settings.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct body
        body_content = self._serialize.body(endpoint_settings_payload, 'EndpointSettingsDTO')
        # Construct and send request
        request = self._client.patch(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        # 204 (No Content) is the only success status for PATCH here.
        if response.status_code not in [204]:
            raise models.ErrorResponseException(self._deserialize, response)
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    update_settings.metadata = {'url': '/endpointSettings'}
| 39.308824 | 114 | 0.671156 |
from msrest.pipeline import ClientRawResponse
from .. import models
class EndpointSettingsOperations(object):
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_settings(
self, custom_headers=None, raw=False, **operation_config):
url = self.get_settings.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EndpointSettingsDTO', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_settings.metadata = {'url': '/endpointSettings'}
def update_settings(
self, active_learning=None, custom_headers=None, raw=False, **operation_config):
endpoint_settings_payload = models.EndpointSettingsDTO(active_learning=active_learning)
url = self.update_settings.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
body_content = self._serialize.body(endpoint_settings_payload, 'EndpointSettingsDTO')
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [204]:
raise models.ErrorResponseException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
update_settings.metadata = {'url': '/endpointSettings'}
| true | true |
f734747f196e735eb6f0bbb5e4bff9965f84a668 | 11,715 | py | Python | mars/services/task/api/web.py | yuyiming/mars | 5e6990d1ea022444dd646c56697e596ef5d7e747 | [
"Apache-2.0"
] | 1 | 2022-02-24T08:39:26.000Z | 2022-02-24T08:39:26.000Z | mars/services/task/api/web.py | yuyiming/mars | 5e6990d1ea022444dd646c56697e596ef5d7e747 | [
"Apache-2.0"
] | null | null | null | mars/services/task/api/web.py | yuyiming/mars | 5e6990d1ea022444dd646c56697e596ef5d7e747 | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import base64
import json
from typing import Callable, List, Optional, Union
from ....core import TileableGraph, Tileable
from ....utils import serialize_serializable, deserialize_serializable
from ...web import web_api, MarsServiceWebAPIHandler, MarsWebAPIClientMixin
from ..core import TaskResult, TaskStatus
from .core import AbstractTaskAPI
def _json_serial_task_result(result: Optional[TaskResult]):
if result is None:
return {}
return {
"task_id": result.task_id,
"session_id": result.session_id,
"stage_id": result.stage_id,
"start_time": result.start_time,
"end_time": result.end_time,
"progress": result.progress,
"status": result.status.value,
"error": base64.b64encode(serialize_serializable(result.error)).decode()
if result.error is not None
else None,
"traceback": base64.b64encode(serialize_serializable(result.traceback)).decode()
if result.traceback is not None
else None,
"profiling": result.profiling,
}
def _json_deserial_task_result(d: dict) -> Optional[TaskResult]:
if not d:
return None
if d["error"] is not None:
d["error"] = deserialize_serializable(base64.b64decode(d["error"]))
if d["traceback"] is not None:
d["traceback"] = deserialize_serializable(base64.b64decode(d["traceback"]))
d["status"] = TaskStatus(d["status"])
return TaskResult(**d)
class TaskWebAPIHandler(MarsServiceWebAPIHandler):
    """Server-side web handler exposing the task service over HTTP.

    All routes live under ``/api/session/<session_id>/task`` and delegate
    to the in-cluster oscar-based ``TaskAPI``. ``WebTaskAPI`` below is the
    matching client.
    """
    _root_pattern = "/api/session/(?P<session_id>[^/]+)/task"
    async def _get_oscar_task_api(self, session_id: str):
        # Deferred import avoids a circular dependency at module load time.
        from .oscar import TaskAPI
        return await self._get_api_by_key(TaskAPI, session_id)
    @web_api("", method="post")
    async def submit_tileable_graph(self, session_id: str):
        """POST /task: submit a serialized tileable graph; respond with task id."""
        body_args = (
            deserialize_serializable(self.request.body) if self.request.body else None
        )
        # An empty task_name is normalized to None.
        task_name = body_args.get("task_name", None) or None
        fuse_enabled = body_args.get("fuse")
        graph = body_args["graph"]
        # extra_config is double-serialized by the client; unwrap it here.
        extra_config = body_args.get("extra_config", None)
        if extra_config:
            extra_config = deserialize_serializable(extra_config)
        oscar_api = await self._get_oscar_task_api(session_id)
        task_id = await oscar_api.submit_tileable_graph(
            graph,
            task_name=task_name,
            fuse_enabled=fuse_enabled,
            extra_config=extra_config,
        )
        self.write(task_id)
    @web_api("", method="get")
    async def get_task_results(self, session_id: str):
        """GET /task: list results of all tasks in the session as JSON."""
        progress = bool(int(self.get_argument("progress", "0")))
        oscar_api = await self._get_oscar_task_api(session_id)
        res = await oscar_api.get_task_results(progress=progress)
        self.write(json.dumps({"tasks": [_json_serial_task_result(r) for r in res]}))
    @web_api(
        "(?P<task_id>[^/]+)", method="get", arg_filter={"action": "fetch_tileables"}
    )
    async def get_fetch_tileables(self, session_id: str, task_id: str):
        """GET /task/<id>?action=fetch_tileables: binary-serialized tileables."""
        oscar_api = await self._get_oscar_task_api(session_id)
        res = await oscar_api.get_fetch_tileables(task_id)
        self.write(serialize_serializable(res))
    @web_api("(?P<task_id>[^/]+)", method="get")
    async def get_task_result(self, session_id: str, task_id: str):
        """GET /task/<id>: the task's result as JSON."""
        oscar_api = await self._get_oscar_task_api(session_id)
        res = await oscar_api.get_task_result(task_id)
        self.write(json.dumps(_json_serial_task_result(res)))
    @web_api(
        "(?P<task_id>[^/]+)/tileable_graph",
        method="get",
        arg_filter={"action": "get_tileable_graph_as_json"},
    )
    async def get_tileable_graph_as_json(self, session_id: str, task_id: str):
        """GET /task/<id>/tileable_graph: JSON rendering of the tileable graph."""
        oscar_api = await self._get_oscar_task_api(session_id)
        res = await oscar_api.get_tileable_graph_as_json(task_id)
        self.write(json.dumps(res))
    @web_api("(?P<task_id>[^/]+)/tileable_detail", method="get")
    async def get_tileable_details(self, session_id: str, task_id: str):
        """GET /task/<id>/tileable_detail: per-tileable detail as JSON."""
        oscar_api = await self._get_oscar_task_api(session_id)
        res = await oscar_api.get_tileable_details(task_id)
        self.write(json.dumps(res))
    @web_api("(?P<task_id>[^/]+)/(?P<tileable_id>[^/]+)/subtask", method="get")
    async def get_tileable_subtasks(
        self, session_id: str, task_id: str, tileable_id: str
    ):
        """GET /task/<id>/<tileable>/subtask: subtask info for one tileable."""
        with_input_output = self.get_argument("with_input_output", "false") == "true"
        oscar_api = await self._get_oscar_task_api(session_id)
        res = await oscar_api.get_tileable_subtasks(
            task_id, tileable_id, with_input_output
        )
        self.write(json.dumps(res))
    @web_api("(?P<task_id>[^/]+)", method="get", arg_filter={"action": "progress"})
    async def get_task_progress(self, session_id: str, task_id: str):
        """GET /task/<id>?action=progress: progress as a plain-text float."""
        oscar_api = await self._get_oscar_task_api(session_id)
        res = await oscar_api.get_task_progress(task_id)
        self.write(str(res))
    @web_api("", method="get", arg_filter={"action": "last_idle_time"})
    async def get_last_idle_time(self, session_id: str):
        """GET /task?action=last_idle_time: empty body means 'not idle yet'."""
        oscar_api = await self._get_oscar_task_api(session_id)
        res = await oscar_api.get_last_idle_time()
        if res:
            self.write(str(res))
    @web_api("(?P<task_id>[^/]+)", method="get", arg_filter={"action": "wait"})
    async def wait_task(self, session_id: str, task_id: str):
        """GET /task/<id>?action=wait[&timeout=..]: block until the task ends.

        With a timeout, an empty JSON object ({}) is returned when the wait
        times out; the client treats that as "not finished yet".
        """
        timeout = self.get_argument("timeout", None) or None
        timeout = float(timeout) if timeout is not None else None
        oscar_api = await self._get_oscar_task_api(session_id)
        if timeout:
            try:
                # shield() keeps the underlying wait alive even if wait_for
                # cancels on timeout, so a later wait call can still resume it.
                res = await asyncio.wait_for(
                    asyncio.shield(oscar_api.wait_task(task_id, timeout)),
                    timeout=timeout,
                )
                self.write(json.dumps(_json_serial_task_result(res)))
            except asyncio.TimeoutError:
                self.write(json.dumps({}))
        else:
            res = await oscar_api.wait_task(task_id, timeout)
            self.write(json.dumps(_json_serial_task_result(res)))
    @web_api("(?P<task_id>[^/]+)", method="delete")
    async def cancel_task(self, session_id: str, task_id: str):
        """DELETE /task/<id>: cancel a running task."""
        oscar_api = await self._get_oscar_task_api(session_id)
        await oscar_api.cancel_task(task_id)
# Route table consumed by the web service: URL pattern -> handler class.
web_handlers = {TaskWebAPIHandler.get_root_pattern(): TaskWebAPIHandler}
class WebTaskAPI(AbstractTaskAPI, MarsWebAPIClientMixin):
    """Client-side task API that talks to ``TaskWebAPIHandler`` over HTTP.

    Each method mirrors one handler route; request/response encodings
    (JSON vs binary serialization) must stay in sync with the handler.
    """
    def __init__(
        self, session_id: str, address: str, request_rewriter: Callable = None
    ):
        self._session_id = session_id
        # Normalize the base address so f-string paths don't get '//'.
        self._address = address.rstrip("/")
        self.request_rewriter = request_rewriter
    async def get_task_results(self, progress: bool = False) -> List[TaskResult]:
        """Fetch results of all tasks in the session."""
        path = f"{self._address}/api/session/{self._session_id}/task"
        params = {"progress": int(progress)}
        res = await self._request_url("GET", path, params=params)
        return [
            _json_deserial_task_result(d)
            for d in json.loads(res.body.decode())["tasks"]
        ]
    async def submit_tileable_graph(
        self,
        graph: TileableGraph,
        task_name: str = None,
        fuse_enabled: bool = True,
        extra_config: dict = None,
    ) -> str:
        """Submit a tileable graph for execution; returns the new task id."""
        path = f"{self._address}/api/session/{self._session_id}/task"
        # extra_config is serialized separately and nested inside the body;
        # the handler unwraps it with a second deserialize call.
        extra_config_ser = (
            serialize_serializable(extra_config) if extra_config else None
        )
        body = serialize_serializable(
            {
                "task_name": task_name if task_name else "",
                "fuse": fuse_enabled,
                "graph": graph,
                "extra_config": extra_config_ser,
            }
        )
        res = await self._request_url(
            path=path,
            method="POST",
            headers={"Content-Type": "application/octet-stream"},
            data=body,
        )
        return res.body.decode().strip()
    async def get_fetch_tileables(self, task_id: str) -> List[Tileable]:
        """Fetch the binary-serialized result tileables of a task."""
        path = (
            f"{self._address}/api/session/{self._session_id}/task/{task_id}"
            f"?action=fetch_tileables"
        )
        res = await self._request_url("GET", path)
        return deserialize_serializable(res.body)
    async def get_task_result(self, task_id: str) -> TaskResult:
        """Fetch the result of a single task."""
        path = f"{self._address}/api/session/{self._session_id}/task/{task_id}"
        res = await self._request_url("GET", path)
        return _json_deserial_task_result(json.loads(res.body.decode()))
    async def get_task_progress(self, task_id: str) -> float:
        """Fetch task progress as a float (server sends it as plain text)."""
        path = f"{self._address}/api/session/{self._session_id}/task/{task_id}"
        params = dict(action="progress")
        res = await self._request_url("GET", path, params=params)
        return float(res.body.decode())
    async def get_last_idle_time(self) -> Union[float, None]:
        """Fetch the last idle timestamp; an empty body maps to None."""
        path = f"{self._address}/api/session/{self._session_id}/task"
        params = dict(action="last_idle_time")
        res = await self._request_url("GET", path, params=params)
        content = res.body.decode()
        return float(content) if content else None
    async def wait_task(self, task_id: str, timeout: float = None):
        """Block until the task finishes (or *timeout* seconds elapse).

        On server-side timeout the handler replies with '{}', which
        deserializes to None here.
        """
        path = f"{self._address}/api/session/{self._session_id}/task/{task_id}"
        # increase client timeout to handle network overhead during entire request
        # NOTE(review): client_timeout=0 presumably means "no HTTP timeout"
        # when timeout is None -- confirm against MarsWebAPIClientMixin.
        client_timeout = timeout + 3 if timeout else 0
        params = {"action": "wait", "timeout": "" if timeout is None else str(timeout)}
        res = await self._request_url(
            "GET", path, params=params, request_timeout=client_timeout
        )
        return _json_deserial_task_result(json.loads(res.body.decode()))
    async def cancel_task(self, task_id: str):
        """Request cancellation of a running task."""
        path = f"{self._address}/api/session/{self._session_id}/task/{task_id}"
        await self._request_url(path=path, method="DELETE")
    async def get_tileable_graph_as_json(self, task_id: str):
        """Fetch the tileable graph of a task as a JSON-compatible dict."""
        path = f"{self._address}/api/session/{self._session_id}/task/{task_id}/tileable_graph"
        params = dict(action="get_tileable_graph_as_json")
        res = await self._request_url(path=path, params=params, method="GET")
        return json.loads(res.body.decode())
    async def get_tileable_details(self, task_id: str):
        """Fetch per-tileable detail information for a task."""
        path = f"{self._address}/api/session/{self._session_id}/task/{task_id}/tileable_detail"
        res = await self._request_url(path=path, method="GET")
        return json.loads(res.body.decode())
    async def get_tileable_subtasks(
        self, task_id: str, tileable_id: str, with_input_output: bool
    ):
        """Fetch subtask information for one tileable of a task."""
        # Query params are strings; encode the flag as 'true'/'false'.
        with_input_output = "true" if with_input_output else "false"
        path = f"{self._address}/api/session/{self._session_id}/task/{task_id}/{tileable_id}/subtask"
        params = {
            # NOTE(review): the server handler does not filter on 'action'
            # for this route; this parameter appears vestigial -- confirm.
            "action": "fetch_graph",
            "with_input_output": with_input_output,
        }
        res = await self._request_url(path=path, params=params, method="GET")
        return json.loads(res.body.decode())
| 41.105263 | 101 | 0.654545 |
import asyncio
import base64
import json
from typing import Callable, List, Optional, Union
from ....core import TileableGraph, Tileable
from ....utils import serialize_serializable, deserialize_serializable
from ...web import web_api, MarsServiceWebAPIHandler, MarsWebAPIClientMixin
from ..core import TaskResult, TaskStatus
from .core import AbstractTaskAPI
def _json_serial_task_result(result: Optional[TaskResult]):
if result is None:
return {}
return {
"task_id": result.task_id,
"session_id": result.session_id,
"stage_id": result.stage_id,
"start_time": result.start_time,
"end_time": result.end_time,
"progress": result.progress,
"status": result.status.value,
"error": base64.b64encode(serialize_serializable(result.error)).decode()
if result.error is not None
else None,
"traceback": base64.b64encode(serialize_serializable(result.traceback)).decode()
if result.traceback is not None
else None,
"profiling": result.profiling,
}
def _json_deserial_task_result(d: dict) -> Optional[TaskResult]:
if not d:
return None
if d["error"] is not None:
d["error"] = deserialize_serializable(base64.b64decode(d["error"]))
if d["traceback"] is not None:
d["traceback"] = deserialize_serializable(base64.b64decode(d["traceback"]))
d["status"] = TaskStatus(d["status"])
return TaskResult(**d)
class TaskWebAPIHandler(MarsServiceWebAPIHandler):
_root_pattern = "/api/session/(?P<session_id>[^/]+)/task"
async def _get_oscar_task_api(self, session_id: str):
from .oscar import TaskAPI
return await self._get_api_by_key(TaskAPI, session_id)
@web_api("", method="post")
async def submit_tileable_graph(self, session_id: str):
body_args = (
deserialize_serializable(self.request.body) if self.request.body else None
)
task_name = body_args.get("task_name", None) or None
fuse_enabled = body_args.get("fuse")
graph = body_args["graph"]
extra_config = body_args.get("extra_config", None)
if extra_config:
extra_config = deserialize_serializable(extra_config)
oscar_api = await self._get_oscar_task_api(session_id)
task_id = await oscar_api.submit_tileable_graph(
graph,
task_name=task_name,
fuse_enabled=fuse_enabled,
extra_config=extra_config,
)
self.write(task_id)
@web_api("", method="get")
async def get_task_results(self, session_id: str):
progress = bool(int(self.get_argument("progress", "0")))
oscar_api = await self._get_oscar_task_api(session_id)
res = await oscar_api.get_task_results(progress=progress)
self.write(json.dumps({"tasks": [_json_serial_task_result(r) for r in res]}))
@web_api(
"(?P<task_id>[^/]+)", method="get", arg_filter={"action": "fetch_tileables"}
)
async def get_fetch_tileables(self, session_id: str, task_id: str):
oscar_api = await self._get_oscar_task_api(session_id)
res = await oscar_api.get_fetch_tileables(task_id)
self.write(serialize_serializable(res))
@web_api("(?P<task_id>[^/]+)", method="get")
async def get_task_result(self, session_id: str, task_id: str):
oscar_api = await self._get_oscar_task_api(session_id)
res = await oscar_api.get_task_result(task_id)
self.write(json.dumps(_json_serial_task_result(res)))
@web_api(
"(?P<task_id>[^/]+)/tileable_graph",
method="get",
arg_filter={"action": "get_tileable_graph_as_json"},
)
async def get_tileable_graph_as_json(self, session_id: str, task_id: str):
oscar_api = await self._get_oscar_task_api(session_id)
res = await oscar_api.get_tileable_graph_as_json(task_id)
self.write(json.dumps(res))
@web_api("(?P<task_id>[^/]+)/tileable_detail", method="get")
async def get_tileable_details(self, session_id: str, task_id: str):
oscar_api = await self._get_oscar_task_api(session_id)
res = await oscar_api.get_tileable_details(task_id)
self.write(json.dumps(res))
@web_api("(?P<task_id>[^/]+)/(?P<tileable_id>[^/]+)/subtask", method="get")
async def get_tileable_subtasks(
self, session_id: str, task_id: str, tileable_id: str
):
with_input_output = self.get_argument("with_input_output", "false") == "true"
oscar_api = await self._get_oscar_task_api(session_id)
res = await oscar_api.get_tileable_subtasks(
task_id, tileable_id, with_input_output
)
self.write(json.dumps(res))
@web_api("(?P<task_id>[^/]+)", method="get", arg_filter={"action": "progress"})
async def get_task_progress(self, session_id: str, task_id: str):
oscar_api = await self._get_oscar_task_api(session_id)
res = await oscar_api.get_task_progress(task_id)
self.write(str(res))
@web_api("", method="get", arg_filter={"action": "last_idle_time"})
async def get_last_idle_time(self, session_id: str):
oscar_api = await self._get_oscar_task_api(session_id)
res = await oscar_api.get_last_idle_time()
if res:
self.write(str(res))
@web_api("(?P<task_id>[^/]+)", method="get", arg_filter={"action": "wait"})
async def wait_task(self, session_id: str, task_id: str):
timeout = self.get_argument("timeout", None) or None
timeout = float(timeout) if timeout is not None else None
oscar_api = await self._get_oscar_task_api(session_id)
if timeout:
try:
res = await asyncio.wait_for(
asyncio.shield(oscar_api.wait_task(task_id, timeout)),
timeout=timeout,
)
self.write(json.dumps(_json_serial_task_result(res)))
except asyncio.TimeoutError:
self.write(json.dumps({}))
else:
res = await oscar_api.wait_task(task_id, timeout)
self.write(json.dumps(_json_serial_task_result(res)))
@web_api("(?P<task_id>[^/]+)", method="delete")
async def cancel_task(self, session_id: str, task_id: str):
oscar_api = await self._get_oscar_task_api(session_id)
await oscar_api.cancel_task(task_id)
web_handlers = {TaskWebAPIHandler.get_root_pattern(): TaskWebAPIHandler}
class WebTaskAPI(AbstractTaskAPI, MarsWebAPIClientMixin):
def __init__(
self, session_id: str, address: str, request_rewriter: Callable = None
):
self._session_id = session_id
self._address = address.rstrip("/")
self.request_rewriter = request_rewriter
async def get_task_results(self, progress: bool = False) -> List[TaskResult]:
path = f"{self._address}/api/session/{self._session_id}/task"
params = {"progress": int(progress)}
res = await self._request_url("GET", path, params=params)
return [
_json_deserial_task_result(d)
for d in json.loads(res.body.decode())["tasks"]
]
async def submit_tileable_graph(
self,
graph: TileableGraph,
task_name: str = None,
fuse_enabled: bool = True,
extra_config: dict = None,
) -> str:
path = f"{self._address}/api/session/{self._session_id}/task"
extra_config_ser = (
serialize_serializable(extra_config) if extra_config else None
)
body = serialize_serializable(
{
"task_name": task_name if task_name else "",
"fuse": fuse_enabled,
"graph": graph,
"extra_config": extra_config_ser,
}
)
res = await self._request_url(
path=path,
method="POST",
headers={"Content-Type": "application/octet-stream"},
data=body,
)
return res.body.decode().strip()
async def get_fetch_tileables(self, task_id: str) -> List[Tileable]:
path = (
f"{self._address}/api/session/{self._session_id}/task/{task_id}"
f"?action=fetch_tileables"
)
res = await self._request_url("GET", path)
return deserialize_serializable(res.body)
async def get_task_result(self, task_id: str) -> TaskResult:
path = f"{self._address}/api/session/{self._session_id}/task/{task_id}"
res = await self._request_url("GET", path)
return _json_deserial_task_result(json.loads(res.body.decode()))
async def get_task_progress(self, task_id: str) -> float:
path = f"{self._address}/api/session/{self._session_id}/task/{task_id}"
params = dict(action="progress")
res = await self._request_url("GET", path, params=params)
return float(res.body.decode())
async def get_last_idle_time(self) -> Union[float, None]:
path = f"{self._address}/api/session/{self._session_id}/task"
params = dict(action="last_idle_time")
res = await self._request_url("GET", path, params=params)
content = res.body.decode()
return float(content) if content else None
async def wait_task(self, task_id: str, timeout: float = None):
path = f"{self._address}/api/session/{self._session_id}/task/{task_id}"
client_timeout = timeout + 3 if timeout else 0
params = {"action": "wait", "timeout": "" if timeout is None else str(timeout)}
res = await self._request_url(
"GET", path, params=params, request_timeout=client_timeout
)
return _json_deserial_task_result(json.loads(res.body.decode()))
async def cancel_task(self, task_id: str):
path = f"{self._address}/api/session/{self._session_id}/task/{task_id}"
await self._request_url(path=path, method="DELETE")
async def get_tileable_graph_as_json(self, task_id: str):
path = f"{self._address}/api/session/{self._session_id}/task/{task_id}/tileable_graph"
params = dict(action="get_tileable_graph_as_json")
res = await self._request_url(path=path, params=params, method="GET")
return json.loads(res.body.decode())
async def get_tileable_details(self, task_id: str):
path = f"{self._address}/api/session/{self._session_id}/task/{task_id}/tileable_detail"
res = await self._request_url(path=path, method="GET")
return json.loads(res.body.decode())
async def get_tileable_subtasks(
self, task_id: str, tileable_id: str, with_input_output: bool
):
with_input_output = "true" if with_input_output else "false"
path = f"{self._address}/api/session/{self._session_id}/task/{task_id}/{tileable_id}/subtask"
params = {
"action": "fetch_graph",
"with_input_output": with_input_output,
}
res = await self._request_url(path=path, params=params, method="GET")
return json.loads(res.body.decode())
| true | true |
f73474c4af496a73b74a83ea08cf1b082fce5527 | 25,920 | py | Python | tensorflow/python/distribute/collective_all_reduce_strategy.py | Arushacked/tensorflow | 9abd61ae0b2d239d3060cdd3d46b54a105159828 | [
"Apache-2.0"
] | 1 | 2020-06-21T07:20:55.000Z | 2020-06-21T07:20:55.000Z | tensorflow/python/distribute/collective_all_reduce_strategy.py | Arushacked/tensorflow | 9abd61ae0b2d239d3060cdd3d46b54a105159828 | [
"Apache-2.0"
] | 1 | 2022-02-10T02:22:42.000Z | 2022-02-10T02:22:42.000Z | tensorflow/python/distribute/collective_all_reduce_strategy.py | Arushacked/tensorflow | 9abd61ae0b2d239d3060cdd3d46b54a105159828 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class CollectiveAllReduceStrategy implementing DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import weakref
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
# TODO(yuefengz): support in-graph replication.
@tf_export("distribute.experimental.MultiWorkerMirroredStrategy", v1=[])
class CollectiveAllReduceStrategy(distribute_lib.Strategy):
"""A distribution strategy for synchronous training on multiple workers.
This strategy implements synchronous distributed training across multiple
workers, each with potentially multiple GPUs. Similar to
`tf.distribute.MirroredStrategy`, it creates copies of all variables in the
model on each device across all workers.
It uses CollectiveOps's implementation of multi-worker all-reduce to
to keep variables in sync. A collective op is a single op in the
TensorFlow graph which can automatically choose an all-reduce algorithm in
the TensorFlow runtime according to hardware, network topology and tensor
sizes.
By default it uses all local GPUs or CPU for single-worker training.
When 'TF_CONFIG' environment variable is set, it parses cluster_spec,
task_type and task_id from 'TF_CONFIG' and turns into a multi-worker strategy
which mirrored models on GPUs of all machines in a cluster. In the current
implementation, it uses all GPUs in a cluster and it assumes all workers have
the same number of GPUs.
You can also pass a `distribute.cluster_resolver.ClusterResolver` instance
when instantiating the strategy. The task_type, task_id etc. will be parsed
from the resolver instance instead of from the `TF_CONFIG` env var.
It supports both eager mode and graph mode. However, for eager mode, it has to
set up the eager context in its constructor and therefore all ops in eager
mode have to run after the strategy object is created.
"""
# TODO(anjalisridhar): Update our guides with examples showing how we can use
# the cluster_resolver argument.
def __init__(
self,
communication=cross_device_ops_lib.CollectiveCommunication.AUTO,
cluster_resolver=None):
"""Creates the strategy.
Args:
communication: optional Enum of type
`distribute.experimental.CollectiveCommunication`. This provides a way
for the user to override the choice of collective op communication.
Possible values include `AUTO`, `RING`, and `NCCL`.
cluster_resolver: optional `distribute.cluster_resolver.ClusterResolver`
object. The default ClusterResolver that is used is the
TFConfigClusterResolver which is instantiated from the TF_CONFIG env
var.
"""
# TODO(b/150151677): consider move communication to CollectiveHints.
super(CollectiveAllReduceStrategy, self).__init__(
CollectiveAllReduceExtended(
self,
communication=communication,
cluster_resolver=cluster_resolver))
distribute_lib.distribution_strategy_gauge.get_cell("V2").set(
"MultiWorkerMirroredStrategy")
# pylint: disable=protected-access
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_workers").set(self.extended._num_workers)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_replicas_per_worker").set(self.extended._num_gpus_per_worker)
@classmethod
def _from_local_devices(
cls,
devices,
communication=cross_device_ops_lib.CollectiveCommunication.AUTO):
"""A convenience method to create an object with a list of devices."""
obj = cls(communication)
obj.extended._initialize_local(TFConfigClusterResolver(), devices=devices) # pylint: disable=protected-access
return obj
def scope(self): # pylint: disable=useless-super-delegation
"""Returns a context manager selecting this Strategy as current.
Inside a `with strategy.scope():` code block, this thread
will use a variable creator set by `strategy`, and will
enter its "cross-replica context".
In `MultiWorkerMirroredStrategy`, all variables created inside
`strategy.scope() will be mirrored on all replicas of each worker.
Moreover, it also sets a default device scope so that ops without
specified devices will end up on the correct worker.
Returns:
A context manager to use for creating variables with this strategy.
"""
return super(CollectiveAllReduceStrategy, self).scope()
@tf_export(v1=["distribute.experimental.MultiWorkerMirroredStrategy"]) # pylint: disable=missing-docstring
class CollectiveAllReduceStrategyV1(distribute_lib.StrategyV1):
__doc__ = CollectiveAllReduceStrategy.__doc__
def __init__(
self,
communication=cross_device_ops_lib.CollectiveCommunication.AUTO,
cluster_resolver=None):
"""Initializes the object."""
super(CollectiveAllReduceStrategyV1, self).__init__(
CollectiveAllReduceExtended(
self,
communication=communication,
cluster_resolver=cluster_resolver))
distribute_lib.distribution_strategy_gauge.get_cell("V1").set(
"MultiWorkerMirroredStrategy")
# pylint: disable=protected-access
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_workers").set(self.extended._num_workers)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_gpu_per_worker").set(self.extended._num_gpus_per_worker)
class CollectiveAllReduceExtended(mirrored_strategy.MirroredExtended):
"""Implementation of CollectiveAllReduceStrategy."""
def __init__(self,
container_strategy,
communication,
cluster_resolver):
self._cluster_resolver = cluster_resolver or TFConfigClusterResolver()
distribute_lib.StrategyExtendedV1.__init__(self, container_strategy)
assert isinstance(
communication,
cross_device_ops_lib.CollectiveCommunication)
self._communication = communication
self._initialize_strategy(self._cluster_resolver)
self._cfer_fn_cache = weakref.WeakKeyDictionary()
assert isinstance(self._cross_device_ops,
cross_device_ops_lib.CollectiveAllReduce)
def _initialize_strategy(self, cluster_resolver):
if cluster_resolver.cluster_spec().as_dict():
self._initialize_multi_worker(cluster_resolver)
else:
self._initialize_local(cluster_resolver)
def _initialize_local(self, cluster_resolver, devices=None):
"""Initializes the object for local training."""
self._is_chief = True
self._num_workers = 1
if ops.executing_eagerly_outside_functions():
try:
context.context().configure_collective_ops(
scoped_allocator_enabled_ops=("CollectiveReduce",))
except RuntimeError:
logging.warning("Collective ops is not configured at program startup. "
"Some performance features may not be enabled.")
self._collective_ops_configured = True
# TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in
# some cases.
if isinstance(cluster_resolver, TFConfigClusterResolver):
num_gpus = context.num_gpus()
else:
num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
if devices:
local_devices = devices
else:
if num_gpus:
local_devices = tuple("/device:GPU:%d" % i for i in range(num_gpus))
else:
local_devices = ("/device:CPU:0",)
self._worker_device = device_util.canonicalize("/device:CPU:0")
self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)
self._collective_keys = cross_device_utils.CollectiveKeys()
self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
devices=local_devices,
group_size=len(local_devices),
collective_keys=self._collective_keys,
communication=self._communication)
# CrossDeviceOps for per host tensors.
self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
devices=[self._worker_device],
group_size=self._num_workers,
collective_keys=self._collective_keys,
communication=cross_device_ops_lib.CollectiveCommunication.RING,
)
super(CollectiveAllReduceExtended, self)._initialize_single_worker(
local_devices)
self._cluster_spec = None
self._task_type = None
self._task_id = None
# This is a mark to tell whether we are running with standalone client or
# independent worker. Right now with standalone client, strategy object is
# created as local strategy and then turn into multi-worker strategy via
# configure call.
self._local_or_standalone_client_mode = True
# Save the num_gpus_per_worker and rpc_layer for configure method.
self._num_gpus_per_worker = num_gpus
self._rpc_layer = cluster_resolver.rpc_layer
self._warn_nccl_no_gpu()
logging.info("Single-worker MultiWorkerMirroredStrategy with local_devices "
"= %r, communication = %s", local_devices, self._communication)
def _initialize_multi_worker(self, cluster_resolver):
"""Initializes the object for multi-worker training."""
cluster_spec = multi_worker_util.normalize_cluster_spec(
cluster_resolver.cluster_spec())
task_type = cluster_resolver.task_type
task_id = cluster_resolver.task_id
if task_type is None or task_id is None:
raise ValueError("When `cluster_spec` is given, you must also specify "
"`task_type` and `task_id`.")
self._cluster_spec = cluster_spec
self._task_type = task_type
self._task_id = task_id
self._num_workers = multi_worker_util.worker_count(cluster_spec, task_type)
if not self._num_workers:
raise ValueError("No `worker`, `chief` or `evaluator` tasks can be found "
"in `cluster_spec`.")
self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,
task_id)
self._worker_device = "/job:%s/task:%d" % (task_type, task_id)
self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)
if (ops.executing_eagerly_outside_functions() and
not getattr(self, "_local_or_standalone_client_mode", False)):
context.context().configure_collective_ops(
collective_leader=multi_worker_util.collective_leader(
cluster_spec, task_type, task_id),
scoped_allocator_enabled_ops=("CollectiveReduce",),
device_filters=("/job:%s/task:%d" % (task_type, task_id),))
self._collective_ops_configured = True
# Starting a std server in eager mode and in independent worker mode.
if (context.executing_eagerly() and
not getattr(self, "_std_server_started", False) and
not getattr(self, "_local_or_standalone_client_mode", False)):
# Checking _local_or_standalone_client_mode as well because we should not
# create the std server in standalone client mode.
config_proto = config_pb2.ConfigProto()
config_proto = self._update_config_proto(config_proto)
if hasattr(cluster_resolver, "port"):
port = cluster_resolver.port
else:
port = 0
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_spec.as_cluster_def(),
default_session_config=config_proto,
job_name=task_type,
task_index=task_id,
protocol=cluster_resolver.rpc_layer or "grpc",
port=port)
context.context().enable_collective_ops(server_def)
self._std_server_started = True
# The `ensure_initialized` is needed before calling
# `context.context().devices()`.
context.context().ensure_initialized()
logging.info(
"Enabled multi-worker collective ops with available devices: %r",
context.context().devices())
# TODO(yuefengz): The `num_gpus` is only for this particular task. It
# assumes all workers have the same number of GPUs. We should remove this
# assumption by querying all tasks for their numbers of GPUs.
# TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in
# some cases.
if isinstance(cluster_resolver, TFConfigClusterResolver):
num_gpus = context.num_gpus()
else:
num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
if num_gpus:
local_devices = tuple("%s/device:GPU:%d" % (self._worker_device, i)
for i in range(num_gpus))
else:
local_devices = (self._worker_device,)
self._collective_keys = cross_device_utils.CollectiveKeys()
self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
devices=local_devices,
group_size=len(local_devices) * self._num_workers,
collective_keys=self._collective_keys,
communication=self._communication)
# CrossDeviceOps for per host tensors.
self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
devices=[self._worker_device],
group_size=self._num_workers,
collective_keys=self._collective_keys,
communication=cross_device_ops_lib.CollectiveCommunication.RING,
)
super(CollectiveAllReduceExtended, self)._initialize_single_worker(
local_devices)
host_device = device_util.get_host_for_device(self._worker_device)
self._input_workers = input_lib.InputWorkers(
[(host_device, self.worker_devices)])
# Add a default device so that ops without specified devices will not end up
# on other workers.
self._default_device = "/job:%s/task:%d" % (task_type, task_id)
# Save the num_gpus_per_worker and rpc_layer for configure method.
self._num_gpus_per_worker = num_gpus
self._rpc_layer = cluster_resolver.rpc_layer
self._warn_nccl_no_gpu()
logging.info(
"MultiWorkerMirroredStrategy with cluster_spec = %r, task_type = %r, "
"task_id = %r, num_workers = %r, local_devices = %r, "
"communication = %s", cluster_spec.as_dict(), task_type,
task_id, self._num_workers, local_devices,
self._communication)
def _get_variable_creator_initial_value(self,
replica_id,
device,
primary_var,
**kwargs):
if replica_id == 0: # First replica on each worker.
assert device is not None
assert primary_var is None
def initial_value_fn(): # pylint: disable=g-missing-docstring
# Only the first device participates in the broadcast of initial values.
group_key = self._collective_keys.get_group_key([device])
group_size = self._num_workers
collective_instance_key = (
self._collective_keys.get_variable_instance_key())
with ops.device(device):
initial_value = kwargs["initial_value"]
if callable(initial_value):
initial_value = initial_value()
assert not callable(initial_value)
initial_value = ops.convert_to_tensor(
initial_value, dtype=kwargs.get("dtype", None))
if self._num_workers > 1:
if self._is_chief:
bcast_send = collective_ops.broadcast_send(
initial_value, initial_value.shape, initial_value.dtype,
group_size, group_key, collective_instance_key)
with ops.control_dependencies([bcast_send]):
return array_ops.identity(initial_value)
else:
return collective_ops.broadcast_recv(initial_value.shape,
initial_value.dtype,
group_size, group_key,
collective_instance_key)
return initial_value
return initial_value_fn
else:
return super(CollectiveAllReduceExtended,
self)._get_variable_creator_initial_value(
replica_id=replica_id,
device=device,
primary_var=primary_var,
**kwargs)
def _make_input_context(self):
if self._cluster_spec is None:
input_pipeline_id = 0
else:
input_pipeline_id = multi_worker_util.id_in_cluster(
self._cluster_spec, self._task_type, self._task_id)
input_context = distribute_lib.InputContext(
num_input_pipelines=self._num_workers,
input_pipeline_id=input_pipeline_id,
num_replicas_in_sync=self._num_replicas_in_sync)
return input_context
def _experimental_distribute_dataset(self, dataset, options):
input_context = self._make_input_context()
return input_lib.get_distributed_dataset(
dataset,
self._input_workers,
self._container_strategy(),
split_batch_by=self._num_replicas_in_sync,
input_context=input_context)
def _experimental_distribute_datasets_from_function(self, dataset_fn,
options):
input_context = self._make_input_context()
return input_lib.get_distributed_datasets_from_function(
dataset_fn=dataset_fn,
input_workers=self._input_workers,
input_contexts=[input_context],
strategy=self._container_strategy())
def _make_dataset_iterator(self, dataset):
"""Distributes the dataset to each local GPU."""
input_context = self._make_input_context()
return input_lib.DatasetIterator(
dataset,
self._input_workers,
self._container_strategy(),
split_batch_by=self._num_replicas_in_sync,
input_context=input_context)
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
"""Distributes the input function to each local GPU."""
input_context = self._make_input_context()
return input_lib.InputFunctionIterator(input_fn, self._input_workers,
[input_context],
self._container_strategy())
def _configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
"""Configures the object.
Args:
session_config: a `tf.compat.v1.ConfigProto`
cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the
cluster configurations.
task_type: the current task type, such as "worker".
task_id: the current task id.
Raises:
ValueError: if `task_type` is not in the `cluster_spec`.
"""
if cluster_spec:
# Use the num_gpus_per_worker recorded in constructor since _configure
# doesn't take num_gpus.
cluster_resolver = SimpleClusterResolver(
cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),
task_type=task_type,
task_id=task_id,
num_accelerators={"GPU": self._num_gpus_per_worker},
rpc_layer=self._rpc_layer)
self._initialize_multi_worker(cluster_resolver)
assert isinstance(self._cross_device_ops,
cross_device_ops_lib.CollectiveAllReduce)
if session_config:
session_config.CopyFrom(self._update_config_proto(session_config))
def _update_config_proto(self, config_proto):
updated_config = copy.deepcopy(config_proto)
# Enable the scoped allocator optimization for CollectiveOps. This
# optimization converts many small all-reduces into fewer larger
# all-reduces.
rewrite_options = updated_config.graph_options.rewrite_options
rewrite_options.scoped_allocator_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
# We turn on ScopedAllocator only for CollectiveReduce op, i.e. enable_op =
# ["CollectiveReduce"]. Since we can't assign to a repeated proto field, we
# clear and then append.
del rewrite_options.scoped_allocator_opts.enable_op[:]
rewrite_options.scoped_allocator_opts.enable_op.append("CollectiveReduce")
if (not ops.executing_eagerly_outside_functions() and
self._communication ==
cross_device_ops_lib.CollectiveCommunication.NCCL):
updated_config.experimental.collective_nccl = True
if not self._cluster_spec:
return updated_config
assert self._task_type
assert self._task_id is not None
# Collective group leader is needed for collective ops to coordinate
# workers.
updated_config.experimental.collective_group_leader = (
multi_worker_util.collective_leader(self._cluster_spec, self._task_type,
self._task_id))
# The device filters prevent communication between workers.
del updated_config.device_filters[:]
updated_config.device_filters.append(
"/job:%s/task:%d" % (self._task_type, self._task_id))
return updated_config
def _get_cross_device_ops(self, value):
# CollectiveAllReduce works on a predefined set of devices. In most cases
# they should be the compute devices, but certain use cases may reduce host
# tensors as well (e.g. early stopping). We infer the cross_device_ops to
# use based on the number of devices, since inputs don't always have device
# annotations. The compute devices one is preferred since we can potentially
# leverage NCCL.
if isinstance(value, values.DistributedValues):
num_devices = len(value._values) # pylint: disable=protected-access
else:
num_devices = 1
if num_devices == len(self.worker_devices):
return self._cross_device_ops
else:
return self._host_cross_device_ops
def _reduce_to(self, reduce_op, value, destinations, experimental_hints):
if (isinstance(value, values.Mirrored) and
reduce_op == reduce_util.ReduceOp.MEAN):
return value
assert not isinstance(value, values.Mirrored)
if (isinstance(value, values.DistributedValues) and
len(self.worker_devices) == 1):
value = value.values[0]
# When there are multiple workers, we need to reduce across workers using
# collective ops.
if (not isinstance(value, values.DistributedValues) and
self._num_workers == 1):
# This function handles reducing values that are not PerReplica or
# Mirrored values. For example, the same value could be present on all
# replicas in which case `value` would be a single value or value could
# be 0.
return cross_device_ops_lib.reduce_non_distributed_value(
reduce_op, value, destinations, len(self.worker_devices))
return self._get_cross_device_ops(value).reduce(
reduce_op,
value,
destinations=destinations,
experimental_hints=experimental_hints)
def _warn_nccl_no_gpu(self):
if ((self._communication ==
cross_device_ops_lib.CollectiveCommunication.NCCL) and
self._num_gpus_per_worker == 0):
logging.warning("Enabled NCCL communication but no GPUs detected/"
"specified.")
def _in_multi_worker_mode(self):
"""Whether this strategy indicates working in multi-worker settings."""
return self._num_workers > 1
@property
def experimental_between_graph(self):
return True
@property
def experimental_should_init(self):
return True
@property
def should_checkpoint(self):
return self._is_chief
@property
def should_save_summary(self):
return self._is_chief
@property
def _num_replicas_in_sync(self):
return len(self.worker_devices) * self._num_workers
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""`make_dataset_iterator` and `make_numpy_iterator` use global batch size.
`make_input_fn_iterator` assumes per-replica batching.
Returns:
Boolean.
"""
return True
| 41.73913 | 114 | 0.7125 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import weakref
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
@tf_export("distribute.experimental.MultiWorkerMirroredStrategy", v1=[])
class CollectiveAllReduceStrategy(distribute_lib.Strategy):
def __init__(
self,
communication=cross_device_ops_lib.CollectiveCommunication.AUTO,
cluster_resolver=None):
super(CollectiveAllReduceStrategy, self).__init__(
CollectiveAllReduceExtended(
self,
communication=communication,
cluster_resolver=cluster_resolver))
distribute_lib.distribution_strategy_gauge.get_cell("V2").set(
"MultiWorkerMirroredStrategy")
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_workers").set(self.extended._num_workers)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_replicas_per_worker").set(self.extended._num_gpus_per_worker)
@classmethod
def _from_local_devices(
cls,
devices,
communication=cross_device_ops_lib.CollectiveCommunication.AUTO):
obj = cls(communication)
obj.extended._initialize_local(TFConfigClusterResolver(), devices=devices)
return obj
def scope(self):
return super(CollectiveAllReduceStrategy, self).scope()
@tf_export(v1=["distribute.experimental.MultiWorkerMirroredStrategy"])
class CollectiveAllReduceStrategyV1(distribute_lib.StrategyV1):
__doc__ = CollectiveAllReduceStrategy.__doc__
def __init__(
self,
communication=cross_device_ops_lib.CollectiveCommunication.AUTO,
cluster_resolver=None):
super(CollectiveAllReduceStrategyV1, self).__init__(
CollectiveAllReduceExtended(
self,
communication=communication,
cluster_resolver=cluster_resolver))
distribute_lib.distribution_strategy_gauge.get_cell("V1").set(
"MultiWorkerMirroredStrategy")
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_workers").set(self.extended._num_workers)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_gpu_per_worker").set(self.extended._num_gpus_per_worker)
class CollectiveAllReduceExtended(mirrored_strategy.MirroredExtended):
  """Implementation of MultiWorkerMirroredStrategy on top of MirroredExtended.

  Runs mirrored replicas on each worker's local devices and reduces across
  workers with collective ops (CollectiveAllReduce).
  """

  def __init__(self,
               container_strategy,
               communication,
               cluster_resolver):
    # Fall back to TF_CONFIG-based cluster resolution when none is supplied.
    self._cluster_resolver = cluster_resolver or TFConfigClusterResolver()
    distribute_lib.StrategyExtendedV1.__init__(self, container_strategy)
    assert isinstance(
        communication,
        cross_device_ops_lib.CollectiveCommunication)
    self._communication = communication
    self._initialize_strategy(self._cluster_resolver)
    self._cfer_fn_cache = weakref.WeakKeyDictionary()
    # _initialize_strategy must have installed a collective cross-device ops.
    assert isinstance(self._cross_device_ops,
                      cross_device_ops_lib.CollectiveAllReduce)

  def _initialize_strategy(self, cluster_resolver):
    """Dispatch to multi-worker or local initialization from the cluster spec."""
    # A non-empty cluster spec means this process is part of a multi-worker job.
    if cluster_resolver.cluster_spec().as_dict():
      self._initialize_multi_worker(cluster_resolver)
    else:
      self._initialize_local(cluster_resolver)

  def _initialize_local(self, cluster_resolver, devices=None):
    """Initialize as a single worker on local GPUs (or the CPU if no GPU)."""
    self._is_chief = True
    self._num_workers = 1
    if ops.executing_eagerly_outside_functions():
      try:
        context.context().configure_collective_ops(
            scoped_allocator_enabled_ops=("CollectiveReduce",))
      except RuntimeError:
        # Collective ops were already in use before this point; continue
        # without reconfiguring them.
        logging.warning("Collective ops is not configured at program startup. "
                        "Some performance features may not be enabled.")
      self._collective_ops_configured = True
    # TFConfigClusterResolver cannot report local accelerators; query the
    # eager context directly in that case.
    if isinstance(cluster_resolver, TFConfigClusterResolver):
      num_gpus = context.num_gpus()
    else:
      num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
    if devices:
      local_devices = devices
    else:
      if num_gpus:
        local_devices = tuple("/device:GPU:%d" % i for i in range(num_gpus))
      else:
        local_devices = ("/device:CPU:0",)
    self._worker_device = device_util.canonicalize("/device:CPU:0")
    self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)
    self._collective_keys = cross_device_utils.CollectiveKeys()
    self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
        devices=local_devices,
        group_size=len(local_devices),
        collective_keys=self._collective_keys,
        communication=self._communication)
    # Reductions of host (CPU) values always use RING communication.
    self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
        devices=[self._worker_device],
        group_size=self._num_workers,
        collective_keys=self._collective_keys,
        communication=cross_device_ops_lib.CollectiveCommunication.RING,
    )
    super(CollectiveAllReduceExtended, self)._initialize_single_worker(
        local_devices)
    self._cluster_spec = None
    self._task_type = None
    self._task_id = None
    self._local_or_standalone_client_mode = True
    self._num_gpus_per_worker = num_gpus
    self._rpc_layer = cluster_resolver.rpc_layer
    self._warn_nccl_no_gpu()
    logging.info("Single-worker MultiWorkerMirroredStrategy with local_devices "
                 "= %r, communication = %s", local_devices, self._communication)

  def _initialize_multi_worker(self, cluster_resolver):
    """Initialize as one task of a multi-worker cluster.

    Configures collective ops, optionally starts an in-process server in
    eager mode, and builds cross-device ops spanning all workers.
    """
    cluster_spec = multi_worker_util.normalize_cluster_spec(
        cluster_resolver.cluster_spec())
    task_type = cluster_resolver.task_type
    task_id = cluster_resolver.task_id
    if task_type is None or task_id is None:
      raise ValueError("When `cluster_spec` is given, you must also specify "
                       "`task_type` and `task_id`.")
    self._cluster_spec = cluster_spec
    self._task_type = task_type
    self._task_id = task_id
    self._num_workers = multi_worker_util.worker_count(cluster_spec, task_type)
    if not self._num_workers:
      raise ValueError("No `worker`, `chief` or `evaluator` tasks can be found "
                       "in `cluster_spec`.")
    self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,
                                                task_id)
    self._worker_device = "/job:%s/task:%d" % (task_type, task_id)
    self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)
    # Configure collective ops unless running in (local or standalone) client
    # mode, where a remote cluster handles them.
    if (ops.executing_eagerly_outside_functions() and
        not getattr(self, "_local_or_standalone_client_mode", False)):
      context.context().configure_collective_ops(
          collective_leader=multi_worker_util.collective_leader(
              cluster_spec, task_type, task_id),
          scoped_allocator_enabled_ops=("CollectiveReduce",),
          device_filters=("/job:%s/task:%d" % (task_type, task_id),))
      self._collective_ops_configured = True
    # In eager mode, start an in-process std server exactly once so this task
    # can participate in collective ops.
    if (context.executing_eagerly() and
        not getattr(self, "_std_server_started", False) and
        not getattr(self, "_local_or_standalone_client_mode", False)):
      config_proto = config_pb2.ConfigProto()
      config_proto = self._update_config_proto(config_proto)

      if hasattr(cluster_resolver, "port"):
        port = cluster_resolver.port
      else:
        port = 0
      server_def = tensorflow_server_pb2.ServerDef(
          cluster=cluster_spec.as_cluster_def(),
          default_session_config=config_proto,
          job_name=task_type,
          task_index=task_id,
          protocol=cluster_resolver.rpc_layer or "grpc",
          port=port)
      context.context().enable_collective_ops(server_def)
      self._std_server_started = True
      # The `ensure_initialized` is needed before the first device listing.
      context.context().ensure_initialized()
      logging.info(
          "Enabled multi-worker collective ops with available devices: %r",
          context.context().devices())
    # TFConfigClusterResolver cannot report local accelerators; query the
    # eager context directly in that case.
    if isinstance(cluster_resolver, TFConfigClusterResolver):
      num_gpus = context.num_gpus()
    else:
      num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
    if num_gpus:
      local_devices = tuple("%s/device:GPU:%d" % (self._worker_device, i)
                            for i in range(num_gpus))
    else:
      local_devices = (self._worker_device,)

    self._collective_keys = cross_device_utils.CollectiveKeys()
    # Group size spans every device on every worker.
    self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
        devices=local_devices,
        group_size=len(local_devices) * self._num_workers,
        collective_keys=self._collective_keys,
        communication=self._communication)
    # Reductions of host (CPU) values always use RING communication.
    self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
        devices=[self._worker_device],
        group_size=self._num_workers,
        collective_keys=self._collective_keys,
        communication=cross_device_ops_lib.CollectiveCommunication.RING,
    )
    super(CollectiveAllReduceExtended, self)._initialize_single_worker(
        local_devices)
    host_device = device_util.get_host_for_device(self._worker_device)
    self._input_workers = input_lib.InputWorkers(
        [(host_device, self.worker_devices)])
    # Pin ops without an explicit device to this task's device.
    self._default_device = "/job:%s/task:%d" % (task_type, task_id)
    self._num_gpus_per_worker = num_gpus
    self._rpc_layer = cluster_resolver.rpc_layer
    self._warn_nccl_no_gpu()
    logging.info(
        "MultiWorkerMirroredStrategy with cluster_spec = %r, task_type = %r, "
        "task_id = %r, num_workers = %r, local_devices = %r, "
        "communication = %s", cluster_spec.as_dict(), task_type,
        task_id, self._num_workers, local_devices,
        self._communication)

  def _get_variable_creator_initial_value(self,
                                          replica_id,
                                          device,
                                          primary_var,
                                          **kwargs):
    """For replica 0, return an initializer that broadcasts from the chief.

    Other replicas fall back to the MirroredExtended behavior (copy from the
    primary variable).
    """
    if replica_id == 0:  # First replica on each worker.
      assert device is not None
      assert primary_var is None

      def initial_value_fn():  # pylint: disable=g-missing-docstring
        # Only the first device participates in the broadcast group, which
        # spans all workers.
        group_key = self._collective_keys.get_group_key([device])
        group_size = self._num_workers
        collective_instance_key = (
            self._collective_keys.get_variable_instance_key())
        with ops.device(device):
          initial_value = kwargs["initial_value"]
          if callable(initial_value):
            initial_value = initial_value()
          assert not callable(initial_value)
          initial_value = ops.convert_to_tensor(
              initial_value, dtype=kwargs.get("dtype", None))

          if self._num_workers > 1:
            if self._is_chief:
              # The chief sends its value; the control dependency ties the
              # send into the graph so it is not pruned.
              bcast_send = collective_ops.broadcast_send(
                  initial_value, initial_value.shape, initial_value.dtype,
                  group_size, group_key, collective_instance_key)
              with ops.control_dependencies([bcast_send]):
                return array_ops.identity(initial_value)
            else:
              return collective_ops.broadcast_recv(initial_value.shape,
                                                   initial_value.dtype,
                                                   group_size, group_key,
                                                   collective_instance_key)
          return initial_value

      return initial_value_fn
    else:
      return super(CollectiveAllReduceExtended,
                   self)._get_variable_creator_initial_value(
                       replica_id=replica_id,
                       device=device,
                       primary_var=primary_var,
                       **kwargs)

  def _make_input_context(self):
    """Build an InputContext identifying this worker's input pipeline shard."""
    if self._cluster_spec is None:
      input_pipeline_id = 0
    else:
      input_pipeline_id = multi_worker_util.id_in_cluster(
          self._cluster_spec, self._task_type, self._task_id)
    input_context = distribute_lib.InputContext(
        num_input_pipelines=self._num_workers,
        input_pipeline_id=input_pipeline_id,
        num_replicas_in_sync=self._num_replicas_in_sync)
    return input_context

  def _experimental_distribute_dataset(self, dataset, options):
    """Shard and distribute `dataset` across this strategy's input workers."""
    input_context = self._make_input_context()
    return input_lib.get_distributed_dataset(
        dataset,
        self._input_workers,
        self._container_strategy(),
        split_batch_by=self._num_replicas_in_sync,
        input_context=input_context)

  def _experimental_distribute_datasets_from_function(self, dataset_fn,
                                                      options):
    """Distribute datasets produced per-worker by `dataset_fn`."""
    input_context = self._make_input_context()
    return input_lib.get_distributed_datasets_from_function(
        dataset_fn=dataset_fn,
        input_workers=self._input_workers,
        input_contexts=[input_context],
        strategy=self._container_strategy())

  def _make_dataset_iterator(self, dataset):
    """Create a (legacy) DatasetIterator for `dataset` on this worker."""
    input_context = self._make_input_context()
    return input_lib.DatasetIterator(
        dataset,
        self._input_workers,
        self._container_strategy(),
        split_batch_by=self._num_replicas_in_sync,
        input_context=input_context)

  def _make_input_fn_iterator(
      self,
      input_fn,
      replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
    """Create a (legacy) InputFunctionIterator for `input_fn` on this worker."""
    input_context = self._make_input_context()
    return input_lib.InputFunctionIterator(input_fn, self._input_workers,
                                           [input_context],
                                           self._container_strategy())

  def _configure(self,
                 session_config=None,
                 cluster_spec=None,
                 task_type=None,
                 task_id=None):
    """Re-configure the strategy with a cluster spec and/or session config."""
    if cluster_spec:
      # Re-initialize for the supplied cluster via a SimpleClusterResolver.
      cluster_resolver = SimpleClusterResolver(
          cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),
          task_type=task_type,
          task_id=task_id,
          num_accelerators={"GPU": self._num_gpus_per_worker},
          rpc_layer=self._rpc_layer)
      self._initialize_multi_worker(cluster_resolver)
      assert isinstance(self._cross_device_ops,
                        cross_device_ops_lib.CollectiveAllReduce)

    if session_config:
      session_config.CopyFrom(self._update_config_proto(session_config))

  def _update_config_proto(self, config_proto):
    """Return a copy of `config_proto` amended for collective execution."""
    updated_config = copy.deepcopy(config_proto)
    # Enable the scoped allocator optimization for CollectiveOps. This
    # optimization converts many small all-reduces into fewer larger
    # all-reduces.
    rewrite_options = updated_config.graph_options.rewrite_options
    rewrite_options.scoped_allocator_optimization = (
        rewriter_config_pb2.RewriterConfig.ON)
    # We turn on ScopedAllocator only for CollectiveReduce op, i.e. enable_op =
    # ["CollectiveReduce"]. Since we can't assign to a repeated proto field, we
    # clear the existing entries first and then append the op name.
    del rewrite_options.scoped_allocator_opts.enable_op[:]
    rewrite_options.scoped_allocator_opts.enable_op.append("CollectiveReduce")

    if (not ops.executing_eagerly_outside_functions() and
        self._communication ==
        cross_device_ops_lib.CollectiveCommunication.NCCL):
      updated_config.experimental.collective_nccl = True

    if not self._cluster_spec:
      return updated_config

    assert self._task_type
    assert self._task_id is not None

    # Collective group leader is needed for collective ops to coordinate
    # workers.
    updated_config.experimental.collective_group_leader = (
        multi_worker_util.collective_leader(self._cluster_spec, self._task_type,
                                            self._task_id))

    # The device filters prevent communication between workers.
    del updated_config.device_filters[:]
    updated_config.device_filters.append(
        "/job:%s/task:%d" % (self._task_type, self._task_id))

    return updated_config

  def _get_cross_device_ops(self, value):
    # Pick between the compute-device and the host (CPU) collective all-reduce
    # based on how many component values `value` carries. The compute devices
    # one is preferred since we can potentially
    # leverage NCCL.
    if isinstance(value, values.DistributedValues):
      num_devices = len(value._values)  # pylint: disable=protected-access
    else:
      num_devices = 1
    if num_devices == len(self.worker_devices):
      return self._cross_device_ops
    else:
      return self._host_cross_device_ops

  def _reduce_to(self, reduce_op, value, destinations, experimental_hints):
    """Reduce `value` to `destinations`, across workers when needed."""
    # A mirrored MEAN is already identical everywhere; nothing to do.
    if (isinstance(value, values.Mirrored) and
        reduce_op == reduce_util.ReduceOp.MEAN):
      return value
    assert not isinstance(value, values.Mirrored)

    if (isinstance(value, values.DistributedValues) and
        len(self.worker_devices) == 1):
      value = value.values[0]

    # When there are multiple workers, we need to reduce across workers using
    # collective ops.
    if (not isinstance(value, values.DistributedValues) and
        self._num_workers == 1):
      # This function handles reducing values that are not PerReplica or
      # Mirrored values. For example, the same value could be present on all
      # replicas in which case `value` would be a single value or value could
      # be 0.
      return cross_device_ops_lib.reduce_non_distributed_value(
          reduce_op, value, destinations, len(self.worker_devices))
    return self._get_cross_device_ops(value).reduce(
        reduce_op,
        value,
        destinations=destinations,
        experimental_hints=experimental_hints)

  def _warn_nccl_no_gpu(self):
    """Warn when NCCL communication was requested without any GPUs."""
    if ((self._communication ==
         cross_device_ops_lib.CollectiveCommunication.NCCL) and
        self._num_gpus_per_worker == 0):
      logging.warning("Enabled NCCL communication but no GPUs detected/"
                      "specified.")

  def _in_multi_worker_mode(self):
    """Whether this strategy indicates working in multi-worker settings."""
    return self._num_workers > 1

  @property
  def experimental_between_graph(self):
    return True

  @property
  def experimental_should_init(self):
    return True

  @property
  def should_checkpoint(self):
    # Only the chief task writes checkpoints.
    return self._is_chief

  @property
  def should_save_summary(self):
    # Only the chief task writes summaries.
    return self._is_chief

  @property
  def _num_replicas_in_sync(self):
    return len(self.worker_devices) * self._num_workers

  # TODO(priyag): Delete this once all strategies use global batch size.
  @property
  def _global_batch_size(self):
    return True
| true | true |
f7347571cc0f50b736185a28baf83d913aa696aa | 4,065 | py | Python | tests/unit/clients/python/test_on_err.py | hirakjyoti08/jina | 437943dd2dab87e22b0662b2081f13250918ec01 | [
"Apache-2.0"
] | 1 | 2022-03-04T01:53:51.000Z | 2022-03-04T01:53:51.000Z | tests/unit/clients/python/test_on_err.py | hirakjyoti08/jina | 437943dd2dab87e22b0662b2081f13250918ec01 | [
"Apache-2.0"
] | 1 | 2022-03-08T18:46:28.000Z | 2022-03-08T18:47:24.000Z | tests/unit/clients/python/test_on_err.py | hirakjyoti08/jina | 437943dd2dab87e22b0662b2081f13250918ec01 | [
"Apache-2.0"
] | 1 | 2022-03-17T04:50:07.000Z | 2022-03-17T04:50:07.000Z | from typing import Optional
import aiohttp
import grpc
from jina.excepts import BadClientCallback
from jina import Flow, Client
import numpy as np
import pytest
from docarray import DocumentArray
from docarray.document.generators import from_ndarray
def validate(x):
    """Callback that always raises, used to force a client-side callback error."""
    raise NotImplementedError
@pytest.mark.skip(
    reason='something wrong with parametrize in the following, setting either False or True work, but combining them does not. see discussion in https://jinaai.slack.com/archives/C018F60RBL5/p1613984424012700?thread_ts=1613954151.005100&cid=C018F60RBL5'
)
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_client_on_error(protocol):
    """After a failing on_done callback aborts a request, the gateway must
    still accept a subsequent request on the same Flow."""
    # In this particular test, when you write two tests in a row, you are testing the following case:
    #
    # You are testing exception in client's callback, not error in client's request generator
    # 1. The exception breaks the `async for req in stub.Call(req_iter)` on the client
    # 2. Server probably has something hold in the stream
    # 3. Restart the client, keep server untouched.
    # 4. Now, server stucks (because it considers the last connection wasn't end yet)
    def validate(x):
        raise NotImplementedError

    with Flow(protocol=protocol).add() as f:
        t = 0
        try:
            f.index(
                from_ndarray(np.random.random([5, 4])),
                on_done=validate,
                continue_on_error=False,
            )
        except BadClientCallback:
            # bad client callback will break the `async for req in stub.Call(req_iter)`
            t = 1
        # now query the gateway again, make sure gateway's channel is still usable
        f.index(
            from_ndarray(np.random.random([5, 4])),
            on_done=validate,
            continue_on_error=True,
        )
        assert t == 1
@pytest.mark.parametrize(
    'protocol,exception',
    [
        ('websocket', aiohttp.ClientError),
        ('grpc', grpc.aio._call.AioRpcError),
        ('http', aiohttp.ClientError),
    ],
)
def test_client_on_error_call(protocol, exception):
    """Posting to an unreachable server raises the protocol-specific error."""
    with pytest.raises(exception):
        # Port 12345 has no server listening, so the call must fail.
        Client(host='0.0.0.0', protocol=protocol, port=12345).post(
            '/blah',
            inputs=DocumentArray.empty(10),
        )
@pytest.mark.parametrize(
    'protocol,exception',
    [
        ('websocket', aiohttp.client_exceptions.ClientConnectorError),
        ('grpc', grpc.aio._call.AioRpcError),
        ('http', aiohttp.client_exceptions.ClientConnectorError),
    ],
)
def test_client_on_error_raise_exception(protocol, exception):
    """on_error callbacks accepting an exception parameter receive the
    protocol-specific connection error instead of the call raising."""
    class OnError:
        def __init__(self):
            self.is_called = False

        def __call__(self, response, exception_param: Optional[Exception] = None):
            self.is_called = True
            # The raised connection error is forwarded into the callback.
            assert type(exception_param) == exception

    on_error = OnError()
    # Port 12345 has no server listening, which triggers on_error.
    Client(host='0.0.0.0', protocol=protocol, port=12345).post(
        '/blah',
        inputs=DocumentArray.empty(10),
        on_error=on_error,
    )

    assert on_error.is_called
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_client_on_error_deprecation(protocol):
    """Single-argument (deprecated) on_error callbacks are still invoked."""
    class OnError:
        def __init__(self):
            self.is_called = False

        def __call__(self, response):  # this is deprecated
            self.is_called = True

    on_error = OnError()
    # Port 12345 has no server listening, which triggers on_error.
    Client(host='0.0.0.0', protocol=protocol, port=12345).post(
        '/blah',
        inputs=DocumentArray.empty(10),
        on_error=on_error,
    )

    assert on_error.is_called
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_client_on_always_after_exception(protocol):
    """on_always callbacks run even when the request fails with an error."""
    class OnAlways:
        def __init__(self):
            self.is_called = False

        def __call__(self, response):
            self.is_called = True

    on_always = OnAlways()
    # Port 12345 has no server listening; the failure must not skip on_always.
    Client(host='0.0.0.0', protocol=protocol, port=12345).post(
        '/blah',
        inputs=DocumentArray.empty(10),
        on_always=on_always,
    )

    assert on_always.is_called
| 29.671533 | 253 | 0.652399 | from typing import Optional
import aiohttp
import grpc
from jina.excepts import BadClientCallback
from jina import Flow, Client
import numpy as np
import pytest
from docarray import DocumentArray
from docarray.document.generators import from_ndarray
def validate(x):
    """Callback that always raises, used to force a client-side callback error."""
    raise NotImplementedError
@pytest.mark.skip(
    reason='something wrong with parametrize in the following, setting either False or True work, but combining them does not. see discussion in https://jinaai.slack.com/archives/C018F60RBL5/p1613984424012700?thread_ts=1613954151.005100&cid=C018F60RBL5'
)
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_client_on_error(protocol):
    """After a failing on_done callback aborts a request, the gateway must
    still accept a subsequent request on the same Flow."""
    def validate(x):
        raise NotImplementedError

    with Flow(protocol=protocol).add() as f:
        t = 0
        try:
            f.index(
                from_ndarray(np.random.random([5, 4])),
                on_done=validate,
                continue_on_error=False,
            )
        except BadClientCallback:
            # bad client callback will break the `async for req in stub.Call(req_iter)`
            t = 1
        # now query the gateway again, make sure gateway's channel is still usable
        f.index(
            from_ndarray(np.random.random([5, 4])),
            on_done=validate,
            continue_on_error=True,
        )
        assert t == 1
@pytest.mark.parametrize(
    'protocol,exception',
    [
        ('websocket', aiohttp.ClientError),
        ('grpc', grpc.aio._call.AioRpcError),
        ('http', aiohttp.ClientError),
    ],
)
def test_client_on_error_call(protocol, exception):
    """Posting to an unreachable server raises the protocol-specific error."""
    with pytest.raises(exception):
        # Port 12345 has no server listening, so the call must fail.
        Client(host='0.0.0.0', protocol=protocol, port=12345).post(
            '/blah',
            inputs=DocumentArray.empty(10),
        )
@pytest.mark.parametrize(
    'protocol,exception',
    [
        ('websocket', aiohttp.client_exceptions.ClientConnectorError),
        ('grpc', grpc.aio._call.AioRpcError),
        ('http', aiohttp.client_exceptions.ClientConnectorError),
    ],
)
def test_client_on_error_raise_exception(protocol, exception):
    """on_error callbacks accepting an exception parameter receive the
    protocol-specific connection error instead of the call raising."""
    class OnError:
        def __init__(self):
            self.is_called = False

        def __call__(self, response, exception_param: Optional[Exception] = None):
            self.is_called = True
            # The raised connection error is forwarded into the callback.
            assert type(exception_param) == exception

    on_error = OnError()
    # Port 12345 has no server listening, which triggers on_error.
    Client(host='0.0.0.0', protocol=protocol, port=12345).post(
        '/blah',
        inputs=DocumentArray.empty(10),
        on_error=on_error,
    )

    assert on_error.is_called
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_client_on_error_deprecation(protocol):
    """Single-argument (deprecated) on_error callbacks are still invoked."""
    class OnError:
        def __init__(self):
            self.is_called = False

        def __call__(self, response):
            # NOTE: the single-argument signature is the deprecated form.
            self.is_called = True

    on_error = OnError()
    # Port 12345 has no server listening, which triggers on_error.
    Client(host='0.0.0.0', protocol=protocol, port=12345).post(
        '/blah',
        inputs=DocumentArray.empty(10),
        on_error=on_error,
    )

    assert on_error.is_called
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_client_on_always_after_exception(protocol):
    """on_always callbacks run even when the request fails with an error."""
    class OnAlways:
        def __init__(self):
            self.is_called = False

        def __call__(self, response):
            self.is_called = True

    on_always = OnAlways()
    # Port 12345 has no server listening; the failure must not skip on_always.
    Client(host='0.0.0.0', protocol=protocol, port=12345).post(
        '/blah',
        inputs=DocumentArray.empty(10),
        on_always=on_always,
    )

    assert on_always.is_called
| true | true |
f73475ffe265e1e6f8dd75b1a5da562fa80f07ef | 42,589 | py | Python | testing/python3/tests/test_dcgm_diag.py | deepio/DCGM | d10273f18fb3d425da752ab6bb7e07af3d18caec | [
"Apache-2.0"
] | 85 | 2021-02-03T19:58:50.000Z | 2022-03-21T08:00:11.000Z | testing/python3/tests/test_dcgm_diag.py | deepio/DCGM | d10273f18fb3d425da752ab6bb7e07af3d18caec | [
"Apache-2.0"
] | 19 | 2021-03-19T08:13:58.000Z | 2022-03-17T02:50:41.000Z | testing/python3/tests/test_dcgm_diag.py | deepio/DCGM | d10273f18fb3d425da752ab6bb7e07af3d18caec | [
"Apache-2.0"
] | 17 | 2021-02-04T06:47:30.000Z | 2022-03-21T22:14:03.000Z | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_structs
import dcgm_agent_internal
import dcgm_agent
import logger
import test_utils
import dcgm_fields
import dcgm_internal_helpers
import option_parser
import DcgmDiag
import dcgm_errors
import threading
import time
import sys
import os
import signal
import utils
import json
from ctypes import *
from apps.app_runner import AppRunner
from apps.dcgmi_app import DcgmiApp
from dcgm_internal_helpers import inject_value
# Most injection tests use SmStress plugin, which also sleeps for 3 seconds
# Offset (in seconds) applied to injected field values so they land while the
# plugin is actually running.
injection_offset = 3
def injection_wrapper(handle, gpuId, fieldId, value, isInt):
    """Inject `value` for `fieldId` on `gpuId` at offsets 0, 5 and 10 seconds.

    Intended to be run on a thread alongside a diag so the injected value is
    visible throughout the (short) test run.

    :param handle: DCGM handle used for the injections.
    :param gpuId: id of the GPU to inject into.
    :param fieldId: DCGM field id to inject.
    :param value: the value to insert.
    :param isInt: True to inject as int64, False to inject as fp64.
    """
    # Sleep 1 second so that the insertion happens after the test run begins while not prolonging things
    time.sleep(1)
    # Select the injection helper matching the field's storage type, then
    # inject the same value at several offsets (deduplicates the previous six
    # copy-pasted calls; behavior is identical).
    if isInt:
        inject = dcgm_internal_helpers.inject_field_value_i64
    else:
        inject = dcgm_internal_helpers.inject_field_value_fp64
    for offset in (0, 5, 10):
        ret = inject(handle, gpuId, fieldId, value, offset)
        assert ret == dcgm_structs.DCGM_ST_OK
def check_diag_result_fail(response, gpuIndex, testIndex):
    """Return True iff the given per-GPU test result is an explicit FAIL."""
    testResult = response.perGpuResponses[gpuIndex].results[testIndex]
    return testResult.result == dcgm_structs.DCGM_DIAG_RESULT_FAIL
def check_diag_result_pass(response, gpuIndex, testIndex):
    """Return True iff the given per-GPU test result is an explicit PASS."""
    testResult = response.perGpuResponses[gpuIndex].results[testIndex]
    return testResult.result == dcgm_structs.DCGM_DIAG_RESULT_PASS
def diag_result_assert_fail(response, gpuIndex, testIndex, msg, errorCode):
    """Assert the test did not pass and, for v6 responses, that it reported
    the expected error code.

    Only rules out PASS (rather than requiring FAIL) so that skipped or
    not-run tests are tolerated.
    """
    testResult = response.perGpuResponses[gpuIndex].results[testIndex]
    assert testResult.result != dcgm_structs.DCGM_DIAG_RESULT_PASS, msg
    if response.version != dcgm_structs.dcgmDiagResponse_version6:
        return
    codeMsg = "Failing test expected error code %d, but found %d" % \
              (errorCode, testResult.error.code)
    assert testResult.error.code == errorCode, codeMsg
def diag_result_assert_pass(response, gpuIndex, testIndex, msg):
    """Assert the test did not fail and, for v6 responses, that its error
    code is zero.

    Only rules out FAIL (rather than requiring PASS) so that skipped or
    not-run tests are tolerated.
    """
    testResult = response.perGpuResponses[gpuIndex].results[testIndex]
    assert testResult.result != dcgm_structs.DCGM_DIAG_RESULT_FAIL, msg
    if response.version != dcgm_structs.dcgmDiagResponse_version6:
        return
    codeMsg = "Passing test somehow has a non-zero error code!"
    assert testResult.error.code == 0, codeMsg
def helper_test_dcgm_diag_dbe_insertion(handle, gpuIds):
    """Inject a volatile double-bit ECC error and verify the diagnostic
    plugin reports a failure for it on fake GPUs."""
    dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr='diagnostic', paramsStr='diagnostic.test_duration=8')
    dd.UseFakeGpus()

    # Insert a DBE at offsets 5 and 15 seconds so it is seen during the run.
    ret = dcgm_internal_helpers.inject_field_value_i64(handle, gpuIds[0],
                                                       dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL, 1, 5)
    assert ret == dcgm_structs.DCGM_ST_OK, "Could not insert an error to test forced failure"
    ret = dcgm_internal_helpers.inject_field_value_i64(handle, gpuIds[0],
                                                       dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL, 1, 15)
    assert ret == dcgm_structs.DCGM_ST_OK, "Could not insert an error to test forced failure"

    response = test_utils.diag_execute_wrapper(dd, handle)
    errorStr = "Expected results for %d GPUs, but found %d" % (len(gpuIds), response.gpuCount)
    assert response.gpuCount == len(gpuIds), errorStr
    diag_result_assert_fail(response, gpuIds[0], dcgm_structs.DCGM_DIAGNOSTIC_INDEX,
                            "Expected the diagnostic test to fail because we injected a DBE", dcgm_errors.DCGM_FR_FIELD_VIOLATION)
def helper_check_diag_empty_group(handle, gpuIds):
    """Running a diag on an empty group must raise DCGM_ST_GROUP_IS_EMPTY;
    the same group must then work once a GPU is added."""
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetEmptyGroup("test1")
    runDiagInfo = dcgm_structs.c_dcgmRunDiag_t()
    runDiagInfo.version = dcgm_structs.dcgmRunDiag_version
    runDiagInfo.groupId = groupObj.GetId()
    runDiagInfo.validate = 1

    with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_GROUP_IS_EMPTY)):
        response = test_utils.action_validate_wrapper(runDiagInfo, handle)

    # Now make sure everything works well with a group
    groupObj.AddGpu(gpuIds[0])
    response = test_utils.action_validate_wrapper(runDiagInfo, handle)
    assert response, "Should have received a response now that we have a non-empty group"
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_helper_embedded_check_diag_empty_group(handle, gpuIds):
    """Empty-group diag behavior against the embedded host engine."""
    helper_check_diag_empty_group(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_helper_standalone_check_diag_empty_group(handle, gpuIds):
    """Empty-group diag behavior against a standalone host engine."""
    helper_check_diag_empty_group(handle, gpuIds)
def diag_assert_error_found(response, gpuId, testIndex, errorStr):
    """Assert that errorStr appears in the warning text of a test that
    actually ran (skipped / not-run tests are ignored)."""
    testResult = response.perGpuResponses[gpuId].results[testIndex]
    ignoredStates = (dcgm_structs.DCGM_DIAG_RESULT_SKIP,
                     dcgm_structs.DCGM_DIAG_RESULT_NOT_RUN)
    if testResult.result in ignoredStates:
        return
    warningFound = testResult.error.msg
    assert warningFound.find(errorStr) != -1, "Expected to find '%s' as a warning, but found '%s'" % (errorStr, warningFound)
def diag_assert_error_not_found(response, gpuId, testIndex, errorStr):
    """Assert that errorStr does NOT appear in the warning text of a test
    that actually ran (skipped / not-run tests are ignored)."""
    testResult = response.perGpuResponses[gpuId].results[testIndex]
    ignoredStates = (dcgm_structs.DCGM_DIAG_RESULT_SKIP,
                     dcgm_structs.DCGM_DIAG_RESULT_NOT_RUN)
    if testResult.result in ignoredStates:
        return
    warningFound = testResult.error.msg
    assert warningFound.find(errorStr) == -1, "Expected not to find '%s' as a warning, but found it: '%s'" % (errorStr, warningFound)
def helper_check_diag_thermal_violation(handle, gpuIds):
    """Inject a huge thermal-violation counter while the diag runs and
    verify the diagnostic does NOT report a thermal-violation warning."""
    dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr='diagnostic', paramsStr='diagnostic.test_duration=10')

    # kick off a thread to inject the failing value while I run the diag
    diag_thread = threading.Thread(target=injection_wrapper,
                                   args =[handle, gpuIds[0], dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION,
                                          9223372036854775792, True])
    diag_thread.start()
    response = test_utils.diag_execute_wrapper(dd, handle)
    diag_thread.join()

    assert response.gpuCount == len(gpuIds), "Expected %d gpus, but found %d reported" % (len(gpuIds), response.gpuCount)

    for gpuIndex in range(response.gpuCount):
        diag_assert_error_not_found(response, gpuIndex, dcgm_structs.DCGM_DIAGNOSTIC_INDEX, "Thermal violations")
def helper_check_diag_high_temp_fail(handle, gpuIds):
    """Inject a 120C GPU temperature while the diag runs and verify the
    diagnostic fails with a temperature violation."""
    dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr='diagnostic', paramsStr='diagnostic.test_duration=10')

    # kick off a thread to inject the failing value while I run the diag
    diag_thread = threading.Thread(target=injection_wrapper,
                                   args =[handle, gpuIds[0], dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 120, True])
    diag_thread.start()
    response = test_utils.diag_execute_wrapper(dd, handle)
    diag_thread.join()

    assert response.gpuCount == len(gpuIds), "Expected %d gpus, but found %d reported" % (len(gpuIds), response.gpuCount)
    diag_result_assert_fail(response, gpuIds[0], dcgm_structs.DCGM_DIAGNOSTIC_INDEX, "Expected a failure due to 120 degree inserted temp.", dcgm_errors.DCGM_FR_TEMP_VIOLATION)
def helper_check_dcgm_run_diag_backwards_compatibility(handle, gpuId):
    """
    Verifies that the dcgmActionValidate_v2 API supports older versions of the dcgmRunDiag struct
    by using the old structs to run a short validation test.
    """

    def test_dcgm_run_diag(drd, version):
        # Run a short validation on a single GPU using the given struct/version.
        drd.validate = 1  # run a short test
        drd.gpuList = str(gpuId)
        # This will throw an exception on error
        response = test_utils.action_validate_wrapper(drd, handle, version)

    # Test version 7 of the dcgmRunDiag struct.
    drd = dcgm_structs.c_dcgmRunDiag_v7()
    test_dcgm_run_diag(drd, dcgm_structs.dcgmRunDiag_version7)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_run_diag_backwards_compatibility_embedded(handle, gpuIds):
    """RunDiag struct backwards compatibility against the embedded engine."""
    helper_check_dcgm_run_diag_backwards_compatibility(handle, gpuIds[0])
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_run_diag_backwards_compatibility_standalone(handle, gpuIds):
    """RunDiag struct backwards compatibility against a standalone engine."""
    helper_check_dcgm_run_diag_backwards_compatibility(handle, gpuIds[0])
# Cache of gpuId -> bool: True once the GPU has passed the SM Stress sanity
# run, False if it failed (so later tests can skip it without re-running).
checked_gpus = {} # Used to track that a GPU has been verified as passing
# Makes sure a very basic diagnostic passes and returns a DcgmDiag object
def helper_verify_diag_passing(handle, gpuIds, testNames="SM Stress", testIndex=dcgm_structs.DCGM_SM_STRESS_INDEX, params="sm stress.test_duration=15", version=dcgm_structs.dcgmRunDiag_version, useFakeGpus=False):
    """Run (once per GPU, cached in checked_gpus) a basic diag and skip the
    calling test if any of the GPUs does not pass; returns the DcgmDiag."""
    dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr=testNames, paramsStr=params, version=version)
    dd.SetThrottleMask(0) # We explicitly want to fail for throttle reasons since this test inserts throttling errors
                          # for verification
    if useFakeGpus:
        dd.UseFakeGpus()

    # If we've already checked this GPU, then use the previous result
    runDiag = False
    for gpuId in gpuIds:
        if gpuId in checked_gpus:
            if checked_gpus[gpuId] == False:
                test_utils.skip_test("Skipping because GPU %s does not pass SM Perf test. "
                                     "Please verify whether the GPU is supported and healthy." % gpuId)
        else:
            runDiag = True

    if runDiag == False:
        return dd

    response = test_utils.diag_execute_wrapper(dd, handle)
    for gpuId in gpuIds:
        # Cache the pass/fail outcome so subsequent callers can reuse it.
        if not check_diag_result_pass(response, gpuId, testIndex):
            checked_gpus[gpuId] = False
            test_utils.skip_test("Skipping because GPU %s does not pass SM Perf test. "
                                 "Please verify whether the GPU is supported and healthy." % gpuId)
        else:
            checked_gpus[gpuId] = True

    return dd
def find_throttle_failure(response, gpuId, pluginIndex):
    """Return (True, message) if the given plugin result mentions clock
    throttling, otherwise (False, error-or-empty-string)."""
    if response.perGpuResponses[gpuId].results[pluginIndex].result != dcgm_structs.DCGM_DIAG_RESULT_PASS:
        error = response.perGpuResponses[gpuId].results[pluginIndex].error.msg
        if error.find('clock throttling') != -1:
            # NOTE(review): both format arguments resolve to the same error.msg,
            # so the message is duplicated as "msg (msg)" — presumably one of
            # them was meant to be a different field; confirm before changing.
            return True, "%s (%s)" % (error, response.perGpuResponses[gpuId].results[pluginIndex].error.msg)
        else:
            return False, error

    return False, ""
#####
# Helper method for inserting errors and performing the diag
def perform_diag_with_throttle_mask_and_verify(dd, handle, gpuId, inserted_error, throttle_mask, shouldPass, failureMsg):
    """Inject a throttle reason (and a bogus temperature), run the diag, and
    assert presence/absence of a throttling failure per shouldPass.

    NOTE(review): `failureMsg` is currently unused; the assertions build
    their own messages.
    """
    fieldId = dcgm_fields.DCGM_FI_DEV_CLOCK_THROTTLE_REASONS
    interval = 0.1

    if throttle_mask is not None:
        dd.SetThrottleMask(throttle_mask)

    inject_value(handle, gpuId, fieldId, inserted_error, injection_offset, True)
    inject_value(handle, gpuId, dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 1000, injection_offset, True)

    # Verify that the inserted values are visible in DCGM before starting the diag
    assert dcgm_internal_helpers.verify_field_value(gpuId, fieldId, inserted_error, checkInterval=interval, maxWait=5, numMatches=1), \
        "Expected inserted values to be visible in DCGM"

    # Start the diag
    response = test_utils.diag_execute_wrapper(dd, handle)

    # Check for pass or failure as per the shouldPass parameter
    throttled, errMsg = find_throttle_failure(response, gpuId, dcgm_structs.DCGM_SM_STRESS_INDEX)
    if shouldPass:
        assert throttled == False, "Expected to not have a throttling error but found %s" % errMsg
    else:
        assert throttled == True, "Expected to find a throttling error but did not (%s)" % errMsg
def helper_test_throttle_mask_fail_hw_slowdown(handle, gpuId):
    """
    With an empty throttle mask (0), an injected HW_SLOWDOWN throttling reason
    must cause the diagnostic to fail.
    """
    # Baseline: the GPU must pass the diag before anything is injected.
    diag = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
    # Now inject the throttling reason and expect a failure.
    perform_diag_with_throttle_mask_and_verify(
        diag, handle, gpuId,
        inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN,
        throttle_mask=0,
        shouldPass=False,
        failureMsg="Expected test to fail because of throttling")
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_fail_hw_slowdown(handle, gpuIds):
    # Standalone-hostengine entry point; the shared helper does the real work.
    helper_test_throttle_mask_fail_hw_slowdown(handle, gpuIds[0])
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_run_injection(handle, gpuIds):
    # NOTE(review): runs the exact same helper as
    # test_dcgm_diag_throttle_mask_fail_hw_slowdown above -- presumably kept as
    # a smoke test for the injection path; confirm it is not an accidental duplicate.
    helper_test_throttle_mask_fail_hw_slowdown(handle, gpuIds[0])
def helper_test_throttle_mask_ignore_hw_slowdown(handle, gpuId):
    """Masking HW_SLOWDOWN (as an integer bitmask) must make the diag ignore an
    injected HW_SLOWDOWN throttling reason."""
    dd = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
    # Insert throttling error and set throttle mask to ignore it (as integer value)
    perform_diag_with_throttle_mask_and_verify(
        dd, handle, gpuId, inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN,
        throttle_mask=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN, shouldPass=True,
        # Typo fix: "interger" -> "integer" in the failure message.
        failureMsg="Expected test to pass because throttle mask (integer bitmask) ignores the throttle reason"
    )
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_ignore_hw_slowdown(handle, gpuIds):
    # Entry point: integer throttle mask ignores the injected reason.
    helper_test_throttle_mask_ignore_hw_slowdown(handle, gpuIds[0])
def helper_test_throttle_mask_ignore_hw_slowdown_string(handle, gpuId):
    """Masking HW_SLOWDOWN by its string name must make the diag ignore the
    injected HW_SLOWDOWN throttling reason."""
    diag = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
    # Same scenario as the integer-mask variant, but the mask is a named reason.
    perform_diag_with_throttle_mask_and_verify(
        diag, handle, gpuId,
        inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN,
        throttle_mask="HW_SLOWDOWN",
        shouldPass=True,
        failureMsg="Expected test to pass because throttle mask (named reason) ignores the throttle reason")
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_ignore_hw_slowdown_string(handle, gpuIds):
    # Entry point: string-named throttle mask ignores the injected reason.
    helper_test_throttle_mask_ignore_hw_slowdown_string(handle, gpuIds[0])
def helper_test_throttle_mask_fail_double_inject_ignore_one(handle, gpuId):
    """Inject two throttle reasons but mask only one (integer mask): the
    unmasked SW_THERMAL reason must still fail the diag."""
    dd = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
    # Insert two throttling errors and set throttle mask to ignore only one (as integer)
    perform_diag_with_throttle_mask_and_verify(
        dd, handle, gpuId,
        inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN | dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_SW_THERMAL,
        throttle_mask=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN, shouldPass=False,
        # Typo fix: "interger" -> "integer" in the failure message.
        failureMsg="Expected test to fail because throttle mask (integer bitmask) ignores one of the throttle reasons"
    )
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_fail_double_inject_ignore_one(handle, gpuIds):
    # Entry point: two injected reasons, only one masked (integer) -> fail.
    helper_test_throttle_mask_fail_double_inject_ignore_one(handle, gpuIds[0])
def helper_test_throttle_mask_fail_double_inject_ignore_one_string(handle, gpuId):
    """Inject two throttle reasons but mask only one by name: the unmasked
    SW_THERMAL reason must still fail the diag."""
    diag = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
    # Two reasons injected, only HW_SLOWDOWN masked (by string name).
    bothReasons = dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN | dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_SW_THERMAL
    perform_diag_with_throttle_mask_and_verify(
        diag, handle, gpuId,
        inserted_error=bothReasons,
        throttle_mask="HW_SLOWDOWN",
        shouldPass=False,
        failureMsg="Expected test to fail because throttle mask (named reason) ignores one of the throttle reasons")
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_fail_double_inject_ignore_one_string(handle, gpuIds):
    # Entry point: two injected reasons, only one masked (named) -> fail.
    helper_test_throttle_mask_fail_double_inject_ignore_one_string(handle, gpuIds[0])
def helper_test_throttle_mask_fail_ignore_different_throttle(handle, gpuId):
    """Mask a different reason (HW_POWER_BRAKE, integer mask): the injected
    HW_SLOWDOWN reason must still fail the diag."""
    dd = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
    # Insert throttling error and set throttle mask to ignore a different reason (as integer value)
    perform_diag_with_throttle_mask_and_verify(
        dd, handle, gpuId, inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN,
        throttle_mask=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_POWER_BRAKE, shouldPass=False,
        # Typo fix: "interger" -> "integer" in the failure message.
        failureMsg="Expected test to fail because throttle mask (integer bitmask) ignores different throttle reason"
    )
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_fail_ignore_different_throttle(handle, gpuIds):
    # Entry point: masking an unrelated reason (integer) must not hide the failure.
    helper_test_throttle_mask_fail_ignore_different_throttle(handle, gpuIds[0])
def helper_test_throttle_mask_fail_ignore_different_throttle_string(handle, gpuId):
    """Mask a different reason by name (HW_POWER_BRAKE): the injected
    HW_SLOWDOWN reason must still fail the diag."""
    diag = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
    # The mask names a reason that is not the injected one, so the diag must fail.
    perform_diag_with_throttle_mask_and_verify(
        diag, handle, gpuId,
        inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN,
        throttle_mask="HW_POWER_BRAKE",
        shouldPass=False,
        failureMsg="Expected test to fail because throttle mask (named reason) ignores different throttle reason")
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_fail_ignore_different_throttle_string(handle, gpuIds):
    # Entry point: masking an unrelated reason (named) must not hide the failure.
    helper_test_throttle_mask_fail_ignore_different_throttle_string(handle, gpuIds[0])
def helper_test_throttle_mask_pass_no_throttle(handle, gpuId):
    """With no throttling injected and the mask cleared, the diag must pass."""
    diag = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
    # Explicitly clear any previously configured throttle mask.
    diag.SetThrottleMask("")
    perform_diag_with_throttle_mask_and_verify(
        diag, handle, gpuId,
        inserted_error=0,
        throttle_mask=None,
        shouldPass=True,
        failureMsg="Expected test to pass because there is no throttling")
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_pass_no_throttle(handle, gpuIds):
    # Entry point: no injected throttling -> diag must pass.
    helper_test_throttle_mask_pass_no_throttle(handle, gpuIds[0])
def helper_check_diag_stop_on_interrupt_signals(handle, gpuId):
    """
    Verifies that a launched diag is stopped when the dcgmi executable receives a SIGINT, SIGHUP, SIGQUIT, or SIGTERM
    signal.
    """
    # First check whether the GPU is healthy/supported
    dd = DcgmDiag.DcgmDiag(gpuIds=[gpuId], testNamesStr="SM Stress", paramsStr="sm stress.test_duration=2",
                           version=dcgm_structs.dcgmRunDiag_version7)
    response = test_utils.diag_execute_wrapper(dd, handle)
    if not check_diag_result_pass(response, gpuId, dcgm_structs.DCGM_SM_STRESS_INDEX):
        test_utils.skip_test("Skipping because GPU %s does not pass SM Stress test. "
                             "Please verify whether the GPU is supported and healthy." % gpuId)

    # paths to dcgmi executable
    paths = {
        "Linux_32bit": "./apps/x86/dcgmi",
        "Linux_64bit": "./apps/amd64/dcgmi",
        "Linux_ppc64le": "./apps/ppc64le/dcgmi",
        "Linux_aarch64": "./apps/aarch64/dcgmi"
    }
    # Verify test is running on a supported platform
    if utils.platform_identifier not in paths:
        test_utils.skip_test("Dcgmi is not supported on the current platform.")
    dcgmi_path = paths[utils.platform_identifier]

    def verify_exit_code_on_signal(signum):
        # Launch a dcgmi diag, send it *signum*, and assert it exits with the
        # conventional 128 + signum exit code.
        # Ensure that host engine is ready to launch a new diagnostic
        dd = DcgmDiag.DcgmDiag(gpuIds=[gpuId], testNamesStr='1')
        success = False
        start = time.time()
        while not success and (time.time() - start) <= 3:
            try:
                response = test_utils.diag_execute_wrapper(dd, handle)
                success = True
            except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_DIAG_ALREADY_RUNNING):
                # Only acceptable error due to small race condition between the nvvs process exiting and
                # hostengine actually processing the exit. We try for a maximum of 3 seconds since this
                # should be rare and last only for a short amount of time
                time.sleep(1.5)

        diagApp = AppRunner(dcgmi_path, args=["diag", "-r", "SM Stress", "-i", "%s" % gpuId,
                                              "-d", "INFO", "--debugLogFile", "/tmp/nvvs.log"])
        # Start the diag
        diagApp.start(timeout=40)
        logger.info("Launched dcgmi process with pid: %s" % diagApp.getpid())

        # Ensure diag is running before sending interrupt signal
        running, debug_output = dcgm_internal_helpers.check_nvvs_process(want_running=True, attempts=50)
        assert running, "The nvvs process did not start within 25 seconds: %s" % (debug_output)
        # There is a small race condition here - it is possible that the hostengine sends a SIGTERM before the
        # nvvs process has setup a signal handler, and so the nvvs process does not stop when SIGTERM is sent.
        # We sleep for 1 second to reduce the possibility of this scenario
        time.sleep(1)
        diagApp.signal(signum)
        retCode = diagApp.wait()
        # Check the return code and stdout/stderr output before asserting for better debugging info
        if retCode != (signum + 128):
            logger.error("Got retcode '%s' from launched diag." % retCode)
            if diagApp.stderr_lines or diagApp.stdout_lines:
                logger.info("dcgmi output:")
                for line in diagApp.stdout_lines:
                    logger.info(line)
                for line in diagApp.stderr_lines:
                    logger.error(line)
        assert retCode == (signum + 128)
        # Since the app returns a non zero exit code, we call the validate method to prevent false
        # failures from the test framework
        diagApp.validate()
        # Give the launched nvvs process 15 seconds to terminate.
        not_running, debug_output = dcgm_internal_helpers.check_nvvs_process(want_running=False, attempts=50)
        assert not_running, "The launched nvvs process did not terminate within 25 seconds. pgrep output:\n%s" \
            % debug_output

    # Verify return code on SIGINT
    # We simply verify the return code because explicitly checking whether the nvvs process has terminated is
    # clunky and error-prone
    logger.info("Testing stop on SIGINT")
    verify_exit_code_on_signal(signal.SIGINT)

    # Verify return code on SIGHUP
    logger.info("Testing stop on SIGHUP")
    verify_exit_code_on_signal(signal.SIGHUP)

    # Verify return code on SIGQUIT
    logger.info("Testing stop on SIGQUIT")
    verify_exit_code_on_signal(signal.SIGQUIT)

    # Verify return code on SIGTERM
    logger.info("Testing stop on SIGTERM")
    verify_exit_code_on_signal(signal.SIGTERM)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_diag_stop_on_signal_embedded(handle, gpuIds):
    # Signal-handling test against the embedded host engine; skipped outside
    # developer mode because of a known shutdown race (see comment below).
    if not option_parser.options.developer_mode:
        # This test can run into a race condition when using embedded host engine, which can cause nvvs to
        # take >60 seconds to terminate after receiving a SIGTERM.
        test_utils.skip_test("Skip test for more debugging")
    helper_check_diag_stop_on_interrupt_signals(handle, gpuIds[0])
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_diag_stop_on_signal_standalone(handle, gpuIds):
    # Signal-handling test against a standalone host engine.
    helper_check_diag_stop_on_interrupt_signals(handle, gpuIds[0])
def helper_verify_log_file_creation(handle, gpuIds):
    """Run a targeted-stress diag with a debug log file configured and assert
    that the log file is created (unless the diag was skipped or errored)."""
    dd = helper_verify_diag_passing(handle, gpuIds, testNames="targeted stress", testIndex=dcgm_structs.DCGM_TARGETED_STRESS_INDEX, params="targeted stress.test_duration=10", useFakeGpus=True)
    logname = '/tmp/tmp_test_debug_log'
    dd.SetDebugLogFile(logname)
    dd.SetDebugLevel(5)
    response = test_utils.diag_execute_wrapper(dd, handle)

    # A system error means the diag never really ran; nothing to verify.
    if len(response.systemError.msg) != 0:
        logger.info("The diagnostic had a problem when executing, so we cannot run this test.")
        return

    # Tally per-GPU outcomes, collecting warning text from any failures.
    skippedAll = True
    passedCount = 0
    errors = ""
    for gpuId in gpuIds:
        resultType = response.perGpuResponses[gpuId].results[dcgm_structs.DCGM_TARGETED_STRESS_INDEX].result
        if resultType in (dcgm_structs.DCGM_DIAG_RESULT_SKIP, dcgm_structs.DCGM_DIAG_RESULT_NOT_RUN):
            continue
        skippedAll = False
        if resultType == dcgm_structs.DCGM_DIAG_RESULT_PASS:
            passedCount += 1
        else:
            warning = response.perGpuResponses[gpuId].results[dcgm_structs.DCGM_TARGETED_STRESS_INDEX].error.msg
            if len(warning):
                errors = "%s, GPU %d failed: %s" % (errors, gpuId, warning)

    if skippedAll:
        logger.info("The diagnostic was skipped, so we cannot run this test.")
        return

    detailedMsg = "passed on %d of %d GPUs" % (passedCount, response.gpuCount)
    if len(errors):
        detailedMsg = "%s and had these errors: %s" % (detailedMsg, errors)
    logger.info(detailedMsg)
    # The actual subject of the test: the debug log file must now exist.
    assert os.path.isfile(logname), "Logfile '%s' was not created and %s" % (logname, detailedMsg)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_verify_log_file_creation_standalone(handle, gpuIds):
    # Entry point: debug log file must be created by the diag.
    helper_verify_log_file_creation(handle, gpuIds)
def helper_throttling_masking_failures(handle, gpuId):
    """With the throttle mask cleared (0), injected HW_SLOWDOWN throttling must
    fail the SM Stress test even when benign throttle values were injected first."""
    #####
    # First check whether the GPU is healthy
    dd = DcgmDiag.DcgmDiag(gpuIds=[gpuId], testNamesStr="SM Stress", paramsStr="sm stress.test_duration=2",
                           version=dcgm_structs.dcgmRunDiag_version)
    dd.SetThrottleMask(0) # We explicitly want to fail for throttle reasons since this test inserts throttling errors
                          # for verification
    dd.UseFakeGpus()
    response = test_utils.diag_execute_wrapper(dd, handle)
    if not check_diag_result_pass(response, gpuId, dcgm_structs.DCGM_SM_STRESS_INDEX):
        test_utils.skip_test("Skipping because GPU %s does not pass SM Perf test. "
                             "Please verify whether the GPU is supported and healthy." % gpuId)

    #####
    # Now run a longer diag with throttling injected while it executes.
    dd = DcgmDiag.DcgmDiag(gpuIds=[gpuId], testNamesStr="SM Stress", paramsStr="sm stress.test_duration=15",
                           version=dcgm_structs.dcgmRunDiag_version)
    dd.SetThrottleMask(0)
    dd.UseFakeGpus()

    fieldId = dcgm_fields.DCGM_FI_DEV_CLOCK_THROTTLE_REASONS
    insertedError = dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN
    interval = 0.1

    logger.info("Injecting benign errors")
    # NOTE(review): 3 is presumably a benign throttle-reason bitmask that should
    # not fail the test -- confirm against the DCGM_CLOCKS_THROTTLE_REASON_* values.
    inject_value(handle, gpuId, fieldId, 3, 1, True)
    # Verify that the inserted values are visible in DCGM before starting the diag
    assert dcgm_internal_helpers.verify_field_value(gpuId, fieldId, 3, checkInterval=interval, maxWait=5, numMatches=1), \
        "Expected inserted values to be visible in DCGM"

    logger.info("Injecting actual errors")
    inject_value(handle, gpuId, fieldId, insertedError, injection_offset, True)
    inject_value(handle, gpuId, dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 1000, injection_offset, True)

    logger.info("Started diag")
    response = test_utils.diag_execute_wrapper(dd, handle)
    # Verify that the inserted values are visible in DCGM
    # Max wait of 8 is because of 5 second offset + 2 seconds required for 20 matches + 1 second buffer.
    assert dcgm_internal_helpers.verify_field_value(gpuId, fieldId, insertedError, checkInterval=0.1, numMatches=1, maxWait=8), \
        "Expected inserted errors to be visible in DCGM"

    throttled, errMsg = find_throttle_failure(response, gpuId, dcgm_structs.DCGM_SM_STRESS_INDEX)
    assert throttled, "Expected to find throttling failure, but did not: (%s)" % errMsg
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttling_masking_failures_standalone(handle, gpuIds):
    # Entry point: cleared throttle mask lets injected throttling fail the diag.
    helper_throttling_masking_failures(handle, gpuIds[0])
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_handle_concurrency_standalone(handle, gpuIds):
    '''
    Test that we can use a DCGM handle concurrently with a diagnostic running
    '''
    diagDuration = 10
    gpuId = gpuIds[0]
    dd = DcgmDiag.DcgmDiag(gpuIds=[gpuId], testNamesStr="SM Stress", paramsStr="sm stress.test_duration=%d" % diagDuration,
                           version=dcgm_structs.dcgmRunDiag_version)
    dd.UseFakeGpus()
    response = [None]
    def run(dd, response):
        # Bug fix: the original rebound the local name 'response' instead of
        # storing into the shared list, so the diag result was silently discarded.
        response[0] = test_utils.diag_execute_wrapper(dd, handle)
    diagStartTime = time.time()
    threadObj = threading.Thread(target=run, args=[dd, response])
    threadObj.start()
    # Give threadObj a head start on its 10 second run
    time.sleep(1.0)

    firstReturnedRequestLatency = None
    numConcurrentCompleted = 0
    sleepDuration = 1.0
    while threadObj.is_alive():
        # Make another request on the handle concurrently
        moduleStatuses = dcgm_agent.dcgmModuleGetStatuses(handle)
        secondRequestLatency = time.time() - diagStartTime
        numConcurrentCompleted += 1
        if firstReturnedRequestLatency is None:
            firstReturnedRequestLatency = secondRequestLatency
        time.sleep(sleepDuration)
    # Reap the worker thread now that it has finished.
    threadObj.join()

    diagThreadEndTime = time.time()
    diagDuration = diagThreadEndTime - diagStartTime
    if firstReturnedRequestLatency is None:
        test_utils.skip_test("Diag returned instantly. It is probably not supported for gpuId %u" % gpuId)
    logger.info("Completed %d concurrent requests. Diag ran for %.1f seconds" % (numConcurrentCompleted, diagDuration))
    # We should have been able to complete a request every 2 seconds if we slept for 1 (conservatively)
    numShouldHaveCompleted = int((diagDuration / sleepDuration) / 2.0)
    assert numConcurrentCompleted >= numShouldHaveCompleted, "Expected at least %d concurrent tests completed. Got %d" % (numShouldHaveCompleted, numConcurrentCompleted)
def helper_per_gpu_responses_api(handle, gpuIds):
    """
    Verify that pass/fail status for diagnostic tests are reported on a per GPU basis via dcgmActionValidate API call
    """
    failGpuId = gpuIds[0]
    # Baseline: all GPUs pass before anything is injected.
    helper_verify_diag_passing(handle, gpuIds, useFakeGpus=True)

    diag = DcgmDiag.DcgmDiag(gpuIds=[failGpuId], testNamesStr="SM Stress", paramsStr="sm stress.test_duration=15", version=dcgm_structs.dcgmRunDiag_version)
    # We explicitly want to fail for throttle reasons since this test inserts
    # throttling errors for verification.
    diag.SetThrottleMask(0)
    diag.UseFakeGpus()

    # Inject a HW_SLOWDOWN throttle reason plus an extreme temperature on the
    # GPU that is expected to fail.
    throttleFieldId = dcgm_fields.DCGM_FI_DEV_CLOCK_THROTTLE_REASONS
    throttleReason = dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN
    pollInterval = 0.1
    # Use an offset to make these errors start after the benign values
    inject_value(handle, failGpuId, throttleFieldId, throttleReason, injection_offset, True)
    inject_value(handle, failGpuId, dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 1000, injection_offset, True)
    # The injected values must be visible in DCGM before the diag starts.
    assert dcgm_internal_helpers.verify_field_value(failGpuId, throttleFieldId, throttleReason, checkInterval=pollInterval, maxWait=5, numMatches=1), \
        "Expected inserted values to be visible in DCGM"

    response = test_utils.diag_execute_wrapper(diag, handle)
    logger.info("Started diag")

    # The injected GPU must report a throttling failure; every other GPU must not.
    for gpuId in gpuIds:
        throttled, errMsg = find_throttle_failure(response, gpuId, dcgm_structs.DCGM_SM_STRESS_INDEX)
        if gpuId == failGpuId:
            assert throttled, "Expected throttling error but found none (%s)" % errMsg
        else:
            assert not throttled, "Expected not to find a throttling error but found '%s'" % errMsg
def helper_per_gpu_responses_dcgmi(handle, gpuIds):
    """
    Verify that pass/fail status for diagnostic tests are reported on a per GPU basis via dcgmi (for both normal stdout
    and JSON output).
    """
    def get_stdout(app):
        # Flatten the app's captured stdout lines into one space-joined string.
        output = ''
        for line in app.stdout_lines:
            output = output + line + " "
        return output
    def print_output(app):
        # Log stdout at info level and stderr at error level for debugging.
        logger.info(get_stdout(app))
        for line in app.stderr_lines:
            logger.error(line)

    def verify_successful_dcgmi_run(app):
        # Run the dcgmi app and assert it exits with the expected failure code
        # (the diag is expected to fail because of the injected errors).
        app.start(timeout=40)
        logger.info("Started dcgmi diag with pid %s" % app.getpid())
        retcode = app.wait()
        if test_utils.is_mig_incompatible_failure(get_stdout(app)):
            app.validate()
            test_utils.skip_test("Skipping this test because MIG is configured incompatibly (preventing access to the whole GPU)")

        # dcgm returns DCGM_ST_NVVS_ERROR on diag failure (which is expected here).
        expected_retcode = c_uint8(dcgm_structs.DCGM_ST_NVVS_ISOLATE_ERROR).value
        if retcode != expected_retcode:
            if app.stderr_lines or app.stdout_lines:
                logger.info("dcgmi output:")
                print_output(app)
        assert retcode == expected_retcode, \
            "Expected dcgmi diag to have retcode %s. Got return code %s" % (expected_retcode, retcode)
        app.validate() # non-zero exit code must be validated

    #helper_verify_diag_passing(handle, gpuIds, useFakeGpus=True)

    # Setup injection app
    interval = 0.1
    fieldId = dcgm_fields.DCGM_FI_DEV_CLOCK_THROTTLE_REASONS
    insertedError = dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN
    # Use an offset to make these errors start after the benign values
    inject_value(handle, gpuIds[0], fieldId, insertedError, injection_offset, True)
    inject_value(handle, gpuIds[0], dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 1000, injection_offset, True)
    # Verify that the inserted values are visible in DCGM before starting the diag
    assert dcgm_internal_helpers.verify_field_value(gpuIds[0], fieldId, insertedError, checkInterval=interval, maxWait=5, numMatches=1), \
        "Expected inserted values to be visible in DCGM"

    # Verify dcgmi output
    gpuIdStrings = list(map(str, gpuIds))
    gpuList = ",".join(gpuIdStrings)
    args = ["diag", "-r", "SM Stress", "-p", "sm stress.test_duration=5,pcie.max_pcie_replays=1", "-f", gpuList, "--throttle-mask", "0"]
    dcgmiApp = DcgmiApp(args=args)

    logger.info("Verifying stdout output")
    verify_successful_dcgmi_run(dcgmiApp)
    # Verify dcgmi output shows per gpu results (crude approximation of verifying correct console output)
    # Small state machine over the console lines: find the "Stress" header,
    # then the failing-GPU line, then the "Warning" line that follows it.
    stress_header_found = False
    fail_gpu_found = False
    fail_gpu_text = "Fail - GPU: %s" % gpuIds[0]
    check_for_warning = False
    warning_found = False
    for line in dcgmiApp.stdout_lines:
        if not stress_header_found:
            if "Stress" not in line:
                continue
            stress_header_found = True
            continue
        if not fail_gpu_found:
            if fail_gpu_text not in line:
                continue
            fail_gpu_found = True
            check_for_warning = True
            continue
        if check_for_warning:
            if "Warning" in line:
                warning_found = True
            break

    if not (stress_header_found and fail_gpu_found and warning_found):
        logger.info("dcgmi output:")
        print_output(dcgmiApp)

    assert stress_header_found, "Expected to see 'Stress' header in output"
    assert fail_gpu_found, "Expected to see %s in output" % fail_gpu_text
    assert warning_found, "Expected to see 'Warning' in output after GPU failure text"

    # Re-inject before the second (JSON) run so the failure reproduces.
    inject_value(handle, gpuIds[0], fieldId, insertedError, injection_offset, True)
    inject_value(handle, gpuIds[0], dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 1000, injection_offset, True)
    # Verify that the inserted values are visible in DCGM before starting the diag
    assert dcgm_internal_helpers.verify_field_value(gpuIds[0], fieldId, insertedError, checkInterval=interval, maxWait=5, numMatches=1), \
        "Expected inserted values to be visible in DCGM"

    # Verify JSON output
    logger.info("Verifying JSON output")
    args.append("-j")
    dcgmiApp = DcgmiApp(args=args)
    verify_successful_dcgmi_run(dcgmiApp)

    # Stop error insertion
    logger.info("Stopped error injection")

    # Verify per GPU results
    json_output = "\n".join(dcgmiApp.stdout_lines)
    output = json.loads(json_output)
    verifed = False
    # Expect exactly the injected GPU to be "Fail" and the other(s) "Pass" in
    # the Stress category of the JSON report.
    if (len(output.get("DCGM GPU Diagnostic", {}).get("test_categories", [])) == 2
            and output["DCGM GPU Diagnostic"]["test_categories"][1].get("category", None) == "Stress"
            and output["DCGM GPU Diagnostic"]["test_categories"][1]["tests"][0]["name"] == "SM Stress"
            and len(output["DCGM GPU Diagnostic"]["test_categories"][1]["tests"][0]["results"]) >= 2
            and output["DCGM GPU Diagnostic"]["test_categories"][1]["tests"][0]["results"][0]["gpu_ids"] == str(gpuIds[0])
            and output["DCGM GPU Diagnostic"]["test_categories"][1]["tests"][0]["results"][0]["status"] == "Fail"
            and output["DCGM GPU Diagnostic"]["test_categories"][1]["tests"][0]["results"][1]["status"] == "Pass"):
        verifed = True

    if not verifed:
        print_output(dcgmiApp)
    assert verifed, "dcgmi JSON output did not pass verification"
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_per_gpu_responses_standalone_api(handle, gpuIds):
    # Per-GPU result reporting via the dcgmActionValidate API path.
    if len(gpuIds) < 2:
        test_utils.skip_test("Skipping because this test requires 2 or more GPUs with same SKU")
    if test_utils.is_throttling_masked_by_nvvs(handle, gpuIds[0], dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN):
        test_utils.skip_test("Skipping because this SKU ignores the throttling we inject for this test")
    logger.info("Starting test for per gpu responses (API call)")
    helper_per_gpu_responses_api(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_per_gpu_responses_standalone_dcgmi(handle, gpuIds):
    # Per-GPU result reporting via the dcgmi command-line output (text + JSON).
    if len(gpuIds) < 2:
        test_utils.skip_test("Skipping because this test requires 2 or more GPUs with same SKU")
    if test_utils.is_throttling_masked_by_nvvs(handle, gpuIds[0], dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN):
        test_utils.skip_test("Skipping because this SKU ignores the throttling we inject for this test")
    logger.info("Starting test for per gpu responses (dcgmi output)")
    helper_per_gpu_responses_dcgmi(handle, gpuIds)
def helper_test_diagnostic_config_usage(handle, gpuIds):
    """Run the diagnostic with a custom config that lowers max_sbe_errors to 1
    and verify that injected SBEs fail the test."""
    diag = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr="diagnostic", paramsStr="diagnostic.test_duration=10")
    # Lower the SBE tolerance so the injected errors below exceed it.
    diag.SetConfigFileContents("%YAML 1.2\n\ncustom:\n- custom:\n diagnostic:\n max_sbe_errors: 1")
    inject_value(handle, gpuIds[0], dcgm_fields.DCGM_FI_DEV_ECC_SBE_VOL_TOTAL, 1000, injection_offset, True)
    response = test_utils.diag_execute_wrapper(diag, handle)
    diagResult = response.perGpuResponses[gpuIds[0]].results[dcgm_structs.DCGM_DIAGNOSTIC_INDEX].result
    assert diagResult != dcgm_structs.DCGM_DIAG_RESULT_PASS, \
        "Should have a failure due to injected SBEs, but got passing result"
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_diagnostic_config_usage_standalone(handle, gpuIds):
    # Entry point: custom config file lowers SBE tolerance -> diag must fail.
    helper_test_diagnostic_config_usage(handle, gpuIds)
| 49.811696 | 213 | 0.734838 |
import pydcgm
import dcgm_structs
import dcgm_agent_internal
import dcgm_agent
import logger
import test_utils
import dcgm_fields
import dcgm_internal_helpers
import option_parser
import DcgmDiag
import dcgm_errors
import threading
import time
import sys
import os
import signal
import utils
import json
from ctypes import *
from apps.app_runner import AppRunner
from apps.dcgmi_app import DcgmiApp
from dcgm_internal_helpers import inject_value
injection_offset = 3
def injection_wrapper(handle, gpuId, fieldId, value, isInt):
    """Sleep briefly, then inject *value* into *fieldId* for *gpuId* at time
    offsets 0, 5 and 10 seconds.

    Intended to run on a background thread so the values land while a diag
    started by the caller is executing.

    :param isInt: True to inject as int64, False to inject as fp64.
    """
    time.sleep(1)
    # Pick the typed injection helper once instead of duplicating the
    # call/assert sequence in each branch (the original repeated it six times).
    if isInt:
        inject = dcgm_internal_helpers.inject_field_value_i64
    else:
        inject = dcgm_internal_helpers.inject_field_value_fp64
    for offset in (0, 5, 10):
        ret = inject(handle, gpuId, fieldId, value, offset)
        assert ret == dcgm_structs.DCGM_ST_OK
def check_diag_result_fail(response, gpuIndex, testIndex):
    """Return True when the given test explicitly reported FAIL for the given GPU."""
    return response.perGpuResponses[gpuIndex].results[testIndex].result == dcgm_structs.DCGM_DIAG_RESULT_FAIL
def check_diag_result_pass(response, gpuIndex, testIndex):
    """Return True when the given test explicitly reported PASS for the given GPU."""
    return response.perGpuResponses[gpuIndex].results[testIndex].result == dcgm_structs.DCGM_DIAG_RESULT_PASS
def diag_result_assert_fail(response, gpuIndex, testIndex, msg, errorCode):
    """Assert the test did not PASS (skipped/not-run are tolerated), and on v6
    responses also assert that the reported error code matches errorCode."""
    # Instead of checking that it failed, just make sure it didn't pass because we want to ignore skipped
    # tests or tests that did not run.
    assert response.perGpuResponses[gpuIndex].results[testIndex].result != dcgm_structs.DCGM_DIAG_RESULT_PASS, msg
    if response.version == dcgm_structs.dcgmDiagResponse_version6:
        codeMsg = "Failing test expected error code %d, but found %d" % \
            (errorCode, response.perGpuResponses[gpuIndex].results[testIndex].error.code)
        assert response.perGpuResponses[gpuIndex].results[testIndex].error.code == errorCode, codeMsg
def diag_result_assert_pass(response, gpuIndex, testIndex, msg):
    """Assert the test did not FAIL (skipped/not-run are tolerated), and on v6
    responses also assert that the error code is zero."""
    # Instead of checking that it passed, just make sure it didn't fail because we want to ignore skipped
    # tests or tests that did not run.
    assert response.perGpuResponses[gpuIndex].results[testIndex].result != dcgm_structs.DCGM_DIAG_RESULT_FAIL, msg
    if response.version == dcgm_structs.dcgmDiagResponse_version6:
        codeMsg = "Passing test somehow has a non-zero error code!"
        assert response.perGpuResponses[gpuIndex].results[testIndex].error.code == 0, codeMsg
def helper_test_dcgm_diag_dbe_insertion(handle, gpuIds):
    """Inject double-bit ECC errors and verify the diagnostic test fails with
    DCGM_FR_FIELD_VIOLATION."""
    diag = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr='diagnostic', paramsStr='diagnostic.test_duration=8')
    diag.UseFakeGpus()
    # Inject a DBE at two time offsets so the diag sees it while running.
    for offset in (5, 15):
        ret = dcgm_internal_helpers.inject_field_value_i64(handle, gpuIds[0],
                                                           dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL, 1, offset)
        assert ret == dcgm_structs.DCGM_ST_OK, "Could not insert an error to test forced failure"

    response = test_utils.diag_execute_wrapper(diag, handle)
    errorStr = "Expected results for %d GPUs, but found %d" % (len(gpuIds), response.gpuCount)
    assert response.gpuCount == len(gpuIds), errorStr
    diag_result_assert_fail(response, gpuIds[0], dcgm_structs.DCGM_DIAGNOSTIC_INDEX,
                            "Expected the diagnostic test to fail because we injected a DBE", dcgm_errors.DCGM_FR_FIELD_VIOLATION)
def helper_check_diag_empty_group(handle, gpuIds):
    """Running a diag on an empty group must raise DCGM_ST_GROUP_IS_EMPTY; after
    adding a GPU to the group the same request must succeed."""
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetEmptyGroup("test1")
    runDiagInfo = dcgm_structs.c_dcgmRunDiag_t()
    runDiagInfo.version = dcgm_structs.dcgmRunDiag_version
    runDiagInfo.groupId = groupObj.GetId()
    runDiagInfo.validate = 1

    # Empty group: the action-validate request must be rejected.
    with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_GROUP_IS_EMPTY)):
        response = test_utils.action_validate_wrapper(runDiagInfo, handle)

    # Populate the group and retry; the request should now go through.
    groupObj.AddGpu(gpuIds[0])
    response = test_utils.action_validate_wrapper(runDiagInfo, handle)
    assert response, "Should have received a response now that we have a non-empty group"
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_helper_embedded_check_diag_empty_group(handle, gpuIds):
    # Empty-group rejection, embedded host engine.
    helper_check_diag_empty_group(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_helper_standalone_check_diag_empty_group(handle, gpuIds):
    # Empty-group rejection, standalone host engine.
    helper_check_diag_empty_group(handle, gpuIds)
def diag_assert_error_found(response, gpuId, testIndex, errorStr):
    """Assert that errorStr appears in the test's warning message, unless the
    test was skipped or not run."""
    testResult = response.perGpuResponses[gpuId].results[testIndex]
    # Skipped / not-run tests carry no meaningful warning text.
    if testResult.result in (dcgm_structs.DCGM_DIAG_RESULT_SKIP, dcgm_structs.DCGM_DIAG_RESULT_NOT_RUN):
        return
    warningFound = testResult.error.msg
    assert warningFound.find(errorStr) != -1, "Expected to find '%s' as a warning, but found '%s'" % (errorStr, warningFound)
def diag_assert_error_not_found(response, gpuId, testIndex, errorStr):
    """Assert that errorStr does NOT appear in the test's warning message,
    unless the test was skipped or not run."""
    testResult = response.perGpuResponses[gpuId].results[testIndex]
    # Skipped / not-run tests carry no meaningful warning text.
    if testResult.result in (dcgm_structs.DCGM_DIAG_RESULT_SKIP, dcgm_structs.DCGM_DIAG_RESULT_NOT_RUN):
        return
    warningFound = testResult.error.msg
    assert warningFound.find(errorStr) == -1, "Expected not to find '%s' as a warning, but found it: '%s'" % (errorStr, warningFound)
def helper_check_diag_thermal_violation(handle, gpuIds):
    """Run the 'diagnostic' plugin while injecting a large thermal-violation value
    on the first GPU, then verify no 'Thermal violations' warning is reported."""
    dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr='diagnostic', paramsStr='diagnostic.test_duration=10')
    # Inject values concurrently with the running diag so the plugin sees them live
    diag_thread = threading.Thread(target=injection_wrapper,
                                   args=[handle, gpuIds[0], dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION,
                                         9223372036854775792, True])
    diag_thread.start()
    response = test_utils.diag_execute_wrapper(dd, handle)
    diag_thread.join()
    assert response.gpuCount == len(gpuIds), "Expected %d gpus, but found %d reported" % (len(gpuIds), response.gpuCount)
    # NOTE(review): this iterates positional indices 0..gpuCount-1 rather than the
    # gpuIds values — assumes perGpuResponses is densely indexed from 0; confirm.
    for gpuIndex in range(response.gpuCount):
        diag_assert_error_not_found(response, gpuIndex, dcgm_structs.DCGM_DIAGNOSTIC_INDEX, "Thermal violations")
def helper_check_diag_high_temp_fail(handle, gpuIds):
    """Run the 'diagnostic' plugin while injecting a 120C GPU temperature and
    verify the diag fails with a temperature-violation error."""
    dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr='diagnostic', paramsStr='diagnostic.test_duration=10')
    # Inject the hot temperature concurrently so the running diag observes it
    diag_thread = threading.Thread(target=injection_wrapper,
                                   args=[handle, gpuIds[0], dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 120, True])
    diag_thread.start()
    response = test_utils.diag_execute_wrapper(dd, handle)
    diag_thread.join()
    assert response.gpuCount == len(gpuIds), "Expected %d gpus, but found %d reported" % (len(gpuIds), response.gpuCount)
    diag_result_assert_fail(response, gpuIds[0], dcgm_structs.DCGM_DIAGNOSTIC_INDEX, "Expected a failure due to 120 degree inserted temp.", dcgm_errors.DCGM_FR_TEMP_VIOLATION)
def helper_check_dcgm_run_diag_backwards_compatibility(handle, gpuId):
    """Verify that older versions of the dcgmRunDiag struct are still accepted
    by dcgmActionValidate for the given GPU."""
    def run_one_version(drd, version):
        # Minimal validate-level run restricted to the single target GPU
        drd.validate = 1
        drd.gpuList = str(gpuId)
        test_utils.action_validate_wrapper(drd, handle, version)

    run_one_version(dcgm_structs.c_dcgmRunDiag_v7(), dcgm_structs.dcgmRunDiag_version7)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_run_diag_backwards_compatibility_embedded(handle, gpuIds):
    """Embedded variant: older dcgmRunDiag struct versions must still be accepted."""
    helper_check_dcgm_run_diag_backwards_compatibility(handle, gpuIds[0])
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_run_diag_backwards_compatibility_standalone(handle, gpuIds):
    """Standalone variant: older dcgmRunDiag struct versions must still be accepted."""
    helper_check_dcgm_run_diag_backwards_compatibility(handle, gpuIds[0])
# Module-level cache of gpuId -> bool (True = passed the SM Perf sanity run).
# Written and consulted by helper_verify_diag_passing to avoid re-running the
# sanity diag for GPUs already checked in this session.
checked_gpus = {}
def helper_verify_diag_passing(handle, gpuIds, testNames="SM Stress", testIndex=dcgm_structs.DCGM_SM_STRESS_INDEX, params="sm stress.test_duration=15", version=dcgm_structs.dcgmRunDiag_version, useFakeGpus=False):
    """Build a DcgmDiag for gpuIds and verify each GPU can pass the given test,
    skipping the calling test if any GPU fails.

    Results are cached per GPU in the module-level `checked_gpus` dict so the
    sanity diag only runs once per GPU per session. Returns the configured
    DcgmDiag object for reuse by the caller.
    """
    dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr=testNames, paramsStr=params, version=version)
    dd.SetThrottleMask(0)
    if useFakeGpus:
        dd.UseFakeGpus()
    # Only run the diag if at least one GPU has not been checked before
    runDiag = False
    for gpuId in gpuIds:
        if gpuId in checked_gpus:
            if checked_gpus[gpuId] == False:
                test_utils.skip_test("Skipping because GPU %s does not pass SM Perf test. "
                                     "Please verify whether the GPU is supported and healthy." % gpuId)
        else:
            runDiag = True
    if runDiag == False:
        return dd
    response = test_utils.diag_execute_wrapper(dd, handle)
    # Record each GPU's result; skip the caller on the first failing GPU
    for gpuId in gpuIds:
        if not check_diag_result_pass(response, gpuId, testIndex):
            checked_gpus[gpuId] = False
            test_utils.skip_test("Skipping because GPU %s does not pass SM Perf test. "
                                 "Please verify whether the GPU is supported and healthy." % gpuId)
        else:
            checked_gpus[gpuId] = True
    return dd
def find_throttle_failure(response, gpuId, pluginIndex):
    """Return (throttled, message) for the given GPU/plugin result.

    throttled is True only when the plugin did not pass AND its error message
    mentions 'clock throttling'; message carries the plugin's error text (empty
    string when the plugin passed).
    """
    result = response.perGpuResponses[gpuId].results[pluginIndex]
    if result.result != dcgm_structs.DCGM_DIAG_RESULT_PASS:
        error = result.error.msg
        if error.find('clock throttling') != -1:
            # Bug fix: the old code formatted error.msg with itself —
            # "%s (%s)" % (error, ...error.msg) — duplicating the message.
            return True, error
        else:
            return False, error
    return False, ""
#####
# Helper method for inserting errors and performing the diag
def perform_diag_with_throttle_mask_and_verify(dd, handle, gpuId, inserted_error, throttle_mask, shouldPass, failureMsg):
    """Inject a clock-throttle reason (plus hot temperature), run the diag, and
    assert that a throttling failure is (or is not) reported per shouldPass.

    throttle_mask of None leaves the diag's existing mask untouched.
    NOTE(review): the failureMsg parameter is currently unused by this helper —
    the assertion messages below are built from find_throttle_failure's output.
    """
    fieldId = dcgm_fields.DCGM_FI_DEV_CLOCK_THROTTLE_REASONS
    interval = 0.1
    if throttle_mask is not None:
        dd.SetThrottleMask(throttle_mask)
    inject_value(handle, gpuId, fieldId, inserted_error, injection_offset, True)
    inject_value(handle, gpuId, dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 1000, injection_offset, True)
    # Verify that the inserted values are visible in DCGM before starting the diag
    assert dcgm_internal_helpers.verify_field_value(gpuId, fieldId, inserted_error, checkInterval=interval, maxWait=5, numMatches=1), \
        "Expected inserted values to be visible in DCGM"
    # Start the diag
    response = test_utils.diag_execute_wrapper(dd, handle)
    # Check for pass or failure as per the shouldPass parameter
    throttled, errMsg = find_throttle_failure(response, gpuId, dcgm_structs.DCGM_SM_STRESS_INDEX)
    if shouldPass:
        assert throttled == False, "Expected to not have a throttling error but found %s" % errMsg
    else:
        assert throttled == True, "Expected to find a throttling error but did not (%s)" % errMsg
def helper_test_throttle_mask_fail_hw_slowdown(handle, gpuId):
    """Inject an HW_SLOWDOWN throttle reason with an empty (0) throttle mask and
    verify the diag fails with a throttling error."""
    dd = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
    #####
    # Insert a throttling error and verify that the test fails
    perform_diag_with_throttle_mask_and_verify(
        dd, handle, gpuId, inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN,
        throttle_mask=0, shouldPass=False, failureMsg="Expected test to fail because of throttling"
    )
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_fail_hw_slowdown(handle, gpuIds):
    """Unmasked HW_SLOWDOWN throttling must fail the diag."""
    helper_test_throttle_mask_fail_hw_slowdown(handle, gpuIds[0])
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_run_injection(handle, gpuIds):
    """Smoke test for the injection path.

    NOTE(review): body is identical to test_dcgm_diag_throttle_mask_fail_hw_slowdown;
    presumably kept under a separate name for targeted invocation.
    """
    helper_test_throttle_mask_fail_hw_slowdown(handle, gpuIds[0])
def helper_test_throttle_mask_ignore_hw_slowdown(handle, gpuId):
    """Inject an HW_SLOWDOWN throttle reason but mask it (as an integer bitmask);
    the diag should pass because the masked reason is ignored."""
    dd = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
    # Insert throttling error and set throttle mask to ignore it (as integer value)
    perform_diag_with_throttle_mask_and_verify(
        dd, handle, gpuId, inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN,
        throttle_mask=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN, shouldPass=True,
        # Typo fix in the assertion message: "interger" -> "integer"
        failureMsg="Expected test to pass because throttle mask (integer bitmask) ignores the throttle reason"
    )
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_ignore_hw_slowdown(handle, gpuIds):
    """Masked (integer) HW_SLOWDOWN throttling must be ignored by the diag."""
    helper_test_throttle_mask_ignore_hw_slowdown(handle, gpuIds[0])
def helper_test_throttle_mask_ignore_hw_slowdown_string(handle, gpuId):
    """Inject an HW_SLOWDOWN throttle reason but mask it by name ("HW_SLOWDOWN");
    the diag should pass because the masked reason is ignored."""
    dd = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
    # Insert throttling error and set throttle mask to ignore it (as string name)
    perform_diag_with_throttle_mask_and_verify(
        dd, handle, gpuId, inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN,
        throttle_mask="HW_SLOWDOWN", shouldPass=True,
        failureMsg="Expected test to pass because throttle mask (named reason) ignores the throttle reason"
    )
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_ignore_hw_slowdown_string(handle, gpuIds):
    """Masked (named) HW_SLOWDOWN throttling must be ignored by the diag."""
    helper_test_throttle_mask_ignore_hw_slowdown_string(handle, gpuIds[0])
def helper_test_throttle_mask_fail_double_inject_ignore_one(handle, gpuId):
    """Inject two throttle reasons and mask only one (as an integer bitmask);
    the diag must still fail on the unmasked reason."""
    dd = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
    # Insert two throttling errors and set throttle mask to ignore only one (as integer)
    perform_diag_with_throttle_mask_and_verify(
        dd, handle, gpuId,
        inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN | dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_SW_THERMAL,
        throttle_mask=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN, shouldPass=False,
        # Typo fix in the assertion message: "interger" -> "integer"
        failureMsg="Expected test to fail because throttle mask (integer bitmask) ignores one of the throttle reasons"
    )
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_fail_double_inject_ignore_one(handle, gpuIds):
    """Two injected throttle reasons with only one masked (integer) must still fail."""
    helper_test_throttle_mask_fail_double_inject_ignore_one(handle, gpuIds[0])
def helper_test_throttle_mask_fail_double_inject_ignore_one_string(handle, gpuId):
    """Inject two throttle reasons and mask only one by name ("HW_SLOWDOWN");
    the diag must still fail on the unmasked reason."""
    dd = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
    # Insert two throttling errors and set throttle mask to ignore only one (as string name)
    perform_diag_with_throttle_mask_and_verify(
        dd, handle, gpuId,
        inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN | dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_SW_THERMAL,
        throttle_mask="HW_SLOWDOWN", shouldPass=False,
        failureMsg="Expected test to fail because throttle mask (named reason) ignores one of the throttle reasons"
    )
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_fail_double_inject_ignore_one_string(handle, gpuIds):
    """Two injected throttle reasons with only one masked (named) must still fail."""
    helper_test_throttle_mask_fail_double_inject_ignore_one_string(handle, gpuIds[0])
def helper_test_throttle_mask_fail_ignore_different_throttle(handle, gpuId):
    """Inject HW_SLOWDOWN but mask a different reason (HW_POWER_BRAKE, as integer);
    the diag must still fail because the injected reason is not masked."""
    dd = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
    # Insert throttling error and set throttle mask to ignore a different reason (as integer value)
    perform_diag_with_throttle_mask_and_verify(
        dd, handle, gpuId, inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN,
        throttle_mask=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_POWER_BRAKE, shouldPass=False,
        # Typo fix in the assertion message: "interger" -> "integer"
        failureMsg="Expected test to fail because throttle mask (integer bitmask) ignores different throttle reason"
    )
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_fail_ignore_different_throttle(handle, gpuIds):
    """Masking a different (integer) reason than the injected one must still fail."""
    helper_test_throttle_mask_fail_ignore_different_throttle(handle, gpuIds[0])
def helper_test_throttle_mask_fail_ignore_different_throttle_string(handle, gpuId):
    """Inject HW_SLOWDOWN but mask a different reason by name ("HW_POWER_BRAKE");
    the diag must still fail because the injected reason is not masked."""
    dd = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
    # Insert throttling error and set throttle mask to ignore a different reason (as string name)
    perform_diag_with_throttle_mask_and_verify(
        dd, handle, gpuId, inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN,
        throttle_mask="HW_POWER_BRAKE", shouldPass=False,
        failureMsg="Expected test to fail because throttle mask (named reason) ignores different throttle reason"
    )
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_fail_ignore_different_throttle_string(handle, gpuIds):
    """Masking a different (named) reason than the injected one must still fail."""
    helper_test_throttle_mask_fail_ignore_different_throttle_string(handle, gpuIds[0])
def helper_test_throttle_mask_pass_no_throttle(handle, gpuId):
    """With no throttle reason injected and an empty mask, the diag should pass."""
    dd = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
    # Clear throttling reasons and mask to verify test passes
    dd.SetThrottleMask("")
    perform_diag_with_throttle_mask_and_verify(
        dd, handle, gpuId, inserted_error=0, throttle_mask=None, shouldPass=True,
        failureMsg="Expected test to pass because there is no throttling"
    )
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_pass_no_throttle(handle, gpuIds):
    """No injected throttling and empty mask: diag must pass."""
    helper_test_throttle_mask_pass_no_throttle(handle, gpuIds[0])
def helper_check_diag_stop_on_interrupt_signals(handle, gpuId):
    """Launch `dcgmi diag` as a subprocess, deliver SIGINT/SIGHUP/SIGQUIT/SIGTERM
    while it runs, and verify dcgmi exits with code signum+128 and the nvvs
    child process terminates."""
    # First check whether the GPU is healthy/supported
    dd = DcgmDiag.DcgmDiag(gpuIds=[gpuId], testNamesStr="SM Stress", paramsStr="sm stress.test_duration=2",
                           version=dcgm_structs.dcgmRunDiag_version7)
    response = test_utils.diag_execute_wrapper(dd, handle)
    if not check_diag_result_pass(response, gpuId, dcgm_structs.DCGM_SM_STRESS_INDEX):
        test_utils.skip_test("Skipping because GPU %s does not pass SM Stress test. "
                             "Please verify whether the GPU is supported and healthy." % gpuId)
    # paths to dcgmi executable
    paths = {
        "Linux_32bit": "./apps/x86/dcgmi",
        "Linux_64bit": "./apps/amd64/dcgmi",
        "Linux_ppc64le": "./apps/ppc64le/dcgmi",
        "Linux_aarch64": "./apps/aarch64/dcgmi"
    }
    # Verify test is running on a supported platform
    if utils.platform_identifier not in paths:
        test_utils.skip_test("Dcgmi is not supported on the current platform.")
    dcgmi_path = paths[utils.platform_identifier]

    def verify_exit_code_on_signal(signum):
        # Launch dcgmi diag, send `signum` to it, and assert the expected
        # shell-style exit code (128 + signal number) plus nvvs termination.
        # Ensure that host engine is ready to launch a new diagnostic
        dd = DcgmDiag.DcgmDiag(gpuIds=[gpuId], testNamesStr='1')
        success = False
        start = time.time()
        while not success and (time.time() - start) <= 3:
            try:
                response = test_utils.diag_execute_wrapper(dd, handle)
                success = True
            except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_DIAG_ALREADY_RUNNING):
                # Only acceptable error due to small race condition between the nvvs process exiting and
                # hostengine actually processing the exit. We try for a maximum of 3 seconds since this
                # should be rare and last only for a short amount of time
                time.sleep(1.5)
        diagApp = AppRunner(dcgmi_path, args=["diag", "-r", "SM Stress", "-i", "%s" % gpuId,
                                              "-d", "INFO", "--debugLogFile", "/tmp/nvvs.log"])
        # Start the diag
        diagApp.start(timeout=40)
        logger.info("Launched dcgmi process with pid: %s" % diagApp.getpid())
        # Ensure diag is running before sending interrupt signal
        running, debug_output = dcgm_internal_helpers.check_nvvs_process(want_running=True, attempts=50)
        assert running, "The nvvs process did not start within 25 seconds: %s" % (debug_output)
        # There is a small race condition here - it is possible that the hostengine sends a SIGTERM before the
        # nvvs process has setup a signal handler, and so the nvvs process does not stop when SIGTERM is sent.
        # We sleep for 1 second to reduce the possibility of this scenario
        time.sleep(1)
        diagApp.signal(signum)
        retCode = diagApp.wait()
        # Check the return code and stdout/stderr output before asserting for better debugging info
        if retCode != (signum + 128):
            logger.error("Got retcode '%s' from launched diag." % retCode)
            if diagApp.stderr_lines or diagApp.stdout_lines:
                logger.info("dcgmi output:")
                for line in diagApp.stdout_lines:
                    logger.info(line)
                for line in diagApp.stderr_lines:
                    logger.error(line)
        assert retCode == (signum + 128)
        # Since the app returns a non zero exit code, we call the validate method to prevent false
        # failures from the test framework
        diagApp.validate()
        # Give the launched nvvs process 15 seconds to terminate.
        not_running, debug_output = dcgm_internal_helpers.check_nvvs_process(want_running=False, attempts=50)
        assert not_running, "The launched nvvs process did not terminate within 25 seconds. pgrep output:\n%s" \
            % debug_output

    # Verify return code on SIGINT
    # We simply verify the return code because explicitly checking whether the nvvs process has terminated is
    # clunky and error-prone
    logger.info("Testing stop on SIGINT")
    verify_exit_code_on_signal(signal.SIGINT)
    # Verify return code on SIGHUP
    logger.info("Testing stop on SIGHUP")
    verify_exit_code_on_signal(signal.SIGHUP)
    # Verify return code on SIGQUIT
    logger.info("Testing stop on SIGQUIT")
    verify_exit_code_on_signal(signal.SIGQUIT)
    # Verify return code on SIGTERM
    logger.info("Testing stop on SIGTERM")
    verify_exit_code_on_signal(signal.SIGTERM)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_diag_stop_on_signal_embedded(handle, gpuIds):
    """Embedded variant of the signal-stop test (skipped outside developer mode)."""
    if not option_parser.options.developer_mode:
        # This test can run into a race condition when using embedded host engine, which can cause nvvs to
        # take >60 seconds to terminate after receiving a SIGTERM.
        test_utils.skip_test("Skip test for more debugging")
    helper_check_diag_stop_on_interrupt_signals(handle, gpuIds[0])
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_diag_stop_on_signal_standalone(handle, gpuIds):
    """Standalone variant of the signal-stop test."""
    helper_check_diag_stop_on_interrupt_signals(handle, gpuIds[0])
def helper_verify_log_file_creation(handle, gpuIds):
    """Run targeted stress with a debug log file configured and assert the log
    file exists — but only when the test actually ran on at least one GPU."""
    dd = helper_verify_diag_passing(handle, gpuIds, testNames="targeted stress", testIndex=dcgm_structs.DCGM_TARGETED_STRESS_INDEX, params="targeted stress.test_duration=10", useFakeGpus=True)
    logname = '/tmp/tmp_test_debug_log'
    dd.SetDebugLogFile(logname)
    dd.SetDebugLevel(5)
    response = test_utils.diag_execute_wrapper(dd, handle)
    if len(response.systemError.msg) == 0:
        # Summarize per-GPU outcomes so the assertion message is informative
        skippedAll = True
        passedCount = 0
        errors = ""
        for gpuId in gpuIds:
            resultType = response.perGpuResponses[gpuId].results[dcgm_structs.DCGM_TARGETED_STRESS_INDEX].result
            if resultType not in [dcgm_structs.DCGM_DIAG_RESULT_SKIP, dcgm_structs.DCGM_DIAG_RESULT_NOT_RUN]:
                skippedAll = False
                if resultType == dcgm_structs.DCGM_DIAG_RESULT_PASS:
                    passedCount = passedCount + 1
                else:
                    warning = response.perGpuResponses[gpuId].results[dcgm_structs.DCGM_TARGETED_STRESS_INDEX].error.msg
                    if len(warning):
                        errors = "%s, GPU %d failed: %s" % (errors, gpuId, warning)
        if skippedAll == False:
            detailedMsg = "passed on %d of %d GPUs" % (passedCount, response.gpuCount)
            if len(errors):
                detailedMsg = "%s and had these errors: %s" % (detailedMsg, errors)
            logger.info(detailedMsg)
            assert os.path.isfile(logname), "Logfile '%s' was not created and %s" % (logname, detailedMsg)
        else:
            logger.info("The diagnostic was skipped, so we cannot run this test.")
    else:
        logger.info("The diagnostic had a problem when executing, so we cannot run this test.")
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_verify_log_file_creation_standalone(handle, gpuIds):
    """A configured debug log file must be created by a real diag run."""
    helper_verify_log_file_creation(handle, gpuIds)
def helper_throttling_masking_failures(handle, gpuId):
    """Inject a benign throttle value, then a real HW_SLOWDOWN reason, and verify
    the diag (with an empty throttle mask) reports a throttling failure."""
    #####
    # First check whether the GPU is healthy
    dd = DcgmDiag.DcgmDiag(gpuIds=[gpuId], testNamesStr="SM Stress", paramsStr="sm stress.test_duration=2",
                           version=dcgm_structs.dcgmRunDiag_version)
    dd.SetThrottleMask(0)  # We explicitly want to fail for throttle reasons since this test inserts throttling errors
                           # for verification
    dd.UseFakeGpus()
    response = test_utils.diag_execute_wrapper(dd, handle)
    if not check_diag_result_pass(response, gpuId, dcgm_structs.DCGM_SM_STRESS_INDEX):
        test_utils.skip_test("Skipping because GPU %s does not pass SM Perf test. "
                             "Please verify whether the GPU is supported and healthy." % gpuId)
    #####
    dd = DcgmDiag.DcgmDiag(gpuIds=[gpuId], testNamesStr="SM Stress", paramsStr="sm stress.test_duration=15",
                           version=dcgm_structs.dcgmRunDiag_version)
    dd.SetThrottleMask(0)
    dd.UseFakeGpus()
    fieldId = dcgm_fields.DCGM_FI_DEV_CLOCK_THROTTLE_REASONS
    insertedError = dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN
    interval = 0.1
    logger.info("Injecting benign errors")
    inject_value(handle, gpuId, fieldId, 3, 1, True)
    # Verify that the inserted values are visible in DCGM before starting the diag
    assert dcgm_internal_helpers.verify_field_value(gpuId, fieldId, 3, checkInterval=interval, maxWait=5, numMatches=1), \
        "Expected inserted values to be visible in DCGM"
    logger.info("Injecting actual errors")
    inject_value(handle, gpuId, fieldId, insertedError, injection_offset, True)
    inject_value(handle, gpuId, dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 1000, injection_offset, True)
    logger.info("Started diag")
    response = test_utils.diag_execute_wrapper(dd, handle)
    # Verify that the inserted values are visible in DCGM
    # Max wait of 8 is because of 5 second offset + 2 seconds required for 20 matches + 1 second buffer.
    assert dcgm_internal_helpers.verify_field_value(gpuId, fieldId, insertedError, checkInterval=0.1, numMatches=1, maxWait=8), \
        "Expected inserted errors to be visible in DCGM"
    throttled, errMsg = find_throttle_failure(response, gpuId, dcgm_structs.DCGM_SM_STRESS_INDEX)
    assert throttled, "Expected to find throttling failure, but did not: (%s)" % errMsg
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttling_masking_failures_standalone(handle, gpuIds):
    """Benign then real throttling injection must produce a throttling failure."""
    helper_throttling_masking_failures(handle, gpuIds[0])
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_handle_concurrency_standalone(handle, gpuIds):
    """Verify other requests on the same DCGM handle complete while a diag runs,
    i.e. the handle is not blocked for the diag's duration."""
    diagDuration = 10
    gpuId = gpuIds[0]
    dd = DcgmDiag.DcgmDiag(gpuIds=[gpuId], testNamesStr="SM Stress",
                           paramsStr="sm stress.test_duration=%d" % diagDuration,
                           version=dcgm_structs.dcgmRunDiag_version)
    dd.UseFakeGpus()
    response = [None]

    def run(dd, response):
        # Bug fix: store the result in the shared list slot. The old code did
        # `response = ...`, which only rebound the local name and discarded
        # the diag response.
        response[0] = test_utils.diag_execute_wrapper(dd, handle)

    diagStartTime = time.time()
    threadObj = threading.Thread(target=run, args=[dd, response])
    threadObj.start()
    # Give threadObj a head start on its 10 second run
    time.sleep(1.0)
    firstReturnedRequestLatency = None
    numConcurrentCompleted = 0
    sleepDuration = 1.0
    while threadObj.is_alive():
        # Make another request on the handle concurrently
        moduleStatuses = dcgm_agent.dcgmModuleGetStatuses(handle)
        secondRequestLatency = time.time() - diagStartTime
        numConcurrentCompleted += 1
        if firstReturnedRequestLatency is None:
            firstReturnedRequestLatency = secondRequestLatency
        time.sleep(sleepDuration)
    threadObj.join()  # thread has finished; reap it explicitly
    diagThreadEndTime = time.time()
    diagDuration = diagThreadEndTime - diagStartTime
    if firstReturnedRequestLatency is None:
        test_utils.skip_test("Diag returned instantly. It is probably not supported for gpuId %u" % gpuId)
    logger.info("Completed %d concurrent requests. Diag ran for %.1f seconds" % (numConcurrentCompleted, diagDuration))
    # We should have been able to complete a request every 2 seconds if we slept for 1 (conservatively)
    numShouldHaveCompleted = int((diagDuration / sleepDuration) / 2.0)
    assert numConcurrentCompleted >= numShouldHaveCompleted, "Expected at least %d concurrent tests completed. Got %d" % (numShouldHaveCompleted, numConcurrentCompleted)
def helper_per_gpu_responses_api(handle, gpuIds):
    """Inject throttling on only the first GPU, run the diag on that GPU, and
    verify results are reported per GPU: the injected GPU shows a throttling
    failure while the others do not."""
    failGpuId = gpuIds[0]
    dd = helper_verify_diag_passing(handle, gpuIds, useFakeGpus=True)
    dd = DcgmDiag.DcgmDiag(gpuIds=[failGpuId], testNamesStr="SM Stress", paramsStr="sm stress.test_duration=15", version=dcgm_structs.dcgmRunDiag_version)
    dd.SetThrottleMask(0)  # We explicitly want to fail for throttle reasons since this test inserts throttling errors
                           # for verification
    dd.UseFakeGpus()
    # Setup injection app
    fieldId = dcgm_fields.DCGM_FI_DEV_CLOCK_THROTTLE_REASONS
    insertedError = dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN
    interval = 0.1
    # Use an offset to make these errors start after the benign values
    inject_value(handle, failGpuId, fieldId, insertedError, injection_offset, True)
    inject_value(handle, failGpuId, dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 1000, injection_offset, True)
    # Verify that the inserted values are visible in DCGM before starting the diag
    assert dcgm_internal_helpers.verify_field_value(failGpuId, fieldId, insertedError, checkInterval=interval, maxWait=5, numMatches=1), \
        "Expected inserted values to be visible in DCGM"
    response = test_utils.diag_execute_wrapper(dd, handle)
    logger.info("Started diag")
    # Verify that responses are reported on a per gpu basis. Ensure the first GPU failed, and all others passed
    for gpuId in gpuIds:
        throttled, errMsg = find_throttle_failure(response, gpuId, dcgm_structs.DCGM_SM_STRESS_INDEX)
        if gpuId == failGpuId:
            assert throttled, "Expected throttling error but found none (%s)" % errMsg
        else:
            assert not throttled, "Expected not to find a throttling error but found '%s'" % errMsg
def helper_per_gpu_responses_dcgmi(handle, gpuIds):
    """Run `dcgmi diag` as a subprocess with throttling injected on the first GPU
    and verify per-GPU results appear in both the console output and the JSON
    output (first GPU fails, the rest pass)."""
    def get_stdout(app):
        # Flatten the app's stdout lines into one space-separated string
        output = ''
        for line in app.stdout_lines:
            output = output + line + " "
        return output

    def print_output(app):
        # Dump the app's stdout (info) and stderr (error) to the test log
        logger.info(get_stdout(app))
        for line in app.stderr_lines:
            logger.error(line)

    def verify_successful_dcgmi_run(app):
        # Run the dcgmi app to completion and assert the expected exit code
        app.start(timeout=40)
        logger.info("Started dcgmi diag with pid %s" % app.getpid())
        retcode = app.wait()
        if test_utils.is_mig_incompatible_failure(get_stdout(app)):
            app.validate()
            test_utils.skip_test("Skipping this test because MIG is configured incompatibly (preventing access to the whole GPU)")
        # dcgm returns DCGM_ST_NVVS_ERROR on diag failure (which is expected here).
        expected_retcode = c_uint8(dcgm_structs.DCGM_ST_NVVS_ISOLATE_ERROR).value
        if retcode != expected_retcode:
            if app.stderr_lines or app.stdout_lines:
                logger.info("dcgmi output:")
                print_output(app)
        assert retcode == expected_retcode, \
            "Expected dcgmi diag to have retcode %s. Got return code %s" % (expected_retcode, retcode)
        app.validate()  # non-zero exit code must be validated

    #helper_verify_diag_passing(handle, gpuIds, useFakeGpus=True)
    # Setup injection app
    interval = 0.1
    fieldId = dcgm_fields.DCGM_FI_DEV_CLOCK_THROTTLE_REASONS
    insertedError = dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN
    # Use an offset to make these errors start after the benign values
    inject_value(handle, gpuIds[0], fieldId, insertedError, injection_offset, True)
    inject_value(handle, gpuIds[0], dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 1000, injection_offset, True)
    # Verify that the inserted values are visible in DCGM before starting the diag
    assert dcgm_internal_helpers.verify_field_value(gpuIds[0], fieldId, insertedError, checkInterval=interval, maxWait=5, numMatches=1), \
        "Expected inserted values to be visible in DCGM"
    # Verify dcgmi output
    gpuIdStrings = list(map(str, gpuIds))
    gpuList = ",".join(gpuIdStrings)
    args = ["diag", "-r", "SM Stress", "-p", "sm stress.test_duration=5,pcie.max_pcie_replays=1", "-f", gpuList, "--throttle-mask", "0"]
    dcgmiApp = DcgmiApp(args=args)
    logger.info("Verifying stdout output")
    verify_successful_dcgmi_run(dcgmiApp)
    # Verify dcgmi output shows per gpu results (crude approximation of verifying correct console output)
    stress_header_found = False
    fail_gpu_found = False
    fail_gpu_text = "Fail - GPU: %s" % gpuIds[0]
    check_for_warning = False
    warning_found = False
    for line in dcgmiApp.stdout_lines:
        if not stress_header_found:
            if "Stress" not in line:
                continue
            stress_header_found = True
            continue
        if not fail_gpu_found:
            if fail_gpu_text not in line:
                continue
            fail_gpu_found = True
            check_for_warning = True
            continue
        if check_for_warning:
            if "Warning" in line:
                warning_found = True
            break
    if not (stress_header_found and fail_gpu_found and warning_found):
        logger.info("dcgmi output:")
        print_output(dcgmiApp)
    assert stress_header_found, "Expected to see 'Stress' header in output"
    assert fail_gpu_found, "Expected to see %s in output" % fail_gpu_text
    assert warning_found, "Expected to see 'Warning' in output after GPU failure text"
    # Re-inject so the second (JSON) run also observes the throttling values
    inject_value(handle, gpuIds[0], fieldId, insertedError, injection_offset, True)
    inject_value(handle, gpuIds[0], dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 1000, injection_offset, True)
    # Verify that the inserted values are visible in DCGM before starting the diag
    assert dcgm_internal_helpers.verify_field_value(gpuIds[0], fieldId, insertedError, checkInterval=interval, maxWait=5, numMatches=1), \
        "Expected inserted values to be visible in DCGM"
    # Verify JSON output
    logger.info("Verifying JSON output")
    args.append("-j")
    dcgmiApp = DcgmiApp(args=args)
    verify_successful_dcgmi_run(dcgmiApp)
    # Stop error insertion
    logger.info("Stopped error injection")
    # Verify per GPU results
    json_output = "\n".join(dcgmiApp.stdout_lines)
    output = json.loads(json_output)
    verifed = False
    if (len(output.get("DCGM GPU Diagnostic", {}).get("test_categories", [])) == 2
            and output["DCGM GPU Diagnostic"]["test_categories"][1].get("category", None) == "Stress"
            and output["DCGM GPU Diagnostic"]["test_categories"][1]["tests"][0]["name"] == "SM Stress"
            and len(output["DCGM GPU Diagnostic"]["test_categories"][1]["tests"][0]["results"]) >= 2
            and output["DCGM GPU Diagnostic"]["test_categories"][1]["tests"][0]["results"][0]["gpu_ids"] == str(gpuIds[0])
            and output["DCGM GPU Diagnostic"]["test_categories"][1]["tests"][0]["results"][0]["status"] == "Fail"
            and output["DCGM GPU Diagnostic"]["test_categories"][1]["tests"][0]["results"][1]["status"] == "Pass"):
        verifed = True
    if not verifed:
        print_output(dcgmiApp)
    assert verifed, "dcgmi JSON output did not pass verification"
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_per_gpu_responses_standalone_api(handle, gpuIds):
    """Per-GPU diag responses via the API: needs >= 2 same-SKU GPUs and a SKU
    that does not mask the injected HW_SLOWDOWN reason."""
    if len(gpuIds) < 2:
        test_utils.skip_test("Skipping because this test requires 2 or more GPUs with same SKU")
    if test_utils.is_throttling_masked_by_nvvs(handle, gpuIds[0], dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN):
        test_utils.skip_test("Skipping because this SKU ignores the throttling we inject for this test")
    logger.info("Starting test for per gpu responses (API call)")
    helper_per_gpu_responses_api(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_per_gpu_responses_standalone_dcgmi(handle, gpuIds):
    """Per-GPU diag responses via dcgmi console/JSON output: needs >= 2 same-SKU
    GPUs and a SKU that does not mask the injected HW_SLOWDOWN reason."""
    if len(gpuIds) < 2:
        test_utils.skip_test("Skipping because this test requires 2 or more GPUs with same SKU")
    if test_utils.is_throttling_masked_by_nvvs(handle, gpuIds[0], dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN):
        test_utils.skip_test("Skipping because this SKU ignores the throttling we inject for this test")
    logger.info("Starting test for per gpu responses (dcgmi output)")
    helper_per_gpu_responses_dcgmi(handle, gpuIds)
def helper_test_diagnostic_config_usage(handle, gpuIds):
    """Run the diagnostic with a config file that lowers max_sbe_errors to 1 and
    verify injected SBEs cause a non-passing result for the first GPU."""
    dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr="diagnostic", paramsStr="diagnostic.test_duration=10")
    dd.SetConfigFileContents("%YAML 1.2\n\ncustom:\n- custom:\n diagnostic:\n max_sbe_errors: 1")
    inject_value(handle, gpuIds[0], dcgm_fields.DCGM_FI_DEV_ECC_SBE_VOL_TOTAL, 1000, injection_offset, True)
    response = test_utils.diag_execute_wrapper(dd, handle)
    assert response.perGpuResponses[gpuIds[0]].results[dcgm_structs.DCGM_DIAGNOSTIC_INDEX].result != dcgm_structs.DCGM_DIAG_RESULT_PASS, \
        "Should have a failure due to injected SBEs, but got passing result"
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_diagnostic_config_usage_standalone(handle, gpuIds):
    """Custom config file (max_sbe_errors: 1) must be honored by the diagnostic."""
    helper_test_diagnostic_config_usage(handle, gpuIds)
| true | true |
f73477b87ab81023e673fecd85d93a87674baf40 | 4,145 | py | Python | mux_python/models/update_asset_request.py | ryan-alley/mux-python | 82003fa5eb9acfa712418de52a88716e917cf000 | [
"MIT"
] | null | null | null | mux_python/models/update_asset_request.py | ryan-alley/mux-python | 82003fa5eb9acfa712418de52a88716e917cf000 | [
"MIT"
] | null | null | null | mux_python/models/update_asset_request.py | ryan-alley/mux-python | 82003fa5eb9acfa712418de52a88716e917cf000 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Mux API
Mux is how developers build online video. This API encompasses both Mux Video and Mux Data functionality to help you build your video-related projects better and faster than ever before. # noqa: E501
The version of the OpenAPI document: v1
Contact: devex@mux.com
Generated by: https://openapi-generator.tech
"""
import inspect
import pprint
import re # noqa: F401
import six
from mux_python.configuration import Configuration
class UpdateAssetRequest(object):
    """Request body for updating a Mux asset.

    NOTE: Originally auto generated by OpenAPI Generator
    (https://openapi-generator.tech); hand-maintained fixes applied:
    - inspect.getargspec (removed in Python 3.11) replaced with inspect.signature
    - six.iteritems replaced with the native dict iteration (Python 3 only)
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'passthrough': 'str'
    }

    attribute_map = {
        'passthrough': 'passthrough'
    }

    def __init__(self, passthrough=None, local_vars_configuration=None):  # noqa: E501
        """UpdateAssetRequest - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration.get_default_copy()
        self.local_vars_configuration = local_vars_configuration

        self._passthrough = None
        self.discriminator = None

        if passthrough is not None:
            self.passthrough = passthrough

    @property
    def passthrough(self):
        """Gets the passthrough of this UpdateAssetRequest.  # noqa: E501

        Arbitrary metadata set for the Asset. Max 255 characters. In order to clear this value, the field should be included with an empty string value.  # noqa: E501

        :return: The passthrough of this UpdateAssetRequest.  # noqa: E501
        :rtype: str
        """
        return self._passthrough

    @passthrough.setter
    def passthrough(self, passthrough):
        """Sets the passthrough of this UpdateAssetRequest.

        Arbitrary metadata set for the Asset. Max 255 characters. In order to clear this value, the field should be included with an empty string value.  # noqa: E501

        :param passthrough: The passthrough of this UpdateAssetRequest.  # noqa: E501
        :type passthrough: str
        """
        self._passthrough = passthrough

    def to_dict(self, serialize=False):
        """Returns the model properties as a dict.

        :param serialize: when True, keys use the JSON names from attribute_map
        """
        result = {}

        def convert(x):
            # Recurse into nested models; plain values pass through unchanged.
            if hasattr(x, "to_dict"):
                # Bug fix: inspect.getargspec was removed in Python 3.11.
                # Detect a 'serialize' parameter via inspect.signature instead
                # (signature of a bound method excludes 'self').
                if "serialize" in inspect.signature(x.to_dict).parameters:
                    return x.to_dict(serialize)
                return x.to_dict()
            return x

        for attr in self.openapi_types:
            value = getattr(self, attr)
            attr = self.attribute_map.get(attr, attr) if serialize else attr
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: convert(x),
                    value
                ))
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], convert(item[1])),
                    value.items()
                ))
            else:
                result[attr] = convert(value)

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UpdateAssetRequest):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, UpdateAssetRequest):
            return True

        return self.to_dict() != other.to_dict()
| 31.401515 | 204 | 0.602171 |
import inspect
import pprint
import re
import six
from mux_python.configuration import Configuration
class UpdateAssetRequest(object):
openapi_types = {
'passthrough': 'str'
}
attribute_map = {
'passthrough': 'passthrough'
}
def __init__(self, passthrough=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._passthrough = None
self.discriminator = None
if passthrough is not None:
self.passthrough = passthrough
@property
def passthrough(self):
return self._passthrough
@passthrough.setter
def passthrough(self, passthrough):
self._passthrough = passthrough
def to_dict(self, serialize=False):
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = inspect.getargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, UpdateAssetRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, UpdateAssetRequest):
return True
return self.to_dict() != other.to_dict()
| true | true |
f734786d0e336e6a2f1b675fb9f89c68a3e86aae | 7,686 | py | Python | tests/conftest.py | sdss/sdss_access | 04531f969a6eccfb71b78fc604e2381da3249cb4 | [
"BSD-3-Clause"
] | 6 | 2019-01-21T03:02:55.000Z | 2022-01-10T00:47:08.000Z | tests/conftest.py | sdss/sdss_access | 04531f969a6eccfb71b78fc604e2381da3249cb4 | [
"BSD-3-Clause"
] | 23 | 2017-04-10T14:59:57.000Z | 2021-09-24T21:08:36.000Z | tests/conftest.py | sdss/sdss_access | 04531f969a6eccfb71b78fc604e2381da3249cb4 | [
"BSD-3-Clause"
] | 2 | 2017-07-01T07:02:03.000Z | 2019-04-22T12:49:55.000Z | # !usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-03-24 12:22:30
# @Last modified by: Michael Talbot
# @Last Modified time: 2019-08-07 12:30:00
from __future__ import print_function, division, absolute_import
import glob
import gzip
import importlib
import os
import pytest
import yaml
import contextlib
import shutil
import tree.tree as treemod
from sdss_access import RsyncAccess, HttpAccess, CurlAccess
from sdss_access.path import Path
# PYTEST MODIFIERS
# -----------------
def pytest_addoption(parser):
    """Pytest hook: register the ``--runslow`` command-line flag (off by default)."""
    # run slow tests
    parser.addoption('--runslow', action='store_true', default=False, help='Run slow tests.')
def pytest_runtest_setup(item):
    """Pytest hook: skip tests marked ``slow`` unless ``--runslow`` was given."""
    if 'slow' in item.keywords and not item.config.getoption('--runslow'):
        pytest.skip('Requires --runslow option to run.')
@pytest.fixture()
def path():
    ''' Fixture yielding a generic sdss_access Path with a freshly planted tree. '''
    path = Path()
    path.replant_tree()
    yield path
    # teardown: drop the reference; nothing external needs cleanup
    path = None
# releases to parametrize over: an unreleased 'work' tree plus a public data release
releases = ['work', 'DR15']
# read in test data parameters and also get paths
def get_data():
    ''' Load the test-data parameters from data/paths.yaml (safe YAML load). '''
    with open(os.path.join(os.path.dirname(__file__), 'data/paths.yaml')) as f:
        data = yaml.load(f, Loader=yaml.SafeLoader)
    return data
# loaded once at collection time; the fixtures below parametrize over these
data = get_data()
paths = data.get('paths')
@pytest.fixture(scope='session', params=releases)
def release(request):
    ''' Session fixture parametrizing each test over the ``releases`` list. '''
    return request.param
@pytest.fixture(scope='session', params=paths)
def datapath(request):
    ''' Session fixture parametrizing each test over the paths from paths.yaml. '''
    return request.param
@pytest.fixture(scope='session')
def expdata(release, datapath):
    ''' Fixture yielding the expected source/destination data for one
    (release, datapath) combination, as a dict. '''
    remote = data.get('remote_base')
    # remote base
    base = remote['work'] if release == 'work' else remote['public']
    # sas_module; a work or DR directory
    sas_module = datapath['work'] if release == 'work' else release.lower()
    # file location
    location = datapath['location']
    # full source file location
    source = os.path.join(base, sas_module, location)
    # full final file location
    destination = os.path.join(os.getenv('SAS_BASE_DIR'), sas_module, location)
    # combined dict
    result = {'name': datapath['name'], 'params': datapath['params'], 'base': base,
              'sas_module': sas_module, 'location': location, 'source': source,
              'destination': destination, 'release': release.lower()}
    yield result
    result = None
@pytest.fixture(scope='session')
def inittask(expdata):
    ''' fixture to yield expected initial stream task based on test data '''
    #patch = '' if expdata['release'] == 'work' else expdata['release']
    #loc = os.path.join(patch, expdata['location'])
    # 'exists' is None before the remote existence check has run
    task = [{'sas_module': expdata['sas_module'], 'location': expdata['location'],
             'source': expdata['source'], 'destination': expdata['destination'], 'exists': None}]
    yield task
    task = None
@pytest.fixture(scope='session')
def finaltask(expdata):
    ''' fixture to yield expected final stream task based on test data '''
    # same shape as inittask; kept separate so tests can assert both stages
    task = [{'sas_module': expdata['sas_module'], 'location': expdata['location'],
             'source': expdata['source'], 'destination': expdata['destination'], 'exists': None}]
    yield task
    task = None
@pytest.fixture(scope='session')
def rsync(release):
    ''' Fixture creating a remote-connected RsyncAccess, public for DR releases. '''
    if 'DR' in release:
        rsync = RsyncAccess(label='test_rsync', public=True, release=release)
    else:
        rsync = RsyncAccess(label='test_rsync')
    rsync.remote()
    yield rsync
    # teardown
    rsync.reset()
    rsync = None
@pytest.fixture(scope='session')
def radd(rsync, expdata):
    ''' Fixture adding the parametrized expdata path to the rsync object. '''
    rsync.add(expdata['name'], **expdata['params'])
    yield rsync
    rsync.reset()
@pytest.fixture(scope='session')
def rstream(radd):
    ''' Fixture setting the download stream on the parametrized rsync object. '''
    radd.set_stream()
    yield radd
    radd.reset()
@pytest.fixture(scope='session')
def http(release):
    ''' Fixture creating an HttpAccess object, public for DR releases. '''
    if 'DR' in release:
        http = HttpAccess(public=True, release=release)
    else:
        http = HttpAccess()
    yield http
    http = None
@pytest.fixture(scope='session')
def curl(release):
    ''' Fixture creating a remote-connected CurlAccess, public for DR releases. '''
    if 'DR' in release:
        curl = CurlAccess(label='test_curl', public=True, release=release)
    else:
        curl = CurlAccess(label='test_curl')
    curl.remote()
    yield curl
    # teardown
    curl.reset()
    curl = None
@pytest.fixture(scope='session')
def cadd(curl, expdata):
    ''' Fixture adding the parametrized expdata path to the curl object. '''
    curl.add(expdata['name'], **expdata['params'])
    yield curl
@pytest.fixture(scope='session')
def cstream(cadd):
    ''' Fixture setting the download stream on the parametrized curl object. '''
    cadd.set_stream()
    yield cadd
@pytest.fixture()
def monkeysas(tmpdir, monkeypatch, path):
    ''' Fixture monkeypatching SAS_BASE_DIR to a per-test temp directory.

    Replants the tree for the patched location, and on teardown restores the
    original environment (including the previously-unset case) and replants
    again.
    '''
    orig = os.getenv("SAS_BASE_DIR")
    tmppath = tmpdir / 'sas'
    os.makedirs(tmppath, exist_ok=True)
    monkeypatch.setenv('SAS_BASE_DIR', str(tmppath))
    path.replant_tree()
    yield
    # BUGFIX: ``os.environ[...] = None`` raises TypeError; handle the case
    # where SAS_BASE_DIR was not set before the test ran.
    if orig is None:
        os.environ.pop("SAS_BASE_DIR", None)
    else:
        os.environ["SAS_BASE_DIR"] = orig
    path.replant_tree()
@pytest.fixture()
def copydata(tmpdir, request):
    ''' Fixture copying one real SAS file (request.param, relative to
    SAS_BASE_DIR) into a temporary sas/ tree; yields the destination path. '''
    srcpath = os.path.join(os.getenv("SAS_BASE_DIR"), request.param)
    # skip the test if no real data exists to copy
    if not os.path.exists(srcpath):
        pytest.skip('file does not exist cannot copy')
    sasdir = tmpdir / 'sas'
    destpath = sasdir / request.param
    os.makedirs(os.path.dirname(destpath), exist_ok=True)
    shutil.copy(srcpath, destpath)
    yield destpath
@pytest.fixture()
def copymulti(tmpdir, request):
    ''' Fixture copying every file matching the glob request.param (relative
    to SAS_BASE_DIR) into a temporary sas/ tree. '''
    srcpath = os.path.join(os.getenv("SAS_BASE_DIR"), request.param)
    files = glob.glob(srcpath)
    if not files:
        pytest.skip('Files do not exist, cannot copy')
    for item in files:
        # path of the file relative to SAS_BASE_DIR
        loc = item.split(os.getenv("SAS_BASE_DIR") + '/')[-1]
        sasdir = tmpdir / 'sas'
        destpath = sasdir / loc
        os.makedirs(os.path.dirname(destpath), exist_ok=True)
        shutil.copy(item, destpath)
@contextlib.contextmanager
def gzuncompress(filename):
    ''' Context manager than gunzips a file temporarily.

    NOTE(review): the original .gz file is removed before yielding and is
    never recreated on exit, so despite the name the change is permanent
    within the test's (temporary) directory - confirm intent.
    '''
    import pathlib
    pp = pathlib.Path(filename)
    # e.g. foo.fits.gz -> foo.fits alongside the original
    decompfile = pp.parent / pp.stem
    with gzip.open(filename, 'rb') as f_in:
        with open(decompfile, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.remove(filename)
    yield
@contextlib.contextmanager
def gzcompress(filename):
    ''' Context manager than gzips a file temporarily.

    NOTE(review): like gzuncompress, the source file is removed and not
    restored on exit - the effect is permanent for the test directory.
    '''
    compfile = filename + '.gz'
    with open(filename, 'rb') as f_in:
        with gzip.open(compfile, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.remove(filename)
    yield
@pytest.fixture()
def monkeyhome(monkeypatch, tmp_path):
    ''' Fixture monkeypatching HOME to a fresh temporary directory.

    BUGFIX: ``Path.mkdir()`` returns None, so the original code set HOME to
    the literal string "None".  Create the directory first, then point HOME
    at the path itself.
    '''
    path = tmp_path / 'tmp'
    path.mkdir()
    monkeypatch.setenv("HOME", str(path))
@pytest.fixture()
def monkeysdss5(monkeypatch):
    ''' Fixture pointing SDSS-V environment variables at /tmp locations and
    reloading the tree module so it picks them up. '''
    monkeypatch.setenv('ALLWISE_DIR', '/tmp/allwise')
    monkeypatch.setenv('EROSITA_DIR', '/tmp/erosita')
    monkeypatch.setenv('ROBOSTRATEGY_DATA', '/tmp/robodata')
    importlib.reload(treemod)
from __future__ import print_function, division, absolute_import
import glob
import gzip
import importlib
import os
import pytest
import yaml
import contextlib
import shutil
import tree.tree as treemod
from sdss_access import RsyncAccess, HttpAccess, CurlAccess
from sdss_access.path import Path
def pytest_addoption(parser):
parser.addoption('--runslow', action='store_true', default=False, help='Run slow tests.')
def pytest_runtest_setup(item):
if 'slow' in item.keywords and not item.config.getoption('--runslow'):
pytest.skip('Requires --runslow option to run.')
@pytest.fixture()
def path():
path = Path()
path.replant_tree()
yield path
path = None
releases = ['work', 'DR15']
def get_data():
with open(os.path.join(os.path.dirname(__file__), 'data/paths.yaml')) as f:
data = yaml.load(f, Loader=yaml.SafeLoader)
return data
data = get_data()
paths = data.get('paths')
@pytest.fixture(scope='session', params=releases)
def release(request):
return request.param
@pytest.fixture(scope='session', params=paths)
def datapath(request):
return request.param
@pytest.fixture(scope='session')
def expdata(release, datapath):
remote = data.get('remote_base')
base = remote['work'] if release == 'work' else remote['public']
sas_module = datapath['work'] if release == 'work' else release.lower()
location = datapath['location']
source = os.path.join(base, sas_module, location)
destination = os.path.join(os.getenv('SAS_BASE_DIR'), sas_module, location)
result = {'name': datapath['name'], 'params': datapath['params'], 'base': base,
'sas_module': sas_module, 'location': location, 'source': source,
'destination': destination, 'release': release.lower()}
yield result
result = None
@pytest.fixture(scope='session')
def inittask(expdata):
task = [{'sas_module': expdata['sas_module'], 'location': expdata['location'],
'source': expdata['source'], 'destination': expdata['destination'], 'exists': None}]
yield task
task = None
@pytest.fixture(scope='session')
def finaltask(expdata):
task = [{'sas_module': expdata['sas_module'], 'location': expdata['location'],
'source': expdata['source'], 'destination': expdata['destination'], 'exists': None}]
yield task
task = None
@pytest.fixture(scope='session')
def rsync(release):
if 'DR' in release:
rsync = RsyncAccess(label='test_rsync', public=True, release=release)
else:
rsync = RsyncAccess(label='test_rsync')
rsync.remote()
yield rsync
rsync.reset()
rsync = None
@pytest.fixture(scope='session')
def radd(rsync, expdata):
rsync.add(expdata['name'], **expdata['params'])
yield rsync
rsync.reset()
@pytest.fixture(scope='session')
def rstream(radd):
radd.set_stream()
yield radd
radd.reset()
@pytest.fixture(scope='session')
def http(release):
if 'DR' in release:
http = HttpAccess(public=True, release=release)
else:
http = HttpAccess()
yield http
http = None
@pytest.fixture(scope='session')
def curl(release):
if 'DR' in release:
curl = CurlAccess(label='test_curl', public=True, release=release)
else:
curl = CurlAccess(label='test_curl')
curl.remote()
yield curl
curl.reset()
curl = None
@pytest.fixture(scope='session')
def cadd(curl, expdata):
curl.add(expdata['name'], **expdata['params'])
yield curl
@pytest.fixture(scope='session')
def cstream(cadd):
cadd.set_stream()
yield cadd
@pytest.fixture()
def monkeysas(tmpdir, monkeypatch, path):
orig = os.getenv("SAS_BASE_DIR")
tmppath = tmpdir / 'sas'
os.makedirs(tmppath, exist_ok=True)
monkeypatch.setenv('SAS_BASE_DIR', str(tmppath))
path.replant_tree()
yield
os.environ["SAS_BASE_DIR"] = orig
path.replant_tree()
@pytest.fixture()
def copydata(tmpdir, request):
srcpath = os.path.join(os.getenv("SAS_BASE_DIR"), request.param)
if not os.path.exists(srcpath):
pytest.skip('file does not exist cannot copy')
sasdir = tmpdir / 'sas'
destpath = sasdir / request.param
os.makedirs(os.path.dirname(destpath), exist_ok=True)
shutil.copy(srcpath, destpath)
yield destpath
@pytest.fixture()
def copymulti(tmpdir, request):
srcpath = os.path.join(os.getenv("SAS_BASE_DIR"), request.param)
files = glob.glob(srcpath)
if not files:
pytest.skip('Files do not exist, cannot copy')
for item in files:
loc = item.split(os.getenv("SAS_BASE_DIR") + '/')[-1]
sasdir = tmpdir / 'sas'
destpath = sasdir / loc
os.makedirs(os.path.dirname(destpath), exist_ok=True)
shutil.copy(item, destpath)
@contextlib.contextmanager
def gzuncompress(filename):
import pathlib
pp = pathlib.Path(filename)
decompfile = pp.parent / pp.stem
with gzip.open(filename, 'rb') as f_in:
with open(decompfile, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(filename)
yield
@contextlib.contextmanager
def gzcompress(filename):
compfile = filename + '.gz'
with open(filename, 'rb') as f_in:
with gzip.open(compfile, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(filename)
yield
@pytest.fixture()
def monkeyhome(monkeypatch, tmp_path):
path = (tmp_path / 'tmp').mkdir()
monkeypatch.setenv("HOME", str(path))
@pytest.fixture()
def monkeysdss5(monkeypatch):
monkeypatch.setenv('ALLWISE_DIR', '/tmp/allwise')
monkeypatch.setenv('EROSITA_DIR', '/tmp/erosita')
monkeypatch.setenv('ROBOSTRATEGY_DATA', '/tmp/robodata')
importlib.reload(treemod) | true | true |
f7347a7336023c2124958b5d353570dbcc2ccf8a | 3,370 | py | Python | lotlan_scheduler/demo.py | iml130/lotlan-scheduler | b576f853706d614a918dccd9572cc2c2b666bbe4 | [
"Apache-2.0"
] | null | null | null | lotlan_scheduler/demo.py | iml130/lotlan-scheduler | b576f853706d614a918dccd9572cc2c2b666bbe4 | [
"Apache-2.0"
] | null | null | null | lotlan_scheduler/demo.py | iml130/lotlan-scheduler | b576f853706d614a918dccd9572cc2c2b666bbe4 | [
"Apache-2.0"
] | null | null | null | """ Contains main program example for scheduler """
# standard libraries
import sys
import os
sys.path.append(os.path.abspath("../lotlan_scheduler"))
# local sources
from lotlan_scheduler.scheduler import LotlanScheduler
from lotlan_scheduler.api.event import Event
def cb_triggered_by(mf_uuid, uuid_, event_information):
    """Scheduler callback: a TriggeredBy condition fired for material flow *mf_uuid*."""
    header = "cb_triggered_by from mf: " + str(mf_uuid)
    uuid_part = "UUID: " + str(uuid_)
    event_part = "Event_Info: " + str(event_information)
    print(header)
    print(uuid_part, event_part)
def cb_next_to(mf_uuid, transport_orders):
    """Scheduler callback: report the TransportOrders for material flow *mf_uuid*."""
    header = "cb_next_to from mf: " + str(mf_uuid)
    print(header)
    print(str(transport_orders))
def cb_finished_by(mf_uuid, uuid_, event_information):
    """Scheduler callback: a FinishedBy condition fired for material flow *mf_uuid*."""
    first_line = "cb_finished_by from mf: " + str(mf_uuid)
    print(first_line)
    print("UUID: " + str(uuid_), "Event_Info: " + str(event_information))
def cb_task_finished(mf_uuid, uuid_):
    """Scheduler callback: the task identified by *uuid_* completed."""
    lines = [
        "cb_task_finished from mf: " + str(mf_uuid),
        "task with uuid " + str(uuid_) + " finished",
    ]
    for line in lines:
        print(line)
def cb_all_finished(mf_uuid):
    """Scheduler callback: every task of material flow *mf_uuid* has finished."""
    message = "cb_all_finished from mf: " + str(mf_uuid)
    print(message)
def _print_usage():
    """Print how to invoke the demo."""
    print("Usage: demo.py [--test] <lotlan-file>")


def _parse_typed_value(type_code, raw_value):
    """Map a one-letter type code from user input to an Event type name plus
    the converted value.  Unknown codes pass through unchanged, matching the
    original fall-through behavior."""
    if type_code == "b":
        return "Boolean", raw_value == "True"
    if type_code == "i":
        return "Integer", int(raw_value)
    if type_code == "f":
        return "Float", float(raw_value)
    if type_code == "s":
        return "String", raw_value
    return type_code, raw_value


def main():
    """Run the interactive scheduler demo.

    Reads a LoTLan file named on the command line (optionally preceded by a
    ``--test`` flag), starts every material flow, then loops reading
    ``<mf-number> <uuid> <b|i|f|s> <event-name> <value>`` lines from stdin
    and fires the corresponding events until all material flows finish.
    """
    test_flag = False
    lotlan_string = ""
    if len(sys.argv) >= 2:
        if sys.argv[1] == "--test":
            test_flag = True
            if len(sys.argv) < 3:
                # robustness fix: '--test' without a file used to IndexError
                _print_usage()
                return
            with open(sys.argv[2], "r") as file:
                lotlan_string = file.read()
        else:
            with open(sys.argv[1], "r") as file:
                lotlan_string = file.read()
    else:
        # robustness fix: previously an empty program was handed to the scheduler
        _print_usage()
        return

    lotlan_logic = LotlanScheduler(lotlan_string, test_flag)
    material_flows = lotlan_logic.get_materialflows()

    for material_flow in material_flows:
        material_flow.register_callback_triggered_by(cb_triggered_by)
        material_flow.register_callback_next_to(cb_next_to)
        material_flow.register_callback_finished_by(cb_finished_by)
        material_flow.register_callback_task_finished(cb_task_finished)
        material_flow.register_callback_all_finished(cb_all_finished)
        material_flow.start()

    material_flow_running = True
    while material_flow_running:
        input_str = str(input("Wait for input:>"))
        if input_str != "":
            tokens = input_str.split(" ")
            if len(tokens) != 5:
                # robustness fix: malformed input used to crash the loop
                print("Expected: <mf-number> <uuid> <b|i|f|s> <event-name> <value>")
                continue
            mf_number, uid, input_type, input_name, input_value = tokens
            try:
                mf_number = int(mf_number)
            except ValueError:
                print("Material flow number must be an integer")
                continue
            if mf_number < len(material_flows):
                input_type, input_value = _parse_typed_value(input_type, input_value)
                material_flows[mf_number].fire_event(
                    str(uid), Event(input_name, "", input_type, value=input_value))

        # check if a material flow is still running
        # if every material flow is finished we are done otherwise continue
        material_flow_running = False
        for mf in material_flows:
            if mf.is_running() is True:
                material_flow_running = True


if __name__ == "__main__":
    main()
| 34.387755 | 90 | 0.601187 |
import sys
import os
sys.path.append(os.path.abspath("../lotlan_scheduler"))
from lotlan_scheduler.scheduler import LotlanScheduler
from lotlan_scheduler.api.event import Event
def cb_triggered_by(mf_uuid, uuid_, event_information):
print("cb_triggered_by from mf: " + str(mf_uuid))
print("UUID: " + str(uuid_), "Event_Info: " + str(event_information))
def cb_next_to(mf_uuid, transport_orders):
print("cb_next_to from mf: " + str(mf_uuid))
print(str(transport_orders))
def cb_finished_by(mf_uuid, uuid_, event_information):
print("cb_finished_by from mf: " + str(mf_uuid))
print("UUID: " + str(uuid_), "Event_Info: " + str(event_information))
def cb_task_finished(mf_uuid, uuid_):
print("cb_task_finished from mf: " + str(mf_uuid))
print("task with uuid " + str(uuid_) + " finished")
def cb_all_finished(mf_uuid):
print("cb_all_finished from mf: " + str(mf_uuid))
def main():
test_flag = False
lotlan_string = ""
if len(sys.argv) >= 2:
if sys.argv[1] == "--test":
test_flag = True
with open(sys.argv[2], "r") as file:
lotlan_string = file.read()
else:
with open(sys.argv[1], "r") as file:
lotlan_string = file.read()
lotlan_logic = LotlanScheduler(lotlan_string, test_flag)
material_flows = lotlan_logic.get_materialflows()
for material_flow in material_flows:
material_flow.register_callback_triggered_by(cb_triggered_by)
material_flow.register_callback_next_to(cb_next_to)
material_flow.register_callback_finished_by(cb_finished_by)
material_flow.register_callback_task_finished(cb_task_finished)
material_flow.register_callback_all_finished(cb_all_finished)
material_flow.start()
material_flow_running = True
while material_flow_running:
input_str = str(input("Wait for input:>"))
if input_str != "":
mf_number, uid, input_type, input_name, input_value = input_str.split(" ")
mf_number = int(mf_number)
if mf_number < len(material_flows):
if input_type == "b":
input_type = "Boolean"
input_value = input_value == "True"
elif input_type == "i":
input_type = "Integer"
input_value = int(input_value)
elif input_type == "f":
input_type = "Float"
input_value = float(input_value)
elif input_type == "s":
input_type = "String"
material_flows[mf_number].fire_event(str(uid), Event(input_name, "",
input_type, value=input_value))
material_flow_running = False
for mf in material_flows:
if mf.is_running() is True:
material_flow_running = True
if __name__ == "__main__":
main()
| true | true |
f7347b6d8e309b0c78b0fcfb2a1f79f5c9335adf | 3,189 | py | Python | profiles_project/settings.py | voltagebots/Django | ed1764b74c5e0599613d2f5e544566f461d9e641 | [
"MIT"
] | null | null | null | profiles_project/settings.py | voltagebots/Django | ed1764b74c5e0599613d2f5e544566f461d9e641 | [
"MIT"
] | null | null | null | profiles_project/settings.py | voltagebots/Django | ed1764b74c5e0599613d2f5e544566f461d9e641 | [
"MIT"
] | null | null | null | """
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control - rotate it and load
# it from an environment variable before any real deployment.
SECRET_KEY = 'smztt3zom_kgvvw^68qyfvvvy7bupo09f8w2@j$zewn0j&02+w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# must list the served hostnames before DEBUG is turned off
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    # Django contrib apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # third-party: Django REST framework + token auth
    'rest_framework',
    'rest_framework.authtoken',
    # project apps
    'profile_api',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# SQLite file next to manage.py; fine for development
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| 25.717742 | 91 | 0.70022 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'smztt3zom_kgvvw^68qyfvvvy7bupo09f8w2@j$zewn0j&02+w'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profile_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| true | true |
f7347c2720fbf9bb96a4e4f3907a47f52167125a | 5,616 | py | Python | scripts/gen-shaping-tests.py | laurmaedje/rustybuzz | 8a138b1843dcb967b785510e80dc38c4d6c319cf | [
"MIT"
] | 255 | 2020-01-13T02:09:28.000Z | 2022-03-21T11:33:00.000Z | scripts/gen-shaping-tests.py | laurmaedje/rustybuzz | 8a138b1843dcb967b785510e80dc38c4d6c319cf | [
"MIT"
] | 44 | 2020-01-13T09:54:23.000Z | 2022-03-14T03:50:34.000Z | scripts/gen-shaping-tests.py | laurmaedje/rustybuzz | 8a138b1843dcb967b785510e80dc38c4d6c319cf | [
"MIT"
] | 18 | 2020-02-18T14:05:41.000Z | 2022-03-14T03:34:38.000Z | #!/usr/bin/env python
import os
import sys
import subprocess
from pathlib import Path
# Whole .tests files skipped: "There is no sane way to test them."
# NOTE(review): macos.tests presumably targets the system (CoreText)
# shaper rather than the embedded OpenType engine - confirm.
IGNORE_TESTS = [
    'macos.tests',
]
# Individual generated test names skipped, grouped by their source suite,
# with the reason noted above each entry.
IGNORE_TEST_CASES = [
    # aots tests
    # in-house tests
    # --shaper=fallback is not supported.
    'simple_002',
    # Not possible to implement without shaping.
    'arabic_fallback_shaping_001',
    # `dfont` is not supported.
    'collections_001',
    'collections_002',
    'collections_003',
    # Face index out of bounds. ttf-parser doesn't permit this.
    'collections_006',
    # no `hhea` table.
    'indic_decompose_001',
    # ttf-parser doesn't support phantom points
    'variations_space_001',
    # text-rendering-tests tests
    # Unknown issue. Investigate.
    'cmap_1_004',
    'shknda_3_031',
    'shlana_10_028',
    'shlana_10_041',
    'shlana_5_010',
    'shlana_5_012',
]
def update_relative_path(tests_name, fontfile):
    """Rewrite a harfbuzz '../fonts/...' font path as a path relative to the
    rustybuzz repo root: 'tests/fonts/<suite>/<file>'."""
    trimmed = fontfile.replace('../fonts/', '')
    return 'tests/fonts/' + tests_name + '/' + trimmed
# Converts `U+0041,U+0078` into `\u{0041}\u{0078}`, breaking the generated
# Rust string literal every 10 escapes to keep lines short.
def convert_unicodes(unicodes):
    """Turn a comma-separated `U+XXXX` list into Rust `\\u{...}` escapes."""
    chunks = []
    for index, codepoint in enumerate(unicodes.split(',')):
        if index > 0 and index % 10 == 0:
            # continuation: escaped newline plus alignment indent
            chunks.append('\\\n            ')
        chunks.append('\\u{' + codepoint[2:] + '}')
    return ''.join(chunks)
def convert_test(hb_dir, hb_shape_exe, tests_name, file_name, idx, data, fonts):
    """Convert one harfbuzz `.tests` line into a Rust #[test] function.

    Runs the freshly built hb-shape to obtain the expected glyph string,
    records the font used in *fonts*, and returns the generated Rust source
    (or '' if the case is in IGNORE_TEST_CASES).
    """
    # each .tests line is fontfile:options:unicodes:expected-glyphs
    fontfile, options, unicodes, glyphs_expected = data.split(':')
    fontfile_rs = update_relative_path(tests_name, fontfile)
    unicodes_rs = convert_unicodes(unicodes)
    # test name: file stem + 1-based line index, e.g. arabic_fallback_shaping_001
    test_name = file_name.replace('.tests', '').replace('-', '_') + f'_{idx:03d}'
    test_name = test_name.lower()
    # strip options rustybuzz either implies or does not support
    options = options.replace('--shaper=ot', '')
    options = options.replace(' --font-funcs=ft', '').replace('--font-funcs=ft', '')
    options = options.replace(' --font-funcs=ot', '').replace('--font-funcs=ot', '')
    options = options.replace('--font-size=1000', '') # we don't support font scaling
    options = options.strip()
    # We have to actually run hb-shape instead of using predefined results,
    # because hb sometimes stores results for freetype and not for embedded OpenType
    # engine, which we are using.
    # Right now, it only affects 'text-rendering-tests'.
    if len(options) != 0:
        options_list = options.split(' ')
    else:
        options_list = []
    options_list.insert(0, str(hb_shape_exe))
    # Force OT functions, since this is the only one we support in rustybuzz.
    options_list.append('--font-funcs=ot')
    abs_font_path = hb_dir.joinpath('test/shaping/data')\
                          .joinpath(tests_name)\
                          .joinpath('tests') \
                          .joinpath(fontfile)
    options_list.append(str(abs_font_path))
    options_list.append(f'--unicodes={unicodes}') # no need to escape it
    glyphs_expected = subprocess.run(options_list, check=True, stdout=subprocess.PIPE)\
                      .stdout.decode()
    glyphs_expected = glyphs_expected[1:-2] # remove `[..]\n`
    # wrap the expected string at every glyph separator for readability
    glyphs_expected = glyphs_expected.replace('|', '|\\\n               ')
    # escape double quotes so the options embed safely in the Rust literal
    options = options.replace('"', '\\"')
    fonts.add(os.path.split(fontfile_rs)[1])
    if test_name in IGNORE_TEST_CASES:
        return ''
    return (f'#[test]\n'
            f'fn {test_name}() {{\n'
            f'    assert_eq!(\n'
            f'        shape(\n'
            f'            "{fontfile_rs}",\n'
            f'            "{unicodes_rs}",\n'
            f'            "{options}",\n'
            f'        ),\n'
            f'        "{glyphs_expected}"\n'
            f'    );\n'
            f'}}\n'
            '\n')
def convert(hb_dir, hb_shape_exe, tests_dir, tests_name):
    """Convert every `.tests` file in *tests_dir* into one Rust test module.

    Writes ../tests/shaping_<suite>.rs and returns the set of font file
    names referenced by the generated tests.
    """
    files = sorted(os.listdir(tests_dir))
    files = [f for f in files if f.endswith('.tests')]
    fonts = set()
    rust_code = ('// WARNING: this file was generated by ../scripts/gen-shaping-tests.py\n'
                 '\n'
                 'mod shaping_impl;\n'
                 'use shaping_impl::shape;\n'
                 '\n')
    for file in files:
        if file in IGNORE_TESTS:
            continue
        with open(tests_dir / file, 'r') as f:
            for idx, test in enumerate(f.read().splitlines()):
                # skip comments and empty lines
                if test.startswith('#') or len(test) == 0:
                    continue
                rust_code += convert_test(hb_dir, hb_shape_exe, tests_name,
                                          file, idx + 1, test, fonts)
    tests_name_snake_case = tests_name.replace('-', '_')
    with open(f'../tests/shaping_{tests_name_snake_case}.rs', 'w') as f:
        f.write(rust_code)
    return fonts
# Script entry: expects exactly one argument, the harfbuzz source checkout.
if len(sys.argv) != 2:
    print('Usage: gen-shaping-tests.py /path/to/harfbuzz-src')
    exit(1)
hb_dir = Path(sys.argv[1])
assert hb_dir.exists()
# Check that harfbuzz was built.
hb_shape_exe = hb_dir.joinpath('builddir/util/hb-shape')
if not hb_shape_exe.exists():
    print('Build harfbuzz first using:')
    print('    meson builddir')
    print('    ninja -Cbuilddir')
    exit(1)
used_fonts = []
font_files = []
test_dir_names = ['aots', 'in-house', 'text-rendering-tests']
# Convert each harfbuzz test suite in turn, collecting which fonts are used.
for test_dir_name in test_dir_names:
    tests_dir = hb_dir / f'test/shaping/data/{test_dir_name}/tests'
    used_fonts += convert(hb_dir, hb_shape_exe, tests_dir, test_dir_name)
    font_files += os.listdir(hb_dir / f'test/shaping/data/{test_dir_name}/fonts')
# Check for unused fonts.
unused_fonts = sorted(list(set(font_files).difference(used_fonts)))
if len(unused_fonts) != 0:
    print('Unused fonts:')
    for font in unused_fonts:
        print(font)
| 29.714286 | 91 | 0.605591 |
import os
import sys
import subprocess
from pathlib import Path
IGNORE_TESTS = [
'macos.tests',
]
IGNORE_TEST_CASES = [
'simple_002',
'arabic_fallback_shaping_001',
'collections_001',
'collections_002',
'collections_003',
'collections_006',
# no `hhea` table.
'indic_decompose_001',
# ttf-parser doesn't support phantom points
'variations_space_001',
'cmap_1_004',
'shknda_3_031',
'shlana_10_028',
'shlana_10_041',
'shlana_5_010',
'shlana_5_012',
]
def update_relative_path(tests_name, fontfile):
fontfile = fontfile.replace('../fonts/', '')
return f'tests/fonts/{tests_name}/{fontfile}'
def convert_unicodes(unicodes):
text = ''
for (i, u) in enumerate(unicodes.split(',')):
if i > 0 and i % 10 == 0:
text += '\\\n '
text += f'\\u{{{u[2:]}}}'
return text
def convert_test(hb_dir, hb_shape_exe, tests_name, file_name, idx, data, fonts):
fontfile, options, unicodes, glyphs_expected = data.split(':')
fontfile_rs = update_relative_path(tests_name, fontfile)
unicodes_rs = convert_unicodes(unicodes)
test_name = file_name.replace('.tests', '').replace('-', '_') + f'_{idx:03d}'
test_name = test_name.lower()
options = options.replace('--shaper=ot', '')
options = options.replace(' --font-funcs=ft', '').replace('--font-funcs=ft', '')
options = options.replace(' --font-funcs=ot', '').replace('--font-funcs=ot', '')
options = options.replace('--font-size=1000', '')
options = options.strip()
# We have to actually run hb-shape instead of using predefined results,
# because hb sometimes stores results for freetype and not for embedded OpenType
# engine, which we are using.
# Right now, it only affects 'text-rendering-tests'.
if len(options) != 0:
options_list = options.split(' ')
else:
options_list = []
options_list.insert(0, str(hb_shape_exe))
# Force OT functions, since this is the only one we support in rustybuzz.
options_list.append('--font-funcs=ot')
abs_font_path = hb_dir.joinpath('test/shaping/data')\
.joinpath(tests_name)\
.joinpath('tests') \
.joinpath(fontfile)
options_list.append(str(abs_font_path))
options_list.append(f'--unicodes={unicodes}') # no need to escape it
glyphs_expected = subprocess.run(options_list, check=True, stdout=subprocess.PIPE)\
.stdout.decode()
glyphs_expected = glyphs_expected[1:-2] # remove `[..]\n`
glyphs_expected = glyphs_expected.replace('|', '|\\\n ')
options = options.replace('"', '\\"')
fonts.add(os.path.split(fontfile_rs)[1])
if test_name in IGNORE_TEST_CASES:
return ''
return (f'
f'fn {test_name}() {{\n'
f' assert_eq!(\n'
f' shape(\n'
f' "{fontfile_rs}",\n'
f' "{unicodes_rs}",\n'
f' "{options}",\n'
f' ),\n'
f' "{glyphs_expected}"\n'
f' );\n'
f'}}\n'
'\n')
def convert(hb_dir, hb_shape_exe, tests_dir, tests_name):
files = sorted(os.listdir(tests_dir))
files = [f for f in files if f.endswith('.tests')]
fonts = set()
rust_code = ('// WARNING: this file was generated by ../scripts/gen-shaping-tests.py\n'
'\n'
'mod shaping_impl;\n'
'use shaping_impl::shape;\n'
'\n')
for file in files:
if file in IGNORE_TESTS:
continue
with open(tests_dir / file, 'r') as f:
for idx, test in enumerate(f.read().splitlines()):
# skip comments and empty lines
if test.startswith('
continue
rust_code += convert_test(hb_dir, hb_shape_exe, tests_name,
file, idx + 1, test, fonts)
tests_name_snake_case = tests_name.replace('-', '_')
with open(f'../tests/shaping_{tests_name_snake_case}.rs', 'w') as f:
f.write(rust_code)
return fonts
if len(sys.argv) != 2:
print('Usage: gen-shaping-tests.py /path/to/harfbuzz-src')
exit(1)
hb_dir = Path(sys.argv[1])
assert hb_dir.exists()
# Check that harfbuzz was built.
hb_shape_exe = hb_dir.joinpath('builddir/util/hb-shape')
if not hb_shape_exe.exists():
print('Build harfbuzz first using:')
print(' meson builddir')
print(' ninja -Cbuilddir')
exit(1)
used_fonts = []
font_files = []
test_dir_names = ['aots', 'in-house', 'text-rendering-tests']
for test_dir_name in test_dir_names:
tests_dir = hb_dir / f'test/shaping/data/{test_dir_name}/tests'
used_fonts += convert(hb_dir, hb_shape_exe, tests_dir, test_dir_name)
font_files += os.listdir(hb_dir / f'test/shaping/data/{test_dir_name}/fonts')
# Check for unused fonts.
unused_fonts = sorted(list(set(font_files).difference(used_fonts)))
if len(unused_fonts) != 0:
print('Unused fonts:')
for font in unused_fonts:
print(font)
| true | true |
f7347d0417b9471093682564a5f485ea4fc07294 | 2,826 | py | Python | ipywidgets/widgets/widget_layout.py | minrk/ipython_widgets | a80597fd4c81dd75a2f610e376fe21a64e0df8d1 | [
"BSD-3-Clause"
] | 2 | 2019-05-15T03:10:38.000Z | 2020-10-08T08:08:24.000Z | ipywidgets/widgets/widget_layout.py | minrk/ipython_widgets | a80597fd4c81dd75a2f610e376fe21a64e0df8d1 | [
"BSD-3-Clause"
] | 2 | 2015-12-29T18:03:24.000Z | 2016-05-19T06:26:50.000Z | ipywidgets/widgets/widget_layout.py | minrk/ipython_widgets | a80597fd4c81dd75a2f610e376fe21a64e0df8d1 | [
"BSD-3-Clause"
] | 1 | 2020-05-14T14:32:56.000Z | 2020-05-14T14:32:56.000Z | """Contains the Layout class"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets import Unicode, Instance
from .widget_core import CoreWidget
class Layout(CoreWidget):
    """Layout specification

    Defines a layout that can be expressed using CSS. Supports a subset of
    https://developer.mozilla.org/en-US/docs/Web/CSS/Reference

    When a property is also accessible via a shorthand property, we only
    expose the shorthand.

    For example:
    - ``flex-grow``, ``flex-shrink`` and ``flex-basis`` are bound to ``flex``.
    - ``flex-wrap`` and ``flex-direction`` are bound to ``flex-flow``.
    - ``margin-[top/bottom/left/right]`` values are bound to ``margin``, etc.
    """
    # Front-end wiring: which JS module/model/view renders this widget.
    _model_module = Unicode('jupyter-js-widgets').tag(sync=True)
    _view_module = Unicode('jupyter-js-widgets').tag(sync=True)
    _view_name = Unicode('LayoutView').tag(sync=True)
    _model_name = Unicode('LayoutModel').tag(sync=True)

    # Keys: each trait mirrors the CSS property of the same (snake_cased)
    # name; None means "unset", and every value is synced to the front end.
    align_content = Unicode(None, allow_none=True).tag(sync=True)
    align_items = Unicode(None, allow_none=True).tag(sync=True)
    align_self = Unicode(None, allow_none=True).tag(sync=True)
    bottom = Unicode(None, allow_none=True).tag(sync=True)
    border = Unicode(None, allow_none=True).tag(sync=True)
    display = Unicode(None, allow_none=True).tag(sync=True)
    flex = Unicode(None, allow_none=True).tag(sync=True)
    flex_flow = Unicode(None, allow_none=True).tag(sync=True)
    height = Unicode(None, allow_none=True).tag(sync=True)
    justify_content = Unicode(None, allow_none=True).tag(sync=True)
    left = Unicode(None, allow_none=True).tag(sync=True)
    margin = Unicode(None, allow_none=True).tag(sync=True)
    max_height = Unicode(None, allow_none=True).tag(sync=True)
    max_width = Unicode(None, allow_none=True).tag(sync=True)
    min_height = Unicode(None, allow_none=True).tag(sync=True)
    min_width = Unicode(None, allow_none=True).tag(sync=True)
    overflow = Unicode(None, allow_none=True).tag(sync=True)
    overflow_x = Unicode(None, allow_none=True).tag(sync=True)
    overflow_y = Unicode(None, allow_none=True).tag(sync=True)
    order = Unicode(None, allow_none=True).tag(sync=True)
    padding = Unicode(None, allow_none=True).tag(sync=True)
    right = Unicode(None, allow_none=True).tag(sync=True)
    top = Unicode(None, allow_none=True).tag(sync=True)
    visibility = Unicode(None, allow_none=True).tag(sync=True)
    width = Unicode(None, allow_none=True).tag(sync=True)
class LayoutTraitType(Instance):
    """Trait type accepting either a Layout instance or a dict of Layout kwargs."""

    klass = Layout

    def validate(self, obj, value):
        # Plain dicts are promoted to a Layout before the usual Instance check.
        if isinstance(value, dict):
            value = Layout(**value)
        return super(LayoutTraitType, self).validate(obj, value)
| 42.179104 | 78 | 0.703114 |
from traitlets import Unicode, Instance
from .widget_core import CoreWidget
class Layout(CoreWidget):
_model_module = Unicode('jupyter-js-widgets').tag(sync=True)
_view_module = Unicode('jupyter-js-widgets').tag(sync=True)
_view_name = Unicode('LayoutView').tag(sync=True)
_model_name = Unicode('LayoutModel').tag(sync=True)
align_content = Unicode(None, allow_none=True).tag(sync=True)
align_items = Unicode(None, allow_none=True).tag(sync=True)
align_self = Unicode(None, allow_none=True).tag(sync=True)
bottom = Unicode(None, allow_none=True).tag(sync=True)
border = Unicode(None, allow_none=True).tag(sync=True)
display = Unicode(None, allow_none=True).tag(sync=True)
flex = Unicode(None, allow_none=True).tag(sync=True)
flex_flow = Unicode(None, allow_none=True).tag(sync=True)
height = Unicode(None, allow_none=True).tag(sync=True)
justify_content = Unicode(None, allow_none=True).tag(sync=True)
left = Unicode(None, allow_none=True).tag(sync=True)
margin = Unicode(None, allow_none=True).tag(sync=True)
max_height = Unicode(None, allow_none=True).tag(sync=True)
max_width = Unicode(None, allow_none=True).tag(sync=True)
min_height = Unicode(None, allow_none=True).tag(sync=True)
min_width = Unicode(None, allow_none=True).tag(sync=True)
overflow = Unicode(None, allow_none=True).tag(sync=True)
overflow_x = Unicode(None, allow_none=True).tag(sync=True)
overflow_y = Unicode(None, allow_none=True).tag(sync=True)
order = Unicode(None, allow_none=True).tag(sync=True)
padding = Unicode(None, allow_none=True).tag(sync=True)
right = Unicode(None, allow_none=True).tag(sync=True)
top = Unicode(None, allow_none=True).tag(sync=True)
visibility = Unicode(None, allow_none=True).tag(sync=True)
width = Unicode(None, allow_none=True).tag(sync=True)
class LayoutTraitType(Instance):
klass = Layout
def validate(self, obj, value):
if isinstance(value, dict):
return super(LayoutTraitType, self).validate(obj, Layout(**value))
else:
return super(LayoutTraitType, self).validate(obj, value)
| true | true |
f7347d6f0909c926f14b411d4b55f806cc32f60a | 2,421 | py | Python | config.py | daph/Game2Text | 19c1fa370c55564ba8dd489e46cd567a2d688ff0 | [
"Apache-2.0"
] | 1 | 2021-08-29T06:38:13.000Z | 2021-08-29T06:38:13.000Z | config.py | agloo/Game2Text | 90a5529d537695dd4eff03a0e20a23b5587bef92 | [
"Apache-2.0"
] | null | null | null | config.py | agloo/Game2Text | 90a5529d537695dd4eff03a0e20a23b5587bef92 | [
"Apache-2.0"
] | null | null | null | from configparser import ConfigParser
import os
import platform
OCR_CONFIG = 'OCRCONFIG'
TRANSLATION_CONFIG = 'TRANSLATIONCONFIG'
APPERANCE_CONFIG = 'APPEARANCE'
APP_CONFIG = 'APPCONFIG'
ANKI_CONFIG = 'ANKICONFIG'
LOG_CONFIG = 'LOGCONFIG'
SCRIPT_MATCH_CONFIG = 'SCRIPTMATCHCONFIG'
TEXTHOOKER_CONFIG = 'TEXTHOOKERCONFIG'
HOTKEYS_CONFIG = '$OS_HOTKEYS'
PATHS_CONFIG = 'PATHS'
OS_STRING = '$OS'
#Get the configparser object
# config_object = ConfigParser()
#Path for config file
config_file = os.path.join(os.path.dirname(__file__), 'config.ini')
def get_platform_for_section(section):
    """Substitute the $OS placeholder in *section* with the config-file tag
    (WINDOWS/MAC/LINUX) of the operating system we are running on."""
    os_tags = {
        'Windows': 'WINDOWS',
        'Darwin': 'MAC',
        'Linux': 'LINUX',
    }
    return section.replace(OS_STRING, os_tags[platform.system()])
def r_config(section, key):
    """Return a single value from config.ini for the given section and key."""
    if OS_STRING in section:
        # Resolve the $OS placeholder to this platform's section name.
        section = get_platform_for_section(section)
    parser = ConfigParser()
    parser.read(config_file, encoding='utf-8')
    return parser[section][key]
def r_config_section(section):
    """Read one whole section of config.ini and return it as a plain dict."""
    if OS_STRING in section:
        # Resolve the $OS placeholder to this platform's section name.
        section = get_platform_for_section(section)
    config_object = ConfigParser()
    config_object.read(config_file, encoding='utf-8')
    section = dict(config_object[section])
    return section
def r_config_all():
    """Read all of config.ini into a dict of {section name: {key: value}}.

    Platform-specific sections (containing WINDOWS/MAC/LINUX in their name)
    are skipped; only the hotkeys section for the current platform is added
    back, under the generic $OS_HOTKEYS name.
    """
    config_object = ConfigParser()
    config_object.read(config_file, encoding='utf-8')
    section_dict = {}
    for section in config_object:
        # Skip per-OS sections; they are folded in below for this platform.
        if 'WINDOWS' in section or 'MAC' in section or 'LINUX' in section:
            continue
        section_dict[section] = dict(config_object[section])
    # Platform specific config
    section_dict[HOTKEYS_CONFIG] = dict(config_object[get_platform_for_section(HOTKEYS_CONFIG)])
    return section_dict
def w_config(section, to_update_dict):
    """Update key/value pairs in one section of config.ini and save the file."""
    if OS_STRING in section:
        section = get_platform_for_section(section)
    # Read config.ini file.
    # NOTE(review): this reads/writes the relative path "config.ini" while the
    # readers above use the absolute config_file path -- confirm intentional.
    config_object = ConfigParser()
    config_object.read("config.ini", encoding='utf-8')
    # Get the requested section.
    section = config_object[section]
    # Update the key values.
    for key, value in to_update_dict.items():
        section[key] = value
    # Write changes back to file.
    with open('config.ini', 'w', encoding='utf-8') as conf:
        config_object.write(conf) | 30.2625 | 96 | 0.71995 | from configparser import ConfigParser
import os
import platform
OCR_CONFIG = 'OCRCONFIG'
TRANSLATION_CONFIG = 'TRANSLATIONCONFIG'
APPERANCE_CONFIG = 'APPEARANCE'
APP_CONFIG = 'APPCONFIG'
ANKI_CONFIG = 'ANKICONFIG'
LOG_CONFIG = 'LOGCONFIG'
SCRIPT_MATCH_CONFIG = 'SCRIPTMATCHCONFIG'
TEXTHOOKER_CONFIG = 'TEXTHOOKERCONFIG'
HOTKEYS_CONFIG = '$OS_HOTKEYS'
PATHS_CONFIG = 'PATHS'
OS_STRING = '$OS'
config_file = os.path.join(os.path.dirname(__file__), 'config.ini')
def get_platform_for_section(section):
platform_names_to_config_os_name = {
'Windows': 'WINDOWS',
'Darwin': 'MAC',
'Linux': 'LINUX'
}
platform_name = platform.system()
return section.replace(OS_STRING, platform_names_to_config_os_name[platform_name])
def r_config(section, key):
if OS_STRING in section:
section = get_platform_for_section(section)
config_object = ConfigParser()
config_object.read(config_file, encoding='utf-8')
section = config_object[section]
return section[key]
def r_config_section(section):
if OS_STRING in section:
section = get_platform_for_section(section)
config_object = ConfigParser()
config_object.read(config_file, encoding='utf-8')
section = dict(config_object[section])
return section
def r_config_all():
config_object = ConfigParser()
config_object.read(config_file, encoding='utf-8')
section_dict = {}
for section in config_object:
if 'WINDOWS' in section or 'MAC' in section or 'LINUX' in section:
continue
section_dict[section] = dict(config_object[section])
section_dict[HOTKEYS_CONFIG] = dict(config_object[get_platform_for_section(HOTKEYS_CONFIG)])
return section_dict
def w_config(section, to_update_dict):
if OS_STRING in section:
section = get_platform_for_section(section)
config_object = ConfigParser()
config_object.read("config.ini", encoding='utf-8')
section = config_object[section]
for key, value in to_update_dict.items():
section[key] = value
with open('config.ini', 'w', encoding='utf-8') as conf:
config_object.write(conf) | true | true |
f7347dbc6fec75d527f85033b83f2d3ecc839f3b | 442 | py | Python | proyecto/Modificaciones.py | RandyDpoe45/proyectoMultimedial | ca20911ec6ac8cd765bb03dc36b728e97808e3b0 | [
"MIT"
] | null | null | null | proyecto/Modificaciones.py | RandyDpoe45/proyectoMultimedial | ca20911ec6ac8cd765bb03dc36b728e97808e3b0 | [
"MIT"
] | null | null | null | proyecto/Modificaciones.py | RandyDpoe45/proyectoMultimedial | ca20911ec6ac8cd765bb03dc36b728e97808e3b0 | [
"MIT"
] | null | null | null | from cv2 import *
class Modificaciones (object):
def __init__(self,img,segIni,segFin,posx,posy):
self.im=img
self.height, self.width, self.channels = self.im.shape
self.segIni=segIni
self.segFin=segFin
self.posy=posy
self.posx=posx
def pertenece(self,segundo):
if(segundo>=self.segIni and segundo<=self.segFin):
return True
| 27.625 | 63 | 0.576923 | from cv2 import *
class Modificaciones (object):
def __init__(self,img,segIni,segFin,posx,posy):
self.im=img
self.height, self.width, self.channels = self.im.shape
self.segIni=segIni
self.segFin=segFin
self.posy=posy
self.posx=posx
def pertenece(self,segundo):
if(segundo>=self.segIni and segundo<=self.segFin):
return True
| true | true |
f73482af1cc0d86071662c8660889511c4534099 | 8,586 | py | Python | tencentcloud/lp/v20200224/models.py | PlasticMem/tencentcloud-sdk-python | 666db85623d51d640a165907a19aef5fba53b38d | [
"Apache-2.0"
] | 465 | 2018-04-27T09:54:59.000Z | 2022-03-29T02:18:01.000Z | tencentcloud/lp/v20200224/models.py | PlasticMem/tencentcloud-sdk-python | 666db85623d51d640a165907a19aef5fba53b38d | [
"Apache-2.0"
] | 91 | 2018-04-27T09:48:11.000Z | 2022-03-12T08:04:04.000Z | tencentcloud/lp/v20200224/models.py | PlasticMem/tencentcloud-sdk-python | 666db85623d51d640a165907a19aef5fba53b38d | [
"Apache-2.0"
] | 232 | 2018-05-02T08:02:46.000Z | 2022-03-30T08:02:48.000Z | # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class QueryLoginProtectionRequest(AbstractModel):
    """Request body for the QueryLoginProtection API.

    Every field is an optional string; required ones depend on the account
    type. Highlights (see the Tencent Cloud login-protection docs for the
    full field reference):

    * LoginIp / LoginTime / Uid -- who logged in, from where and when.
      Uid is an openid/unionid for QQ/WeChat accounts or a phone number.
    * AccountType -- 1: QQ open account, 2: WeChat open account,
      4: phone number, 0: other, 10004: phone number MD5.
    * AppIdU / AssociateAccount -- required for QQ/WeChat open accounts.
    * LoginSource -- 0: other, 1: PC web, 2: mobile web, 3: app,
      4: WeChat official account.
    * LoginType -- 0: other, 1: password, 2: SMS code, 3: QR code.
    * Result / Reason -- login outcome (0 fail / 1 success) and the
      failure reason code.
    * WxSubType / RandNum / WxToken -- WeChat official-account /
      mini-program token parameters.
    """

    # All request fields, in their canonical wire order.
    _FIELD_NAMES = (
        'LoginIp', 'Uid', 'LoginTime', 'AccountType', 'AppIdU',
        'AssociateAccount', 'NickName', 'PhoneNumber', 'EmailAddress',
        'RegisterTime', 'Address', 'CookieHash', 'LoginSource', 'LoginType',
        'Referer', 'JumpUrl', 'UserAgent', 'XForwardedFor', 'MouseClickCount',
        'KeyboardClickCount', 'Result', 'Reason', 'LoginSpend', 'MacAddress',
        'VendorId', 'AppVersion', 'Imei', 'BusinessId', 'WxSubType',
        'RandNum', 'WxToken',
    )

    def __init__(self):
        # Every field starts out unset.
        for field in self._FIELD_NAMES:
            setattr(self, field, None)

    def _deserialize(self, params):
        """Fill the request fields from *params*, warning about unknown keys."""
        for field in self._FIELD_NAMES:
            setattr(self, field, params.get(field))
        unknown = set(params.keys()).difference(vars(self))
        if len(unknown) > 0:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class QueryLoginProtectionResponse(AbstractModel):
    """Response body of the QueryLoginProtection API."""

    def __init__(self):
        """All fields may be None until _deserialize is called.

        * CodeDesc -- business-side status; "Success" on success, otherwise
          an error description.
        * AssociateAccount -- business account id associated with the
          QQ/WeChat open account.
        * LoginTime -- time of the operation.
        * Uid -- user id (openid/unionid for QQ/WeChat, or phone number).
        * LoginIp -- login IP address.
        * Level -- 0 means no malice detected; 1-4 is increasing malice.
        * RiskType -- list of risk type codes.
        * RootId -- associated account id.
        * RequestId -- unique id of this API request, for troubleshooting.
        """
        self.CodeDesc = None
        self.AssociateAccount = None
        self.LoginTime = None
        self.Uid = None
        self.LoginIp = None
        self.Level = None
        self.RiskType = None
        self.RootId = None
        self.RequestId = None


    def _deserialize(self, params):
        # Copy each known response field from the raw parameter dict.
        self.CodeDesc = params.get("CodeDesc")
        self.AssociateAccount = params.get("AssociateAccount")
        self.LoginTime = params.get("LoginTime")
        self.Uid = params.get("Uid")
        self.LoginIp = params.get("LoginIp")
        self.Level = params.get("Level")
        self.RiskType = params.get("RiskType")
        self.RootId = params.get("RootId")
        self.RequestId = params.get("RequestId") | 33.150579 | 130 | 0.643722 |
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class QueryLoginProtectionRequest(AbstractModel):
def __init__(self):
self.LoginIp = None
self.Uid = None
self.LoginTime = None
self.AccountType = None
self.AppIdU = None
self.AssociateAccount = None
self.NickName = None
self.PhoneNumber = None
self.EmailAddress = None
self.RegisterTime = None
self.Address = None
self.CookieHash = None
self.LoginSource = None
self.LoginType = None
self.Referer = None
self.JumpUrl = None
self.UserAgent = None
self.XForwardedFor = None
self.MouseClickCount = None
self.KeyboardClickCount = None
self.Result = None
self.Reason = None
self.LoginSpend = None
self.MacAddress = None
self.VendorId = None
self.AppVersion = None
self.Imei = None
self.BusinessId = None
self.WxSubType = None
self.RandNum = None
self.WxToken = None
def _deserialize(self, params):
self.LoginIp = params.get("LoginIp")
self.Uid = params.get("Uid")
self.LoginTime = params.get("LoginTime")
self.AccountType = params.get("AccountType")
self.AppIdU = params.get("AppIdU")
self.AssociateAccount = params.get("AssociateAccount")
self.NickName = params.get("NickName")
self.PhoneNumber = params.get("PhoneNumber")
self.EmailAddress = params.get("EmailAddress")
self.RegisterTime = params.get("RegisterTime")
self.Address = params.get("Address")
self.CookieHash = params.get("CookieHash")
self.LoginSource = params.get("LoginSource")
self.LoginType = params.get("LoginType")
self.Referer = params.get("Referer")
self.JumpUrl = params.get("JumpUrl")
self.UserAgent = params.get("UserAgent")
self.XForwardedFor = params.get("XForwardedFor")
self.MouseClickCount = params.get("MouseClickCount")
self.KeyboardClickCount = params.get("KeyboardClickCount")
self.Result = params.get("Result")
self.Reason = params.get("Reason")
self.LoginSpend = params.get("LoginSpend")
self.MacAddress = params.get("MacAddress")
self.VendorId = params.get("VendorId")
self.AppVersion = params.get("AppVersion")
self.Imei = params.get("Imei")
self.BusinessId = params.get("BusinessId")
self.WxSubType = params.get("WxSubType")
self.RandNum = params.get("RandNum")
self.WxToken = params.get("WxToken")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class QueryLoginProtectionResponse(AbstractModel):
def __init__(self):
self.CodeDesc = None
self.AssociateAccount = None
self.LoginTime = None
self.Uid = None
self.LoginIp = None
self.Level = None
self.RiskType = None
self.RootId = None
self.RequestId = None
def _deserialize(self, params):
self.CodeDesc = params.get("CodeDesc")
self.AssociateAccount = params.get("AssociateAccount")
self.LoginTime = params.get("LoginTime")
self.Uid = params.get("Uid")
self.LoginIp = params.get("LoginIp")
self.Level = params.get("Level")
self.RiskType = params.get("RiskType")
self.RootId = params.get("RootId")
self.RequestId = params.get("RequestId") | true | true |
f734831f13dffdba489d2f15e75f93d43e6b312c | 2,108 | py | Python | path2insight/utils.py | armell/path2insight | 63577bab69794f06f5c0fbaa8e7b1c0f67d7b7cf | [
"MIT"
] | 2 | 2018-02-14T14:22:53.000Z | 2018-02-14T14:53:41.000Z | path2insight/utils.py | armell/path2insight | 63577bab69794f06f5c0fbaa8e7b1c0f67d7b7cf | [
"MIT"
] | null | null | null | path2insight/utils.py | armell/path2insight | 63577bab69794f06f5c0fbaa8e7b1c0f67d7b7cf | [
"MIT"
] | 1 | 2020-04-13T09:56:18.000Z | 2020-04-13T09:56:18.000Z | import sys
from collections import Iterable
from path2insight import WindowsFilePath, PosixFilePath
PATH_OBJECT_TYPES = (WindowsFilePath, PosixFilePath)
# ----------------------------------------------------
class VisibleDeprecationWarning(UserWarning):
    """Visible deprecation warning.

    Subclasses UserWarning (rather than DeprecationWarning) so that it is
    shown by default. Based on numpy's VisibleDeprecationWarning.
    """
    pass
class MissingDependencyError(Exception):
    """Optional dependency not available.

    Fix: this was previously declared with ``def`` instead of ``class``, so
    ``raise MissingDependencyError(...)`` (see _import_jellyfish) invoked a
    function returning None and failed with
    ``TypeError: exceptions must derive from BaseException``.
    """
    pass
def _import_jellyfish():
    """Return ``jellyfish.levenshtein_distance``, raising
    MissingDependencyError if the optional 'jellyfish' package is absent.

    Fix: catch ImportError instead of ModuleNotFoundError -- the latter only
    exists on Python 3.6+, while this module otherwise keeps Python 2
    compatibility (see the PY3 shims below in this file), and ImportError
    also covers a partially-installed package.
    """
    try:
        from jellyfish import levenshtein_distance as lv
    except ImportError:
        raise MissingDependencyError(
            "Install the module 'jellyfish' to compute string distances.")
    return lv
def iteritems(d):
    """Iterate over (key, value) pairs of *d* on both Python 2 and 3.

    Prefers the mapping's ``items`` method; falls back to the legacy
    ``iteritems`` when ``items`` is missing.
    """
    items_method = getattr(d, 'items', None)
    if items_method is not None:
        return items_method()
    return d.iteritems()
def unique(l):
    """Return the distinct elements of *l* as a list (order unspecified)."""
    return [element for element in set(l)]
# ----------------------------------------------------
# the following path is largely based / taken from the six module and pandas
# True when running under Python 3.
PY3 = (sys.version_info[0] >= 3)
if PY3:
    string_types = str,
    binary_type = bytes
else:
    # Python 2: basestring covers both str and unicode.
    string_types = basestring,
    binary_type = str
# NOTE(review): this nests string_types (itself a tuple) inside the outer
# tuple, yielding ((str,), bytes) -- possibly meant string_types +
# (binary_type,); verify against usage before changing.
string_and_binary_types = (string_types,) + (binary_type,)
def is_list_like(obj):
    """Return True if *obj* is list-like.

    List-like objects are iterables such as Python lists, tuples, sets,
    NumPy arrays and pandas Series; plain strings and bytes (and therefore
    non-iterables like numbers and datetimes) are not considered list-like.

    Examples
    --------
    >>> is_list_like([1, 2, 3])
    True
    >>> is_list_like({1, 2, 3})
    True
    >>> is_list_like("foo")
    False
    >>> is_list_like(1)
    False
    """
    if not isinstance(obj, Iterable):
        return False
    # Strings/bytes iterate but should not count as list-like.
    return not isinstance(obj, string_types + (binary_type,))
| 22.189474 | 76 | 0.621442 | import sys
from collections import Iterable
from path2insight import WindowsFilePath, PosixFilePath
PATH_OBJECT_TYPES = (WindowsFilePath, PosixFilePath)
class VisibleDeprecationWarning(UserWarning):
pass
def MissingDependencyError(Exception):
pass
def _import_jellyfish():
try:
from jellyfish import levenshtein_distance as lv
return lv
except ModuleNotFoundError:
raise MissingDependencyError(
"Install the module 'jellyfish' to compute string distances.")
def iteritems(d):
try:
return d.items()
except AttributeError:
return d.iteritems()
def unique(l):
return list(set(l))
PY3 = (sys.version_info[0] >= 3)
if PY3:
string_types = str,
binary_type = bytes
else:
string_types = basestring,
binary_type = str
string_and_binary_types = (string_types,) + (binary_type,)
def is_list_like(obj):
return (isinstance(obj, Iterable) and
not isinstance(obj, string_types + (binary_type,)))
| true | true |
f734838dd63e5c9ccb5e77e1cad16fd540101498 | 351,709 | py | Python | catboost/pytest/test.py | thundergolfer/catboost | 60942dee40f1407466d0b1e486f0a1d445e6aa91 | [
"Apache-2.0"
] | null | null | null | catboost/pytest/test.py | thundergolfer/catboost | 60942dee40f1407466d0b1e486f0a1d445e6aa91 | [
"Apache-2.0"
] | null | null | null | catboost/pytest/test.py | thundergolfer/catboost | 60942dee40f1407466d0b1e486f0a1d445e6aa91 | [
"Apache-2.0"
] | null | null | null | from itertools import permutations
import yatest.common
from yatest.common import ExecutionTimeoutError, ExecutionError
import pytest
import os
import filecmp
import numpy as np
import pandas as pd
import timeit
import json
import catboost
from catboost_pytest_lib import (
apply_catboost,
compare_evals_with_precision,
compare_fit_evals_with_precision,
compare_evals,
data_file,
execute_catboost_fit,
execute_dist_train,
format_crossvalidation,
generate_concatenated_random_labeled_dataset,
get_catboost_binary_path,
get_limited_precision_dsv_diff_tool,
local_canonical_file,
permute_dataset_columns,
remove_time_from_json,
)
# Path to the catboost CLI binary under the yatest build tree.
CATBOOST_PATH = yatest.common.binary_path("catboost/app/catboost")

# Parameter grids shared by the parametrized tests below.
BOOSTING_TYPE = ['Ordered', 'Plain']
GROW_POLICIES = ['SymmetricTree', 'Lossguide', 'Depthwise']
# Ordered boosting is only combined with SymmetricTree.
BOOSTING_TYPE_WITH_GROW_POLICIES = [('Ordered', 'SymmetricTree'), ('Plain', 'SymmetricTree'),
                                    ('Plain', 'Lossguide'), ('Plain', 'Depthwise')]

PREDICTION_TYPES = ['Probability', 'RawFormulaVal', 'Class']

# Loss-function families; ALL_LOSSES is their union.
BINCLASS_LOSSES = ['Logloss', 'CrossEntropy']
MULTICLASS_LOSSES = ['MultiClass', 'MultiClassOneVsAll']
CLASSIFICATION_LOSSES = BINCLASS_LOSSES + MULTICLASS_LOSSES
REGRESSION_LOSSES = ['MAE', 'MAPE', 'Poisson', 'Quantile', 'RMSE', 'RMSEWithUncertainty', 'LogLinQuantile', 'Lq']
PAIRWISE_LOSSES = ['PairLogit', 'PairLogitPairwise']
GROUPWISE_LOSSES = ['YetiRank', 'YetiRankPairwise', 'QueryRMSE', 'QuerySoftMax']
RANKING_LOSSES = PAIRWISE_LOSSES + GROUPWISE_LOSSES
ALL_LOSSES = CLASSIFICATION_LOSSES + REGRESSION_LOSSES + RANKING_LOSSES

SAMPLING_UNIT_TYPES = ['Object', 'Group']

OVERFITTING_DETECTOR_TYPE = ['IncToDec', 'Iter']

LOSS_FUNCTIONS = ['RMSE', 'RMSEWithUncertainty', 'Logloss', 'MAE', 'CrossEntropy', 'Quantile', 'LogLinQuantile',
                  'Poisson', 'MAPE', 'MultiClass', 'MultiClassOneVsAll']

LEAF_ESTIMATION_METHOD = ['Gradient', 'Newton']

# test both parallel in and non-parallel modes
# default block size (5000000) is too big to run in parallel on these tests
SCORE_CALC_OBJ_BLOCK_SIZES = ['60', '5000000']
SCORE_CALC_OBJ_BLOCK_SIZES_IDS = ['calc_block=60', 'calc_block=5000000']

SEPARATOR_TYPES = [
    'ByDelimiter',
    'BySense',
]

# Feature-estimator combinations used by text-feature tests.
TEXT_FEATURE_ESTIMATORS = [
    'BoW',
    'NaiveBayes',
    'BM25',
    'BoW,NaiveBayes',
    'BoW,NaiveBayes,BM25'
]

# Rotten-tomatoes pool with embedding features and its column descriptions.
ROTTEN_TOMATOES_WITH_EMBEDDINGS_TRAIN_FILE = data_file('rotten_tomatoes_small_with_embeddings', 'train')
ROTTEN_TOMATOES_WITH_EMBEDDINGS_CD_BINCLASS_FILE = data_file(
    'rotten_tomatoes_small_with_embeddings',
    'cd_binclass'
)
ROTTEN_TOMATOES_ONLY_EMBEDDINGS_CD_BINCLASS_FILE = data_file(
    'rotten_tomatoes_small_with_embeddings',
    'cd_binclass_only_embeddings'
)
def diff_tool(threshold=None):
    """Build the canonization diff tool with the given numeric precision threshold."""
    tool = get_limited_precision_dsv_diff_tool(threshold, True)
    return tool
@pytest.mark.parametrize('is_inverted', [False, True], ids=['', 'inverted'])
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_cv_multiregression(is_inverted, boosting_type):
    """Cross-validated MultiRMSE training; canonizes the fold eval output."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'MultiRMSE',
        '-f', data_file('multiregression', 'train'),
        '--column-description', data_file('multiregression', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--cv', format_crossvalidation(is_inverted, 2, 10),
        '--cv-rand', '42',
        '--eval-file', eval_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_multiregression(dev_score_calc_obj_block_size):
    """Distributed MultiRMSE training must reproduce the canonical eval file."""
    train_cmd = make_deterministic_train_cmd(
        loss_function='MultiRMSE',
        pool='multiregression',
        train='train',
        test='test',
        cd='train.cd',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,
        other_options=('--boost-from-average', '0'),
    )
    eval_file = run_dist_train(train_cmd)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_multiregression_single(dev_score_calc_obj_block_size):
    """Distributed MultiRMSE with a single-target column description."""
    train_cmd = make_deterministic_train_cmd(
        loss_function='MultiRMSE',
        pool='multiregression',
        train='train',
        test='test',
        cd='train_single.cd',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,
        other_options=('--boost-from-average', '0'),
    )
    eval_file = run_dist_train(train_cmd)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize('n_trees', [100, 500])
def test_multiregression(boosting_type, grow_policy, n_trees):
    """Fit MultiRMSE, then canonize eval, standalone calc, and eval-metrics outputs."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    calc_file = yatest.common.test_output_path('test.calc')
    metric_file = yatest.common.test_output_path('test.metric')
    # Shared pool references, hoisted out of the three command lines.
    column_description = data_file('multiregression', 'train.cd')
    test_pool = data_file('multiregression', 'test')

    fit_args = [
        '--loss-function', 'MultiRMSE',
        '--boosting-type', boosting_type,
        '-f', data_file('multiregression', 'train'),
        '-t', test_pool,
        '--column-description', column_description,
        '-i', str(n_trees),
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
        '--grow-policy', grow_policy,
    ]
    execute_catboost_fit('CPU', fit_args)

    calc_args = [
        CATBOOST_PATH,
        'calc',
        '--column-description', column_description,
        '-T', '4',
        '-m', model_file,
        '--input-path', test_pool,
        '-o', calc_file,
    ]
    yatest.common.execute(calc_args)

    metric_args = [
        CATBOOST_PATH,
        'eval-metrics',
        '--column-description', column_description,
        '-T', '4',
        '-m', model_file,
        '--input-path', test_pool,
        '-o', metric_file,
        '--metrics', 'MultiRMSE',
    ]
    yatest.common.execute(metric_args)

    return [
        local_canonical_file(eval_file),
        local_canonical_file(calc_file),
        local_canonical_file(metric_file),
    ]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('n_trees', [100, 500])
@pytest.mark.parametrize('target_count', [1, 2, 3])
def test_multiregression_target_permutation_invariance(boosting_type, n_trees, target_count):
    """MultiRMSE predictions must not depend on the order of target columns.

    Trains one model per permutation of the target columns of the same
    synthetic pool and asserts that, after undoing each permutation,
    all models produce identical predictions.
    """
    np.random.seed(42)

    X_COUNT = 200
    X_DIM = 5

    x = np.random.randn(X_COUNT, X_DIM)
    # Each target is a sine of a random signed combination of the features.
    y = np.stack([
        np.sin(np.sum([np.pi * x[:, j] * (1 if np.random.randn() > 0 else -1) for j in range(X_DIM)], axis=0))
        for i in range(target_count)
    ], axis=1)

    test_size = X_COUNT // 2
    x_test, y_test = x[:test_size], y[:test_size]
    x_train, y_train = x[test_size:], y[test_size:]

    train_file = yatest.common.test_output_path('train')
    test_file = yatest.common.test_output_path('test')
    get_eval_path = lambda i: yatest.common.test_output_path('test_{}.eval'.format(i))
    get_model_path = lambda i: yatest.common.test_output_path('model_{}.bin'.format(i))
    get_cd_path = lambda i: yatest.common.test_output_path('cd_{}'.format(i))

    with open(get_cd_path(target_count), 'w') as cd:
        cd.write(''.join(('{}\tTarget\tm\n'.format(i) for i in range(target_count))))

    evals = []
    for perm in permutations(range(target_count)):
        # BUGFIX: the original used `inv_perm = range(target_count)` and then
        # assigned into it, which raises TypeError on Python 3 (range objects
        # are immutable).  Build the inverse permutation in a real list.
        inv_perm = list(range(target_count))
        for i, j in enumerate(perm):
            inv_perm[j] = i

        np.savetxt(train_file, np.hstack([y_train[:, perm], x_train]), delimiter='\t')
        np.savetxt(test_file, np.hstack([y_test[:, perm], x_test]), delimiter='\t')

        fit_cmd = (
            '--loss-function', 'MultiRMSE',
            '--boosting-type', boosting_type,
            '-f', train_file,
            '-t', test_file,
            '--column-description', get_cd_path(target_count),
            '-i', '{}'.format(n_trees),
            '-T', '4',
            '-m', get_model_path(target_count),
            '--eval-file', get_eval_path(target_count),
            '--use-best-model', 'false',
        )
        execute_catboost_fit('CPU', fit_cmd)

        # Renamed from `eval` to avoid shadowing the builtin; reshape keeps a
        # 2-D matrix even when target_count == 1.
        eval_matrix = np.loadtxt(
            get_eval_path(target_count), delimiter='\t', skiprows=1,
            usecols=range(1, target_count + 1)
        ).reshape((-1, target_count))
        # Undo the target permutation so all runs are directly comparable.
        evals.append(eval_matrix[:, inv_perm])

    for permuted_eval in evals:
        assert np.allclose(permuted_eval, evals[0])
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('n_trees', [10, 100, 1000])
@pytest.mark.parametrize('target_count', [1, 2, 3])
def test_compare_multiregression_with_regression(boosting_type, n_trees, target_count):
    """A single MultiRMSE model should be close to per-target RMSE models.

    Trains one MultiRMSE model on all targets and one RMSE model per target
    (other targets marked Auxiliary), then compares their mean squared errors
    on the held-out half of a synthetic pool.
    """
    np.random.seed(42)

    ERR_PERC = 0.1  # MultiRMSE may be at most 10% worse than per-target RMSE
    X_COUNT = 200
    X_DIM = 5

    x = np.random.randn(X_COUNT, X_DIM)
    # Each target is a sine of a random signed combination of the features.
    y = np.stack([
        np.sin(np.sum([np.pi * x[:, j] * (1 if np.random.randn() > 0 else -1) for j in range(X_DIM)], axis=0))
        for i in range(target_count)
    ], axis=1)

    test_size = X_COUNT // 2
    x_test, y_test = x[:test_size], y[:test_size]
    x_train, y_train = x[test_size:], y[test_size:]

    train_file = yatest.common.test_output_path('train')
    test_file = yatest.common.test_output_path('test')
    np.savetxt(train_file, np.hstack([y_train, x_train]), delimiter='\t')
    np.savetxt(test_file, np.hstack([y_test, x_test]), delimiter='\t')

    get_eval_path = lambda i: yatest.common.test_output_path('test_{}.eval'.format(i))
    get_model_path = lambda i: yatest.common.test_output_path('model_{}.bin'.format(i))
    get_cd_path = lambda i: yatest.common.test_output_path('cd_{}'.format(i))

    # cd index `target_count` describes the joint MultiRMSE pool.
    with open(get_cd_path(target_count), 'w') as cd:
        cd.write(''.join(('{}\tTarget\tm\n'.format(i) for i in range(target_count))))

    fit_cmd = (
        '--loss-function', 'MultiRMSE',
        '--boosting-type', boosting_type,
        '-f', train_file,
        '-t', test_file,
        '--column-description', get_cd_path(target_count),
        '-i', '{}'.format(n_trees),
        '-T', '4',
        '-m', get_model_path(target_count),
        '--eval-file', get_eval_path(target_count),
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', fit_cmd)

    # One plain RMSE model per target: target i is Target, the rest Auxiliary.
    for i in range(target_count):
        with open(get_cd_path(i), 'w') as cd:
            cd.write(''.join((('{}\tTarget\n'.format(j) if j == i else '{}\tAuxiliary\n'.format(j)) for j in range(target_count))))

        rmse_fit_cmd = (
            '--loss-function', 'RMSE',
            '--boosting-type', boosting_type,
            '-f', train_file,
            '-t', test_file,
            '--column-description', get_cd_path(i),
            '-i', '{}'.format(n_trees),
            '-T', '4',
            '-m', get_model_path(i),
            '--eval-file', get_eval_path(i),
            '--use-best-model', 'false',
        )
        execute_catboost_fit('CPU', rmse_fit_cmd)

    # BUGFIX: without the reshape, np.loadtxt returns a 1-D array when
    # target_count == 1, and `multirmse_eval - y_test` would broadcast
    # (n,) - (n, 1) into an (n, n) matrix, corrupting the loss comparison.
    multirmse_eval = np.loadtxt(
        get_eval_path(target_count), delimiter='\t', skiprows=1,
        usecols=range(1, target_count + 1)
    ).reshape((-1, target_count))
    rmse_eval = np.stack([
        np.loadtxt(get_eval_path(i), delimiter='\t', skiprows=1, usecols=1)
        for i in range(target_count)
    ], axis=1)

    # cannot compare approxes because they are very different due to different boosting algorithms
    multi_rmse_loss = np.mean((multirmse_eval - y_test)**2)
    rmse_loss = np.mean((rmse_eval - y_test)**2)

    assert rmse_loss.shape == multi_rmse_loss.shape
    assert multi_rmse_loss < rmse_loss * (1 + ERR_PERC)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('n_trees', [100, 500])
def test_multiregression_single(boosting_type, n_trees):
    """MultiRMSE with a single-target cd: fit, calc, and eval-metrics outputs."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    calc_file = yatest.common.test_output_path('test.calc')
    metric_file = yatest.common.test_output_path('test.metric')
    # Shared pool references, hoisted out of the three command lines.
    column_description = data_file('multiregression', 'train_single.cd')
    test_pool = data_file('multiregression', 'test')

    fit_args = [
        '--loss-function', 'MultiRMSE',
        '--boosting-type', boosting_type,
        '-f', data_file('multiregression', 'train'),
        '-t', test_pool,
        '--column-description', column_description,
        '-i', str(n_trees),
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)

    calc_args = [
        CATBOOST_PATH,
        'calc',
        '--column-description', column_description,
        '-T', '4',
        '-m', model_file,
        '--input-path', test_pool,
        '-o', calc_file,
    ]
    yatest.common.execute(calc_args)

    metric_args = [
        CATBOOST_PATH,
        'eval-metrics',
        '--column-description', column_description,
        '-T', '4',
        '-m', model_file,
        '--input-path', test_pool,
        '-o', metric_file,
        '--metrics', 'MultiRMSE',
    ]
    yatest.common.execute(metric_args)

    return [
        local_canonical_file(eval_file),
        local_canonical_file(calc_file),
        local_canonical_file(metric_file),
    ]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('n_trees', [100, 500])
def test_multiregression_with_cat_features(boosting_type, n_trees):
    """Smoke test: MultiRMSE training must succeed on a pool with cat features."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'MultiRMSE',
        '--boosting-type', boosting_type,
        '-f', data_file('multiregression', 'train'),
        '-t', data_file('multiregression', 'test'),
        '--column-description', data_file('multiregression', 'train_with_cat_features.cd'),
        '-i', str(n_trees),
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_queryrmse(boosting_type, grow_policy, dev_score_calc_obj_block_size):
    """QueryRMSE training on the querywise pool; canonizes the test eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
        '--grow-policy', grow_policy,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_queryrmse_newton_gradient(boosting_type, dev_score_calc_obj_block_size):
    """Newton and Gradient leaf estimation must produce identical QueryRMSE evals."""
    eval_paths = {}
    for method in ('Newton', 'Gradient'):
        eval_path = yatest.common.test_output_path(method.lower() + '.eval')
        eval_paths[method] = eval_path
        fit_args = [
            '--loss-function', 'QueryRMSE',
            '-f', data_file('querywise', 'train'),
            '-t', data_file('querywise', 'test'),
            '--column-description', data_file('querywise', 'train.cd'),
            '--boosting-type', boosting_type,
            '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
            '--leaf-estimation-method', method,
            '-i', '20',
            '-T', '4',
            '--eval-file', eval_path,
            '--use-best-model', 'false',
        ]
        execute_catboost_fit('CPU', fit_args)
    assert filecmp.cmp(eval_paths['Newton'], eval_paths['Gradient'])
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_pool_with_QueryId(boosting_type, grow_policy):
    """QueryRMSE on a pool whose cd declares a QueryId column."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd.query_id'),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
        '--grow-policy', grow_policy,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_rmse_on_qwise_pool(boosting_type, grow_policy, dev_score_calc_obj_block_size):
    """Plain RMSE must train fine on a grouped (querywise) pool."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'RMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
        '--grow-policy', grow_policy,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_averagegain(boosting_type):
    """AverageGain custom metric is computed on both learn and test sets."""
    learn_err = yatest.common.test_output_path('learn_error.tsv')
    test_err = yatest.common.test_output_path('test_error.tsv')
    fit_args = [
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '--custom-metric', 'AverageGain:top=2;hints=skip_train~false',
        '--learn-err-log', learn_err,
        '--test-err-log', test_err,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_err), local_canonical_file(test_err)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_queryauc(boosting_type):
    """QueryAUC custom metric is computed on both learn and test sets."""
    learn_err = yatest.common.test_output_path('learn_error.tsv')
    test_err = yatest.common.test_output_path('test_error.tsv')
    fit_args = [
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '--custom-metric', 'QueryAUC:hints=skip_train~false',
        '--learn-err-log', learn_err,
        '--test-err-log', test_err,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_err), local_canonical_file(test_err)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_queryaverage(boosting_type):
    """QueryAverage custom metric is computed on both learn and test sets."""
    learn_err = yatest.common.test_output_path('learn_error.tsv')
    test_err = yatest.common.test_output_path('test_error.tsv')
    fit_args = [
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '--custom-metric', 'QueryAverage:top=2;hints=skip_train~false',
        '--learn-err-log', learn_err,
        '--test-err-log', test_err,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_err), local_canonical_file(test_err)]
@pytest.mark.parametrize('sigma', ['sigma=' + str(sigma) for sigma in [0.01, 1, 10]])
@pytest.mark.parametrize('num_estimations', ['num_estimations=' + str(n_estim) for n_estim in [1, 100]])
def test_stochastic_filter(sigma, num_estimations):
    """StochasticFilter loss on a synthetic grouped pool.

    Trains twice (1 thread vs 4 threads) and checks the error logs match,
    i.e. the loss is deterministic w.r.t. thread count; then canonizes the
    4-thread error logs.
    """
    model_path = yatest.common.test_output_path('model.bin')
    cd_path = yatest.common.test_output_path('pool.cd')
    train_path = yatest.common.test_output_path('train.txt')
    test_path = yatest.common.test_output_path('test.txt')
    prng = np.random.RandomState(seed=0)
    n_samples_by_query = 20
    n_features = 10
    n_queries = 50
    n_samples = n_samples_by_query * n_queries
    # Targets are a linear function of uniformly-random features.
    features = prng.uniform(0, 1, size=(n_samples, n_features))
    weights = prng.uniform(0, 1, size=n_features)
    labels = np.dot(features, weights)
    # NOTE(review): arange // n_queries yields consecutive blocks of
    # n_queries samples per group id — presumably intentional, but it does
    # not match n_samples_by_query; verify if group sizes matter here.
    query_ids = np.arange(0, n_samples) // n_queries
    money = (n_queries - np.arange(0, n_samples) % n_queries) * 10
    # Reshape everything to columns so they can be stacked into one matrix:
    # [label, group id, money, feature columns...].
    labels = labels.reshape((n_samples, 1))
    query_ids = query_ids.reshape((n_samples, 1))
    money = money.reshape((n_samples, 1))
    features = np.hstack((labels, query_ids, money, features))
    # 70/30 learn/test split.
    n_learn = int(0.7 * n_samples)
    learn = features[:n_learn, :]
    test = features[n_learn:, :]
    np.savetxt(train_path, learn, fmt='%.5f', delimiter='\t')
    np.savetxt(test_path, test, fmt='%.5f', delimiter='\t')
    np.savetxt(cd_path, [[0, 'Target'], [1, 'GroupId']], fmt='%s', delimiter='\t')
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    learn_error_one_thread_path = yatest.common.test_output_path('learn_error_one_thread.tsv')
    test_error_one_thread_path = yatest.common.test_output_path('test_error_one_thread.tsv')
    # sigma and num_estimations arrive pre-formatted as 'key=value' strings.
    loss_description = 'StochasticFilter:' + sigma + ';' + num_estimations
    cmd = [
        '--loss-function', loss_description,
        '--leaf-estimation-backtracking', 'No',
        '-f', train_path,
        '-t', test_path,
        '--column-description', cd_path,
        '--boosting-type', 'Plain',
        '-i', '20',
        '-m', model_path,
        '--use-best-model', 'false',
    ]
    # Same training command, differing only in thread count and log paths.
    cmd_one_thread = cmd + [
        '--learn-err-log', learn_error_one_thread_path,
        '--test-err-log', test_error_one_thread_path,
        '-T', '1'
    ]
    cmd_four_thread = cmd + [
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '-T', '4'
    ]
    execute_catboost_fit('CPU', cmd_one_thread)
    execute_catboost_fit('CPU', cmd_four_thread)
    compare_evals(learn_error_one_thread_path, learn_error_path)
    compare_evals(test_error_one_thread_path, test_error_path)
    return [local_canonical_file(learn_error_path),
            local_canonical_file(test_error_path)]
@pytest.mark.parametrize('metric', ['DCG', 'NDCG'])
@pytest.mark.parametrize('top', [-1, 1, 10])
@pytest.mark.parametrize('dcg_type', ['Base', 'Exp'])
@pytest.mark.parametrize('denominator', ['Position', 'LogPosition'])
def test_stochastic_rank(metric, top, dcg_type, denominator):
    """StochasticRank over DCG/NDCG variants; canonizes both error logs."""
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    loss = 'StochasticRank:metric={};top={};type={};denominator={};hints=skip_train~false'.format(
        metric, top, dcg_type, denominator)
    fit_args = [
        '--loss-function', loss,
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--cd', data_file('querywise', 'train.cd.query_id'),
        '-i', '10',
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_error_path),
            local_canonical_file(test_error_path)]
@pytest.mark.parametrize('top', [-1, 1, 10])
@pytest.mark.parametrize('decay', [1.0, 0.6, 0.0])
def test_stochastic_rank_pfound(top, decay):
    """StochasticRank with the PFound metric; canonizes both error logs."""
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    loss = 'StochasticRank:metric=PFound;top={};decay={};hints=skip_train~false'.format(top, decay)
    fit_args = [
        CATBOOST_PATH,
        'fit',
        '--loss-function', loss,
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--cd', data_file('querywise', 'train.cd.query_id'),
        '-i', '10',
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
    ]
    yatest.common.execute(fit_args)
    return [local_canonical_file(learn_error_path),
            local_canonical_file(test_error_path)]
@pytest.mark.parametrize('top', [-1, 1, 10])
@pytest.mark.parametrize('decay', [1.0, 0.6, 0.0])
def test_stochastic_rank_pfound_with_many_ones(top, decay):
    """StochasticRank/PFound on a pool where ~25% of targets are forced to 1.0."""
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    loss = 'StochasticRank:metric=PFound;top={};decay={};hints=skip_train~false'.format(top, decay)
    np.random.seed(0)
    train_with_ones = yatest.common.test_output_path('train_with_ones')
    TARGET_COLUMN = 2
    # Copy the pool, overwriting a random quarter of the target values with 1.0.
    with open(data_file('querywise', 'train')) as fin, open(train_with_ones, 'w') as fout:
        for line in fin:
            if np.random.random() < 0.25:
                fields = line.split('\t')
                fields[TARGET_COLUMN] = '1.0'
                line = '\t'.join(fields)
            fout.write(line)
    fit_args = [
        CATBOOST_PATH,
        'fit',
        '--loss-function', loss,
        '-f', train_with_ones,
        '--cd', data_file('querywise', 'train.cd.query_id'),
        '-i', '10',
        '--learn-err-log', learn_error_path,
    ]
    yatest.common.execute(fit_args)
    return [local_canonical_file(learn_error_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('top', [2, 100])
def test_averagegain_with_query_weights(boosting_type, top):
    """AverageGain on a pool with per-group weights declared in the cd."""
    learn_err = yatest.common.test_output_path('learn_error.tsv')
    test_err = yatest.common.test_output_path('test_error.tsv')
    fit_args = [
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd.group_weight'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '--custom-metric', 'AverageGain:top={};hints=skip_train~false'.format(top),
        '--learn-err-log', learn_err,
        '--test-err-log', test_err,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_err), local_canonical_file(test_err)]
@pytest.mark.parametrize('top_size', [2, 5, 10, -1])
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('cd_file', ['train.cd', 'train.cd.subgroup_id'])
def test_pfound(top_size, boosting_type, cd_file):
    """PFound custom metric across top sizes and column descriptions."""
    learn_err = yatest.common.test_output_path('learn_error.tsv')
    test_err = yatest.common.test_output_path('test_error.tsv')
    fit_args = [
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', cd_file),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '--custom-metric', 'PFound:top={};hints=skip_train~false'.format(top_size),
        '--learn-err-log', learn_err,
        '--test-err-log', test_err,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_err), local_canonical_file(test_err)]
def test_params_ordering():
    """Metric parameters must be echoed in the err-log header in the order given."""
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    learn_error_reversed_path = yatest.common.test_output_path('learn_error_reversed.tsv')
    test_error_path = yatest.common.test_output_path('ignored.tsv')

    def build_args(custom_metric, err_log):
        # Identical training runs, differing only in metric spelling and log path.
        return [
            '--loss-function', 'QueryRMSE',
            '-f', data_file('querywise', 'train'),
            '-t', data_file('querywise', 'test'),
            '--column-description', data_file('querywise', 'train.cd'),
            '--boosting-type', 'Ordered',
            '-i', '20',
            '-T', '4',
            '--custom-metric', custom_metric,
            '--learn-err-log', err_log,
            '--test-err-log', test_error_path,
            '--use-best-model', 'false',
        ]

    runs = (
        ("PFound:top=1;decay=0.6;hints=skip_train~false", learn_error_path),
        ("PFound:decay=0.6;top=1;hints=skip_train~false", learn_error_reversed_path),
    )
    for custom_metric, err_log in runs:
        execute_catboost_fit('CPU', build_args(custom_metric, err_log))

    with open(learn_error_path) as f:
        assert 'PFound:top=1;decay=0.6' in f.read()
    with open(learn_error_reversed_path) as f:
        assert 'PFound:decay=0.6;top=1' in f.read()
def test_recall_at_k():
    """RecallAt:top=3 custom metric on the querywise pool."""
    learn_err = yatest.common.test_output_path('learn_error.tsv')
    test_err = yatest.common.test_output_path('test_error.tsv')
    fit_args = [
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', 'Ordered',
        '-i', '10',
        '-T', '4',
        '--custom-metric', 'RecallAt:top=3',
        '--learn-err-log', learn_err,
        '--test-err-log', test_err,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_err), local_canonical_file(test_err)]
def test_precision_at_k():
    """PrecisionAt:top=3 custom metric on the querywise pool."""
    learn_err = yatest.common.test_output_path('learn_error.tsv')
    test_err = yatest.common.test_output_path('test_error.tsv')
    fit_args = [
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', 'Ordered',
        '-i', '10',
        '-T', '4',
        '--custom-metric', 'PrecisionAt:top=3',
        '--learn-err-log', learn_err,
        '--test-err-log', test_err,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_err), local_canonical_file(test_err)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_mapk(boosting_type):
    """MAP:top=10 custom metric on the querywise pool."""
    learn_err = yatest.common.test_output_path('learn_error.tsv')
    test_err = yatest.common.test_output_path('test_error.tsv')
    fit_args = [
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '--custom-metric', 'MAP:top={}'.format(10),
        '--learn-err-log', learn_err,
        '--test-err-log', test_err,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_err), local_canonical_file(test_err)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('ndcg_power_mode', ['Base', 'Exp'])
@pytest.mark.parametrize('metric_type', ['DCG', 'NDCG'])
@pytest.mark.parametrize('ndcg_denominator', ['None', 'LogPosition', 'Position'])
def test_ndcg(boosting_type, ndcg_power_mode, metric_type, ndcg_denominator):
    """DCG/NDCG custom metric across power modes and denominators."""
    learn_err = yatest.common.test_output_path('learn_error.tsv')
    test_err = yatest.common.test_output_path('test_error.tsv')
    if ndcg_denominator == 'None':
        denominator = ''
    else:
        denominator = ';denominator={}'.format(ndcg_denominator)
    fit_args = [
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '--custom-metric', '{}:top={};type={};hints=skip_train~false{}'.format(metric_type, 10, ndcg_power_mode, denominator),
        '--learn-err-log', learn_err,
        '--test-err-log', test_err,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_err), local_canonical_file(test_err)]
def test_queryrmse_approx_on_full_history():
    """QueryRMSE with --approx-on-full-history under Ordered boosting."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--approx-on-full-history',
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
        '--boosting-type', 'Ordered',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_pairlogit(boosting_type, dev_score_calc_obj_block_size):
    """PairLogit with explicit pair files, evaluated with PairAccuracy."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    test_err = yatest.common.test_output_path('test_error.tsv')
    learn_err = yatest.common.test_output_path('learn_error.tsv')
    # The original inner helper was only ever called once with
    # learn_pairs='train.pairs'; inlined here for readability.
    fit_args = [
        '--loss-function', 'PairLogit',
        '--eval-metric', 'PairAccuracy',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--learn-pairs', data_file('querywise', 'train.pairs'),
        '--test-pairs', data_file('querywise', 'test.pairs'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '--ctr', 'Borders,Counter',
        '--l2-leaf-reg', '0',
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--learn-err-log', learn_err,
        '--test-err-log', test_err,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_err),
            local_canonical_file(test_err),
            local_canonical_file(eval_file)]
def test_pairs_generation():
    """PairLogit with pairs generated from targets (no explicit pair files)."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    test_err = yatest.common.test_output_path('test_error.tsv')
    learn_err = yatest.common.test_output_path('learn_error.tsv')
    # The original inner helper was only ever called once; inlined.
    fit_args = [
        '--loss-function', 'PairLogit',
        '--eval-metric', 'PairAccuracy',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--ctr', 'Borders,Counter',
        '--l2-leaf-reg', '0',
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--learn-err-log', learn_err,
        '--test-err-log', test_err,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_err),
            local_canonical_file(test_err),
            local_canonical_file(eval_file)]
def test_pairs_generation_with_max_pairs():
    """PairLogit with a max_pairs cap on generated pairs; also canonizes fstr."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    test_err = yatest.common.test_output_path('test_error.tsv')
    learn_err = yatest.common.test_output_path('learn_error.tsv')
    fstr_file = yatest.common.test_output_path('fstr.tsv')
    # The original inner helper was only ever called once; inlined.
    fit_args = [
        '--loss-function', 'PairLogit:max_pairs=30',
        '--eval-metric', 'PairLogit:max_pairs=30',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--ctr', 'Borders,Counter',
        '--l2-leaf-reg', '0',
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--learn-err-log', learn_err,
        '--test-err-log', test_err,
        '--use-best-model', 'false',
        '--fstr-file', fstr_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_err),
            local_canonical_file(test_err),
            local_canonical_file(eval_file),
            local_canonical_file(fstr_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_pairlogit_no_target(boosting_type):
    """PairLogit must train from pair files alone, with no target column."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'PairLogit',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd.no_target'),
        '--learn-pairs', data_file('querywise', 'train.pairs'),
        '--test-pairs', data_file('querywise', 'test.pairs'),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
def test_pairlogit_approx_on_full_history():
    """PairLogit with --approx-on-full-history (Ordered boosting only); canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'PairLogit',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--learn-pairs', data_file('querywise', 'train.pairs'),
        '--test-pairs', data_file('querywise', 'test.pairs'),
        '--approx-on-full-history',
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
        '--boosting-type', 'Ordered',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
@pytest.mark.parametrize('pairs_file', ['train.pairs', 'train.pairs.weighted'])
def test_pairlogit_pairwise(pairs_file, dev_score_calc_obj_block_size):
    """PairLogitPairwise with plain and weighted pair files; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'PairLogitPairwise',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--learn-pairs', data_file('querywise', 'train.pairs'),
        '--test-pairs', data_file('querywise', 'test.pairs'),
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_yetirank(boosting_type, dev_score_calc_obj_block_size):
    """YetiRank training smoke test on the querywise pool; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'YetiRank',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('loss_function', ['QueryRMSE', 'PairLogit', 'YetiRank', 'PairLogitPairwise', 'YetiRankPairwise'])
def test_pairwise_reproducibility(loss_function):
    """Training must be deterministic w.r.t. thread count for pairwise/ranking losses."""

    def fit(thread_count, model_path, eval_path):
        execute_catboost_fit('CPU', [
            '--use-best-model', 'false',
            '--loss-function', loss_function,
            '-f', data_file('querywise', 'train'),
            '-t', data_file('querywise', 'test'),
            '--learn-pairs', data_file('querywise', 'train.pairs'),
            '--test-pairs', data_file('querywise', 'test.pairs'),
            '--cd', data_file('querywise', 'train.cd'),
            '-i', '5',
            '-T', str(thread_count),
            '-m', model_path,
            '--eval-file', eval_path,
        ])

    single_thread_eval = yatest.common.test_output_path('test_1.eval')
    fit(1, yatest.common.test_output_path('model_1.bin'), single_thread_eval)

    multi_thread_eval = yatest.common.test_output_path('test_4.eval')
    fit(4, yatest.common.test_output_path('model_4.bin'), multi_thread_eval)

    # Evals from 1-thread and 4-thread runs must match byte-for-byte.
    assert filecmp.cmp(single_thread_eval, multi_thread_eval)
def test_pairs_vs_grouped_pairs():
    """Plain (dsv) and grouped pair files must produce identical evals."""
    model_file = yatest.common.test_output_path('model.bin')

    def fit_and_eval(learn_pairs_with_scheme, test_pairs_with_scheme, eval_path):
        execute_catboost_fit('CPU', [
            '--loss-function', 'PairLogit',
            '--eval-metric', 'PairAccuracy',
            '-f', data_file('querywise', 'train'),
            '-t', data_file('querywise', 'test'),
            '--column-description', data_file('querywise', 'train.cd'),
            '--learn-pairs', learn_pairs_with_scheme,
            '--test-pairs', test_pairs_with_scheme,
            '-i', '20',
            '-T', '4',
            '-m', model_file,
            '--eval-file', eval_path,
            '--use-best-model', 'false',
        ])

    ungrouped_eval = yatest.common.test_output_path('test_eval_ungrouped')
    fit_and_eval(
        data_file('querywise', 'train.pairs'),
        data_file('querywise', 'test.pairs'),
        ungrouped_eval
    )

    grouped_eval = yatest.common.test_output_path('test_eval_grouped')
    fit_and_eval(
        'dsv-grouped://' + data_file('querywise', 'train.grouped_pairs'),
        'dsv-grouped://' + data_file('querywise', 'test.grouped_pairs'),
        grouped_eval
    )

    assert filecmp.cmp(ungrouped_eval, grouped_eval)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_yetirank_with_params(boosting_type, dev_score_calc_obj_block_size):
    """YetiRank with explicit permutations/decay loss params; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'YetiRank:permutations=5;decay=0.9',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_yetirank_pairwise(dev_score_calc_obj_block_size):
    """YetiRankPairwise training smoke test; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'YetiRankPairwise',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('loss_function', ('YetiRank', 'YetiRankPairwise'))
def test_yetirank_default_metric(loss_function):
    """Check the default eval metric logged for YetiRank-family losses; canonize the test error log."""
    model_file = yatest.common.test_output_path('model.bin')
    test_err_file = yatest.common.test_output_path('test_error.tsv')
    fit_args = [
        '--loss-function', loss_function,
        '--has-header',
        '-f', data_file('black_friday', 'train'),
        '-t', data_file('black_friday', 'test'),
        '--column-description', data_file('black_friday', 'cd'),
        '--model-file', model_file,
        '--boosting-type', 'Plain',
        '-i', '5',
        '-T', '4',
        '--test-err-log', test_err_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(test_err_file)]
@pytest.mark.parametrize('eval_metric', ['MRR', 'MRR:top=1', 'ERR', 'ERR:top=1'])
def test_reciprocal_rank_metrics(eval_metric):
    """MRR/ERR eval metrics (with and without top=) on a query-id pool; canonize both error logs."""
    learn_err_file = yatest.common.test_output_path('learn_error.tsv')
    test_err_file = yatest.common.test_output_path('test_error.tsv')
    fit_args = [
        '--loss-function', 'YetiRank',
        '--eval-metric', eval_metric,
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd.query_id'),
        '--boosting-type', 'Plain',
        '-i', '20',
        '-T', '4',
        '--learn-err-log', learn_err_file,
        '--test-err-log', test_err_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_err_file), local_canonical_file(test_err_file)]
NAN_MODE = ['Min', 'Max']


@pytest.mark.parametrize('nan_mode', NAN_MODE)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_nan_mode(nan_mode, boosting_type):
    """Train on a pool with NaNs under each --nan-mode and check fit eval == standalone calc."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '-f', data_file('adult_nan', 'train_small'),
        '-t', data_file('adult_nan', 'test_small'),
        '--column-description', data_file('adult_nan', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--nan-mode', nan_mode,
    ]
    execute_catboost_fit('CPU', fit_args)

    # Re-apply the trained model via `catboost calc` and compare with the fit-time eval.
    calc_eval_file = yatest.common.test_output_path('predict_test.eval')
    yatest.common.execute([
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult_nan', 'test_small'),
        '--column-description', data_file('adult_nan', 'train.cd'),
        '-m', model_file,
        '--output-path', calc_eval_file,
        '--prediction-type', 'RawFormulaVal'
    ])
    assert (compare_evals(eval_file, calc_eval_file))
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('border_count', [64, 255, 350, 1000, 2500])
def test_different_border_count(border_count):
    """Vary float-feature border count (-x) and check fit eval == standalone calc."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    learn_set = data_file('querywise', 'train')
    test_set = data_file('querywise', 'test')
    cd_file = data_file('querywise', 'train.cd')
    execute_catboost_fit('CPU', [
        '--use-best-model', 'false',
        '-f', learn_set,
        '-t', test_set,
        '--column-description', cd_file,
        '-i', '20',
        '-T', '4',
        '-x', str(border_count),
        '-m', model_file,
        '--eval-file', eval_file,
    ])

    calc_eval_file = yatest.common.test_output_path('predict_test.eval')
    yatest.common.execute([
        CATBOOST_PATH,
        'calc',
        '--input-path', test_set,
        '--column-description', cd_file,
        '-m', model_file,
        '--output-path', calc_eval_file,
        '--prediction-type', 'RawFormulaVal'
    ])
    assert (compare_evals(eval_file, calc_eval_file))
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_nan_mode_forbidden(boosting_type):
    """--nan-mode Forbidden on a NaN-free pool must train normally; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--nan-mode', 'Forbidden',
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_overfit_detector_iter(boosting_type, grow_policy):
    """Iter-type overfitting detector must stop a long (2000-iter) run early; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-i', '2000',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '-x', '1',
        '-n', '8',
        '-w', '0.5',
        '--rsm', '1',
        '--od-type', 'Iter',
        '--od-wait', '2',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_overfit_detector_inc_to_dec(boosting_type, grow_policy):
    """IncToDec overfitting detector must stop a long (2000-iter) run early; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-i', '2000',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '-x', '1',
        '-n', '8',
        '-w', '0.5',
        '--rsm', '1',
        '--od-pval', '0.5',
        '--od-type', 'IncToDec',
        '--od-wait', '2',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize('overfitting_detector_type', OVERFITTING_DETECTOR_TYPE)
def test_overfit_detector_with_resume_from_snapshot(boosting_type, grow_policy, overfitting_detector_type):
    """Overfitting-detector state must survive a snapshot resume: short run, then continuation."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    snapshot_file = yatest.common.test_output_path('snapshot')
    common_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '-x', '1',
        '-n', '8',
        '-w', '0.5',
        '--rsm', '1',
        '--leaf-estimation-iterations', '10',
        '--max-ctr-complexity', '4',
        '--snapshot-file', snapshot_file,
        '--od-type', overfitting_detector_type,
    ]
    # Detector-specific knobs.
    if overfitting_detector_type == 'IncToDec':
        common_args += ['--od-wait', '2', '--od-pval', '0.5']
    elif overfitting_detector_type == 'Iter':
        common_args += ['--od-wait', '2']

    # First a short run that writes the snapshot, then a long run resuming from it.
    execute_catboost_fit('CPU', common_args + ['-i', '10'])
    execute_catboost_fit('CPU', common_args + ['-i', '2000'])
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('leaf_estimation_method', LEAF_ESTIMATION_METHOD)
def test_per_object_approx_on_full_history(leaf_estimation_method):
    """Ordered boosting with --approx-on-full-history for each leaf estimation method; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', 'Ordered',
        '--approx-on-full-history',
        '-i', '100',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '-x', '1',
        '-w', '0.5',
        '--od-pval', '0.99',
        '--rsm', '1',
        '--leaf-estimation-method', leaf_estimation_method,
        '--leaf-estimation-iterations', '20',
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_shrink_model(boosting_type, grow_policy):
    """--use-best-model true must shrink the model to the best iteration; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-i', '100',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '-x', '1',
        '-n', '8',
        '-w', '1',
        '--od-pval', '0.99',
        '--rsm', '1',
        '--use-best-model', 'true',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('leaf_estimation_method', LEAF_ESTIMATION_METHOD)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_multi_leaf_estimation_method(leaf_estimation_method, boosting_type, grow_policy, dev_score_calc_obj_block_size):
    """MultiClass with each leaf estimation method; check fit eval == standalone calc."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    test_set = data_file('cloudness_small', 'test_small')
    cd_file = data_file('cloudness_small', 'train.cd')
    execute_catboost_fit('CPU', [
        '--loss-function', 'MultiClass',
        '-f', data_file('cloudness_small', 'train_small'),
        '-t', test_set,
        '--column-description', cd_file,
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--leaf-estimation-method', leaf_estimation_method,
        '--leaf-estimation-iterations', '2',
        '--use-best-model', 'false',
    ])

    calc_eval_file = yatest.common.test_output_path('predict_test.eval')
    yatest.common.execute([
        CATBOOST_PATH,
        'calc',
        '--input-path', test_set,
        '--column-description', cd_file,
        '-m', model_file,
        '--output-path', calc_eval_file,
        '--prediction-type', 'RawFormulaVal'
    ])
    assert(compare_evals(eval_file, calc_eval_file))
    return [local_canonical_file(eval_file)]
LOSS_FUNCTIONS_SHORT = ['Logloss', 'MultiClass']


@pytest.mark.parametrize(
    'loss_function',
    LOSS_FUNCTIONS_SHORT,
    ids=['loss_function=%s' % loss_function for loss_function in LOSS_FUNCTIONS_SHORT]
)
@pytest.mark.parametrize(
    'column_name',
    ['doc_id', 'sample_id'],
    ids=['column_name=doc_id', 'column_name=sample_id']
)
def test_sample_id(loss_function, column_name):
    """'DocId'/'SampleId' column aliases must be accepted; check fit eval == standalone calc."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    cd_file = data_file('adult_' + column_name, 'train.cd')
    execute_catboost_fit('CPU', [
        '--loss-function', loss_function,
        '-f', data_file('adult_doc_id', 'train'),
        '-t', data_file('adult_doc_id', 'test'),
        '--column-description', cd_file,
        '--boosting-type', 'Plain',
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
    ])

    calc_eval_file = yatest.common.test_output_path('predict_test.eval')
    yatest.common.execute([
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult_doc_id', 'test'),
        '--column-description', cd_file,
        '-m', model_file,
        '--output-path', calc_eval_file,
        '--prediction-type', 'RawFormulaVal'
    ])
    assert(compare_evals(eval_file, calc_eval_file))
    return [local_canonical_file(eval_file)]
POOLS = ['amazon', 'adult']


@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_apply_missing_vals(boosting_type, grow_policy):
    """A model trained without NaNs must still apply to a pool containing missing values."""
    model_file = yatest.common.test_output_path('adult_model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    execute_catboost_fit('CPU', [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--use-best-model', 'false',
    ])

    yatest.common.execute([
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('test_adult_missing_val.tsv'),
        '--column-description', data_file('adult', 'train.cd'),
        '-m', model_file,
        '--output-path', eval_file
    ])
    return local_canonical_file(eval_file)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_crossentropy(boosting_type, dev_score_calc_obj_block_size):
    """CrossEntropy loss on probabilistic targets; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'CrossEntropy',
        '-f', data_file('adult_crossentropy', 'train_proba'),
        '-t', data_file('adult_crossentropy', 'test_proba'),
        '--column-description', data_file('adult_crossentropy', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_permutation_block(boosting_type, dev_score_calc_obj_block_size):
    """Training with a non-trivial --fold-permutation-block size; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--fold-permutation-block', '239',
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_ignored_features(boosting_type):
    """-I with indices, ranges and out-of-range entries must be accepted; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '-I', '0:1:3:5-7:10000',
        '--eval-file', eval_file,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
def test_ignored_features_names():
    """-I must also accept feature names (colon-separated); canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'RMSE',
        '--has-header',
        '--learn-set', data_file('black_friday', 'train'),
        '--test-set', data_file('black_friday', 'test'),
        '--column-description', data_file('black_friday', 'cd'),
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '-I', 'Stay_In_Current_City_Years:Product_Category_2:Gender',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
def test_ignored_features_not_read():
    """Features ignored via -I must not even be read/validated.

    The column-description file is rewritten so that two categorical features
    are mislabeled as 'Num'; training must still succeed because those
    features are excluded with -I before parsing their values.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    input_cd_path = data_file('adult', 'train.cd')
    cd_path = yatest.common.test_output_path('train.cd')

    with open(input_cd_path, "rt") as f:
        cd_lines = f.readlines()
    with open(cd_path, "wt") as f:
        for cd_line in cd_lines:
            # Corrupt some features by making them 'Num'.
            # NOTE: str.split() returns a list, so the comparison must be
            # against a list — the original tuple comparison was always False
            # and the corruption silently never happened.
            if cd_line.split() == ['5', 'Categ']:  # column 5 --> feature 4
                cd_line = cd_line.replace('Categ', 'Num')
            if cd_line.split() == ['7', 'Categ']:  # column 7 --> feature 6
                cd_line = cd_line.replace('Categ', 'Num')
            f.write(cd_line)

    cmd = (
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', cd_path,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '-I', '4:6',  # Ignore the corrupted features
        '--eval-file', output_eval_path,
        '--use-best-model', 'false',
    )
    # Succeeding is the whole assertion: reading the corrupted columns would fail.
    execute_catboost_fit('CPU', cmd)
    # Not needed: return [local_canonical_file(output_eval_path)]
def test_ignored_features_not_read_names():
    """Features ignored via -I by *name* must not even be read/validated.

    Two categorical features are mislabeled as 'Num' in a rewritten cd file;
    training must still succeed because they are excluded by name with -I.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    input_cd_path = data_file('black_friday', 'cd')
    cd_path = yatest.common.test_output_path('cd')

    with open(input_cd_path, "rt") as f:
        cd_lines = f.readlines()
    with open(cd_path, "wt") as f:
        for cd_line in cd_lines:
            # NOTE: str.split() returns a list, so compare against lists; the
            # original tuple comparisons were always False.  The replacement
            # must flip the column *type* 'Categ' -> 'Num' (the original
            # `replace('2', 'Num', 'Gender')` passed a string as the integer
            # `count` argument and would have raised TypeError if reached).
            if cd_line.split() == ['2', 'Categ', 'Gender']:
                cd_line = cd_line.replace('Categ', 'Num')
            if cd_line.split() == ['10', 'Categ', 'Product_Category_3']:
                cd_line = cd_line.replace('Categ', 'Num')
            f.write(cd_line)

    cmd = (
        '--loss-function', 'RMSE',
        '--has-header',
        '--learn-set', data_file('black_friday', 'train'),
        '--test-set', data_file('black_friday', 'test'),
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '-I', 'Gender:Product_Category_3',
    )
    # Succeeding is the whole assertion: reading the corrupted columns would fail.
    execute_catboost_fit('CPU', cmd)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_baseline(boosting_type, grow_policy):
    """Training with a Baseline column; check fit eval == standalone calc."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    cd_file = data_file('train_adult_baseline.cd')
    test_set = data_file('adult_weight', 'test_weight')
    execute_catboost_fit('CPU', [
        '--loss-function', 'Logloss',
        '-f', data_file('adult_weight', 'train_weight'),
        '-t', test_set,
        '--column-description', cd_file,
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
    ])

    calc_eval_file = yatest.common.test_output_path('predict_test.eval')
    yatest.common.execute([
        CATBOOST_PATH,
        'calc',
        '--input-path', test_set,
        '--column-description', cd_file,
        '-m', model_file,
        '--output-path', calc_eval_file,
        '--prediction-type', 'RawFormulaVal'
    ])
    assert(compare_evals(eval_file, calc_eval_file))
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
def test_multiclass_baseline(boosting_type, loss_function):
    """Multiclass training with per-class Baseline columns on synthetic data; fit eval == calc."""
    labels = ['0', '1', '2', '3']

    model_file = yatest.common.test_output_path('model.bin')

    # One Target column followed by one Baseline column per class.
    cd_file = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_file, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline'], [3, 'Baseline'], [4, 'Baseline']], fmt='%s', delimiter='\t')

    prng = np.random.RandomState(seed=0)

    train_file = yatest.common.test_output_path('train.txt')
    np.savetxt(train_file, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')

    test_file = yatest.common.test_output_path('test.txt')
    np.savetxt(test_file, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')

    eval_file = yatest.common.test_output_path('eval.txt')
    execute_catboost_fit('CPU', [
        '--loss-function', loss_function,
        '-f', train_file,
        '-t', test_file,
        '--column-description', cd_file,
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
        '--classes-count', '4'
    ])

    calc_eval_file = yatest.common.test_output_path('predict_test.eval')
    yatest.common.execute([
        CATBOOST_PATH,
        'calc',
        '--input-path', test_file,
        '--column-description', cd_file,
        '-m', model_file,
        '--output-path', calc_eval_file,
        '--prediction-type', 'RawFormulaVal'
    ])
    assert(compare_evals(eval_file, calc_eval_file))
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
def test_multiclass_baseline_lost_class(boosting_type, loss_function):
    """Baseline column count (2) inconsistent with --classes-count 4 must fail the fit."""
    labels = [0, 1, 2, 3]

    model_file = yatest.common.test_output_path('model.bin')

    # Only two Baseline columns although four classes are declared.
    cd_file = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_file, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline']], fmt='%s', delimiter='\t')

    prng = np.random.RandomState(seed=0)

    # Train set contains classes 1 and 2 only; test set has all four labels.
    train_file = yatest.common.test_output_path('train.txt')
    np.savetxt(train_file, generate_concatenated_random_labeled_dataset(100, 10, [1, 2], prng=prng), fmt='%s', delimiter='\t')

    test_file = yatest.common.test_output_path('test.txt')
    np.savetxt(test_file, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')

    eval_file = yatest.common.test_output_path('eval.txt')
    fit_args = [
        '--loss-function', loss_function,
        '-f', train_file,
        '-t', test_file,
        '--column-description', cd_file,
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
        '--classes-count', '4',
    ]
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', fit_args)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_weights(boosting_type, grow_policy, dev_score_calc_obj_block_size):
    """Training on a pool with a Weight column; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult_weight', 'train_weight'),
        '-t', data_file('adult_weight', 'test_weight'),
        '--column-description', data_file('adult_weight', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_weights_no_bootstrap(boosting_type, dev_score_calc_obj_block_size):
    """Weighted pool with bootstrap disabled (--bootstrap-type No); canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult_weight', 'train_weight'),
        '-t', data_file('adult_weight', 'test_weight'),
        '--column-description', data_file('adult_weight', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '--bootstrap-type', 'No',
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_weights_gradient(boosting_type, dev_score_calc_obj_block_size):
    """Weighted pool with Gradient leaf estimation; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult_weight', 'train_weight'),
        '-t', data_file('adult_weight', 'test_weight'),
        '--column-description', data_file('adult_weight', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--leaf-estimation-method', 'Gradient',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_logloss_with_not_binarized_target(boosting_type, dev_score_calc_obj_block_size):
    """Logloss on a continuous target binarized via --target-border; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult_not_binarized', 'train_small'),
        '-t', data_file('adult_not_binarized', 'test_small'),
        '--column-description', data_file('adult_not_binarized', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--target-border', '0.5',
        '--eval-file', eval_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('loss_function', LOSS_FUNCTIONS)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_all_targets(loss_function, boosting_type, grow_policy, dev_score_calc_obj_block_size):
    """Train with and without a test set; check `calc` predictions match the fit eval."""
    model_path = yatest.common.test_output_path('model.bin')
    model_path_no_test = yatest.common.test_output_path('model_without_test.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    common_fit_args = [
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '--counter-calc-method', 'SkipTest',  # TODO(kirillovs): remove after setting SkipTest as default type
        '-w', '0.03',
        '-T', '4',
    ]
    execute_catboost_fit('CPU', common_fit_args + [
        '-t', data_file('adult', 'test_small'),
        '-m', model_path,
        '--eval-file', eval_path,
    ])
    execute_catboost_fit('CPU', common_fit_args + ['-m', model_path_no_test])

    predict_path = yatest.common.test_output_path('predict_test.eval')
    predict_path_no_test = yatest.common.test_output_path('predict_without_test.eval')
    common_calc_args = [
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--prediction-type', 'RawFormulaVal'
    ]
    yatest.common.execute(common_calc_args + ['-m', model_path, '--output-path', predict_path])
    yatest.common.execute(common_calc_args + ['-m', model_path_no_test, '--output-path', predict_path_no_test])
    if loss_function == 'MAPE':
        # TODO(kirillovs): uncomment this after resolving MAPE problems
        # assert(compare_evals(eval_path, predict_path))
        return [local_canonical_file(eval_path), local_canonical_file(predict_path)]
    else:
        assert compare_evals(eval_path, predict_path)
        assert filecmp.cmp(predict_path_no_test, predict_path)
        return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('is_inverted', [False, True], ids=['', 'inverted'])
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_cv(is_inverted, boosting_type, grow_policy):
    """Cross-validation (fold 2 of 10) on the adult pool with Logloss."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_path,
        '--cv', format_crossvalidation(is_inverted, 2, 10),
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('is_inverted', [False, True], ids=['', 'inverted'])
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_cv_for_query(is_inverted, boosting_type):
    """Cross-validation (fold 2 of 7) with the querywise QueryRMSE loss."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--cv', format_crossvalidation(is_inverted, 2, 7),
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('is_inverted', [False, True], ids=['', 'inverted'])
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_cv_for_pairs(is_inverted, boosting_type):
    """Cross-validation (fold 2 of 7) with a pairwise loss and explicit pairs file."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'PairLogit',
        '-f', data_file('querywise', 'train'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--learn-pairs', data_file('querywise', 'train.pairs'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--cv', format_crossvalidation(is_inverted, 2, 7),
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('bad_cv_params', ['XX', 'YY', 'XY'])
def test_multiple_cv_spec(bad_cv_params):
    """Passing --cv twice must make the fit command fail, whatever the fold kinds."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    # Each case encodes the is_inverted flag of the first and second --cv spec.
    inverted_flags = {'XX': (False, False), 'XY': (False, True), 'YY': (True, True)}
    if bad_cv_params not in inverted_flags:
        raise Exception('bad bad_cv_params value:' + bad_cv_params)
    first_inverted, second_inverted = inverted_flags[bad_cv_params]
    cmd = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--cv', format_crossvalidation(is_inverted=first_inverted, n=2, k=10),
        '--cv', format_crossvalidation(is_inverted=second_inverted, n=4, k=7),
    ]
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', cmd)
@pytest.mark.parametrize('is_inverted', [False, True], ids=['', 'inverted'])
@pytest.mark.parametrize('error_type', ['0folds', 'fold_idx_overflow'])
def test_bad_fold_cv_spec(is_inverted, error_type):
    """Malformed --cv fold specs (zero folds, fold index out of range) must be rejected."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    cv_option = '--cv:Inverted' if is_inverted else '--cv:Classical'
    bad_spec = {'0folds': '0/0', 'fold_idx_overflow': '3/2'}[error_type]
    cmd = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        cv_option,
        bad_spec,
        '--eval-file', eval_path,
    ]
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', cmd)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_empty_eval(boosting_type):
    """Training without a test set; the eval file is still produced and canonized."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_time(boosting_type):
    """Training with the --has-time option enabled."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_path,
        '--has-time',
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_gradient(boosting_type, dev_score_calc_obj_block_size):
    """Logloss training with the Gradient leaf estimation method."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--leaf-estimation-method', 'Gradient',
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize(
    'loss_function',
    LOSS_FUNCTIONS_SHORT,
    ids=['loss_function=%s' % loss_function for loss_function in LOSS_FUNCTIONS_SHORT]
)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_gradient_with_leafwise_approxes(loss_function, dev_score_calc_obj_block_size):
    """Gradient leaf estimation must give identical evals with --dev-leafwise-approxes."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    eval_path_leafwise = yatest.common.test_output_path('test_dev_approxes.eval')
    # Shared options; each run appends its own eval file (and extra flags).
    common_args = [
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', 'Plain',
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--leaf-estimation-method', 'Gradient',
        '--eval-file',
    ]
    execute_catboost_fit('CPU', common_args + [eval_path])
    execute_catboost_fit('CPU', common_args + [eval_path_leafwise, '--dev-leafwise-approxes'])
    assert filecmp.cmp(eval_path, eval_path_leafwise)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_newton(boosting_type, dev_score_calc_obj_block_size):
    """Logloss training with one iteration of the Newton leaf estimation method."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--leaf-estimation-iterations', '1',
        '--leaf-estimation-method', 'Newton',
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_newton_with_leafwise_approxes(dev_score_calc_obj_block_size):
    """Newton leaf estimation must give identical evals with --dev-leafwise-approxes."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    eval_path_leafwise = yatest.common.test_output_path('test_dev_approxes.eval')
    # Shared options; each run appends its own eval file (and extra flags).
    common_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', 'Plain',
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--leaf-estimation-iterations', '1',
        '--leaf-estimation-method', 'Newton',
        '--eval-file',
    ]
    execute_catboost_fit('CPU', common_args + [eval_path])
    execute_catboost_fit('CPU', common_args + [eval_path_leafwise, '--dev-leafwise-approxes'])
    assert filecmp.cmp(eval_path, eval_path_leafwise)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_newton_on_pool_with_weights(boosting_type, dev_score_calc_obj_block_size):
    """Newton leaf estimation (7 iterations) on a pool with per-object weights."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult_weight', 'train_weight'),
        '-t', data_file('adult_weight', 'test_weight'),
        '--column-description', data_file('adult_weight', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '40',
        '-T', '4',
        '-m', model_path,
        '--leaf-estimation-method', 'Newton',
        '--leaf-estimation-iterations', '7',
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_custom_priors(boosting_type):
    """User-specified CTR priors, both global (--ctr) and per-feature."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    ctr_description = 'Borders:Prior=-2:Prior=0:Prior=8:Prior=1:Prior=-1:Prior=3,Counter:Prior=0'
    per_feature_ctr_description = (
        '4:Borders:Prior=0.444,Counter:Prior=0.444;'
        '6:Borders:Prior=0.666,Counter:Prior=0.666;'
        '8:Borders:Prior=-0.888:Prior=0.888,Counter:Prior=-0.888:Prior=0.888'
    )
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_path,
        '--ctr', ctr_description,
        '--per-feature-ctr', per_feature_ctr_description,
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_ctr_buckets(boosting_type, dev_score_calc_obj_block_size):
    """MultiClass training with the 'Buckets' CTR type."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'MultiClass',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--ctr', 'Buckets'
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_fold_len_multiplier(boosting_type, dev_score_calc_obj_block_size):
    """MultiClass training with a non-default --fold-len-multiplier."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'MultiClass',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--fold-len-multiplier', '1.5'
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_path)]
# Feature-importance modes passed to the `fstr` tool in the tests below.
FSTR_TYPES = ['PredictionValuesChange', 'InternalFeatureImportance', 'InternalInteraction', 'Interaction', 'ShapValues', 'PredictionDiff']
# Modes whose result depends on the dataset supplied to `fstr`.
DATASET_DEPENDENT_FSTR_TYPES = ['PredictionValuesChange', 'InternalFeatureImportance', 'LossFunctionChange', 'ShapValues', 'PredictionDiff']
@pytest.mark.parametrize('fstr_type', FSTR_TYPES)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_fstr(fstr_type, boosting_type, grow_policy):
    """Every fstr type on a non-normalized Logloss model."""
    # PredictionDiff needs a pool without categorical features, hence higgs.
    pool = 'higgs' if fstr_type == 'PredictionDiff' else 'adult'
    extra_params = ('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ()
    return do_test_fstr(
        fstr_type,
        loss_function='Logloss',
        input_path=data_file(pool, 'train_small'),
        cd_path=data_file(pool, 'train.cd'),
        boosting_type=boosting_type,
        grow_policy=grow_policy,
        normalize=False,
        additional_train_params=extra_params
    )
@pytest.mark.parametrize('fstr_type', ['PredictionValuesChange', 'InternalFeatureImportance', 'InternalInteraction', 'Interaction'])
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_fstr_with_text_features(fstr_type, boosting_type, grow_policy):
    """Feature importances for a model trained with text features.

    Builds a --text-processing JSON config (word tokenizer, unigram/bigram
    dictionaries, BoW/NaiveBayes/BM25 estimators) and delegates to do_test_fstr.
    """
    pool = 'rotten_tomatoes'
    separator_type = 'ByDelimiter'
    feature_estimators = 'BoW,NaiveBayes,BM25'
    # One tokenizer producing Word tokens, split by delimiter.
    tokenizers = [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}]
    # Unigram and bigram dictionaries; `dicts` maps each estimator to the ones it uses.
    dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]
    dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}
    feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer], 'tokenizers_names': [separator_type]} for calcer in feature_estimators.split(',')]
    text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries, 'tokenizers': tokenizers}
    return do_test_fstr(
        fstr_type,
        loss_function='Logloss',
        input_path=data_file(pool, 'train'),
        cd_path=data_file(pool, 'cd_binclass'),
        boosting_type=boosting_type,
        grow_policy=grow_policy,
        normalize=False,
        additional_train_params=('--text-processing', json.dumps(text_processing)) +
                                (('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ())
    )
@pytest.mark.parametrize('fstr_type', ['LossFunctionChange', 'ShapValues'])
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_fstr_with_text_features_shap(fstr_type, boosting_type, grow_policy):
    """Shap/LossFunctionChange importances for a model trained with text features.

    Uses only the NaiveBayes estimator and trains with --random-strength 0;
    otherwise mirrors test_fstr_with_text_features.
    """
    pool = 'rotten_tomatoes'
    separator_type = 'ByDelimiter'
    feature_estimators = 'NaiveBayes'
    # One tokenizer producing Word tokens, split by delimiter.
    tokenizers = [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}]
    # Unigram and bigram dictionaries; `dicts` maps each estimator to the ones it uses.
    dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]
    dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}
    feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer], 'tokenizers_names': [separator_type]} for calcer in feature_estimators.split(',')]
    text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries, 'tokenizers': tokenizers}
    return do_test_fstr(
        fstr_type,
        loss_function='Logloss',
        input_path=data_file(pool, 'train'),
        cd_path=data_file(pool, 'cd_binclass'),
        boosting_type=boosting_type,
        grow_policy=grow_policy,
        normalize=False,
        additional_train_params=('--random-strength', '0', '--text-processing', json.dumps(text_processing)) +
                                (('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ())
    )
@pytest.mark.parametrize('fstr_type', FSTR_TYPES)
@pytest.mark.parametrize('grow_policy', GROW_POLICIES)
def test_fstr_normalized_model(fstr_type, grow_policy):
    """Every fstr type on a model rescaled by normalize-model."""
    # PredictionDiff needs a pool without categorical features, hence higgs.
    pool = 'higgs' if fstr_type == 'PredictionDiff' else 'adult'
    extra_params = ('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ()
    return do_test_fstr(
        fstr_type,
        loss_function='Logloss',
        input_path=data_file(pool, 'train_small'),
        cd_path=data_file(pool, 'train.cd'),
        boosting_type='Plain',
        grow_policy=grow_policy,
        normalize=True,
        additional_train_params=extra_params
    )
@pytest.mark.parametrize('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)
@pytest.mark.parametrize('grow_policy', GROW_POLICIES)
def test_fstr_with_target_border(fstr_type, grow_policy):
    """Dataset-dependent fstr types when the target is binarized with --target-border."""
    # PredictionDiff needs a pool without categorical features.
    dataset = 'querywise' if fstr_type == 'PredictionDiff' else 'adult_not_binarized'
    train_file = 'train' if dataset == 'querywise' else 'train_small'
    return do_test_fstr(
        fstr_type,
        loss_function='Logloss',
        input_path=data_file(dataset, train_file),
        cd_path=data_file(dataset, 'train.cd'),
        boosting_type='Plain',
        grow_policy=grow_policy,
        normalize=False,
        additional_train_params=('--target-border', '0.4')
    )
@pytest.mark.parametrize('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)
@pytest.mark.parametrize('grow_policy', GROW_POLICIES)
def test_fstr_with_weights(fstr_type, grow_policy):
    """Dataset-dependent fstr types on a pool with per-object weights (RMSE loss)."""
    return do_test_fstr(
        fstr_type,
        loss_function='RMSE',
        input_path=data_file('querywise', 'train'),
        cd_path=data_file('querywise', 'train.cd.weight'),
        boosting_type='Plain',
        grow_policy=grow_policy,
        normalize=False
    )
@pytest.mark.parametrize('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)
@pytest.mark.parametrize('grow_policy', GROW_POLICIES)
def test_fstr_with_class_weights(fstr_type, grow_policy):
    """Dataset-dependent fstr types with per-class weights."""
    # PredictionDiff needs a pool without categorical features, hence higgs.
    pool = 'higgs' if fstr_type == 'PredictionDiff' else 'adult'
    return do_test_fstr(
        fstr_type,
        loss_function='Logloss',
        input_path=data_file(pool, 'train_small'),
        cd_path=data_file(pool, 'train.cd'),
        boosting_type='Plain',
        grow_policy=grow_policy,
        normalize=False,
        additional_train_params=('--class-weights', '0.25,0.75')
    )
@pytest.mark.parametrize('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)
def test_fstr_with_target_border_and_class_weights(fstr_type):
    """Dataset-dependent fstr types with --target-border and --class-weights combined."""
    # PredictionDiff needs a pool without categorical features.
    dataset = 'querywise' if fstr_type == 'PredictionDiff' else 'adult_not_binarized'
    train_file = 'train' if dataset == 'querywise' else 'train_small'
    return do_test_fstr(
        fstr_type,
        loss_function='Logloss',
        input_path=data_file(dataset, train_file),
        cd_path=data_file(dataset, 'train.cd'),
        boosting_type='Plain',
        grow_policy='SymmetricTree',
        normalize=False,
        additional_train_params=('--target-border', '0.4', '--class-weights', '0.25,0.75')
    )
def do_test_fstr(
    fstr_type,
    loss_function,
    input_path,
    cd_path,
    boosting_type,
    grow_policy,
    normalize,
    additional_train_params=()
):
    """Train a model and run the `fstr` tool on it, canonizing the output.

    Trains with the given loss/boosting options (plus additional_train_params),
    optionally normalizes the model in place, and computes importances of
    `fstr_type`. On a normalized model, fstr types other than
    PredictionValuesChange / InternalFeatureImportance (for non-ranking losses)
    are expected to make the fstr tool fail.
    """
    model_path = yatest.common.test_output_path('model.bin')
    output_fstr_path = yatest.common.test_output_path('fstr.tsv')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', input_path,
        '--column-description', cd_path,
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '--one-hot-max-size', '10',
        '-m', model_path
    ) + additional_train_params
    execute_catboost_fit('CPU', cmd)

    if fstr_type == 'PredictionDiff':
        # PredictionDiff compares a pair of objects: keep only the first two
        # dataset lines.  (File handles renamed from `input`/`output` so the
        # `input` builtin is not shadowed.)
        fstr_pool_path = yatest.common.test_output_path('input.tsv')
        with open(input_path) as full_pool, open(fstr_pool_path, "w") as truncated_pool:
            truncated_pool.write(full_pool.readline())
            truncated_pool.write(full_pool.readline())
        input_path = fstr_pool_path

    fstr_cmd = (
        CATBOOST_PATH,
        'fstr',
        '--input-path', input_path,
        '--column-description', cd_path,
        '-m', model_path,
        '-o', output_fstr_path,
        '--fstr-type', fstr_type
    )
    if normalize:
        make_model_normalized(model_path)
        if not(
            fstr_type == 'PredictionValuesChange' or
            fstr_type == 'InternalFeatureImportance' and loss_function not in RANKING_LOSSES
        ):
            # Unsupported fstr type on a rescaled model: the tool must fail.
            with pytest.raises(yatest.common.ExecutionError):
                yatest.common.execute(fstr_cmd)
            return

    yatest.common.execute(fstr_cmd)
    return local_canonical_file(output_fstr_path)
def make_model_normalized(model_path):
    """Rescale the model file in place (scale 0.5, bias 0.125) via `normalize-model`."""
    normalize_cmd = (
        CATBOOST_PATH,
        'normalize-model',
        '--model-path', model_path,
        '--output-model', model_path,
        '--set-scale', '0.5',
        '--set-bias', '0.125',
    )
    yatest.common.execute(normalize_cmd)
@pytest.mark.parametrize('loss_function', ['QueryRMSE', 'PairLogit', 'YetiRank', 'PairLogitPairwise', 'YetiRankPairwise'])
def test_loss_change_fstr(loss_function):
    # LossFunctionChange importances for each ranking loss, on an unmodified model.
    return do_test_loss_change_fstr(loss_function, normalize=False)
def test_loss_change_fstr_normalized():
    # On a normalized model the LossFunctionChange fstr call is expected to fail (see helper).
    return do_test_loss_change_fstr('QueryRMSE', normalize=True)
def do_test_loss_change_fstr(loss_function, normalize):
    """Check LossFunctionChange fstr computed at fit time matches the fstr tool.

    For pairwise losses a target-free column description is used and the pairs
    file is passed explicitly. When `normalize` is set, fstr on the rescaled
    model is expected to fail instead.
    """
    model_path = yatest.common.test_output_path('model.bin')
    output_fstr_path = yatest.common.test_output_path('fstr.tsv')
    train_fstr_path = yatest.common.test_output_path('t_fstr.tsv')

    def add_loss_specific_params(cmd, fstr_mode):
        # Pairwise losses read pairs instead of a target column; the pairs option
        # differs between fit mode ('--learn-pairs') and fstr mode ('--input-pairs').
        if loss_function in ['PairLogit', 'PairLogitPairwise']:
            cmd += ('--column-description', data_file('querywise', 'train.cd.no_target'))
            if fstr_mode:
                cmd += ('--input-pairs', data_file('querywise', 'train.pairs'))
            else:
                cmd += ('--learn-pairs', data_file('querywise', 'train.pairs'))
        else:
            cmd += ('--column-description', data_file('querywise', 'train.cd'))
        return cmd
    cmd_prefix = (
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '--learn-set', data_file('querywise', 'train'),
        '--boosting-type', 'Plain',
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '--one-hot-max-size', '10',
        '--fstr-file', train_fstr_path,
        '--fstr-type', 'LossFunctionChange',
        '--model-file', model_path
    )
    cmd = add_loss_specific_params(cmd_prefix, fstr_mode=False)
    execute_catboost_fit('CPU', cmd)

    fstr_cmd_prefix = (
        CATBOOST_PATH,
        'fstr',
        '--input-path', data_file('querywise', 'train'),
        '--model-file', model_path,
        '--output-path', output_fstr_path,
        '--fstr-type', 'LossFunctionChange',
    )
    fstr_cmd = add_loss_specific_params(fstr_cmd_prefix, fstr_mode=True)
    if normalize:
        make_model_normalized(model_path)
        with pytest.raises(yatest.common.ExecutionError):
            yatest.common.execute(fstr_cmd)
        return

    yatest.common.execute(fstr_cmd)
    # Importances written during training must match the standalone fstr tool.
    fit_output = np.loadtxt(train_fstr_path, dtype='float', delimiter='\t')
    fstr_output = np.loadtxt(output_fstr_path, dtype='float', delimiter='\t')
    assert(np.allclose(fit_output, fstr_output, rtol=1e-6))
    return [local_canonical_file(output_fstr_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('ranking_parameters', [
    {'loss-function': 'PairLogit', 'fstr-type': 'LossFunctionChange'},
    {'loss-function': 'Logloss', 'fstr-type': 'PredictionValuesChange'}
])
def test_fstr_feature_importance_default_value(boosting_type, ranking_parameters):
    """The generic 'FeatureImportance' fstr type must match the loss-specific default.

    Compares fit-time and standalone-fstr outputs of 'FeatureImportance'
    against the explicitly-named expected type.

    NOTE(review): `boosting_type` is parametrized but never added to `cmd`, so
    both parametrizations run the same command — consider passing
    '--boosting-type'; confirm before changing, since it alters the test matrix.
    """
    model_path = yatest.common.test_output_path('model.bin')
    fstr_path_0 = yatest.common.test_output_path('fstr_0.tsv')
    fstr_path_1 = yatest.common.test_output_path('fstr_1.tsv')
    internal_fstr_path_0 = yatest.common.test_output_path('internal_fstr_0.tsv')
    internal_fstr_path_1 = yatest.common.test_output_path('internal_fstr_1.tsv')

    # Logloss uses the adult pool; PairLogit uses black_friday (which has a header row).
    pool = 'adult' if ranking_parameters['loss-function'] == 'Logloss' else 'black_friday'
    pool_path = data_file(pool, 'train_small' if pool == 'adult' else 'train')
    cd_path = data_file(pool, 'train.cd' if pool == 'adult' else 'cd')
    has_header_suffix = ('--has-header',) if pool == 'black_friday' else ()
    cmd = (
        '--use-best-model', 'false',
        '--learn-set', pool_path,
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '--one-hot-max-size', '10',
        '--model-file', model_path,
        '--loss-function', ranking_parameters['loss-function']
    ) + has_header_suffix
    if ranking_parameters['loss-function'] == 'Logloss':
        cmd += ('--target-border', '0.5')
    # Fit twice: once asking for generic 'FeatureImportance', once for the
    # expected loss-specific type; the written files must be identical.
    execute_catboost_fit(
        'CPU',
        cmd + ('--fstr-file', fstr_path_0,
               '--fstr-internal-file', internal_fstr_path_0,
               '--fstr-type', 'FeatureImportance')
    )
    execute_catboost_fit(
        'CPU',
        cmd + ('--fstr-file', fstr_path_1,
               '--fstr-internal-file', internal_fstr_path_1,
               '--fstr-type', ranking_parameters['fstr-type'])
    )
    assert filecmp.cmp(fstr_path_0, fstr_path_1)
    assert filecmp.cmp(internal_fstr_path_0, internal_fstr_path_1)

    # Same comparison through the standalone fstr tool.
    fstr_cmd = (
        CATBOOST_PATH,
        'fstr',
        '--input-path', pool_path,
        '--column-description', cd_path,
        '--model-file', model_path,
    ) + has_header_suffix
    yatest.common.execute(
        fstr_cmd + ('--output-path', fstr_path_1,
                    '--fstr-type', 'FeatureImportance')
    )
    yatest.common.execute(
        fstr_cmd + ('--output-path', internal_fstr_path_1,
                    '--fstr-type', 'InternalFeatureImportance')
    )
    assert filecmp.cmp(fstr_path_0, fstr_path_1)
    assert filecmp.cmp(internal_fstr_path_0, internal_fstr_path_1)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_loss_change_fstr_without_pairs(boosting_type):
    """LossFunctionChange fstr works without an explicit pairs file, but needs a target.

    First computes importances with the target-bearing column description
    (output canonized), then checks that the target-free description with no
    pairs makes the fstr tool fail.
    """
    model_path = yatest.common.test_output_path('model.bin')
    output_fstr_path = yatest.common.test_output_path('fstr.tsv')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'PairLogit',
        '--learn-set', data_file('querywise', 'train'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--learn-pairs', data_file('querywise', 'train.pairs'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '--learning-rate', '0.03',
        '-T', '4',
        '--one-hot-max-size', '10',
        '--model-file', model_path
    )
    execute_catboost_fit('CPU', cmd)
    fstr_cmd = (
        CATBOOST_PATH,
        'fstr',
        '--input-path', data_file('querywise', 'train'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--model-file', model_path,
        '--output-path', output_fstr_path,
        '--fstr-type', 'LossFunctionChange',
    )
    yatest.common.execute(fstr_cmd)

    # Was a bare `try/except` + trailing `assert False`, which swallowed every
    # exception type and hid real errors.  Expect the specific failure instead,
    # matching the pytest.raises style used elsewhere in this file.
    fstr_cmd_no_target = (
        CATBOOST_PATH,
        'fstr',
        '--input-path', data_file('querywise', 'train'),
        '--column-description', data_file('querywise', 'train.cd.no_target'),
        '--model-file', model_path,
        '--fstr-type', 'LossFunctionChange',
    )
    with pytest.raises(yatest.common.ExecutionError):
        yatest.common.execute(fstr_cmd_no_target)
    return [local_canonical_file(output_fstr_path)]
def test_loss_change_fstr_on_different_pool_type():
    """LossFunctionChange fstr must agree across dsv and quantized pool inputs.

    Trains on the quantized querywise pool, then computes the importances three
    ways — at fit time, via fstr on the dsv pool, via fstr on the quantized
    pool — and checks all three match.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_dsv_fstr_path = yatest.common.test_output_path('fstr.tsv')
    output_quantized_fstr_path = yatest.common.test_output_path('fstr.tsv.quantized')
    train_fstr_path = yatest.common.test_output_path('train_fstr.tsv')

    def get_pool_path(set_name, is_quantized=False):
        # 'quantized://' prefix selects the pre-quantized variant of the pool file.
        path = data_file('querywise', set_name)
        return 'quantized://' + path + '.quantized' if is_quantized else path
    cd_file = data_file('querywise', 'train.cd')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'PairLogit',
        '--learn-set', get_pool_path('train', True),
        '--learn-pairs', data_file('querywise', 'train.pairs'),
        '-i', '10',
        '-T', '4',
        '--fstr-file', train_fstr_path,
        '--fstr-type', 'LossFunctionChange',
        '--model-file', output_model_path,
    )
    execute_catboost_fit('CPU', cmd)
    # fstr on the raw dsv pool (passes an explicit column description).
    cmd = (
        CATBOOST_PATH, 'fstr',
        '--input-path', get_pool_path('train'),
        '--column-description', cd_file,
        '--input-pairs', data_file('querywise', 'train.pairs'),
        '--model-file', output_model_path,
        '--output-path', output_dsv_fstr_path,
        '--fstr-type', 'LossFunctionChange',
    )
    yatest.common.execute(cmd)
    # fstr on the quantized pool (no --column-description passed).
    cmd = (
        CATBOOST_PATH, 'fstr',
        '--input-path', get_pool_path('train', True),
        '--input-pairs', data_file('querywise', 'train.pairs'),
        '--model-file', output_model_path,
        '--output-path', output_quantized_fstr_path,
        '--fstr-type', 'LossFunctionChange',
    )
    yatest.common.execute(cmd)
    fstr_dsv = np.loadtxt(output_dsv_fstr_path, dtype='float', delimiter='\t')
    fstr_quantized = np.loadtxt(output_quantized_fstr_path, dtype='float', delimiter='\t')
    train_fstr = np.loadtxt(train_fstr_path, dtype='float', delimiter='\t')
    assert(np.allclose(fstr_dsv, fstr_quantized, rtol=1e-6))
    assert(np.allclose(fstr_dsv, train_fstr, rtol=1e-6))
@pytest.mark.parametrize('loss_function', LOSS_FUNCTIONS)
@pytest.mark.parametrize('grow_policy', GROW_POLICIES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_reproducibility(loss_function, grow_policy, dev_score_calc_obj_block_size):
    """Training with 1 thread and with 4 threads must produce identical evals."""
    def fit_with_threads(thread_count, model_path, eval_path):
        execute_catboost_fit('CPU', [
            '--use-best-model', 'false',
            '--loss-function', loss_function,
            '-f', data_file('adult', 'train_small'),
            '-t', data_file('adult', 'test_small'),
            '--column-description', data_file('adult', 'train.cd'),
            '--grow-policy', grow_policy,
            '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
            '-i', '25',
            '-T', str(thread_count),
            '-m', model_path,
            '--eval-file', eval_path,
        ])
    eval_single_thread = yatest.common.test_output_path('test_1.eval')
    fit_with_threads(1, yatest.common.test_output_path('model_1.bin'), eval_single_thread)
    eval_multi_thread = yatest.common.test_output_path('test_4.eval')
    fit_with_threads(4, yatest.common.test_output_path('model_4.bin'), eval_multi_thread)
    assert filecmp.cmp(eval_single_thread, eval_multi_thread)
# Feature border (quantization) selection algorithms exercised below.
BORDER_TYPES = ['Median', 'GreedyLogSum', 'UniformAndQuantiles', 'MinEntropy', 'MaxLogSum', 'Uniform']
@pytest.mark.parametrize('border_type', BORDER_TYPES)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_feature_border_types(border_type, boosting_type):
    """Each supported --feature-border-type yields a canonized eval."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--feature-border-type', border_type,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('depth', [4, 8])
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_deep_tree_classification(depth, boosting_type):
    """Check Logloss training at larger tree depths against the canonical eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '--depth', str(depth),
        '-m', model_file,
        '--eval-file', eval_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_regularization(boosting_type, grow_policy, dev_score_calc_obj_block_size):
    """Train with L2 leaf regularization and Newton leaf estimation; canonize eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--leaf-estimation-method', 'Newton',
        '--eval-file', eval_file,
        '--l2-leaf-reg', '5',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
REG_LOSS_FUNCTIONS = ['RMSE', 'RMSEWithUncertainty', 'MAE', 'Lq:q=1', 'Lq:q=1.5', 'Lq:q=3', 'Quantile', 'LogLinQuantile', 'Poisson', 'MAPE',
                      'Huber:delta=1.0']


@pytest.mark.parametrize('loss_function', REG_LOSS_FUNCTIONS)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_reg_targets(loss_function, boosting_type, dev_score_calc_obj_block_size):
    """Train each regression loss on the crossentropy pool; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('adult_crossentropy', 'train_proba'),
        '-t', data_file('adult_crossentropy', 'test_proba'),
        '--column-description', data_file('adult_crossentropy', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_multi_targets(loss_function, boosting_type, dev_score_calc_obj_block_size):
    """Multiclass training: check Plain dev-leafwise-approxes equivalence and that
    standalone `calc` reproduces the training-time eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    eval_file_dev_approxes = yatest.common.test_output_path('test_dev_approxes.eval')
    # Shared argument prefix; the eval-file value is appended per run below.
    common_args = [
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('cloudness_small', 'train_small'),
        '-t', data_file('cloudness_small', 'test_small'),
        '--column-description', data_file('cloudness_small', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--eval-file',
    ]
    execute_catboost_fit('CPU', common_args + [eval_file])
    if boosting_type == 'Plain':
        # With Plain boosting the leafwise-approxes code path must give the same eval.
        execute_catboost_fit('CPU', common_args + [eval_file_dev_approxes, '--dev-leafwise-approxes'])
        assert filecmp.cmp(eval_file, eval_file_dev_approxes)
    formula_predict_path = yatest.common.test_output_path('predict_test.eval')
    calc_args = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('cloudness_small', 'test_small'),
        '--column-description', data_file('cloudness_small', 'train.cd'),
        '-m', model_file,
        '--output-path', formula_predict_path,
        '--prediction-type', 'RawFormulaVal'
    )
    yatest.common.execute(calc_args)
    assert (compare_evals(eval_file, formula_predict_path))
    return [local_canonical_file(eval_file)]
BORDER_TYPES = ['MinEntropy', 'Median', 'UniformAndQuantiles', 'MaxLogSum', 'GreedyLogSum', 'Uniform']


@pytest.mark.parametrize(
    'border_type',
    BORDER_TYPES,
    ids=lambda border_type: 'border_type=%s' % border_type
)
@pytest.mark.parametrize(
    'border_count',
    [1, 3, 10],
    ids=lambda border_count: 'border_count=%d' % border_count
)
@pytest.mark.parametrize(
    'boosting_type',
    BOOSTING_TYPE,
    ids=lambda boosting_type: 'boosting_type=%s' % boosting_type
)
def test_ctr_target_quantization(border_type, border_count, boosting_type):
    """Vary CTR target border type/count and canonize the resulting eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'RMSE',
        '-f', data_file('adult_crossentropy', 'train_proba'),
        '-t', data_file('adult_crossentropy', 'test_proba'),
        '--column-description', data_file('adult_crossentropy', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '3',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--ctr', 'Borders:TargetBorderType=' + border_type,
        '--ctr-target-border-count', str(border_count),
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
COUNTER_METHODS = ['Full', 'SkipTest']


@pytest.mark.parametrize('counter_calc_method', COUNTER_METHODS)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_counter_calc(counter_calc_method, boosting_type):
    """Train with each counter CTR calculation method; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'RMSE',
        '-f', data_file('adult_crossentropy', 'train_proba'),
        '-t', data_file('adult_crossentropy', 'test_proba'),
        '--column-description', data_file('adult_crossentropy', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '60',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--counter-calc-method', counter_calc_method,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
CTR_TYPES = ['Borders', 'Buckets', 'BinarizedTargetMeanValue:TargetBorderCount=10', 'Borders,BinarizedTargetMeanValue:TargetBorderCount=10', 'Buckets,Borders']


@pytest.mark.parametrize('ctr_type', CTR_TYPES)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_ctr_type(ctr_type, boosting_type, dev_score_calc_obj_block_size):
    """Train with each CTR type combination; canonize the eval output."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'RMSE',
        '-f', data_file('adult_crossentropy', 'train_proba'),
        '-t', data_file('adult_crossentropy', 'test_proba'),
        '--column-description', data_file('adult_crossentropy', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '3',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--ctr', ctr_type,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_custom_overfitting_detector_metric(boosting_type):
    """Use AUC (with train computation enabled) as the eval metric; canonize error logs."""
    model_file = yatest.common.test_output_path('adult_model.bin')
    test_error_file = yatest.common.test_output_path('test_error.tsv')
    learn_error_file = yatest.common.test_output_path('learn_error.tsv')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '--eval-metric', 'AUC:hints=skip_train~false',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--learn-err-log', learn_error_file,
        '--test-err-log', test_error_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_error_file),
            local_canonical_file(test_error_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_same_metric_skip_different(boosting_type):
    """AUC computed as eval-metric must match AUC computed as custom-metric.

    Runs the same fit twice: once with AUC (train not skipped) as the eval
    metric, once with AUC skipped as eval metric but supplied as a custom
    metric, then compares the learn error logs.
    """
    model_file = yatest.common.test_output_path('adult_model.bin')
    test_error_file = yatest.common.test_output_path('test_error.tsv')
    learn_error_file = yatest.common.test_output_path('learn_error.tsv')
    test_error_file_custom = yatest.common.test_output_path('test_error_with_custom_metric.tsv')
    learn_error_file_custom = yatest.common.test_output_path('learn_error_with_custom_metric.tsv')
    base_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
    ]
    execute_catboost_fit('CPU', base_args + [
        '--eval-metric', 'AUC:hints=skip_train~false',
        '--learn-err-log', learn_error_file,
        '--test-err-log', test_error_file,
    ])
    execute_catboost_fit('CPU', base_args + [
        '--eval-metric', 'AUC:hints=skip_train~true',
        '--custom-metric', 'AUC:hints=skip_train~false',
        '--learn-err-log', learn_error_file_custom,
        '--test-err-log', test_error_file_custom,
    ])
    assert filecmp.cmp(learn_error_file_custom, learn_error_file)
@pytest.mark.parametrize('loss_function', BINCLASS_LOSSES)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_custom_loss_for_classification(loss_function, boosting_type):
    """Train a binary classifier with a large set of custom metrics; canonize logs.

    The metric equal to the training loss is excluded from the custom list.
    """
    learn_error_file = yatest.common.test_output_path('learn_error.tsv')
    test_error_file = yatest.common.test_output_path('test_error.tsv')
    candidate_metrics = [
        'AUC:hints=skip_train~false',
        'Logloss',
        'CrossEntropy',
        'Accuracy',
        'Precision',
        'Recall',
        'F1',
        'TotalF1',
        'MCC',
        'BalancedAccuracy',
        'BalancedErrorRate',
        'Kappa',
        'WKappa',
        'BrierScore',
        'ZeroOneLoss',
        'HammingLoss',
        'HingeLoss',
        'NormalizedGini',
    ]
    custom_metrics = [metric for metric in candidate_metrics if metric != loss_function]
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('adult_crossentropy', 'train_proba'),
        '-t', data_file('adult_crossentropy', 'test_proba'),
        '--column-description', data_file('adult_crossentropy', 'train.cd'),
        '--boosting-type', boosting_type,
        '-w', '0.03',
        '-i', '10',
        '-T', '4',
        '--custom-metric', ','.join(custom_metrics),
        '--learn-err-log', learn_error_file,
        '--test-err-log', test_error_file,
    ]
    if loss_function == 'Logloss':
        # Logloss on a probability target needs an explicit target border.
        fit_args += ['--target-border', '0.5']
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_error_file), local_canonical_file(test_error_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_loglikelihood_of_prediction(boosting_type):
    """Track the LogLikelihoodOfPrediction custom metric on a weighted pool."""
    learn_error_file = yatest.common.test_output_path('learn_error.tsv')
    test_error_file = yatest.common.test_output_path('test_error.tsv')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult_weight', 'train_weight'),
        '-t', data_file('adult_weight', 'test_weight'),
        '--column-description', data_file('adult_weight', 'train.cd'),
        '--boosting-type', boosting_type,
        '-w', '0.03',
        '-i', '10',
        '-T', '4',
        '--custom-metric', 'LogLikelihoodOfPrediction',
        '--learn-err-log', learn_error_file,
        '--test-err-log', test_error_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    # Compare with tolerance: metric values are floating point.
    return [local_canonical_file(learn_error_file, diff_tool(1e-7)), local_canonical_file(test_error_file, diff_tool(1e-7))]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_custom_loss_for_multiclassification(boosting_type):
    """Train MultiClass with a battery of custom metrics; canonize error logs."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    learn_error_file = yatest.common.test_output_path('learn_error.tsv')
    test_error_file = yatest.common.test_output_path('test_error.tsv')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'MultiClass',
        '-f', data_file('cloudness_small', 'train_small'),
        '-t', data_file('cloudness_small', 'test_small'),
        '--column-description', data_file('cloudness_small', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--custom-metric',
        'AUC:hints=skip_train~false;type=OneVsAll,Accuracy,Precision,Recall,F1,TotalF1,MCC,Kappa,WKappa,ZeroOneLoss,HammingLoss,HingeLoss,NormalizedGini',
        '--learn-err-log', learn_error_file,
        '--test-err-log', test_error_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_error_file), local_canonical_file(test_error_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_calc_prediction_type(boosting_type):
    """Fit a model, then run standalone `calc` with Probability output; canonize it."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    calc_args = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-m', model_file,
        '--output-path', eval_file,
        '--prediction-type', 'Probability'
    )
    yatest.common.execute(calc_args)
    return local_canonical_file(eval_file)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_calc_no_target(boosting_type):
    """`calc` on a column description without a target must reproduce the fit-time eval."""
    model_file = yatest.common.test_output_path('adult_model.bin')
    fit_eval_file = yatest.common.test_output_path('fit_test.eval')
    calc_eval_file = yatest.common.test_output_path('calc_test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--counter-calc-method', 'SkipTest',
        '--eval-file', fit_eval_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    calc_args = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('train_notarget.cd'),
        '-m', model_file,
        '--output-path', calc_eval_file
    )
    yatest.common.execute(calc_args)
    assert (compare_evals(fit_eval_file, calc_eval_file))
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_classification_progress_restore(boosting_type):
    """Resuming training from a snapshot must match a single uninterrupted run.

    Trains 30 iterations in one shot, then 15 + 15 via `--snapshot-file`, and
    asserts both eval files are identical.
    """

    def run_catboost(iters, model_path, eval_path, additional_params=None):
        import random
        import shutil
        import string

        letters = string.ascii_lowercase
        # Copy the train set under a fresh random name for each invocation
        # (presumably so the snapshot run does not key on the input path —
        # TODO confirm against snapshot implementation).
        # BUG FIX: was `xrange`, which does not exist on Python 3 (NameError).
        train_random_name = ''.join(random.choice(letters) for _ in range(8))
        shutil.copy(data_file('adult', 'train_small'), train_random_name)
        cmd = [
            '--loss-function', 'Logloss',
            '--learning-rate', '0.5',
            '-f', train_random_name,
            '-t', data_file('adult', 'test_small'),
            '--column-description', data_file('adult', 'train.cd'),
            '--boosting-type', boosting_type,
            '-i', str(iters),
            '-T', '4',
            '-m', model_path,
            '--eval-file', eval_path,
        ]
        if additional_params:
            cmd += additional_params
        execute_catboost_fit('CPU', cmd)

    canon_model_path = yatest.common.test_output_path('canon_model.bin')
    canon_eval_path = yatest.common.test_output_path('canon_test.eval')
    run_catboost(30, canon_model_path, canon_eval_path)
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    progress_path = yatest.common.test_output_path('test.cbp')
    run_catboost(15, model_path, eval_path, additional_params=['--snapshot-file', progress_path])
    run_catboost(30, model_path, eval_path, additional_params=['--snapshot-file', progress_path])
    assert filecmp.cmp(canon_eval_path, eval_path)
    # TODO(kirillovs): make this active when progress_file parameter will be deleted from json params
    # assert filecmp.cmp(canon_model_path, model_path)
@pytest.mark.parametrize('loss_function', CLASSIFICATION_LOSSES)
@pytest.mark.parametrize('prediction_type', PREDICTION_TYPES)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_prediction_type(prediction_type, loss_function, boosting_type):
    """Train with each classification loss and prediction type; canonize eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--prediction-type', prediction_type,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_const_feature(boosting_type, dev_score_calc_obj_block_size):
    """Training must handle a feature column that is constant across the pool.

    Column 14 of both train and test is overwritten with '0' before fitting.
    """
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    train_file = yatest.common.test_output_path('train_small')
    test_file = yatest.common.test_output_path('test_small')
    train_rows = np.loadtxt(data_file('adult', 'train_small'), dtype=str, delimiter='\t')
    test_rows = np.loadtxt(data_file('adult', 'test_small'), dtype=str, delimiter='\t')
    # Make feature column 14 constant in both datasets.
    train_rows[:, 14] = '0'
    test_rows[:, 14] = '0'
    np.savetxt(train_file, train_rows, fmt='%s', delimiter='\t')
    # Only the first 10 test rows are used.
    np.savetxt(test_file, test_rows[:10, :], fmt='%s', delimiter='\t')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'RMSE',
        '-f', train_file,
        '-t', test_file,
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
QUANTILE_LOSS_FUNCTIONS = ['Quantile', 'LogLinQuantile']


@pytest.mark.parametrize('loss_function', QUANTILE_LOSS_FUNCTIONS)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_quantile_targets(loss_function, boosting_type, grow_policy):
    """Train quantile-style losses at alpha=0.9; canonize the eval output."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', loss_function + ':alpha=0.9',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-i', '5',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_quantile_targets_exact(boosting_type):
    """Quantile loss with the Exact leaf estimation method; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Quantile:alpha=0.9',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '5',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--leaf-estimation-method', 'Exact',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_quantile_weights(boosting_type):
    """Exact quantile estimation on a pool with per-object weights; canonize eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Quantile:alpha=0.9',
        '-f', data_file('higgs', 'train_small'),
        '-t', data_file('higgs', 'test_small'),
        '--column-description', data_file('higgs', 'train_weight.cd'),
        '--boosting-type', boosting_type,
        '-i', '5',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--leaf-estimation-method', 'Exact',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_quantile_categorical(boosting_type):
    """Exact quantile estimation on a pool with categorical features; canonize eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Quantile:alpha=0.9',
        '-f', data_file('adult_crossentropy', 'train_proba'),
        '-t', data_file('adult_crossentropy', 'test_proba'),
        '--column-description', data_file('adult_crossentropy', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '5',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--leaf-estimation-method', 'Exact',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
def test_quantile_exact_distributed():
    """Distributed training with Exact leaf estimation (MAE loss); canonize the result."""
    train_cmd = make_deterministic_train_cmd(
        loss_function='MAE',
        pool='higgs',
        train='train_small',
        test='test_small',
        cd='train.cd',
        other_options=(
            '--leaf-estimation-method', 'Exact',
            '--boost-from-average', 'False'
        )
    )
    return [local_canonical_file(run_dist_train(train_cmd))]
CUSTOM_LOSS_FUNCTIONS = ['RMSE,MAE', 'Quantile:alpha=0.9', 'MSLE,MedianAbsoluteError,SMAPE',
                         'NumErrors:greater_than=0.01,NumErrors:greater_than=0.1,NumErrors:greater_than=0.5',
                         'FairLoss:smoothness=0.9']


@pytest.mark.parametrize('custom_loss_function', CUSTOM_LOSS_FUNCTIONS)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_custom_loss(custom_loss_function, boosting_type):
    """Train RMSE while tracking assorted custom regression metrics; canonize logs."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    learn_error_file = yatest.common.test_output_path('learn_error.tsv')
    test_error_file = yatest.common.test_output_path('test_error.tsv')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'RMSE',
        '-f', data_file('adult_crossentropy', 'train_proba'),
        '-t', data_file('adult_crossentropy', 'test_proba'),
        '--column-description', data_file('adult_crossentropy', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '50',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--custom-metric', custom_loss_function,
        '--learn-err-log', learn_error_file,
        '--test-err-log', test_error_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    # MSLE values need a small tolerance when diffing against the canonical logs.
    eps = 1e-9 if 'MSLE' in custom_loss_function else 0
    return [local_canonical_file(learn_error_file, diff_tool=diff_tool(eps)),
            local_canonical_file(test_error_file, diff_tool=diff_tool(eps))]
def test_train_dir():
    """All training artifacts must land inside the directory given via --train-dir."""
    model_name = 'model.bin'
    eval_name = 'test.eval'
    train_dir = 'trainDir'
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'RMSE',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '2',
        '-T', '4',
        '-m', model_name,
        '--eval-file', eval_name,
        '--train-dir', train_dir,
        '--fstr-file', 'fstr.tsv',
        '--fstr-internal-file', 'ifstr.tsv',
    ]
    execute_catboost_fit('CPU', fit_args)
    expected = ['time_left.tsv', 'learn_error.tsv', 'test_error.tsv', model_name, eval_name, 'fstr.tsv', 'ifstr.tsv']
    for artifact in expected:
        assert os.path.isfile(os.path.join(train_dir, artifact))
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('qwise_loss', ['QueryRMSE', 'RMSE'])
def test_train_on_binarized_equal_train_on_float(boosting_type, qwise_loss):
    """Training on pre-binarized borders must equal training on raw float features.

    Phase 1 trains on the float pool and writes the learned borders to
    ``borders_file_output``; phase 2 re-trains feeding those borders back in via
    ``--input-borders-file``.  Predictions from both models on learn and test
    pools must be byte-identical.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_model_path_binarized = yatest.common.test_output_path('model_binarized.bin')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    borders_file = yatest.common.test_output_path('borders.tsv')
    # Borders produced by the first (float) run; consumed by the second run.
    borders_file_output = borders_file + '.out'
    predictions_path_learn = yatest.common.test_output_path('predictions_learn.tsv')
    predictions_path_learn_binarized = yatest.common.test_output_path('predictions_learn_binarized.tsv')
    predictions_path_test = yatest.common.test_output_path('predictions_test.tsv')
    predictions_path_test_binarized = yatest.common.test_output_path('predictions_test_binarized.tsv')
    learn_file = data_file('querywise', 'train')
    cd_file = data_file('querywise', 'train.cd')
    test_file = data_file('querywise', 'test')
    # Common fit parameters; the first run also emits the borders file.
    params = {"--loss-function": qwise_loss,
              "-f": learn_file,
              "-t": test_file,
              '--column-description': cd_file,
              '--boosting-type': boosting_type,
              '-i': '100',
              '-T': '4',
              '-m': output_model_path,
              '--learn-err-log': learn_error_path,
              '--test-err-log': test_error_path,
              '--use-best-model': 'false',
              '--output-borders-file': borders_file_output,
              }
    # Second run: reuse the borders written by the first run as input.
    params_binarized = dict(params)
    params_binarized['--input-borders-file'] = borders_file_output
    params_binarized['--output-borders-file'] = borders_file
    params_binarized['-m'] = output_model_path_binarized
    execute_catboost_fit(task_type='CPU', params=params)
    apply_catboost(output_model_path, learn_file, cd_file, predictions_path_learn)
    apply_catboost(output_model_path, test_file, cd_file, predictions_path_test)
    execute_catboost_fit(
        task_type='CPU',
        params=params_binarized,
    )
    apply_catboost(output_model_path_binarized, learn_file, cd_file, predictions_path_learn_binarized)
    apply_catboost(output_model_path_binarized, test_file, cd_file, predictions_path_test_binarized)
    # Both models must give identical predictions on learn and test pools.
    assert (filecmp.cmp(predictions_path_learn, predictions_path_learn_binarized))
    assert (filecmp.cmp(predictions_path_test, predictions_path_test_binarized))
    return [local_canonical_file(learn_error_path),
            local_canonical_file(test_error_path),
            local_canonical_file(predictions_path_test),
            local_canonical_file(predictions_path_learn),
            local_canonical_file(borders_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_feature_id_fstr(boosting_type):
    """Compute fstr using a column description carrying feature ids; canonize it."""
    model_file = yatest.common.test_output_path('adult_model.bin')
    fstr_file = yatest.common.test_output_path('fstr.tsv')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    fstr_args = (
        CATBOOST_PATH,
        'fstr',
        '--input-path', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train_with_id.cd'),
        '-m', model_file,
        '-o', fstr_file,
    )
    yatest.common.execute(fstr_args)
    return local_canonical_file(fstr_file)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_class_names_logloss(boosting_type):
    """Binary training with explicit (reversed) class names; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--class-names', '1,0',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_class_names_multiclass(loss_function, boosting_type):
    """Multiclass training with explicit class names covering all labels."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('precipitation_small', 'train_small'),
        '-t', data_file('precipitation_small', 'test_small'),
        '--column-description', data_file('precipitation_small', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--prediction-type', 'RawFormulaVal,Class',
        '--eval-file', eval_file,
        '--class-names', '0.,0.5,1.,0.25,0.75',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_class_names_multiclass_last_class_missed(loss_function, boosting_type):
    """Multiclass training with class names ordered so the last listed name
    ('1.') differs from the natural ordering; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('precipitation_small', 'train_small'),
        '-t', data_file('precipitation_small', 'test_small'),
        '--column-description', data_file('precipitation_small', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--prediction-type', 'RawFormulaVal,Class',
        '--eval-file', eval_file,
        '--class-names', '0.,0.5,0.25,0.75,1.',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_class_weight_logloss(boosting_type):
    """Binary training with unequal class weights; canonize the eval output."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--class-weights', '0.5,2',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_class_weight_multiclass(loss_function, boosting_type):
    """Multiclass training with unequal class weights; canonize the eval output."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--class-weights', '0.5,2',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_params_from_file(boosting_type):
    """Load additional training parameters from a JSON file; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '6',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--params-file', data_file('params.json'),
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
def test_lost_class(boosting_type, loss_function):
    """Train on a pool where one of --classes-count classes is absent."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = (
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('cloudness_lost_class', 'train_small'),
        '-t', data_file('cloudness_lost_class', 'test_small'),
        '--column-description', data_file('cloudness_lost_class', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--classes-count', '3',
        '--prediction-type', 'RawFormulaVal,Class',
    )
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_class_weight_with_lost_class(boosting_type):
    """Combine --class-weights with a pool that is missing one class."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = (
        '--use-best-model', 'false',
        '--loss-function', 'MultiClass',
        '-f', data_file('cloudness_lost_class', 'train_small'),
        '-t', data_file('cloudness_lost_class', 'test_small'),
        '--column-description', data_file('cloudness_lost_class', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--classes-count', '3',
        '--class-weights', '0.5,2,2',
        '--prediction-type', 'RawFormulaVal,Class',
    )
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_one_hot(boosting_type, dev_score_calc_obj_block_size):
    """Train with one-hot encoding; check fit-time eval matches calc-mode eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    applied_eval_file = yatest.common.test_output_path('calc.eval')
    fit_args = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '100',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '-x', '1',
        '-n', '8',
        '-w', '0.1',
        '--one-hot-max-size', '10'
    )
    execute_catboost_fit('CPU', fit_args)
    apply_args = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-m', model_file,
        '--output-path', applied_eval_file
    )
    yatest.common.execute(apply_args)
    # standalone model application must reproduce the fit-time predictions
    assert compare_evals(eval_file, applied_eval_file)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_random_strength(boosting_type, dev_score_calc_obj_block_size):
    """Train with a large --random-strength; canonize the test eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '100',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '-x', '1',
        '-n', '8',
        '-w', '0.1',
        '--random-strength', '100'
    )
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_only_categorical_features(boosting_type, dev_score_calc_obj_block_size):
    """Train on a pool whose cd marks every feature as categorical."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult_all_categorical.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '100',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '-x', '1',
        '-n', '8',
        '-w', '0.1',
    )
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_weight_sampling_per_tree(boosting_type, dev_score_calc_obj_block_size):
    """Train with PerTree sampling frequency; canonize the test eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    train_log = yatest.common.test_output_path('learn_error.tsv')
    test_log = yatest.common.test_output_path('test_error.tsv')
    fit_args = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--learn-err-log', train_log,
        '--test-err-log', test_log,
        '--sampling-frequency', 'PerTree',
    )
    execute_catboost_fit('CPU', fit_args)
    return local_canonical_file(eval_file)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('used_ram_limit', ['1Kb', '4Gb'])
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    ['600', '5000000'],
    ids=['calc_block=600', 'calc_block=5000000']
)
def test_allow_writing_files_and_used_ram_limit(boosting_type, used_ram_limit, dev_score_calc_obj_block_size):
    """Train with writes disabled and a RAM limit; canonize the test eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = (
        '--use-best-model', 'false',
        '--allow-writing-files', 'false',
        '--used-ram-limit', used_ram_limit,
        '--loss-function', 'Logloss',
        '--max-ctr-complexity', '5',
        '--depth', '7',
        '-f', data_file('airlines_5K', 'train'),
        '-t', data_file('airlines_5K', 'test'),
        '--column-description', data_file('airlines_5K', 'cd'),
        '--has-header',
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-w', '0.03',
        '-T', '6',
        '-m', model_file,
        '--eval-file', eval_file,
    )
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize(
    'ignored_features',
    [True, False],
    ids=['ignored_features=True', 'ignored_features=False']
)
def test_apply_with_permuted_columns(ignored_features):
    """Applying a model to a column-permuted pool must reproduce the fit eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = (
        '--loss-function', 'Logloss',
        '-f', data_file('airlines_5K', 'train'),
        '-t', data_file('airlines_5K', 'test'),
        '--column-description', data_file('airlines_5K', 'cd'),
        '--has-header',
        '-i', '20',
        '-w', '0.03',
        '-T', '6',
        '-m', model_file,
        '--eval-file', eval_file,
    )
    if ignored_features:
        fit_args += ('--ignore-features', '0:2:5')
    execute_catboost_fit('CPU', fit_args)
    shuffled_test, shuffled_cd = permute_dataset_columns(
        data_file('airlines_5K', 'test'),
        data_file('airlines_5K', 'cd'),
        seed=123)
    shuffled_eval_file = yatest.common.test_output_path('permuted_predict.eval')
    apply_args = (
        CATBOOST_PATH,
        'calc',
        '--input-path', shuffled_test,
        '--has-header',
        '--column-description', shuffled_cd,
        '-m', model_file,
        '--output-path', shuffled_eval_file,
        '--output-columns', 'SampleId,RawFormulaVal,Label'
    )
    yatest.common.execute(apply_args)
    # predictions must be invariant under column permutation
    assert filecmp.cmp(eval_file, shuffled_eval_file)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_subsample_per_tree(boosting_type, grow_policy, dev_score_calc_obj_block_size):
    """Bernoulli subsampling with PerTree frequency; canonize the test eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    train_log = yatest.common.test_output_path('learn_error.tsv')
    test_log = yatest.common.test_output_path('test_error.tsv')
    fit_args = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--learn-err-log', train_log,
        '--test-err-log', test_log,
        '--sampling-frequency', 'PerTree',
        '--bootstrap-type', 'Bernoulli',
        '--subsample', '0.5',
    )
    execute_catboost_fit('CPU', fit_args)
    return local_canonical_file(eval_file)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_subsample_per_tree_level(boosting_type, grow_policy, dev_score_calc_obj_block_size):
    """PerTreeLevel subsampling; must be rejected for the Lossguide grow policy."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    train_log = yatest.common.test_output_path('learn_error.tsv')
    test_log = yatest.common.test_output_path('test_error.tsv')
    fit_args = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--learn-err-log', train_log,
        '--test-err-log', test_log,
        '--sampling-frequency', 'PerTreeLevel',
        '--bootstrap-type', 'Bernoulli',
        '--subsample', '0.5',
    )
    if grow_policy != 'Lossguide':
        execute_catboost_fit('CPU', fit_args)
        return local_canonical_file(eval_file)
    # PerTreeLevel sampling is incompatible with Lossguide: fit must fail
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', fit_args)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_bagging_per_tree_level(boosting_type, dev_score_calc_obj_block_size):
    """Bayesian bagging with a non-default temperature; canonize the test eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    train_log = yatest.common.test_output_path('learn_error.tsv')
    test_log = yatest.common.test_output_path('test_error.tsv')
    fit_args = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--learn-err-log', train_log,
        '--test-err-log', test_log,
        '--bagging-temperature', '0.5',
    )
    execute_catboost_fit('CPU', fit_args)
    return local_canonical_file(eval_file)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_plain(boosting_type, dev_score_calc_obj_block_size):
    """Train Logloss with the Plain boosting scheme; canonize the test eval.

    NOTE(review): '--boosting-type' is passed twice below -- once with the
    parametrized value and once hard-coded to 'Plain'.  Presumably the
    CLI lets a later occurrence override the earlier one, making the
    boosting_type parametrization affect only test IDs; confirm against
    the CLI's duplicate-option handling before cleaning this up.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--boosting-type', 'Plain',
        '--eval-file', output_eval_path,
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_bootstrap(boosting_type, dev_score_calc_obj_block_size):
    """All bootstrap types with neutral settings must produce identical evals."""
    extra_options = {
        'no': ('--bootstrap-type', 'No',),
        'bayes': ('--bootstrap-type', 'Bayesian', '--bagging-temperature', '0.0',),
        'bernoulli': ('--bootstrap-type', 'Bernoulli', '--subsample', '1.0',)
    }
    base_args = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
    )
    for name, options in extra_options.items():
        model_file = yatest.common.test_output_path('model_' + name + '.bin')
        eval_file = yatest.common.test_output_path('test_' + name + '.eval')
        execute_catboost_fit('CPU', base_args + ('-m', model_file, '--eval-file', eval_file,) + options)
    # temperature 0.0 / subsample 1.0 disable the effect, so all three match 'No'
    reference_eval = yatest.common.test_output_path('test_no.eval')
    assert filecmp.cmp(reference_eval, yatest.common.test_output_path('test_bayes.eval'))
    assert filecmp.cmp(reference_eval, yatest.common.test_output_path('test_bernoulli.eval'))
    return [local_canonical_file(reference_eval)]
def test_json_logging():
    """Canonize the JSON training log (timestamps stripped)."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    json_log_file = yatest.common.test_output_path('catboost_training.json')
    fit_args = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-w', '0.03',
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--json-log', json_log_file,
    )
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(remove_time_from_json(json_log_file))]
def test_json_logging_metric_period():
    """Canonize the JSON training log when metrics are computed every 2 iterations."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    json_log_file = yatest.common.test_output_path('catboost_training.json')
    fit_args = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--json-log', json_log_file,
        '--metric-period', '2',
    )
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(remove_time_from_json(json_log_file))]
def test_output_columns_format():
    """Exercise --output-columns (including a raw '#2' column) in fit and calc modes."""
    model_file = yatest.common.test_output_path('adult_model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = (
        '--use-best-model', 'false',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        # test set ('-t') intentionally omitted
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--output-columns', 'SampleId,RawFormulaVal,#2,Label',
        '--eval-file', eval_file
    )
    execute_catboost_fit('CPU', fit_args)
    applied_eval_file = yatest.common.test_output_path('predict_test.eval')
    apply_args = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-m', model_file,
        '--output-path', applied_eval_file,
        '--output-columns', 'SampleId,RawFormulaVal'
    )
    yatest.common.execute(apply_args)
    return local_canonical_file(eval_file, applied_eval_file)
def test_eval_period():
    """Canonize calc-mode output produced with --eval-period 2."""
    model_file = yatest.common.test_output_path('adult_model.bin')
    fit_args = (
        '--use-best-model', 'false',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '10',
        '-T', '4',
        '-m', model_file,
    )
    execute_catboost_fit('CPU', fit_args)
    applied_eval_file = yatest.common.test_output_path('predict_test.eval')
    apply_args = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-m', model_file,
        '--output-path', applied_eval_file,
        '--eval-period', '2'
    )
    yatest.common.execute(apply_args)
    return local_canonical_file(applied_eval_file)
def test_weights_output():
    """Canonize an eval that includes the Weight output column."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult_weight', 'train_weight'),
        '-t', data_file('adult_weight', 'test_weight'),
        '--column-description', data_file('adult_weight', 'train.cd'),
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--output-columns', 'SampleId,RawFormulaVal,Weight,Label',
    )
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
def test_baseline_output():
    """Canonize an eval that includes the Baseline output column."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult_weight', 'train_weight'),
        '-t', data_file('adult_weight', 'test_weight'),
        '--column-description', data_file('train_adult_baseline.cd'),
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--output-columns', 'SampleId,RawFormulaVal,Baseline,Label',
    )
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
def test_baseline_from_file_output():
    """Baselines from the pool vs. from external files must give identical evals.

    The first fit uses a column description with the baseline in the pool
    (train_baseline.cd); the second supplies the same baselines via
    --learn-baseline/--test-baseline files and ignores pool column 0 (the
    baseline column, per the cd).  Raw predictions must match.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    eval_0_path = yatest.common.test_output_path('test_0.eval')
    eval_1_path = yatest.common.test_output_path('test_1.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '--learn-set', data_file('higgs', 'train_small'),
        '--test-set', data_file('higgs', 'test_small'),
        '--column-description', data_file('higgs', 'train_baseline.cd'),
        '-i', '10',
        '--learning-rate', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', eval_0_path,
        '--output-columns', 'SampleId,RawFormulaVal',
    )
    execute_catboost_fit('CPU', cmd)
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '--learn-set', data_file('higgs', 'train_small'),
        '--test-set', data_file('higgs', 'test_small'),
        '--column-description', data_file('higgs', 'train_weight.cd'),
        '--learn-baseline', data_file('higgs', 'train_baseline'),
        '--test-baseline', data_file('higgs', 'test_baseline'),
        '-i', '10',
        '--ignore-features', '0',  # baseline column
        '--learning-rate', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', eval_1_path,
        '--output-columns', 'SampleId,RawFormulaVal',
    )
    execute_catboost_fit('CPU', cmd)
    # Bug fix: the comparison result was previously discarded, so the test
    # could never fail on diverging evals.  Assert it, as the other
    # compare_evals call sites in this file do.
    assert compare_evals(eval_0_path, eval_1_path)
def test_group_weight_output():
    """GroupWeight output column must echo column 0 of the test pool in both fit and calc modes."""
    model_file = yatest.common.test_output_path('model.bin')
    fit_eval_file = yatest.common.test_output_path('test_0.eval')
    calc_eval_file = yatest.common.test_output_path('test_1.eval')
    fit_args = (
        CATBOOST_PATH,
        'fit',
        '--loss-function', 'QueryRMSE',
        '--learn-set', data_file('querywise', 'train'),
        '--test-set', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd.group_weight'),
        '-i', '10',
        '-m', model_file,
        '--eval-file', fit_eval_file,
        '--output-columns', 'SampleId,RawFormulaVal,GroupWeight'
    )
    yatest.common.execute(fit_args)
    expected_weights = pd.read_csv(data_file('querywise', 'test'), sep='\t', header=None)[0]
    fit_output = pd.read_csv(fit_eval_file, sep='\t')
    assert 'GroupWeight' in fit_output.columns
    assert np.allclose(fit_output['GroupWeight'], expected_weights)
    calc_args = (
        CATBOOST_PATH,
        'calc',
        '-m', model_file,
        '--input-path', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd.group_weight'),
        '--output-path', calc_eval_file,
        '--output-columns', 'SampleId,RawFormulaVal,GroupWeight'
    )
    yatest.common.execute(calc_args)
    calc_output = pd.read_csv(calc_eval_file, sep='\t')
    assert 'GroupWeight' in calc_output.columns
    assert np.allclose(calc_output['GroupWeight'], expected_weights)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
def test_multiclass_baseline_from_file(boosting_type, loss_function):
    """Multiclass training with baselines taken from a previous run's eval.

    Three stages:
      1. a plain fit writes test_0.eval;
      2. a fit reads that eval as learn/test baseline with matching
         --class-names order and must succeed;
      3. the same fit with a mismatched --class-names order must fail --
       that failure is the expected exit from this test.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path_0 = yatest.common.test_output_path('test_0.eval')
    output_eval_path_1 = yatest.common.test_output_path('test_1.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('precipitation_small', 'train_small'),
        '-t', data_file('precipitation_small', 'train_small'),
        '--column-description', data_file('precipitation_small', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', output_model_path,
        '--prediction-type', 'RawFormulaVal,Class',
        '--eval-file', output_eval_path_0,
    )
    execute_catboost_fit('CPU', cmd)
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('precipitation_small', 'train_small'),
        '-t', data_file('precipitation_small', 'train_small'),
        '--column-description', data_file('precipitation_small', 'train.cd'),
        '--learn-baseline', output_eval_path_0,
        '--test-baseline', output_eval_path_0,
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', output_model_path,
        '--prediction-type', 'RawFormulaVal,Class',
        '--class-names', '0.,0.25,0.5,0.75',
        '--eval-file', output_eval_path_1,
    )
    execute_catboost_fit('CPU', cmd)
    try:
        cmd = (
            '--use-best-model', 'false',
            '--loss-function', loss_function,
            '-f', data_file('precipitation_small', 'train_small'),
            '-t', data_file('precipitation_small', 'train_small'),
            '--column-description', data_file('precipitation_small', 'train.cd'),
            '--learn-baseline', output_eval_path_0,
            '--test-baseline', output_eval_path_0,
            '--boosting-type', boosting_type,
            '-i', '10',
            '-T', '4',
            '-m', output_model_path,
            '--prediction-type', 'RawFormulaVal,Class',
            '--class-names', '0.5,0.25,0.75.,0.',
            '--eval-file', output_eval_path_1,
        )
        execute_catboost_fit('CPU', cmd)
    except Exception:
        # Bug fix: was a bare `except:` clause, which also swallows
        # SystemExit and KeyboardInterrupt; catch Exception instead.
        return [local_canonical_file(output_eval_path_0), local_canonical_file(output_eval_path_1)]
    assert False, 'fit with mismatched --class-names order was expected to fail'
def test_baseline_from_file_output_on_quantized_pool():
    """On a quantized pool: 10 iterations + 10 more from the first run's
    predictions as baseline must equal one 20-iteration run."""
    output_model_path = yatest.common.test_output_path('model.bin')
    eval_0_path = yatest.common.test_output_path('test_0.eval')
    eval_1_path = yatest.common.test_output_path('test_1.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '--learn-set', 'quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),
        '--test-set', 'quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),
        '--column-description', data_file('higgs', 'train_baseline.cd'),
        '--learning-rate', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', eval_0_path,
    )
    # 10 iterations from scratch ...
    execute_catboost_fit('CPU', cmd + ('-i', '10'))
    # ... then 10 more starting from those predictions as baseline
    # (eval_0_path is overwritten in place) ...
    execute_catboost_fit('CPU', cmd + (
        '-i', '10',
        '--learn-baseline', eval_0_path,
        '--test-baseline', eval_0_path,
        '--eval-file', eval_0_path))
    # ... versus a single 20-iteration run.
    execute_catboost_fit('CPU', cmd + (
        '-i', '20',
        '--eval-file', eval_1_path))
    # Bug fix: the comparison result was previously discarded, so the test
    # could never fail on diverging evals.  Assert it, as the other
    # compare_evals call sites in this file do.
    assert compare_evals(eval_0_path, eval_1_path)
def test_query_output():
    """Canonize a QueryRMSE eval that includes the GroupId output column."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = (
        '--use-best-model', 'false',
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--output-columns', 'SampleId,Label,RawFormulaVal,GroupId',
    )
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
def test_subgroup_output():
    """Canonize a QueryRMSE eval that includes GroupId and SubgroupId columns."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = (
        '--use-best-model', 'false',
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd.subgroup_id'),
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--output-columns', 'GroupId,SubgroupId,SampleId,Label,RawFormulaVal',
    )
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_without_cat_features(boosting_type, dev_score_calc_obj_block_size):
    """RMSE training with one-hot large enough to bypass CTRs; canonize the eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = (
        '--use-best-model', 'false',
        '--loss-function', 'RMSE',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-w', '0.1',
        '--one-hot-max-size', '102',
        '--bootstrap-type', 'No',
        '--random-strength', '0',
        '-m', model_file,
        '--eval-file', eval_file,
    )
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
def make_deterministic_train_cmd(loss_function, pool, train, test, cd, schema='', test_schema='', dev_score_calc_obj_block_size=None, other_options=()):
    """Build a deterministic fit command: no bootstrap, zero random strength,
    --has-time ordering, Plain boosting.  `schema`/`test_schema` prefix the
    pool paths (e.g. 'quantized://'); `other_options` is appended verbatim."""
    args = (
        '--loss-function', loss_function,
        '-f', schema + data_file(pool, train),
        '-t', test_schema + data_file(pool, test),
        '--column-description', data_file(pool, cd),
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '--random-strength', '0',
        '--has-time',
        '--bootstrap-type', 'No',
        '--boosting-type', 'Plain',
    )
    if dev_score_calc_obj_block_size:
        args += ('--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size)
    return args + other_options
def run_dist_train(cmd, output_file_switch='--eval-file'):
    """Run `cmd` single-host and distributed; outputs must agree within 1e-5.

    Returns the distributed run's output file path.
    """
    local_eval = yatest.common.test_output_path('test_0.eval')
    execute_catboost_fit('CPU', cmd + (output_file_switch, local_eval,))
    dist_eval = yatest.common.test_output_path('test_1.eval')
    execute_dist_train(cmd + (output_file_switch, dist_eval,))
    local_scores = np.loadtxt(local_eval, dtype='float', delimiter='\t', skiprows=1)
    dist_scores = np.loadtxt(dist_eval, dtype='float', delimiter='\t', skiprows=1)
    assert np.allclose(local_scores, dist_scores, atol=1e-5)
    return dist_eval
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train(dev_score_calc_obj_block_size):
    """Distributed Logloss training on higgs must match single-host."""
    fit_args = make_deterministic_train_cmd(
        loss_function='Logloss',
        pool='higgs',
        train='train_small',
        test='test_small',
        cd='train.cd',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)
    return [local_canonical_file(run_dist_train(fit_args))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_with_weights(dev_score_calc_obj_block_size):
    """Distributed training with object weights must match single-host."""
    fit_args = make_deterministic_train_cmd(
        loss_function='Logloss',
        pool='higgs',
        train='train_small',
        test='test_small',
        cd='train_weight.cd',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)
    return [local_canonical_file(run_dist_train(fit_args))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_with_baseline(dev_score_calc_obj_block_size):
    """Distributed training with an in-pool baseline must match single-host."""
    fit_args = make_deterministic_train_cmd(
        loss_function='Logloss',
        pool='higgs',
        train='train_small',
        test='test_small',
        cd='train_baseline.cd',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)
    return [local_canonical_file(run_dist_train(fit_args))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_multiclass(dev_score_calc_obj_block_size):
    """Distributed MultiClass training must match single-host."""
    fit_args = make_deterministic_train_cmd(
        loss_function='MultiClass',
        pool='cloudness_small',
        train='train_small',
        test='test_small',
        cd='train_float.cd',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)
    return [local_canonical_file(run_dist_train(fit_args))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_multiclass_weight(dev_score_calc_obj_block_size):
    """Distributed weighted MultiClass training must match single-host."""
    fit_args = make_deterministic_train_cmd(
        loss_function='MultiClass',
        pool='cloudness_small',
        train='train_small',
        test='test_small',
        cd='train_float_weight.cd',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)
    return [local_canonical_file(run_dist_train(fit_args))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_quantized(dev_score_calc_obj_block_size):
    """Distributed training on a pre-quantized learn pool must match single-host."""
    fit_args = make_deterministic_train_cmd(
        loss_function='Logloss',
        pool='higgs',
        train='train_small_x128_greedylogsum.bin',
        test='test_small',
        cd='train.cd',
        schema='quantized://',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,
        other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum'))
    return [local_canonical_file(run_dist_train(fit_args))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
@pytest.mark.parametrize('pairs_file', ['train.pairs', 'train.pairs.weighted'])
@pytest.mark.parametrize('target', ['PairLogitPairwise', 'QuerySoftMax'])
def test_dist_train_quantized_groupid(dev_score_calc_obj_block_size, pairs_file, target):
    """Distributed pairwise/groupwise training on a quantized grouped pool must match single-host."""
    fit_args = make_deterministic_train_cmd(
        loss_function=target,
        pool='querywise',
        train='train_x128_greedylogsum_aqtaa.bin',
        test='test',
        cd='train.cd.query_id',
        schema='quantized://',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,
        other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum',
                       '--learn-pairs', data_file('querywise', pairs_file)))
    return [local_canonical_file(run_dist_train(fit_args))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_quantized_group_weights(dev_score_calc_obj_block_size):
    """Distributed QueryRMSE on a quantized pool with external group weights must match single-host."""
    fit_args = make_deterministic_train_cmd(
        loss_function='QueryRMSE',
        pool='querywise',
        train='train.quantized',
        test='test',
        cd='train.cd.query_id',
        schema='quantized://',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,
        other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum',
                       '--learn-group-weights', data_file('querywise', 'train.group_weights')))
    return [local_canonical_file(run_dist_train(fit_args))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_quantized_baseline(dev_score_calc_obj_block_size):
    """Distributed training on quantized pools with external baseline files must match single-host."""
    fit_args = make_deterministic_train_cmd(
        loss_function='Logloss',
        pool='higgs',
        train='train_small_x128_greedylogsum.bin',
        test='train_small_x128_greedylogsum.bin',
        cd='train_baseline.cd',
        schema='quantized://',
        test_schema='quantized://',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,
        other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum',
                       '--test-baseline', data_file('higgs', 'test_baseline'),
                       '--learn-baseline', data_file('higgs', 'train_baseline')))
    return [local_canonical_file(run_dist_train(fit_args))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_queryrmse(dev_score_calc_obj_block_size):
    """Distributed QueryRMSE training must match single-host."""
    fit_args = make_deterministic_train_cmd(
        loss_function='QueryRMSE',
        pool='querywise',
        train='train',
        test='test',
        cd='train.cd.subgroup_id',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)
    return [local_canonical_file(run_dist_train(fit_args))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_subgroup(dev_score_calc_obj_block_size):
    """Distributed QueryRMSE training with PFound as eval metric; canonize the test error log."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='QueryRMSE',
        pool='querywise',
        train='train',
        test='test',
        cd='train.cd.subgroup_id',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,
        other_options=('--eval-metric', 'PFound')
        # Canonize the metric log rather than the default eval file.
    ), output_file_switch='--test-err-log'))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_pairlogit(dev_score_calc_obj_block_size):
    """Distributed PairLogit training with an explicit learn-pairs file; canonize the eval output."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='PairLogit',
        pool='querywise',
        train='train',
        test='test',
        cd='train.cd.query_id',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,
        other_options=('--learn-pairs', data_file('querywise', 'train.pairs'))
    )))]
@pytest.mark.parametrize('pairs_file', ['train.pairs', 'train.pairs.weighted'])
def test_dist_train_pairlogitpairwise(pairs_file):
    """Distributed PairLogitPairwise training with plain and weighted pairs files; canonize the eval output."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='PairLogitPairwise',
        pool='querywise',
        train='train',
        test='test',
        cd='train.cd',
        other_options=('--learn-pairs', data_file('querywise', pairs_file))
    )))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_querysoftmax(dev_score_calc_obj_block_size):
    """Distributed QuerySoftMax training on the querywise pool; canonize the eval output."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='QuerySoftMax',
        pool='querywise',
        train='train',
        test='test',
        cd='train.cd.subgroup_id',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]
@pytest.mark.parametrize('loss_func', ['Logloss', 'RMSE'])
def test_dist_train_auc(loss_func):
    """Distributed training on higgs with AUC as eval metric; canonize the test error log."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function=loss_func,
        pool='higgs',
        train='train_small',
        test='test_small',
        cd='train_baseline.cd',
        other_options=('--eval-metric', 'AUC')
    ), output_file_switch='--test-err-log'))]
@pytest.mark.parametrize('loss_func', ['Logloss', 'RMSE'])
def test_dist_train_auc_weight(loss_func):
    """Distributed training with object weights and AUC as eval metric; canonize the test error log."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function=loss_func,
        pool='higgs',
        train='train_small',
        test='test_small',
        cd='train_weight.cd',
        other_options=('--eval-metric', 'AUC', '--boost-from-average', '0')
    ), output_file_switch='--test-err-log'))]
@pytest.mark.xfail(reason='Boost from average for distributed training')
@pytest.mark.parametrize('schema,train', [('quantized://', 'train_small_x128_greedylogsum.bin'), ('', 'train_small')])
def test_dist_train_snapshot(schema, train):
    """Check that distributed training 5 trees, then resuming from a snapshot up to 10, equals training 10 trees at once."""
    train_cmd = make_deterministic_train_cmd(
        loss_function='RMSE',
        pool='higgs',
        train=train,
        test='test_small',
        schema=schema,
        cd='train.cd')
    # Reference: plain (non-distributed) fit of 10 trees.
    eval_10_trees_path = yatest.common.test_output_path('10_trees.eval')
    execute_catboost_fit('CPU', train_cmd + ('-i', '10', '--eval-file', eval_10_trees_path,))
    # Distributed: 5 trees into a snapshot, then resume to 10 trees total.
    snapshot_path = yatest.common.test_output_path('snapshot')
    execute_dist_train(train_cmd + ('-i', '5', '--snapshot-file', snapshot_path,))
    eval_5_plus_5_trees_path = yatest.common.test_output_path('5_plus_5_trees.eval')
    execute_dist_train(train_cmd + ('-i', '10', '--eval-file', eval_5_plus_5_trees_path, '--snapshot-file', snapshot_path,))
    # Both runs must produce byte-identical eval files.
    assert(filecmp.cmp(eval_10_trees_path, eval_5_plus_5_trees_path))
    return [local_canonical_file(eval_5_plus_5_trees_path)]
def test_dist_train_yetirank():
    """Distributed YetiRank training on a pool where one query is repeated 8 times; canonize the test error log."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='YetiRank',
        pool='querywise',
        train='repeat_same_query_8_times',
        test='repeat_same_query_8_times',
        cd='train.cd'
    ), output_file_switch='--test-err-log'))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
@pytest.mark.parametrize(
    'one_hot_max_size',
    [2, 255],
    ids=['one_hot_max_size=2', 'one_hot_max_size=255']
)
def test_dist_train_with_cat_features(dev_score_calc_obj_block_size, one_hot_max_size):
    """Distributed training with categorical features: expected to fail for one_hot_max_size=2, succeed for 255."""
    cmd = make_deterministic_train_cmd(
        loss_function='Logloss',
        pool='adult',
        train='train_small',
        test='test_small',
        cd='train.cd',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,
        other_options=('--one-hot-max-size', str(one_hot_max_size))
    )
    if one_hot_max_size == 2:
        # With a small one-hot limit the run is expected to fail
        # (not all categorical features fit into one-hot encoding).
        with pytest.raises(yatest.common.ExecutionError):
            run_dist_train(cmd)
    else:
        return [local_canonical_file(run_dist_train(cmd))]
def test_no_target():
    """Fitting must fail when the column description declares no Target column."""
    train_path = yatest.common.test_output_path('train')
    cd_path = yatest.common.test_output_path('train.cd')
    pairs_path = yatest.common.test_output_path('pairs')
    # Single numeric column, no Target in the cd file.
    np.savetxt(cd_path, [('0', 'Num')], delimiter='\t', fmt='%s')
    np.savetxt(train_path, [[0], [1], [2], [3], [4]], delimiter='\t', fmt='%.4f')
    np.savetxt(pairs_path, [[0, 1], [0, 2], [0, 3], [2, 4]], delimiter='\t', fmt='%i')
    fit_args = ('-f', train_path, '--cd', cd_path, '--learn-pairs', pairs_path)
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', fit_args)
@pytest.mark.parametrize('loss_function', ALL_LOSSES)
def test_const_target(loss_function):
    """Fitting must fail for every loss when the target column is constant."""
    train_path = yatest.common.test_output_path('train')
    cd_path = yatest.common.test_output_path('train.cd')
    # All targets are 0; only the group id column varies.
    np.savetxt(
        train_path,
        [[0, 0, 0],
         [0, 0, 1],
         [0, 0, 2],
         [0, 0, 3],
         [0, 0, 4]],
        delimiter='\t',
        fmt='%.4f'
    )
    np.savetxt(cd_path, [('0', 'Target'), ('1', 'GroupId')], delimiter='\t', fmt='%s')
    cmd = (
        '--loss-function', loss_function,
        '-f', train_path,
        '--cd', cd_path,
    )
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', cmd)
def test_negative_weights():
    """Fitting must fail when the pool contains a negative object weight."""
    train_path = yatest.common.test_output_path('train')
    cd_path = yatest.common.test_output_path('train.cd')
    # Use a context manager so the cd file is flushed and closed before
    # catboost reads it (the original left the handle dangling).
    with open(cd_path, 'wt') as cd_f:
        cd_f.write('0\tNum\n1\tWeight\n2\tTarget\n')
    # Second object has weight -1, which is invalid.
    np.savetxt(train_path, [
        [0, 1, 2],
        [1, -1, 1]], delimiter='\t', fmt='%.4f')
    cmd = ('-f', train_path,
           '--cd', cd_path,
           )
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', cmd)
def test_zero_learning_rate():
    """Fitting must fail when --learning-rate is 0.0."""
    train_path = yatest.common.test_output_path('train')
    cd_path = yatest.common.test_output_path('train.cd')
    # Use a context manager so the cd file is flushed and closed before
    # catboost reads it (the original left the handle dangling).
    with open(cd_path, 'wt') as cd_f:
        cd_f.write(
            '0\tNum\n'
            '1\tNum\n'
            '2\tTarget\n')
    np.savetxt(train_path, [
        [0, 1, 2],
        [1, 1, 1]], delimiter='\t', fmt='%.4f')
    cmd = ('-f', train_path,
           '--cd', cd_path,
           '--learning-rate', '0.0',
           )
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', cmd)
def do_test_eval_metrics(metric, metric_period, train, test, cd, loss_function, additional_train_params=(), additional_eval_params=()):
    """Train with `metric` as eval metric, re-score the model with `eval-metrics`, and check both agree.

    The per-iteration values logged by fit into --test-err-log must match (to 8
    decimal places) the values recomputed from the saved model by the
    `eval-metrics` mode; the eval-metrics output is canonized.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    eval_path = yatest.common.test_output_path('output.tsv')
    cmd = (
        '--loss-function', loss_function,
        '--eval-metric', metric,
        '-f', train,
        '-t', test,
        '--column-description', cd,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
        '--metric-period', metric_period
    ) + additional_train_params
    execute_catboost_fit('CPU', cmd)
    # Recompute the same metric from the trained model.
    cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', metric,
        '--input-path', test,
        '--column-description', cd,
        '-m', output_model_path,
        '-o', eval_path,
        '--block-size', '100',
        '--eval-period', metric_period,
        '--save-stats'
    ) + additional_eval_params
    yatest.common.execute(cmd)
    # Column 0 is the iteration index; column 1 holds the metric values.
    first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1], 8)
    second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)
    assert np.all(first_metrics == second_metrics)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('metric_period', ['1', '2'])
@pytest.mark.parametrize('metric', ['Logloss', 'F1', 'Accuracy', 'PFound', 'TotalF1', 'MCC', 'PairAccuracy'])
def test_eval_metrics(metric, metric_period):
    """Check fit/eval-metrics agreement for a range of metrics, choosing a pool and loss suited to each metric."""
    if metric == 'PFound':
        train, test, cd, loss_function = data_file('querywise', 'train'), data_file('querywise', 'test'), data_file('querywise', 'train.cd'), 'QueryRMSE'
    elif metric == 'PairAccuracy':
        # note: pairs are autogenerated
        train, test, cd, loss_function = data_file('querywise', 'train'), data_file('querywise', 'test'), data_file('querywise', 'train.cd'), 'PairLogitPairwise'
    else:
        train, test, cd, loss_function = data_file('adult', 'train_small'), data_file('adult', 'test_small'), data_file('adult', 'train.cd'), 'Logloss'
    return do_test_eval_metrics(metric, metric_period, train, test, cd, loss_function)
def test_eval_metrics_with_target_border():
    """Check fit/eval-metrics agreement for Logloss trained with an explicit target border."""
    pool_dir = 'adult_not_binarized'
    return do_test_eval_metrics(
        metric='Logloss',
        metric_period='1',
        train=data_file(pool_dir, 'train_small'),
        test=data_file(pool_dir, 'test_small'),
        cd=data_file(pool_dir, 'train.cd'),
        loss_function='Logloss',
        additional_train_params=('--target-border', '0.4')
    )
def test_eval_metrics_with_class_weights():
    """Check fit/eval-metrics agreement for Logloss trained with per-class weights."""
    pool_dir = 'adult'
    return do_test_eval_metrics(
        metric='Logloss',
        metric_period='1',
        train=data_file(pool_dir, 'train_small'),
        test=data_file(pool_dir, 'test_small'),
        cd=data_file(pool_dir, 'train.cd'),
        loss_function='Logloss',
        additional_train_params=('--class-weights', '0.25,0.75')
    )
def test_eval_metrics_with_target_border_and_class_weights():
    """Check fit/eval-metrics agreement for Logloss with both a target border and class weights."""
    pool_dir = 'adult_not_binarized'
    extra_fit_args = ('--target-border', '0.4', '--class-weights', '0.25,0.75')
    return do_test_eval_metrics(
        metric='Logloss',
        metric_period='1',
        train=data_file(pool_dir, 'train_small'),
        test=data_file(pool_dir, 'test_small'),
        cd=data_file(pool_dir, 'train.cd'),
        loss_function='Logloss',
        additional_train_params=extra_fit_args
    )
@pytest.mark.parametrize('config', [('Constant', 0.2, 0.1), ('Constant', 2, 0.1), ('Decreasing', 0.2, 0.1)])
def test_eval_metrics_with_boost_from_average_and_model_shrinkage(config):
    """Check fit/eval-metrics agreement (last iteration only) when model shrinkage and boost-from-average are enabled."""
    mode, rate, lr = config
    train = data_file('higgs', 'train_small')
    test = data_file('higgs', 'test_small')
    cd = data_file('higgs', 'train.cd')
    loss_function = 'Logloss'
    output_model_path = yatest.common.test_output_path('model.bin')
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    cmd = (
        '--loss-function', loss_function,
        '--eval-metric', 'Logloss',
        '-f', train,
        '-t', test,
        '--column-description', cd,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
        '--metric-period', '10',
        '--learn-err-log', learn_error_path,
        '--model-shrink-mode', mode,
        '--model-shrink-rate', str(rate),
        '--boost-from-average', 'true'
    )
    execute_catboost_fit('CPU', cmd)

    # Recompute the metric from the saved model on both learn and test sets.
    test_eval_path = yatest.common.test_output_path('test_output.tsv')
    learn_eval_path = yatest.common.test_output_path('learn_output.tsv')
    cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', 'Logloss',
        '--input-path', train,
        '--column-description', cd,
        '-m', output_model_path,
        '-o', learn_eval_path,
        '--block-size', '100',
        '--eval-period', '10',
        '--save-stats',
    )
    yatest.common.execute(cmd)
    cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', 'Logloss',
        '--input-path', test,
        '--column-description', cd,
        '-m', output_model_path,
        '-o', test_eval_path,
        '--block-size', '100',
        '--eval-period', '10',
        '--save-stats',
    )
    yatest.common.execute(cmd)

    test_first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1:], 8)
    test_second_metrics = np.round(np.loadtxt(test_eval_path, skiprows=1)[:, 1:], 8)
    learn_first_metrics = np.round(np.loadtxt(learn_error_path, skiprows=1)[:, 1:], 8)
    learn_second_metrics = np.round(np.loadtxt(learn_eval_path, skiprows=1)[:, 1:], 8)
    # Only the final logged iteration is compared. Each [-1] is a row (numpy
    # array) slice, so wrap the elementwise comparison in np.all — a bare
    # `assert row1 == row2` would raise "truth value is ambiguous" if the log
    # ever contained more than one metric column. This matches the np.all
    # idiom used by the sibling eval-metrics tests.
    assert np.all(test_first_metrics[-1] == test_second_metrics[-1])
    assert np.all(learn_first_metrics[-1] == learn_second_metrics[-1])
@pytest.mark.parametrize('metrics', ['AUC', 'AUC,Precision'])
def test_eval_metrics_with_binarized_target(metrics):
    """Check fit/eval-metrics agreement for custom metrics when the target is binarized with --target-border."""
    train = data_file('adult', 'train_small')
    test = data_file('adult', 'test_small')
    cd = data_file('adult', 'train.cd')
    loss_function = 'Logloss'
    output_model_path = yatest.common.test_output_path('model.bin')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    cmd = (
        '--loss-function', loss_function,
        '-f', train,
        '-t', test,
        '--column-description', cd,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
        '--target-border', '0.25',
        '--custom-metric', metrics,
    )
    execute_catboost_fit('CPU', cmd)
    eval_path = yatest.common.test_output_path('output.tsv')
    cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', metrics,
        '--input-path', test,
        '--column-description', cd,
        '-m', output_model_path,
        '-o', eval_path,
        '--block-size', '100',
        '--save-stats',
    )
    yatest.common.execute(cmd)
    # In the fit log the custom metrics start at column 2 (after iteration and
    # the loss); in the eval-metrics output they start at column 1.
    first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 2:], 8)
    second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1:], 8)
    assert np.all(first_metrics == second_metrics)
@pytest.mark.parametrize('metric_period', ['1', '2'])
@pytest.mark.parametrize('metric', ['MultiClass', 'MultiClassOneVsAll', 'F1', 'Accuracy', 'TotalF1', 'MCC', 'Precision', 'Recall'])
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
@pytest.mark.parametrize('dataset', ['cloudness_small', 'cloudness_lost_class'])
def test_eval_metrics_multiclass(metric, loss_function, dataset, metric_period):
    """Check fit/eval-metrics agreement for multiclass metrics over the cloudness pools."""
    if metric in MULTICLASS_LOSSES and metric != loss_function:
        # MultiClass and MultiClassOneVsAll are incompatible
        return

    train, test, cd = data_file(dataset, 'train_small'), data_file(dataset, 'test_small'), data_file(dataset, 'train.cd')

    output_model_path = yatest.common.test_output_path('model.bin')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    eval_path = yatest.common.test_output_path('output.tsv')
    cmd = (
        '--loss-function', loss_function,
        '--custom-metric', metric,
        '-f', train,
        '-t', test,
        '--column-description', cd,
        '-i', '10',
        '-T', '4',
        '-m', output_model_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
        '--classes-count', '3',
        '--metric-period', metric_period
    )
    execute_catboost_fit('CPU', cmd)
    cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', metric,
        '--input-path', test,
        '--column-description', cd,
        '-m', output_model_path,
        '-o', eval_path,
        '--block-size', '100',
        '--eval-period', metric_period,
        '--save-stats'
    )
    yatest.common.execute(cmd)
    # When the metric equals the loss it is logged in column 1; a custom
    # metric distinct from the loss starts at column 2.
    start_index = 1 if metric == loss_function else 2
    first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, start_index:], 8)
    second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1:], 8)
    assert np.all(first_metrics == second_metrics)
    return [local_canonical_file(eval_path)]
def test_eval_metrics_class_names():
    """Check fit/eval-metrics agreement for multiclass metrics when classes are given by string names."""
    labels = ['a', 'b', 'c', 'd']

    model_path = yatest.common.test_output_path('model.bin')

    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')

    prng = np.random.RandomState(seed=0)

    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')

    test_path = yatest.common.test_output_path('test.txt')
    np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')

    eval_path = yatest.common.test_output_path('eval.txt')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    cmd = (
        '--loss-function', 'MultiClass',
        '--custom-metric', 'TotalF1,AUC:type=OneVsAll,AUC:type=Mu,AUC:misclass_cost_matrix=0/0.239/1/-1/0.5/0/1.5/-1.2/1/0.67/0/1.3/-0.5/1/0.5/0',
        '-f', train_path,
        '-t', test_path,
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
        '--class-names', ','.join(labels),
    )
    # Train exactly once. (A second, redundant execute_catboost_fit call that
    # retrained the identical model before eval-metrics was removed.)
    execute_catboost_fit('CPU', cmd)

    eval_cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', 'TotalF1,AUC:type=OneVsAll,AUC:type=Mu,AUC:misclass_cost_matrix=0/0.239/1/-1/0.5/0/1.5/-1.2/1/0.67/0/1.3/-0.5/1/0.5/0',
        '--input-path', test_path,
        '--column-description', cd_path,
        '-m', model_path,
        '-o', eval_path,
        '--block-size', '100',
        '--save-stats'
    )
    yatest.common.execute(eval_cmd)

    # Column 2 of the fit log holds the first custom metric; column 1 of the
    # eval-metrics output holds the same values.
    first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 2], 8)
    second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)
    assert np.all(first_metrics == second_metrics)
@pytest.mark.parametrize('metric_period', ['1', '2'])
@pytest.mark.parametrize('metric', ['Accuracy', 'AUC:type=Ranking'])
def test_eval_metrics_with_baseline(metric_period, metric):
    """Check fit/eval-metrics agreement when the pool contains a Baseline column."""
    train = data_file('adult_weight', 'train_weight')
    test = data_file('adult_weight', 'test_weight')
    cd = data_file('train_adult_baseline.cd')

    output_model_path = yatest.common.test_output_path('model.bin')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    eval_path = yatest.common.test_output_path('output.tsv')
    cmd = (
        '--loss-function', 'Logloss',
        '--eval-metric', metric,
        '-f', train,
        '-t', test,
        '--column-description', cd,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
        '--metric-period', metric_period
    )
    execute_catboost_fit('CPU', cmd)
    cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', metric,
        '--input-path', test,
        '--column-description', cd,
        '-m', output_model_path,
        '-o', eval_path,
        '--block-size', '100',
        '--eval-period', metric_period,
        '--save-stats'
    )
    yatest.common.execute(cmd)

    first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1], 8)
    second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)
    assert np.all(first_metrics == second_metrics)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('metric_period', ['1', '2'])
@pytest.mark.parametrize('metric', ['Accuracy'])
def test_eval_metrics_multiclass_with_baseline(metric_period, metric):
    """Check fit/eval-metrics agreement for multiclass training with one Baseline column per class."""
    labels = [0, 1, 2, 3]

    cd_path = yatest.common.test_output_path('cd.txt')
    # One Baseline column per class follows the Target column.
    np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline'], [3, 'Baseline'], [4, 'Baseline']], fmt='%s', delimiter='\t')

    prng = np.random.RandomState(seed=0)

    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')

    test_path = yatest.common.test_output_path('test.txt')
    np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')

    output_model_path = yatest.common.test_output_path('model.bin')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    eval_path = yatest.common.test_output_path('output.tsv')

    cmd = (
        '--loss-function', 'MultiClass',
        '--eval-metric', metric,
        '-f', train_path,
        '-t', test_path,
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '-m', output_model_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
        '--classes-count', '4',
        '--metric-period', metric_period
    )
    execute_catboost_fit('CPU', cmd)
    cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', metric,
        '--input-path', test_path,
        '--column-description', cd_path,
        '-m', output_model_path,
        '-o', eval_path,
        '--block-size', '100',
        '--eval-period', metric_period,
        '--save-stats'
    )
    yatest.common.execute(cmd)

    first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1], 8)
    second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)
    assert np.all(first_metrics == second_metrics)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_ctr_leaf_count_limit(boosting_type, dev_score_calc_obj_block_size):
    """Train on adult with --ctr-leaf-count-limit set; canonize the eval output."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '--ctr-leaf-count-limit', '10',
        '-i', '30',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize('loss_function', ['RMSE', 'Logloss', 'CrossEntropy'])
def test_boost_from_average(boosting_type, grow_policy, loss_function):
    """Check that --boost-from-average equals adding the mean target as an explicit Baseline column.

    Trains once with a hand-computed baseline column and once with
    --boost-from-average, then verifies the eval files coincide (and that
    `calc` on the saved model reproduces the fit eval).
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_calc_eval_path = yatest.common.test_output_path('test_calc.eval')
    output_eval_path_with_avg = yatest.common.test_output_path('test_avg.eval')
    output_eval_path_with_baseline = yatest.common.test_output_path('test_baseline.eval')
    baselined_train = yatest.common.test_output_path('baselined_train')
    baselined_test = yatest.common.test_output_path('baselined_test')
    baselined_cd = yatest.common.test_output_path('baselined.cd')

    train_path = data_file('adult', 'train_small')
    test_path = data_file('adult', 'test_small')
    original_cd = data_file('adult', 'train.cd')

    # use float32 because we use float in C++
    sum_target = np.float32(0)
    obj_count = np.float32(0)
    with open(train_path) as train_f:
        for line in train_f:
            obj_count += 1
            sum_target += np.float32(line.split()[1])

    mean_target = sum_target / obj_count
    if loss_function in ['Logloss', 'CrossEntropy']:
        # For logistic losses the baseline lives in logit space.
        mean_target = -np.log(1 / mean_target - 1)
    mean_target_str = str(mean_target)

    def append_baseline_to_pool(source, target):
        # Copy the pool, appending the mean target as an extra last column.
        with open(source) as source_f, open(target, 'w') as target_f:
            for line in source_f:
                target_f.write(line.rstrip('\n') + '\t' + mean_target_str + '\n')

    append_baseline_to_pool(train_path, baselined_train)
    append_baseline_to_pool(test_path, baselined_test)

    with open(baselined_cd, 'w') as cd_output, open(original_cd) as cd_input:
        for line in cd_input:
            cd_output.write(line)
        cd_output.write('18\tBaseline\n')

    base_cmd = (
        '--loss-function', loss_function,
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-i', '30',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
    )
    # Run 1: explicit baseline column, boost-from-average disabled.
    execute_catboost_fit('CPU', base_cmd + (
        '-f', baselined_train,
        '-t', baselined_test,
        '--boost-from-average', '0',
        '--column-description', baselined_cd,
        '--eval-file', output_eval_path_with_baseline,
    ))
    # Run 2: no baseline column, boost-from-average enabled.
    execute_catboost_fit('CPU', base_cmd + (
        '-f', train_path,
        '-t', test_path,
        '--boost-from-average', '1',
        '--column-description', original_cd,
        '--eval-file', output_eval_path_with_avg,
    ))
    yatest.common.execute((
        CATBOOST_PATH, 'calc',
        '--cd', original_cd,
        '--input-path', test_path,
        '-m', output_model_path,
        '-T', '1',
        '--output-path', output_calc_eval_path,
    ))

    assert compare_fit_evals_with_precision(output_eval_path_with_avg, output_eval_path_with_baseline)
    assert compare_evals(output_eval_path_with_avg, output_calc_eval_path)
    return [local_canonical_file(output_eval_path_with_avg)]
@pytest.mark.parametrize('eval_period', ['1', '2'])
def test_eval_non_additive_metric(eval_period):
    """Check that AUC from eval-metrics is identical with and without --calc-on-parts."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
    )
    execute_catboost_fit('CPU', cmd)

    # AUC computed on the whole test set at once.
    cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', 'AUC:hints=skip_train~false',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-m', output_model_path,
        '-o', output_eval_path,
        '--eval-period', eval_period,
        '--block-size', '10'
    )
    yatest.common.execute(cmd)

    # Same metric computed part-by-part must give the same values.
    output_eval_in_parts = yatest.common.test_output_path('eval_in_parts.eval')
    cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', 'AUC:hints=skip_train~false',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-m', output_model_path,
        '-o', output_eval_in_parts,
        '--eval-period', eval_period,
        '--calc-on-parts',
        '--block-size', '10'
    )
    yatest.common.execute(cmd)

    first_metrics = np.loadtxt(output_eval_path, skiprows=1)
    second_metrics = np.loadtxt(output_eval_in_parts, skiprows=1)
    assert np.all(first_metrics == second_metrics)

    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize('max_ctr_complexity', [1, 2])
def test_eval_eq_calc(boosting_type, grow_policy, max_ctr_complexity):
    """On a tiny synthetic pool with categorical features, predictions from fit's --eval-file must equal `calc` output.

    The test set deliberately contains category values ('D', 'U') unseen in
    the learn set.
    """
    one_hot_max_size = 2
    cd_path = yatest.common.test_output_path('cd.txt')
    train_path = yatest.common.test_output_path('train.txt')
    test_path = yatest.common.test_output_path('test.txt')
    model_path = yatest.common.test_output_path('model.bin')
    test_eval_path = yatest.common.test_output_path('test.eval')
    calc_eval_path = yatest.common.test_output_path('calc.eval')

    np.savetxt(cd_path, [['0', 'Target'],
                         ['1', 'Categ'],
                         ['2', 'Categ']
                         ], fmt='%s', delimiter='\t')
    np.savetxt(train_path, [['1', 'A', 'X'],
                            ['1', 'B', 'Y'],
                            ['1', 'C', 'Y'],
                            ['0', 'A', 'Z'],
                            ['0', 'B', 'Z'],
                            ], fmt='%s', delimiter='\t')
    np.savetxt(test_path, [['1', 'A', 'Y'],
                           ['1', 'D', 'U'],
                           ['1', 'D', 'U']
                           ], fmt='%s', delimiter='\t')
    cmd_fit = ('--loss-function', 'Logloss',
               '--boosting-type', boosting_type,
               '--grow-policy', grow_policy,
               '--cd', cd_path,
               '-f', train_path,
               '-t', test_path,
               '-m', model_path,
               '--eval-file', test_eval_path,
               '-i', '5',
               '-T', '1',
               '--max-ctr-complexity', str(max_ctr_complexity),
               '--one-hot-max-size', str(one_hot_max_size),
               )
    cmd_calc = (CATBOOST_PATH, 'calc',
                '--cd', cd_path,
                '--input-path', test_path,
                '-m', model_path,
                '-T', '1',
                '--output-path', calc_eval_path,
                )
    execute_catboost_fit('CPU', cmd_fit)
    yatest.common.execute(cmd_calc)
    assert(compare_evals(test_eval_path, calc_eval_path))
def do_test_object_importances(pool, loss_function, additional_train_params):
    """Train on `pool`, run the `ostr` (object importances) mode, and canonize its output."""
    output_model_path = yatest.common.test_output_path('model.bin')
    object_importances_path = yatest.common.test_output_path('object_importances.tsv')
    cmd = (
        '--loss-function', loss_function,
        '-f', data_file(pool, 'train_small'),
        '-t', data_file(pool, 'test_small'),
        '--column-description', data_file(pool, 'train.cd'),
        '-i', '10',
        '--boosting-type', 'Plain',
        '-T', '4',
        '-m', output_model_path,
        '--use-best-model', 'false'
    ) + additional_train_params
    execute_catboost_fit('CPU', cmd)

    cmd = (
        CATBOOST_PATH,
        'ostr',
        '-f', data_file(pool, 'train_small'),
        '-t', data_file(pool, 'test_small'),
        '--column-description', data_file(pool, 'train.cd'),
        '-m', output_model_path,
        '-o', object_importances_path,
    )
    yatest.common.execute(cmd)

    return [local_canonical_file(object_importances_path)]
@pytest.mark.parametrize('loss_function', ['RMSE', 'Logloss', 'Poisson'])
@pytest.mark.parametrize('leaf_estimation_iteration', ['1', '2'])
def test_object_importances(loss_function, leaf_estimation_iteration):
    """Object importances over several losses and leaf-estimation iteration counts."""
    additional_train_params = (
        '--leaf-estimation-method', 'Gradient',
        '--leaf-estimation-iterations', leaf_estimation_iteration
    )
    return do_test_object_importances(
        pool='adult',
        loss_function=loss_function,
        additional_train_params=additional_train_params
    )
def test_object_importances_with_target_border():
    """Object importances for Logloss trained with an explicit target border."""
    extra_fit_args = ('--target-border', '0.4')
    return do_test_object_importances(
        pool='adult_not_binarized',
        loss_function='Logloss',
        additional_train_params=extra_fit_args
    )
def test_object_importances_with_class_weights():
    """Object importances for Logloss trained with per-class weights."""
    extra_fit_args = ('--class-weights', '0.25,0.75')
    return do_test_object_importances(
        pool='adult',
        loss_function='Logloss',
        additional_train_params=extra_fit_args
    )
def test_object_importances_with_target_border_and_class_weights():
    """Object importances for Logloss with both a target border and class weights."""
    extra_fit_args = ('--target-border', '0.4', '--class-weights', '0.25,0.75')
    return do_test_object_importances(
        pool='adult_not_binarized',
        loss_function='Logloss',
        additional_train_params=extra_fit_args
    )
# Create `num_tests` test files from `test_input_path`.
def split_test_to(num_tests, test_input_path):
    """Split the lines of `test_input_path` round-robin into `num_tests` files; return their paths."""
    # Context managers ensure the input handle is closed and each output file
    # is flushed before the paths are handed to catboost (the original leaked
    # both handles).
    with open(test_input_path) as test_input_f:
        test_input_lines = test_input_f.readlines()
    test_paths = [yatest.common.test_output_path('test{}'.format(i)) for i in range(num_tests)]
    for testno in range(num_tests):
        # Every num_tests-th line, starting at offset `testno`, goes to file `testno`.
        with open(test_paths[testno], 'wt') as test_f:
            test_f.write(''.join(test_input_lines[testno::num_tests]))
    return test_paths
# Create a few shuffles from list of test files, for use with `-t` option.
def create_test_shuffles(test_paths, seed=20181219, prng=None):
    """Return distinct random orderings of `test_paths`, each joined with commas."""
    prng = np.random.RandomState(seed=seed) if prng is None else prng
    wanted = len(test_paths)  # one distinct shuffle per test file
    distinct_orders = set()
    # Keep drawing permutations until we have enough distinct ones.
    while len(distinct_orders) < wanted:
        distinct_orders.add(tuple(prng.permutation(test_paths)))
    return [','.join(order) for order in distinct_orders]
def fit_calc_cksum(fit_stem, calc_stem, test_shuffles):
    """Train and run `calc` once per shuffle in `test_shuffles`; assert every eval output is identical."""
    import hashlib
    last_cksum = None
    for i, shuffle in enumerate(test_shuffles):
        model_path = yatest.common.test_output_path('model{}.bin'.format(i))
        eval_path = yatest.common.test_output_path('eval{}.txt'.format(i))
        execute_catboost_fit('CPU', fit_stem + (
            '-t', shuffle,
            '-m', model_path,
        ))
        yatest.common.execute(calc_stem + (
            '-m', model_path,
            '--output-path', eval_path,
        ))
        # Open in binary mode: hashlib.md5 requires bytes; the original passed
        # a str under Python 3, which raises TypeError.
        with open(eval_path, 'rb') as eval_f:
            cksum = hashlib.md5(eval_f.read()).hexdigest()
        if last_cksum is None:
            last_cksum = cksum
            continue
        assert(last_cksum == cksum)
@pytest.mark.parametrize('num_tests', [3, 4])
@pytest.mark.parametrize('boosting_type', ['Plain', 'Ordered'])
def test_multiple_eval_sets_order_independent(boosting_type, num_tests):
    """Training with multiple eval sets must not depend on the order the test files are listed in."""
    train_path = data_file('adult', 'train_small')
    cd_path = data_file('adult', 'train.cd')
    test_input_path = data_file('adult', 'test_small')
    fit_stem = (
        '--loss-function', 'RMSE',
        '-f', train_path,
        '--cd', cd_path,
        '--boosting-type', boosting_type,
        '-i', '5',
        '-T', '4',
        '--use-best-model', 'false',
    )
    calc_stem = (
        CATBOOST_PATH, 'calc',
        '--cd', cd_path,
        '--input-path', test_input_path,
        '-T', '4',
    )
    # We use a few shuffles of tests and check equivalence of resulting models
    prng = np.random.RandomState(seed=20181219)
    test_shuffles = create_test_shuffles(split_test_to(num_tests, test_input_path), prng=prng)
    fit_calc_cksum(fit_stem, calc_stem, test_shuffles)
@pytest.mark.parametrize('num_tests', [3, 4])
@pytest.mark.parametrize('boosting_type', ['Plain', 'Ordered'])
def test_multiple_eval_sets_querywise_order_independent(boosting_type, num_tests):
    """Same order-independence check as above, but for QueryRMSE on the querywise pool."""
    train_path = data_file('querywise', 'train')
    cd_path = data_file('querywise', 'train.cd.query_id')
    test_input_path = data_file('querywise', 'test')
    fit_stem = (
        '--loss-function', 'QueryRMSE',
        '-f', train_path,
        '--cd', cd_path,
        '--boosting-type', boosting_type,
        '-i', '5',
        '-T', '4',
        '--use-best-model', 'false',
    )
    calc_stem = (CATBOOST_PATH, 'calc',
                 '--cd', cd_path,
                 '--input-path', test_input_path,
                 '-T', '4',
                 )
    # We use a few shuffles of tests and check equivalence of resulting models
    prng = np.random.RandomState(seed=20181219)
    test_shuffles = create_test_shuffles(split_test_to(num_tests, test_input_path), prng=prng)
    fit_calc_cksum(fit_stem, calc_stem, test_shuffles)
def test_multiple_eval_sets_no_empty():
    """Fitting must fail when one of several eval sets is an empty file."""
    train_path = data_file('adult', 'train_small')
    cd_path = data_file('adult', 'train.cd')
    test_input_path = data_file('adult', 'test_small')
    fit_stem = ('--loss-function', 'RMSE',
                '-f', train_path,
                '--cd', cd_path,
                '-i', '5',
                '-T', '4',
                '--use-best-model', 'false',
                )
    test0_path = yatest.common.test_output_path('test0.txt')
    # Create the empty eval file with a context manager so the handle is
    # closed before catboost tries to read it (the original leaked it).
    with open(test0_path, 'wt'):
        pass
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', fit_stem + (
            '-t', ','.join((test_input_path, test0_path))
        ))
@pytest.mark.parametrize('loss_function', ['RMSE', 'QueryRMSE'])
def test_multiple_eval_sets(loss_function):
    """Train with five eval sets passed at once; canonize the combined eval file."""
    num_tests = 5
    train_path = data_file('querywise', 'train')
    cd_path = data_file('querywise', 'train.cd.query_id')
    test_input_path = data_file('querywise', 'test')
    eval_path = yatest.common.test_output_path('test.eval')
    # Reverse the split order to exercise non-trivial eval-set ordering.
    test_paths = list(reversed(split_test_to(num_tests, test_input_path)))
    cmd = ('--loss-function', loss_function,
           '-f', train_path,
           '-t', ','.join(test_paths),
           '--column-description', cd_path,
           '-i', '5',
           '-T', '4',
           '--use-best-model', 'false',
           '--eval-file', eval_path,
           )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(eval_path)]
def test_multiple_eval_sets_err_log():
    """Error logs must cover every eval set; canonize the tsv and json logs."""
    num_tests = 3
    learn_file = data_file('querywise', 'train')
    column_desc = data_file('querywise', 'train.cd.query_id')
    eval_input = data_file('querywise', 'test')
    test_err_log_path = yatest.common.test_output_path('test-err.log')
    json_log_path = yatest.common.test_output_path('json.log')
    test_paths = reversed(split_test_to(num_tests, eval_input))
    fit_args = [
        '--loss-function', 'RMSE',
        '-f', learn_file,
        '-t', ','.join(test_paths),
        '--column-description', column_desc,
        '-i', '5',
        '-T', '4',
        '--test-err-log', test_err_log_path,
        '--json-log', json_log_path,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [
        local_canonical_file(test_err_log_path),
        local_canonical_file(remove_time_from_json(json_log_path)),
    ]
# Cast<float>(CityHash('Quvena')) is QNaN
# Cast<float>(CityHash('Sineco')) is SNaN
@pytest.mark.parametrize('cat_value', ['Normal', 'Quvena', 'Sineco'])
def test_const_cat_feature(cat_value):
    """Training must fail when the only categorical feature is constant.

    'Quvena'/'Sineco' are chosen because their CityHash casts to QNaN/SNaN
    floats (see the comment above this test), exercising NaN-hash handling.
    """

    def make_a_set(nrows, value, seed=20181219, prng=None):
        # Rows of [random label, constant categorical value].
        if prng is None:
            prng = np.random.RandomState(seed=seed)
        label = prng.randint(0, nrows, [nrows, 1])
        feature = np.full([nrows, 1], value, dtype='|S{}'.format(len(value)))
        return np.concatenate([label, feature], axis=1)

    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target'], [1, 'Categ']], fmt='%s', delimiter='\t')

    prng = np.random.RandomState(seed=20181219)

    learn_file = yatest.common.test_output_path('train.txt')
    np.savetxt(learn_file, make_a_set(10, cat_value, prng=prng), fmt='%s', delimiter='\t')

    test_file = yatest.common.test_output_path('test.txt')
    np.savetxt(test_file, make_a_set(10, cat_value, prng=prng), fmt='%s', delimiter='\t')

    eval_file = yatest.common.test_output_path('eval.txt')
    fit_args = [
        '--loss-function', 'RMSE',
        '-f', learn_file,
        '-t', test_file,
        '--column-description', cd_path,
        '-i', '5',
        '-T', '4',
        '--eval-file', eval_file,
    ]
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', fit_args)
def test_model_metadata():
    """Metadata set from freeargs at fit time must be readable from the model,
    and the 'metadata set' CLI sub-command must overwrite an existing key."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '2',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '-w', '0.1',
        # Key/value pairs passed as free arguments; 'CCC' is deliberately set
        # to 'A' here and overwritten to 'CCC' below.
        '--set-metadata-from-freeargs',
        'A', 'A',
        'BBB', 'BBB',
        'CCC', 'A'
    )
    execute_catboost_fit('CPU', cmd)
    calc_cmd = (
        CATBOOST_PATH,
        'metadata', 'set',
        '-m', output_model_path,
        '--key', 'CCC',
        '--value', 'CCC'
    )
    yatest.common.execute(calc_cmd)
    # NOTE(review): the identical 'metadata set' command is executed a second
    # time — presumably to check that re-setting the same key/value is
    # harmless, but it may be an accidental copy-paste; confirm intent.
    calc_cmd = (
        CATBOOST_PATH,
        'metadata', 'set',
        '-m', output_model_path,
        '--key', 'CCC',
        '--value', 'CCC'
    )
    yatest.common.execute(calc_cmd)
    py_catboost = catboost.CatBoost()
    py_catboost.load_model(output_model_path)
    assert 'A' == py_catboost.get_metadata()['A']
    assert 'BBB' == py_catboost.get_metadata()['BBB']
    # 'CCC' was 'A' at fit time; it must now reflect the overwrite.
    assert 'CCC' == py_catboost.get_metadata()['CCC']
def test_fit_multiclass_with_class_names():
    """MultiClass fit with explicit --class-names; canonize the predictions."""
    class_labels = ['a', 'b', 'c', 'd']

    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')

    prng = np.random.RandomState(seed=0)
    learn_file = yatest.common.test_output_path('train.txt')
    np.savetxt(learn_file, generate_concatenated_random_labeled_dataset(100, 10, class_labels, prng=prng), fmt='%s', delimiter='\t')
    test_file = yatest.common.test_output_path('test.txt')
    np.savetxt(test_file, generate_concatenated_random_labeled_dataset(100, 10, class_labels, prng=prng), fmt='%s', delimiter='\t')

    eval_file = yatest.common.test_output_path('eval.txt')
    fit_args = [
        '--loss-function', 'MultiClass',
        '--class-names', ','.join(class_labels),
        '-f', learn_file,
        '-t', test_file,
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '--use-best-model', 'false',
        '--prediction-type', 'RawFormulaVal,Class',
        '--eval-file', eval_file
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
def test_extract_multiclass_labels_from_class_names():
    """Class names passed via --class-names must be stored in the model's
    'class_params' and 'params' metadata; canonize the calc output."""
    labels = ['a', 'b', 'c', 'd']

    model_path = yatest.common.test_output_path('model.bin')
    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')

    prng = np.random.RandomState(seed=0)
    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')
    test_path = yatest.common.test_output_path('test.txt')
    np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')
    eval_path = yatest.common.test_output_path('eval.txt')

    fit_cmd = (
        '--loss-function', 'MultiClass',
        '--class-names', ','.join(labels),
        '-f', train_path,
        '-t', test_path,
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--use-best-model', 'false',
    )
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', test_path,
        '--column-description', cd_path,
        '-T', '4',
        '-m', model_path,
        '--output-path', eval_path,
        '--prediction-type', 'RawFormulaVal,Class',
    )
    execute_catboost_fit('CPU', fit_cmd)
    yatest.common.execute(calc_cmd)

    py_catboost = catboost.CatBoost()
    py_catboost.load_model(model_path)

    # Parse the metadata JSON once instead of re-parsing it for every assert.
    class_params = json.loads(py_catboost.get_metadata()['class_params'])
    assert class_params['class_label_type'] == 'String'
    assert class_params['class_to_label'] == [0, 1, 2, 3]
    assert class_params['class_names'] == ['a', 'b', 'c', 'd']
    assert class_params['classes_count'] == 0

    assert json.loads(py_catboost.get_metadata()['params'])['data_processing_options']['class_names'] == ['a', 'b', 'c', 'd']

    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('loss_function', ['MultiClass', 'MultiClassOneVsAll', 'Logloss', 'RMSE'])
def test_save_class_labels_from_data(loss_function):
    """Class labels inferred from the target column must be saved into the
    model metadata: string labels for multiclass losses, integer labels for
    Logloss with a target border, and no 'class_params' for regression."""
    labels = [10000000, 7, 0, 9999]

    model_path = yatest.common.test_output_path('model.bin')
    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')

    prng = np.random.RandomState(seed=0)
    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')

    cmd = (
        '--loss-function', loss_function,
        '-f', train_path,
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--use-best-model', 'false',
    )
    if loss_function == 'Logloss':
        # Binarize the numeric target so it can be treated as two classes.
        cmd += ('--target-border', '0.5')
    execute_catboost_fit('CPU', cmd)

    py_catboost = catboost.CatBoost()
    py_catboost.load_model(model_path)

    if loss_function in MULTICLASS_LOSSES:
        # Parse the metadata JSON once instead of once per assertion.
        class_params = json.loads(py_catboost.get_metadata()['class_params'])
        assert class_params['class_label_type'] == 'String'
        assert class_params['class_to_label'] == [0, 1, 2, 3]
        assert class_params['class_names'] == ['0.0', '7.0', '9999.0', '10000000.0']
        assert class_params['classes_count'] == 0
    elif loss_function == 'Logloss':
        class_params = json.loads(py_catboost.get_metadata()['class_params'])
        assert class_params['class_label_type'] == 'Integer'
        assert class_params['class_to_label'] == [0, 1]
        assert class_params['class_names'] == []
        assert class_params['classes_count'] == 0
    else:
        # Plain regression stores no class information at all.
        assert 'class_params' not in py_catboost.get_metadata()
@pytest.mark.parametrize('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])
def test_apply_multiclass_labels_from_data(prediction_type):
    """Labels learned from the data must appear in the calc output: in the
    header for Probability/RawFormulaVal, and as predicted values for Class."""
    labels = [10000000, 7, 0, 9999]

    model_path = yatest.common.test_output_path('model.bin')
    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')

    prng = np.random.RandomState(seed=0)
    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')
    test_path = yatest.common.test_output_path('test.txt')
    np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')
    eval_path = yatest.common.test_output_path('eval.txt')

    fit_cmd = (
        '--loss-function', 'MultiClass',
        '-f', train_path,
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--use-best-model', 'false',
    )
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', test_path,
        '--column-description', cd_path,
        '-m', model_path,
        '--output-path', eval_path,
        '--prediction-type', prediction_type,
    )
    execute_catboost_fit('CPU', fit_cmd)
    yatest.common.execute(calc_cmd)

    py_catboost = catboost.CatBoost()
    py_catboost.load_model(model_path)

    # Parse the metadata JSON once instead of re-parsing it for every assert.
    class_params = json.loads(py_catboost.get_metadata()['class_params'])
    assert class_params['class_label_type'] == 'String'
    assert class_params['class_to_label'] == [0, 1, 2, 3]
    assert class_params['class_names'] == ['0.0', '7.0', '9999.0', '10000000.0']
    assert class_params['classes_count'] == 0

    if prediction_type in ['Probability', 'RawFormulaVal']:
        # Only the header line is checked for these prediction types.
        with open(eval_path, "rt") as f:
            header = f.readline()
        assert header[:-1] == 'SampleId\t{}:Class=0.0\t{}:Class=7.0\t{}:Class=9999.0\t{}:Class=10000000.0' \
            .format(prediction_type, prediction_type, prediction_type, prediction_type)
    else:  # Class
        with open(eval_path, "rt") as f:
            for i, line in enumerate(f):
                if not i:
                    assert line[:-1] == 'SampleId\tClass'
                else:
                    # Every predicted class must be one of the input labels.
                    assert float(line[:-1].split()[1]) in labels

    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
@pytest.mark.parametrize('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])
def test_save_and_apply_multiclass_labels_from_classes_count(loss_function, prediction_type):
    """With --classes-count 4 but only labels {1, 2} present in the train set,
    the absent classes must get -inf raw approxes / near-zero probabilities
    and must never be predicted as a class."""
    model_path = yatest.common.test_output_path('model.bin')
    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')

    prng = np.random.RandomState(seed=0)
    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, [1, 2], prng=prng), fmt='%s', delimiter='\t')
    test_path = yatest.common.test_output_path('test.txt')
    np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, [0, 1, 2, 3], prng=prng), fmt='%s', delimiter='\t')
    eval_path = yatest.common.test_output_path('eval.txt')

    fit_cmd = (
        '--loss-function', loss_function,
        '--classes-count', '4',
        '-f', train_path,
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', fit_cmd)

    py_catboost = catboost.CatBoost()
    py_catboost.load_model(model_path)

    # Parse the metadata JSON once instead of re-parsing it for every assert.
    class_params = json.loads(py_catboost.get_metadata()['class_params'])
    assert class_params['class_label_type'] == 'Integer'
    assert class_params['class_to_label'] == [1, 2]
    assert class_params['classes_count'] == 4
    assert class_params['class_names'] == []

    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', test_path,
        '--column-description', cd_path,
        '-m', model_path,
        '--output-path', eval_path,
        '--prediction-type', prediction_type
    )
    yatest.common.execute(calc_cmd)

    # The header format is identical for RawFormulaVal and Probability.
    expected_header = 'SampleId\t{}:Class=0\t{}:Class=1\t{}:Class=2\t{}:Class=3' \
        .format(prediction_type, prediction_type, prediction_type, prediction_type)

    if prediction_type == 'RawFormulaVal':
        with open(eval_path, "rt") as f:
            for i, line in enumerate(f):
                if i == 0:
                    assert line[:-1] == expected_header
                else:
                    fields = line[:-1].split()
                    # fictitious approxes must be negative infinity
                    assert float(fields[1]) == float('-inf') and float(fields[4]) == float('-inf')
    elif prediction_type == 'Probability':
        with open(eval_path, "rt") as f:
            for i, line in enumerate(f):
                if i == 0:
                    assert line[:-1] == expected_header
                else:
                    fields = line[:-1].split()
                    # fictitious probabilities must be virtually zero
                    assert abs(float(fields[1])) < 1e-307 and abs(float(fields[4])) < 1e-307
    else:  # 'Class'
        with open(eval_path, "rt") as f:
            for i, line in enumerate(f):
                if i == 0:
                    assert line[:-1] == 'SampleId\tClass'
                else:
                    # probability of 0,3 classes appearance must be zero
                    assert float(line[:-1].split()[1]) in [1, 2]

    return [local_canonical_file(eval_path)]
def test_set_class_names_implicitly():
    """Non-numeric labels without --class-names must be stored in the model in
    the SAVED_CLASS_LABELS order and reproduced in the calc output."""
    INPUT_CLASS_LABELS = ['a', 'bc', '7.', '8.0', '19.2']
    SAVED_CLASS_LABELS = ['19.2', '7.', '8.0', 'a', 'bc']

    model_path = yatest.common.test_output_path('model.bin')
    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')

    prng = np.random.RandomState(seed=0)
    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, INPUT_CLASS_LABELS, prng=prng), fmt='%s', delimiter='\t')
    test_path = yatest.common.test_output_path('test.txt')
    np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, INPUT_CLASS_LABELS, prng=prng), fmt='%s', delimiter='\t')
    eval_path = yatest.common.test_output_path('eval.txt')

    fit_cmd = (
        '--loss-function', 'MultiClass',
        '-f', train_path,
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--use-best-model', 'false',
    )
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', test_path,
        '--column-description', cd_path,
        '-m', model_path,
        '--output-path', eval_path,
        '--prediction-type', 'RawFormulaVal,Class',
    )
    execute_catboost_fit('CPU', fit_cmd)

    py_catboost = catboost.CatBoost()
    py_catboost.load_model(model_path)

    # Parse the metadata JSON once instead of re-parsing it for every assert.
    class_params = json.loads(py_catboost.get_metadata()['class_params'])
    assert class_params['class_label_type'] == 'String'
    assert class_params['class_to_label'] == [0, 1, 2, 3, 4]
    assert class_params['class_names'] == SAVED_CLASS_LABELS
    assert class_params['classes_count'] == 0

    yatest.common.execute(calc_cmd)

    with open(eval_path, "rt") as f:
        for i, line in enumerate(f):
            if not i:
                assert line[:-1] == 'SampleId\t{}:Class=19.2\t{}:Class=7.\t{}:Class=8.0\t{}:Class=a\t{}:Class=bc\tClass' \
                    .format(*(['RawFormulaVal'] * 5))
            else:
                label = line[:-1].split()[-1]
                assert label in SAVED_CLASS_LABELS

    return [local_canonical_file(eval_path)]
# Path to a canned multiclass model kept in the test data directory; used by
# the backward-compatibility test below (the model predates 'class_params'
# metadata).
CANONICAL_CLOUDNESS_MINI_MULTICLASS_MODEL_PATH = data_file('', 'multiclass_model.bin')
@pytest.mark.parametrize('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])
def test_multiclass_model_backward_compatibility(prediction_type):
    """An old multiclass model without 'class_params' metadata must still be
    usable for predict/eval_metrics and CLI calc; canonize the calc output."""
    legacy_model = catboost.CatBoost()
    legacy_model.load_model(CANONICAL_CLOUDNESS_MINI_MULTICLASS_MODEL_PATH)
    assert 'class_params' not in legacy_model.get_metadata()

    pool = catboost.Pool(data_file('cloudness_small', 'train_small'),
                         column_description=data_file('cloudness_small', 'train.cd'))
    legacy_model.predict(data=pool, prediction_type='Class')
    legacy_model.eval_metrics(data=pool, metrics=['Accuracy'])

    output_path = yatest.common.test_output_path('out.txt')
    apply_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('cloudness_small', 'train_small'),
        '--column-description', data_file('cloudness_small', 'train.cd'),
        '-m', CANONICAL_CLOUDNESS_MINI_MULTICLASS_MODEL_PATH,
        '--prediction-type', prediction_type,
        '--output-path', output_path,
    )
    yatest.common.execute(apply_cmd)
    return [local_canonical_file(output_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('use_best_model', ['true', 'false'])
def test_learning_rate_auto_set(boosting_type, use_best_model):
    """Canonize eval output when no learning rate is passed explicitly."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', use_best_model,
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--od-type', 'Iter',
        '--od-wait', '2',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
def test_paths_with_dsv_scheme():
    """Dataset paths given with the explicit 'dsv://' scheme must be accepted."""

    def dsv(path):
        # Prefix a local path with the explicit dsv:// scheme.
        return 'dsv://' + path

    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'QueryRMSE',
        '-f', dsv(data_file('querywise', 'train')),
        '-t', dsv(data_file('querywise', 'test')),
        '--column-description', dsv(data_file('querywise', 'train.cd')),
        '--boosting-type', 'Ordered',
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
def test_skip_train():
    """A custom metric with hints=skip_train~true; canonize all three logs."""
    learn_log = yatest.common.test_output_path('learn_error.tsv')
    test_log = yatest.common.test_output_path('test_error.tsv')
    json_log = yatest.common.test_output_path('json_log.json')
    fit_args = [
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '-i', '20',
        '-T', '4',
        '--custom-metric', 'AverageGain:top=2;hints=skip_train~true',
        '--learn-err-log', learn_log,
        '--test-err-log', test_log,
        '--use-best-model', 'false',
        '--json-log', json_log,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [
        local_canonical_file(learn_log),
        local_canonical_file(test_log),
        local_canonical_file(remove_time_from_json(json_log)),
    ]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_group_weight(boosting_type, dev_score_calc_obj_block_size):
    """Constant group weights must not change the eval; canonize the eval
    produced with real (non-constant) group weights."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')

    def run_catboost(train_path, test_path, cd_path, eval_path):
        run_args = (
            '--loss-function', 'YetiRank',
            '-f', data_file('querywise', train_path),
            '-t', data_file('querywise', test_path),
            '--column-description', data_file('querywise', cd_path),
            '--boosting-type', boosting_type,
            '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
            '-i', '10',
            '-T', '4',
            '-m', output_model_path,
            '--eval-file', eval_path,
        )
        execute_catboost_fit('CPU', run_args)

    eval_without_weights = yatest.common.test_output_path('test_first.eval')
    eval_const_weights = yatest.common.test_output_path('test_second.eval')
    run_catboost('train', 'test', 'train.cd', eval_without_weights)
    run_catboost('train.const_group_weight', 'test.const_group_weight', 'train.cd.group_weight', eval_const_weights)
    assert filecmp.cmp(eval_without_weights, eval_const_weights)

    run_catboost('train', 'test', 'train.cd.group_weight', output_eval_path)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize('loss_function', ['QueryRMSE', 'RMSE'])
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_group_weight_and_object_weight(boosting_type, grow_policy, loss_function, dev_score_calc_obj_block_size):
    """Group weights and the equivalent per-object weights must give the same eval."""

    def run_catboost(train_path, test_path, cd_path, eval_path):
        run_args = (
            '--loss-function', loss_function,
            '-f', data_file('querywise', train_path),
            '-t', data_file('querywise', test_path),
            '--column-description', data_file('querywise', cd_path),
            '--boosting-type', boosting_type,
            '--grow-policy', grow_policy,
            '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
            '-i', '10',
            '-T', '4',
            '--eval-file', eval_path,
        )
        execute_catboost_fit('CPU', run_args)

    eval_with_group_weight = yatest.common.test_output_path('test_first.eval')
    eval_with_object_weight = yatest.common.test_output_path('test_second.eval')
    run_catboost('train', 'test', 'train.cd.group_weight', eval_with_group_weight)
    run_catboost('train', 'test', 'train.cd.weight', eval_with_object_weight)
    assert filecmp.cmp(eval_with_group_weight, eval_with_object_weight)
def test_snapshot_without_random_seed():
    """Snapshot-resumed training without an explicit seed must be reproducible:
    the 15/30/45-iteration resumed runs must print equal stdout line counts,
    and a from-scratch 45-iteration run seeded with the snapshot model's seed
    must produce an identical eval file."""

    def run_catboost(iters, eval_path, additional_params=None):
        # Run one fit, capture its stdout to a temp file, and return the
        # number of stdout lines it produced.
        cmd = [
            '--loss-function', 'Logloss',
            '--learning-rate', '0.5',
            '-f', data_file('adult', 'train_small'),
            '-t', data_file('adult', 'test_small'),
            '--column-description', data_file('adult', 'train.cd'),
            '-i', str(iters),
            '-T', '4',
            '--use-best-model', 'False',
            '--eval-file', eval_path,
        ]
        if additional_params:
            cmd += additional_params
        # NOTE(review): relative path reused by every invocation — presumably
        # fine under the test runner's per-test working directory; confirm.
        tmpfile = 'test_data_dumps'
        with open(tmpfile, 'w') as f:
            execute_catboost_fit('CPU', cmd, stdout=f)
        with open(tmpfile, 'r') as output:
            line_count = sum(1 for line in output)
        return line_count

    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    progress_path = yatest.common.test_output_path('test.cbp')
    additional_params = ['--snapshot-file', progress_path, '-m', model_path]
    # Each run resumes from the previous snapshot; the stdout line counts of
    # the three runs are expected to be equal.
    first_line_count = run_catboost(15, eval_path, additional_params=additional_params)
    second_line_count = run_catboost(30, eval_path, additional_params=additional_params)
    third_line_count = run_catboost(45, eval_path, additional_params=additional_params)
    assert first_line_count == second_line_count == third_line_count
    canon_eval_path = yatest.common.test_output_path('canon_test.eval')
    cb_model = catboost.CatBoost()
    cb_model.load_model(model_path)
    # Re-run all 45 iterations in one go with the seed stored in the snapshot
    # model; the resulting eval must match the resumed runs' eval exactly.
    random_seed = cb_model.random_seed_
    run_catboost(45, canon_eval_path, additional_params=['-r', str(random_seed)])
    assert filecmp.cmp(canon_eval_path, eval_path)
def test_snapshot_with_interval():
    """Training repeatedly killed by timeouts must resume from its snapshot
    and eventually produce the same eval as a single uninterrupted run."""

    def run_with_timeout(cmd, timeout):
        # True if the fit was killed by the timeout, False if it finished.
        try:
            execute_catboost_fit('CPU', cmd, timeout=timeout)
        except ExecutionTimeoutError:
            return True
        return False

    cmd = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-T', '4',
    ]

    # Measure the per-iteration cost once to size the real run.
    measure_time_iters = 100
    exec_time = timeit.timeit(lambda: execute_catboost_fit('CPU', cmd + ['-i', str(measure_time_iters)]), number=1)

    SNAPSHOT_INTERVAL = 1
    TIMEOUT = 5
    TOTAL_TIME = 25
    # Pick an iteration count taking about TOTAL_TIME seconds, so several
    # TIMEOUT-long attempts are needed to complete the training.
    iters = int(TOTAL_TIME / (exec_time / measure_time_iters))

    canon_eval_path = yatest.common.test_output_path('canon_test.eval')
    canon_params = cmd + ['--eval-file', canon_eval_path, '-i', str(iters)]
    execute_catboost_fit('CPU', canon_params)

    eval_path = yatest.common.test_output_path('test.eval')
    progress_path = yatest.common.test_output_path('test.cbp')
    model_path = yatest.common.test_output_path('model.bin')
    params = cmd + ['--snapshot-file', progress_path,
                    '--snapshot-interval', str(SNAPSHOT_INTERVAL),
                    '-m', model_path,
                    '--eval-file', eval_path,
                    '-i', str(iters)]

    # Keep restarting until a run finishes within the timeout; at least one
    # attempt must have been killed for the test to be meaningful.
    was_timeout = False
    while run_with_timeout(params, TIMEOUT):
        was_timeout = True
    assert was_timeout
    assert filecmp.cmp(canon_eval_path, eval_path)
def test_snapshot_with_different_params():
    """Resuming from a snapshot with changed training params must be rejected."""
    cmd = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-T', '4',
        '-i', '10',
        # NOTE(review): relative snapshot path — presumably resolved inside
        # the test runner's per-test working directory; confirm.
        '--snapshot-file', 'snapshot.cbp'
    ]
    cmd_1 = cmd + ['--eval-metric', 'Logloss']
    cmd_2 = cmd + ['--eval-metric', 'Accuracy']
    execute_catboost_fit('CPU', cmd_1)
    # The second run resumes from the snapshot with a different eval metric
    # and must fail; pytest.raises replaces the try/except/`assert False` idiom.
    with pytest.raises(ExecutionError):
        execute_catboost_fit('CPU', cmd_2)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize('leaf_estimation_method', LEAF_ESTIMATION_METHOD)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_querysoftmax(boosting_type, grow_policy, leaf_estimation_method, dev_score_calc_obj_block_size):
    """Canonize QuerySoftMax eval output over the parameter grid."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'QuerySoftMax',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '--leaf-estimation-method', leaf_estimation_method,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
def test_shap_verbose():
    """ShapValues fstr with --verbose 12 must write exactly 5 log lines."""
    model_file = yatest.common.test_output_path('model.bin')
    shap_values_file = yatest.common.test_output_path('shapval')
    log_file = yatest.common.test_output_path('log')
    execute_catboost_fit('CPU', [
        '--loss-function', 'Logloss',
        '--learning-rate', '0.5',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '250',
        '-T', '4',
        '-m', model_file,
    ])
    fstr_args = [
        CATBOOST_PATH,
        'fstr',
        '-o', shap_values_file,
        '--input-path', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--verbose', '12',
        '--fstr-type', 'ShapValues',
        '-T', '4',
        '-m', model_file,
    ]
    with open(log_file, 'w') as log:
        yatest.common.execute(fstr_args, stdout=log)
    with open(log_file, 'r') as log:
        assert sum(1 for _ in log) == 5
def test_shap_approximate():
    """Canonize ShapValues computed with the Approximate calc type."""
    model_file = yatest.common.test_output_path('model.bin')
    shap_values_file = yatest.common.test_output_path('shapval')
    execute_catboost_fit('CPU', [
        '--loss-function', 'Logloss',
        '--learning-rate', '0.5',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '250',
        '-T', '4',
        '-m', model_file,
    ])
    fstr_args = [
        CATBOOST_PATH,
        'fstr',
        '-o', shap_values_file,
        '--input-path', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--verbose', '0',
        '--fstr-type', 'ShapValues',
        '--shap-calc-type', 'Approximate',
        '-T', '4',
        '-m', model_file,
    ]
    yatest.common.execute(fstr_args)
    return [local_canonical_file(shap_values_file)]
def test_shap_exact():
    """Canonize ShapValues computed with the Exact calc type."""
    model_file = yatest.common.test_output_path('model.bin')
    shap_values_file = yatest.common.test_output_path('shapval')
    fit_args = [
        CATBOOST_PATH,
        'fit',
        '--loss-function', 'Logloss',
        '--learning-rate', '0.5',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '250',
        '-T', '4',
        '-m', model_file,
    ]
    yatest.common.execute(fit_args)
    fstr_args = [
        CATBOOST_PATH,
        'fstr',
        '-o', shap_values_file,
        '--input-path', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--verbose', '0',
        '--fstr-type', 'ShapValues',
        '--shap-calc-type', 'Exact',
        '-T', '4',
        '-m', model_file,
    ]
    yatest.common.execute(fstr_args)
    return [local_canonical_file(shap_values_file)]
@pytest.mark.parametrize('bagging_temperature', ['0', '1'])
@pytest.mark.parametrize('sampling_unit', SAMPLING_UNIT_TYPES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_querywise_bayesian_bootstrap(bagging_temperature, sampling_unit, dev_score_calc_obj_block_size):
    """Canonize RMSE on querywise data trained with Bayesian bootstrap."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'RMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--bootstrap-type', 'Bayesian',
        '--sampling-unit', sampling_unit,
        '--bagging-temperature', bagging_temperature,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('subsample', ['0.5', '1'])
@pytest.mark.parametrize('sampling_unit', SAMPLING_UNIT_TYPES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_querywise_bernoulli_bootstrap(subsample, sampling_unit, dev_score_calc_obj_block_size):
    """Canonize RMSE on querywise data trained with Bernoulli bootstrap."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'RMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--bootstrap-type', 'Bernoulli',
        '--sampling-unit', sampling_unit,
        '--subsample', subsample,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
# Losses that use pairwise scoring.  NOTE(review): 'SCORRING' is a typo for
# 'SCORING', but the identifier may be referenced elsewhere in this file, so
# the name is kept as-is.
LOSS_FUNCTIONS_WITH_PAIRWISE_SCORRING = ['YetiRankPairwise', 'PairLogitPairwise']
@pytest.mark.parametrize('bagging_temperature', ['0', '1'])
@pytest.mark.parametrize('sampling_unit', SAMPLING_UNIT_TYPES)
@pytest.mark.parametrize('loss_function', LOSS_FUNCTIONS_WITH_PAIRWISE_SCORRING)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_pairwise_bayesian_bootstrap(bagging_temperature, sampling_unit, loss_function, dev_score_calc_obj_block_size):
    """Canonize pairwise losses trained with Bayesian bootstrap."""
    # Known failure for this combination, tracked as MLTOOLS-1801.
    if loss_function == 'YetiRankPairwise' and sampling_unit == 'Group' and bagging_temperature == '1':
        return pytest.xfail(reason='MLTOOLS-1801')

    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', loss_function,
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--learn-pairs', data_file('querywise', 'train.pairs'),
        '--test-pairs', data_file('querywise', 'test.pairs'),
        '--bootstrap-type', 'Bayesian',
        '--sampling-unit', sampling_unit,
        '--bagging-temperature', bagging_temperature,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('subsample', ['0.5', '1'])
@pytest.mark.parametrize('sampling_unit', SAMPLING_UNIT_TYPES)
@pytest.mark.parametrize('loss_function', LOSS_FUNCTIONS_WITH_PAIRWISE_SCORRING)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_pairwise_bernoulli_bootstrap(subsample, sampling_unit, loss_function, dev_score_calc_obj_block_size):
    """Canonize pairwise losses trained with Bernoulli bootstrap."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', loss_function,
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--learn-pairs', data_file('querywise', 'train.pairs'),
        '--test-pairs', data_file('querywise', 'test.pairs'),
        '--bootstrap-type', 'Bernoulli',
        '--sampling-unit', sampling_unit,
        '--subsample', subsample,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args, env=dict(MKL_CBWR='SSE4_2'))
    # Allow a small diff when running under sanitizers, exact match otherwise.
    eps = 0 if yatest.common.context.sanitize is None else 0.1
    return [local_canonical_file(eval_file, diff_tool=diff_tool(eps))]
@pytest.mark.parametrize('loss_function', ['Logloss', 'RMSE', 'MultiClass', 'QuerySoftMax', 'QueryRMSE'])
@pytest.mark.parametrize('metric', ['Logloss', 'RMSE', 'MultiClass', 'QuerySoftMax', 'AUC', 'PFound'])
def test_bad_metrics_combination(loss_function, metric):
    """Training must fail exactly when the custom metric is incompatible with the loss."""
    # Custom metrics that are known to be incompatible with each objective.
    BAD_PAIRS = {
        'Logloss': ['RMSE', 'MultiClass'],
        'RMSE': ['Logloss', 'MultiClass'],
        'MultiClass': ['Logloss', 'RMSE', 'QuerySoftMax', 'PFound'],
        'QuerySoftMax': ['RMSE', 'MultiClass', 'QueryRMSE'],
        'QueryRMSE': ['Logloss', 'MultiClass', 'QuerySoftMax'],
        'YetiRank': ['Logloss', 'RMSE', 'MultiClass']
    }
    # Tiny synthetic pool: column 0 is the target, column 1 a group id, column 2 a feature.
    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target'], [1, 'QueryId']], fmt='%s', delimiter='\t')
    pool_matrix = np.array([[0, 1, 0, 1, 0], [0, 0, 1, 1, 2], [1, 2, 3, 4, 5]]).T
    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, pool_matrix, fmt='%s', delimiter='\t')
    test_path = yatest.common.test_output_path('test.txt')
    np.savetxt(test_path, pool_matrix, fmt='%s', delimiter='\t')
    fit_args = [
        '--loss-function', loss_function,
        '--custom-metric', metric,
        '-f', train_path,
        '-t', test_path,
        '--column-description', cd_path,
        '-i', '4',
        '-T', '4',
    ]
    fit_failed = False
    try:
        execute_catboost_fit('CPU', fit_args)
    except Exception:
        fit_failed = True
    if fit_failed:
        assert metric in BAD_PAIRS[loss_function]
    else:
        assert metric not in BAD_PAIRS[loss_function]
@pytest.mark.parametrize('metric', [('good', ',AUC,'), ('bad', ',')])
def test_extra_commas(metric):
    """Leading/trailing commas in --custom-metric are tolerated; a comma-only spec is rejected."""
    verdict, metric_spec = metric
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-w', '0.03',
        '-i', '10',
        '-T', '4',
        '--custom-metric', metric_spec
    ]
    if verdict == 'good':
        execute_catboost_fit('CPU', fit_args)
    elif verdict == 'bad':
        with pytest.raises(yatest.common.ExecutionError):
            execute_catboost_fit('CPU', fit_args)
def execute_fit_for_test_quantized_pool(loss_function, pool_path, test_path, cd_path, eval_path,
                                        border_count=128, other_options=()):
    """Run a 10-iteration CPU fit with fixed GreedyLogSum quantization, writing predictions to eval_path.

    Shared driver for the quantized-pool equivalence tests below: the identical
    border settings for raw-dsv and pre-quantized runs let callers compare the
    resulting eval files byte-for-byte.
    """
    model_path = yatest.common.test_output_path('model.bin')
    base_args = [
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', pool_path,
        '-t', test_path,
        '--cd', cd_path,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-x', str(border_count),
        '--feature-border-type', 'GreedyLogSum',
        '-m', model_path,
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', base_args + list(other_options))
def test_quantized_pool():
    """Training from a pre-quantized pool must match training from the raw dsv pool."""
    test_set = data_file('higgs', 'test_small')
    tsv_eval_path = yatest.common.test_output_path('tsv.eval')
    quantized_eval_path = yatest.common.test_output_path('quantized.eval')
    runs = (
        (data_file('higgs', 'train_small'), tsv_eval_path),
        ('quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'), quantized_eval_path),
    )
    for pool, eval_path in runs:
        execute_fit_for_test_quantized_pool(
            loss_function='Logloss',
            pool_path=pool,
            test_path=test_set,
            cd_path=data_file('higgs', 'train.cd'),
            eval_path=eval_path
        )
    assert filecmp.cmp(tsv_eval_path, quantized_eval_path)
def test_quantized_pool_ignored_features():
    """Ignoring features (-I 5) must give identical evals for raw and pre-quantized pools."""
    test_path = data_file('higgs', 'test_small')
    tsv_eval_path = yatest.common.test_output_path('tsv.eval')
    execute_fit_for_test_quantized_pool(
        loss_function='Logloss',
        pool_path=data_file('higgs', 'train_small'),
        test_path=test_path,
        cd_path=data_file('higgs', 'train.cd'),
        eval_path=tsv_eval_path,
        other_options=('-I', '5',)
    )
    quantized_eval_path = yatest.common.test_output_path('quantized.eval')
    execute_fit_for_test_quantized_pool(
        loss_function='Logloss',
        pool_path='quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),
        test_path=test_path,
        cd_path=data_file('higgs', 'train.cd'),
        eval_path=quantized_eval_path,
        other_options=('-I', '5',)
    )
    assert filecmp.cmp(tsv_eval_path, quantized_eval_path)
def test_quantized_pool_groupid():
    """PairLogitPairwise on a raw vs pre-quantized querywise pool (with query ids) must match."""
    test_path = data_file('querywise', 'test')
    tsv_eval_path = yatest.common.test_output_path('tsv.eval')
    execute_fit_for_test_quantized_pool(
        loss_function='PairLogitPairwise',
        pool_path=data_file('querywise', 'train'),
        test_path=test_path,
        cd_path=data_file('querywise', 'train.cd.query_id'),
        eval_path=tsv_eval_path
    )
    quantized_eval_path = yatest.common.test_output_path('quantized.eval')
    execute_fit_for_test_quantized_pool(
        loss_function='PairLogitPairwise',
        pool_path='quantized://' + data_file('querywise', 'train_x128_greedylogsum_aqtaa.bin'),
        test_path=test_path,
        cd_path=data_file('querywise', 'train.cd.query_id'),
        eval_path=quantized_eval_path
    )
    assert filecmp.cmp(tsv_eval_path, quantized_eval_path)
def test_quantized_pool_ignored_during_quantization():
    """Features ignored at quantization time must equal ignoring them (-I 18-36) on the raw pool."""
    test_path = data_file('querywise', 'test')
    tsv_eval_path = yatest.common.test_output_path('tsv.eval')
    execute_fit_for_test_quantized_pool(
        loss_function='PairLogitPairwise',
        pool_path=data_file('querywise', 'train'),
        test_path=test_path,
        cd_path=data_file('querywise', 'train.cd.query_id'),
        eval_path=tsv_eval_path,
        other_options=('-I', '18-36',)
    )
    quantized_eval_path = yatest.common.test_output_path('quantized.eval')
    execute_fit_for_test_quantized_pool(
        loss_function='PairLogitPairwise',
        pool_path='quantized://' + data_file('querywise', 'train_x128_greedylogsum_aqtaa_ignore_18_36.bin'),
        test_path=test_path,
        cd_path=data_file('querywise', 'train.cd.query_id'),
        eval_path=quantized_eval_path
    )
    assert filecmp.cmp(tsv_eval_path, quantized_eval_path)
def test_quantized_pool_quantized_test():
    """Evaluating on a pre-quantized test set must match evaluating on the raw test set."""
    test_path = data_file('querywise', 'test')
    tsv_eval_path = yatest.common.test_output_path('tsv.eval')
    execute_fit_for_test_quantized_pool(
        loss_function='PairLogitPairwise',
        pool_path=data_file('querywise', 'train'),
        test_path=test_path,
        cd_path=data_file('querywise', 'train.cd.query_id'),
        eval_path=tsv_eval_path
    )
    quantized_eval_path = yatest.common.test_output_path('quantized.eval')
    execute_fit_for_test_quantized_pool(
        loss_function='PairLogitPairwise',
        pool_path='quantized://' + data_file('querywise', 'train_x128_greedylogsum_aqtaa.bin'),
        test_path='quantized://' + data_file('querywise', 'test_borders_from_train_aqtaa.bin'),
        cd_path=data_file('querywise', 'train.cd.query_id'),
        eval_path=quantized_eval_path
    )
    assert filecmp.cmp(tsv_eval_path, quantized_eval_path)
def test_quantized_pool_with_large_grid():
    """Raw vs pre-quantized pools must agree with a large border count (1024)."""
    test_path = data_file('querywise', 'test')
    tsv_eval_path = yatest.common.test_output_path('tsv.eval')
    execute_fit_for_test_quantized_pool(
        loss_function='PairLogitPairwise',
        pool_path=data_file('querywise', 'train'),
        test_path=test_path,
        cd_path=data_file('querywise', 'train.cd.query_id'),
        eval_path=tsv_eval_path,
        border_count=1024
    )
    quantized_eval_path = yatest.common.test_output_path('quantized.eval')
    execute_fit_for_test_quantized_pool(
        loss_function='PairLogitPairwise',
        pool_path='quantized://' + data_file('querywise', 'train.quantized_x1024'),
        test_path='quantized://' + data_file('querywise', 'test.quantized_x1024'),
        cd_path=data_file('querywise', 'train.cd.query_id'),
        eval_path=quantized_eval_path
    )
    assert filecmp.cmp(tsv_eval_path, quantized_eval_path)
def test_learn_without_header_eval_with_header():
    """Fit on a header-less copy of the train set, then 'calc' on the test set with --has-header."""
    train_path = yatest.common.test_output_path('airlines_without_header')
    with open(data_file('airlines_5K', 'train'), 'r') as src:
        with open(train_path, 'w') as dst:
            # Drop the header row, keep the data rows verbatim.
            dst.writelines(src.readlines()[1:])
    model_path = yatest.common.test_output_path('model.bin')
    fit_args = [
        '--loss-function', 'Logloss',
        '-f', train_path,
        '--cd', data_file('airlines_5K', 'cd'),
        '-i', '10',
        '-m', model_path
    ]
    execute_catboost_fit('CPU', fit_args)
    calc_args = [
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('airlines_5K', 'test'),
        '--cd', data_file('airlines_5K', 'cd'),
        '-m', model_path,
        '--has-header'
    ]
    yatest.common.execute(calc_args)
def test_group_weights_file():
    """Group weights from side files must be equivalent to a GroupWeight column in the cd."""
    first_eval_path = yatest.common.test_output_path('first.eval')
    second_eval_path = yatest.common.test_output_path('second.eval')
    def run_catboost(eval_path, cd_file, is_additional_query_weights):
        # Fit QueryRMSE; the group weights come either from side files or from the cd itself.
        cmd = [
            '--use-best-model', 'false',
            '--loss-function', 'QueryRMSE',
            '-f', data_file('querywise', 'train'),
            '-t', data_file('querywise', 'test'),
            '--column-description', data_file('querywise', cd_file),
            '-i', '5',
            '-T', '4',
            '--eval-file', eval_path,
        ]
        if is_additional_query_weights:
            cmd += [
                '--learn-group-weights', data_file('querywise', 'train.group_weights'),
                '--test-group-weights', data_file('querywise', 'test.group_weights'),
            ]
        execute_catboost_fit('CPU', cmd)
    run_catboost(first_eval_path, 'train.cd', True)
    run_catboost(second_eval_path, 'train.cd.group_weight', False)
    assert filecmp.cmp(first_eval_path, second_eval_path)
    return [local_canonical_file(first_eval_path)]
def test_group_weights_file_quantized():
    """Same equivalence as test_group_weights_file, but using pre-quantized pools."""
    first_eval_path = yatest.common.test_output_path('first.eval')
    second_eval_path = yatest.common.test_output_path('second.eval')
    def run_catboost(eval_path, train, test, is_additional_query_weights):
        cmd = [
            '--use-best-model', 'false',
            '--loss-function', 'QueryRMSE',
            '-f', 'quantized://' + data_file('querywise', train),
            '-t', 'quantized://' + data_file('querywise', test),
            '-i', '5',
            '-T', '4',
            '--eval-file', eval_path,
        ]
        if is_additional_query_weights:
            cmd += [
                '--learn-group-weights', data_file('querywise', 'train.group_weights'),
                '--test-group-weights', data_file('querywise', 'test.group_weights'),
            ]
        execute_catboost_fit('CPU', cmd)
    run_catboost(first_eval_path, 'train.quantized', 'test.quantized', True)
    run_catboost(second_eval_path, 'train.quantized.group_weight', 'test.quantized.group_weight', False)
    assert filecmp.cmp(first_eval_path, second_eval_path)
    return [local_canonical_file(first_eval_path)]
def test_mode_roc():
    """Train Logloss, then run the 'roc' mode on the eval file; canonize the ROC output."""
    eval_path = yatest.common.test_output_path('eval.tsv')
    output_roc_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '10',
        '-T', '4',
        '--counter-calc-method', 'SkipTest',
        '--eval-file', eval_path,
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)
    roc_cmd = (
        CATBOOST_PATH,
        'roc',
        '--eval-file', eval_path,
        '--output-path', output_roc_path
    )
    yatest.common.execute(roc_cmd)
    return local_canonical_file(output_roc_path)
@pytest.mark.parametrize('pool', ['adult', 'higgs', 'adult_nan'])
def test_convert_model_to_json(pool):
    """A model saved in Json format must predict the same as the CatboostBinary model."""
    output_model_path = yatest.common.test_output_path('model')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '-f', data_file(pool, 'train_small'),
        '-t', data_file(pool, 'test_small'),
        '--column-description', data_file(pool, 'train.cd'),
        '-i', '20',
        '-T', '4',
        '--eval-file', output_eval_path,
        '-m', output_model_path,
        # adult_nan contains missing values, so they must be explicitly allowed there.
        '--nan-mode', 'Max' if pool == 'adult_nan' else 'Forbidden',
        '--model-format', 'CatboostBinary,Json'
    )
    execute_catboost_fit('CPU', cmd)
    formula_predict_path_bin = yatest.common.test_output_path('predict_test_bin.eval')
    formula_predict_path_json = yatest.common.test_output_path('predict_test_json.eval')
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file(pool, 'test_small'),
        '--column-description', data_file(pool, 'train.cd'),
        '-m', output_model_path + '.json',
        '--model-format', 'Json',
        '--output-path', formula_predict_path_json
    )
    yatest.common.execute(calc_cmd)
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file(pool, 'test_small'),
        '--column-description', data_file(pool, 'train.cd'),
        '-m', output_model_path + '.bin',
        '--output-path', formula_predict_path_bin
    )
    yatest.common.execute(calc_cmd)
    # Both serialized formats must reproduce the training-time eval (up to precision).
    assert (compare_evals_with_precision(output_eval_path, formula_predict_path_bin))
    assert (compare_evals_with_precision(output_eval_path, formula_predict_path_json))
# Loss functions exercised on the quantized adult pool; MAPE is deliberately excluded.
LOSS_FUNCTIONS_NO_MAPE = ['RMSE', 'RMSEWithUncertainty', 'Logloss', 'MAE', 'CrossEntropy', 'Quantile', 'LogLinQuantile', 'Poisson']
@pytest.mark.parametrize('loss_function', LOSS_FUNCTIONS_NO_MAPE)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_quantized_adult_pool(loss_function, boosting_type):
    """Train on the pre-quantized adult pool, then apply the model to the raw tsv test set."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    quantized_train_file = 'quantized://' + data_file('quantized_adult', 'train.qbin')
    quantized_test_file = 'quantized://' + data_file('quantized_adult', 'test.qbin')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', quantized_train_file,
        '-t', quantized_test_file,
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
    )
    execute_catboost_fit('CPU', cmd)
    cd_file = data_file('quantized_adult', 'pool.cd')
    test_file = data_file('quantized_adult', 'test_small.tsv')
    apply_catboost(output_model_path, test_file, cd_file, output_eval_path)
    return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_quantized_with_one_thread(boosting_type):
    """Smoke-test Logloss training from a quantized pool with a single thread (-T 1).

    Only checks that the fit finishes successfully; no canonical output is produced.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    quantized_train_file = 'quantized://' + data_file('querywise', 'train.quantized')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', quantized_train_file,
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '1',
        '-m', output_model_path,
        '--target-border', '0.5',
    )
    # Removed a leftover debug `print(cmd)` that only cluttered the test log.
    execute_catboost_fit('CPU', cmd)
def test_eval_result_on_different_pool_type():
    """Evals from the raw dsv and the pre-quantized variant of the same pool must coincide."""
    output_eval_path = yatest.common.test_output_path('test.eval')
    output_quantized_eval_path = yatest.common.test_output_path('test.eval.quantized')
    def run_catboost(train, test, eval_path):
        cmd = (
            '--use-best-model', 'false',
            '--loss-function', 'Logloss',
            '--border-count', '128',
            '-f', train,
            '-t', test,
            '--cd', data_file('querywise', 'train.cd'),
            '-i', '10',
            '-T', '4',
            '--target-border', '0.5',
            '--eval-file', eval_path,
        )
        execute_catboost_fit('CPU', cmd)
    def get_pool_path(set_name, is_quantized=False):
        # Quantized pools are addressed via the quantized:// scheme.
        path = data_file('querywise', set_name)
        return 'quantized://' + path + '.quantized' if is_quantized else path
    run_catboost(get_pool_path('train'), get_pool_path('test'), output_eval_path)
    run_catboost(get_pool_path('train', True), get_pool_path('test', True), output_quantized_eval_path)
    assert filecmp.cmp(output_eval_path, output_quantized_eval_path)
    return [local_canonical_file(output_eval_path)]
def test_apply_on_different_pool_type():
    """A model trained on quantized pools must 'calc' identically on raw and quantized test sets."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    output_quantized_eval_path = yatest.common.test_output_path('test.eval.quantized')
    def get_pool_path(set_name, is_quantized=False):
        # Quantized pools are addressed via the quantized:// scheme.
        path = data_file('querywise', set_name)
        return 'quantized://' + path + '.quantized' if is_quantized else path
    cd_file = data_file('querywise', 'train.cd')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '--learn-set', get_pool_path('train', True),
        '--test-set', get_pool_path('test', True),
        '--column-description', cd_file,
        '-i', '10',
        '-T', '4',
        '--target-border', '0.5',
        '--model-file', output_model_path,
    )
    execute_catboost_fit('CPU', cmd)
    # Apply to the raw dsv test set (needs the cd to parse columns).
    cmd = (
        CATBOOST_PATH, 'calc',
        '--input-path', get_pool_path('test'),
        '--column-description', cd_file,
        '--model-file', output_model_path,
        '--output-path', output_eval_path,
        '--prediction-type', 'RawFormulaVal'
    )
    yatest.common.execute(cmd)
    # Apply to the quantized test set (no cd needed: schema is embedded in the pool).
    cmd = (
        CATBOOST_PATH, 'calc',
        '--input-path', get_pool_path('test', True),
        '--model-file', output_model_path,
        '--output-path', output_quantized_eval_path,
        '--prediction-type', 'RawFormulaVal'
    )
    yatest.common.execute(cmd)
    assert filecmp.cmp(output_eval_path, output_quantized_eval_path)
def test_apply_output_column_by_idx():
    """--output-columns addressed as '#idx:name' must echo the input columns verbatim."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    learn = data_file('black_friday', 'train')
    test = data_file('black_friday', 'test')
    cd = data_file('black_friday', 'cd')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'RMSE',
        '--learn-set', learn,
        '--test-set', test,
        '--column-description', cd,
        '-i', '10',
        '-T', '4',
        '--model-file', output_model_path,
        '--has-header'
    )
    execute_catboost_fit('CPU', cmd)
    column_names = [
        'Gender',
        'Age',
        'Occupation',
        'City_Category',
        'Stay_In_Current_City_Years',
        'Marital_Status',
        'Product_Category_1',
        'Product_Category_2',
        'Product_Category_3',
    ]
    # Request every feature column by its '#index:name' spec, surrounded by
    # the prediction, the id columns, and the label.
    output_columns = ['#{}:{}'.format(idx, name) for idx, name in enumerate(column_names)]
    output_columns = ['RawFormulaVal'] + ['GroupId', 'SampleId'] + output_columns + ['Label']
    output_columns = ','.join(output_columns)
    cmd = (
        CATBOOST_PATH, 'calc',
        '--input-path', test,
        '--column-description', cd,
        '--model-file', output_model_path,
        '--output-path', output_eval_path,
        '--output-columns', output_columns,
        '--has-header'
    )
    yatest.common.execute(cmd)
    with open(output_eval_path, 'r') as f:
        f.readline()  # skip the eval header line
        eval_lines = f.readlines()
    with open(test, 'r') as f:
        f.readline()  # skip the input header line
        test_lines = f.readlines()
    assert len(eval_lines) == len(test_lines)
    for i in range(len(eval_lines)):
        eval_line = eval_lines[i].split('\t')[1:]  # skip RawFormulaVal
        test_line = test_lines[i].split('\t')
        for eval_column, test_column in zip(eval_line, test_line):
            assert eval_column == test_column
@pytest.mark.parametrize(
    'dataset_name,loss_function,has_pairs,has_group_weights',
    [
        ('adult_small_broken_features', 'Logloss', False, False),
        ('querywise_broken_pairs', 'RMSE', True, False),
        ('querywise_broken_group_weights', 'RMSE', False, True),
    ]
)
def test_broken_dsv_format(dataset_name, loss_function, has_pairs, has_group_weights):
    """Malformed dsv / pairs / group-weights input files must make training fail."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    # iterations and threads are specified just to finish fast if test is xpass
    cmd = (
        '--loss-function', loss_function,
        '--learn-set', data_file('broken_format', dataset_name, 'train'),
        '--test-set', data_file('broken_format', dataset_name, 'test'),
        '--column-description', data_file('broken_format', dataset_name, 'train.cd'),
        '-i', '1',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
    )
    if has_pairs:
        cmd += (
            '--learn-pairs', data_file('broken_format', dataset_name, 'train.pairs'),
            '--test-pairs', data_file('broken_format', dataset_name, 'test.pairs'),
        )
    if has_group_weights:
        cmd += (
            '--learn-group-weights', data_file('broken_format', dataset_name, 'train.group_weights'),
            '--test-group-weights', data_file('broken_format', dataset_name, 'test.group_weights'),
        )
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', cmd)
@pytest.mark.use_fixtures('compressed_data')
@pytest.mark.parametrize(
    'loss_function,eval_metric,boosting_type',
    [
        ('QueryRMSE', 'NDCG', 'Plain'),
        ('QueryRMSE', 'NDCG', 'Ordered'),
        # Boosting type 'Ordered' is not supported for YetiRankPairwise and PairLogitPairwise
        ('YetiRankPairwise', 'NDCG', 'Plain'),
        ('PairLogit:max_pairs=30', 'PairLogit:max_pairs=30', 'Plain'),
        ('PairLogitPairwise:max_pairs=30', 'NDCG', 'Plain'),
        ('PairLogitPairwise:max_pairs=30', 'PairLogit:max_pairs=30', 'Plain'),
    ],
    ids=[
        'loss_function=QueryRMSE,eval_metric=NDCG,boosting_type=Plain',
        'loss_function=QueryRMSE,eval_metric=NDCG,boosting_type=Ordered',
        'loss_function=YetiRankPairwise,eval_metric=NDCG,boosting_type=Plain',
        'loss_function=PairLogit:max_pairs=30,eval_metric=PairLogit:max_pairs=30,boosting_type=Plain',
        'loss_function=PairLogitPairwise:max_pairs=30,eval_metric=NDCG,boosting_type=Plain',
        'loss_function=PairLogitPairwise:max_pairs=30,eval_metric=PairLogit:max_pairs=30,boosting_type=Plain'
    ]
)
def test_groupwise_with_cat_features(compressed_data, loss_function, eval_metric, boosting_type):
    """Groupwise losses with categorical features on mslr_web1k; canonize the test error log."""
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    cmd = (
        '--loss-function', loss_function,
        '-f', os.path.join(compressed_data.name, 'mslr_web1k', 'train'),
        '-t', os.path.join(compressed_data.name, 'mslr_web1k', 'test'),
        '--column-description', os.path.join(compressed_data.name, 'mslr_web1k', 'cd.with_cat_features'),
        '--boosting-type', boosting_type,
        '-i', '100',
        '-T', '8',
        '--eval-metric', eval_metric,
        '--metric-period', '100',
        '--use-best-model', 'false',
        '--test-err-log', test_error_path,
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(test_error_path, diff_tool=diff_tool(1e-5))]
def test_gradient_walker():
    """Ordered boosting with multi-step leaf estimation and AnyImprovement backtracking; canonize eval."""
    output_eval_path = yatest.common.test_output_path('test.eval')
    fit_args = [
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '20',
        '-T', '4',
        '--eval-file', output_eval_path,
        '--use-best-model', 'false',
        '--boosting-type', 'Ordered',
        '--max-ctr-complexity', '4',
        '--leaf-estimation-iterations', '10',
        '--leaf-estimation-backtracking', 'AnyImprovement',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(output_eval_path)]
# training with pairwise scoring with categorical features on CPU does not yet support one-hot features
# so they are disabled by default, explicit non-default specification should be an error
@pytest.mark.parametrize(
    'loss_function', ['YetiRankPairwise', 'PairLogitPairwise'],
    ids=['loss_function=YetiRankPairwise', 'loss_function=PairLogitPairwise']
)
def test_groupwise_with_bad_one_hot_max_size(loss_function):
    """Explicit --one_hot_max_size must be rejected for pairwise-scoring losses with cat features."""
    cmd = (
        '--loss-function', loss_function,
        '--has-header',
        '-f', data_file('black_friday', 'train'),
        '-t', data_file('black_friday', 'test'),
        '--column-description', data_file('black_friday', 'cd'),
        '--boosting-type', 'Plain',
        '-i', '10',
        '-T', '4',
        '--eval-metric', 'NDCG',
        '--one_hot_max_size', '10'
    )
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', cmd)
def test_load_quantized_pool_with_double_baseline():
    """Smoke-test loading a 10-row quantized pool whose columns are Target, Num, Baseline."""
    fit_args = [
        '-f', 'quantized://' + data_file('quantized_with_baseline', 'dataset.qbin'),
        '-i', '10',
    ]
    execute_catboost_fit('CPU', fit_args)
def test_write_predictions_to_streams():
    """Predictions written to stream://stdout and stream://stderr must match --eval-file output."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    calc_output_eval_path_redirected = yatest.common.test_output_path('calc_test.eval')
    cmd = (
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--eval-file', output_eval_path,
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '10',
        '-m', output_model_path
    )
    execute_catboost_fit('CPU', cmd)
    # Route stdout of 'calc' into a file and compare against the reference eval.
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-m', output_model_path,
        '--output-path', 'stream://stdout',
    )
    with open(calc_output_eval_path_redirected, 'w') as catboost_stdout:
        yatest.common.execute(calc_cmd, stdout=catboost_stdout)
    assert compare_evals(output_eval_path, calc_output_eval_path_redirected)
    # Same check for the stderr stream.
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-m', output_model_path,
        '--output-path', 'stream://stderr'
    )
    with open(calc_output_eval_path_redirected, 'w') as catboost_stderr:
        yatest.common.execute(calc_cmd, stderr=catboost_stderr)
    assert compare_evals(output_eval_path, calc_output_eval_path_redirected)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_mvs_bootstrap(boosting_type):
    """MVS bootstrap: evals for subsample rates 0.1 and 0.9 must differ from the 0.5 reference."""
    def run_catboost(eval_path, mvs_sample_rate):
        cmd = [
            '--use-best-model', 'false',
            '--allow-writing-files', 'false',
            '--loss-function', 'Logloss',
            '--max-ctr-complexity', '5',
            '-f', data_file('airlines_5K', 'train'),
            '-t', data_file('airlines_5K', 'test'),
            '--column-description', data_file('airlines_5K', 'cd'),
            '--has-header',
            '--boosting-type', boosting_type,
            '--bootstrap-type', 'MVS',
            '--subsample', mvs_sample_rate,
            '-i', '50',
            '-w', '0.03',
            '-T', '6',
            '-r', '0',
            '--leaf-estimation-iterations', '10',
            '--eval-file', eval_path,
        ]
        execute_catboost_fit('CPU', cmd)
    ref_eval_path = yatest.common.test_output_path('test.eval')
    run_catboost(ref_eval_path, '0.5')
    for sample_rate in ('0.1', '0.9'):
        eval_path = yatest.common.test_output_path('test_{}.eval'.format(sample_rate))
        run_catboost(eval_path, sample_rate)
        # The sampling rate must actually influence the trained model.
        assert (filecmp.cmp(ref_eval_path, eval_path) is False)
    return [local_canonical_file(ref_eval_path)]
def test_simple_ctr():
    """Ordered RMSE training with an explicit --simple-ctr description set; canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    ctr_descriptions = [
        'Borders:TargetBorderCount=15',
        'Buckets:TargetBorderCount=15',
        'Borders:TargetBorderType=MinEntropy',
        'Counter:CtrBorderCount=20',
    ]
    fit_args = [
        '--loss-function', 'RMSE',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', 'Ordered',
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--simple-ctr', ','.join(ctr_descriptions),
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(output_eval_path)]
def test_output_options():
    """--training-options-file must dump the resolved options into the train dir; canonize the json."""
    options_filename = 'training_options.json'
    train_dir = 'catboost_info'
    fit_args = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '10',
        '-T', '4',
        '--train-dir', train_dir,
        '--training-options-file', options_filename,
    ]
    execute_catboost_fit('CPU', fit_args)
    return local_canonical_file(os.path.join(train_dir, options_filename))
def test_target_border():
    """Logloss on a non-binary target binarized via --target-border 0.3; canonize the eval."""
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--loss-function', 'Logloss',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '-i', '20',
        '-T', '4',
        '--eval-file', output_eval_path,
        '--use-best-model', 'false',
        '--target-border', '0.3'
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
def test_monotonic_constraint():
    """Model predictions must be monotone in every constrained feature.

    Trains a regressor with per-feature monotone constraints, then for each
    constrained feature sweeps its value across the quantization borders on
    every test object and checks the sign of the prediction deltas.
    """
    train_pool = catboost.Pool(
        data_file('higgs', 'train_small'),
        column_description=data_file('higgs', 'train.cd')
    )
    test_pool = catboost.Pool(
        data_file('higgs', 'test_small'),
        column_description=data_file('higgs', 'train.cd')
    )
    # 1 = non-decreasing, -1 = non-increasing, 0 = unconstrained.
    monotone_constraints = [0, 0, 1, -1, 0, 0, 1, 0, -1, 1, 1, -1, 0, 1, 0, 0, -1, 1, 1, -1, 0, 0, 0, 0, 0, -1, 0, -1]
    model = catboost.CatBoostRegressor(
        n_estimators=100,
        learning_rate=0.2,
        monotone_constraints=monotone_constraints,
        verbose=False
    ).fit(train_pool, eval_set=test_pool)
    dummy_data = np.zeros((1, test_pool.num_col()))
    dummy_target = np.zeros(len(dummy_data))
    # Only needed to obtain the per-feature border values of the trained model.
    feature_stats = model.calc_feature_statistics(dummy_data, dummy_target, plot=False)
    for feature_index, feature_name in enumerate(model.feature_names_):
        monotonicity = monotone_constraints[feature_index]
        if monotonicity == 0:
            continue
        feature_borders = feature_stats[feature_name]['borders']
        if len(feature_borders) == 0:
            continue
        # Probe every quantization bucket: below the first border, between
        # consecutive borders, and above the last one.
        mid_values = (feature_borders[:-1] + feature_borders[1:]) / 2
        min_value = feature_borders[0] - 1
        max_value = feature_borders[-1] + 1
        feature_values = np.array([min_value] + list(mid_values) + [max_value])
        for obj in test_pool.get_features():
            obj_variations = np.zeros((len(feature_values), test_pool.num_col()))
            obj_variations[:] = obj.reshape((1, -1))
            obj_variations[:, feature_index] = feature_values
            model_predicts = model.predict(obj_variations)
            prediction_deltas = model_predicts[1:] - model_predicts[:-1]
            assert np.all(prediction_deltas * monotonicity >= 0)
def test_different_formats_of_monotone_constraints():
    """All accepted monotone-constraint spellings must produce identical models.

    Compares an unconstrained run against constrained runs where the same
    constraints are given as an array literal, per-index string, per-name
    string, and (via --params-file) as list/dict json values.
    """
    eval_path = yatest.common.test_output_path('eval.tsv')
    eval_path_with_monotone1 = yatest.common.test_output_path('eval_monotone1.tsv')
    eval_path_with_monotone2 = yatest.common.test_output_path('eval_monotone2.tsv')
    cmd = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--cd', data_file('adult', 'train_with_id.cd'),
        '-i', '20'
    ]
    execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path])
    execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path_with_monotone1, '--monotone-constraints', '(0,0,0,1,0,-1)'])
    # The constraints must actually change the predictions.
    assert not filecmp.cmp(eval_path_with_monotone1, eval_path)

    for constraints in ['3:1,5:-1', 'F0:1,F1:-1']:
        execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path_with_monotone2, '--monotone-constraints', constraints])
        assert filecmp.cmp(eval_path_with_monotone1, eval_path_with_monotone2)

    params_file = yatest.common.test_output_path("params.json")
    for constraints in ['3:1,5:-1', 'F0:1,F1:-1', [0, 0, 0, 1, 0, -1], {3: 1, 5: -1}, {'F0': 1, 'F1': -1}]:
        # Fix: close the params file before catboost reads it (the original
        # json.dump(..., open(...)) leaked the handle and relied on GC to flush).
        with open(params_file, 'w') as params_f:
            json.dump({'monotone_constraints': constraints}, params_f)
        execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path_with_monotone2, '--params-file', params_file])
        assert filecmp.cmp(eval_path_with_monotone1, eval_path_with_monotone2)
class TestModelWithoutParams(object):
    """Tools must behave sensibly on models whose metadata or training params were stripped."""

    @pytest.fixture(
        params=[
            ('cut-info', 'RMSE'),
            ('cut-params', 'RMSE'),
            ('cut-info', 'QueryRMSE'),
            ('cut-params', 'QueryRMSE'),
        ],
        ids=lambda param: '-'.join(param),
    )
    def model_etc(self, request):
        """Train a small Json model, strip model_info (or just its params), and return the paths."""
        cut, loss = request.param
        model_json = yatest.common.test_output_path('model.json')
        learn_set = data_file('querywise', 'train')
        test_set = data_file('querywise', 'test')
        cd = data_file('querywise', 'train.cd')
        cmd = (
            '--loss-function', loss,
            '--learn-set', learn_set,
            '--test-set', test_set,
            '--column-description', cd,
            '--iterations', '10',
            '--model-file', model_json,
            '--model-format', 'Json',
            '--use-best-model', 'false'
        )
        execute_catboost_fit('CPU', cmd)
        # Fix: use context managers so the json file handles are closed (and the
        # rewritten model flushed) deterministically; the original leaked both
        # handles via json.load(open(...)) / json.dump(..., open(...)).
        with open(model_json) as model_file:
            model = json.load(model_file)
        if cut == 'cut-info':
            model.pop('model_info')
        if cut == 'cut-params':
            model['model_info'].pop('params')
        with open(model_json, 'wt') as model_file:
            json.dump(model, model_file)
        return model_json, learn_set, test_set, cd

    def test_ostr(self, model_etc):
        """'ostr' must fail on a model without training metadata/params."""
        model_json, train_set, test_set, cd = model_etc
        ostr_result = yatest.common.test_output_path('result.txt')
        ostr_cmd = (
            CATBOOST_PATH, 'ostr',
            '--learn-set', train_set,
            '--test-set', test_set,
            '--column-description', cd,
            '--model-file', model_json,
            '--model-format', 'Json',
            '--output-path', ostr_result,
        )
        with pytest.raises(yatest.common.ExecutionError):
            yatest.common.execute(ostr_cmd)

    @pytest.mark.parametrize('should_fail,fstr_type', [
        (False, 'FeatureImportance'),
        (False, 'PredictionValuesChange'),
        (True, 'LossFunctionChange'),
        (False, 'ShapValues'),
    ])
    def test_fstr(self, model_etc, fstr_type, should_fail):
        """Only LossFunctionChange fstr requires the training params and must fail without them."""
        model_json, train_set, _, cd = model_etc
        fstr_result = yatest.common.test_output_path('result.txt')
        fstr_cmd = (
            CATBOOST_PATH, 'fstr',
            '--input-path', train_set,
            '--column-description', cd,
            '--model-file', model_json,
            '--model-format', 'Json',
            '--output-path', fstr_result,
            '--fstr-type', fstr_type,
        )
        if should_fail:
            with pytest.raises(yatest.common.ExecutionError):
                yatest.common.execute(fstr_cmd)
        else:
            yatest.common.execute(fstr_cmd)
def test_equal_feature_names():
    """Duplicate feature names in the column description must make training fail."""
    fit_args = [
        '--loss-function', 'RMSE',
        '-f', data_file('querywise', 'train'),
        '--column-description', data_file('querywise', 'train.cd.equal_names'),
    ]
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', fit_args)
def enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count, only_baseline=False):
    """Return the ordered list of per-fold output directory names written by eval-feature.

    In 'OneVsOthers' mode the baseline differs per feature set, so baseline
    directory names include the set index; in all other modes the baseline is
    shared and emitted only once per fold. Testing directories always include
    the set index and are skipped entirely when only_baseline is True.
    """
    baseline_template = (
        'Baseline_set_{set_idx}_fold_{fold_idx}'
        if eval_mode == 'OneVsOthers'
        else 'Baseline_fold_{fold_idx}'
    )
    result = []
    for set_index in range(set_count):
        for fold_index in range(offset, offset + fold_count):
            baseline_dir = baseline_template.format(set_idx=set_index, fold_idx=fold_index)
            # Shared baselines repeat across sets; emit each name once.
            if baseline_dir not in result:
                result.append(baseline_dir)
            if not only_baseline:
                result.append('Testing_set_{}_fold_{}'.format(set_index, fold_index))
    return result
@pytest.mark.parametrize('eval_mode', ['OneVsNone', 'OneVsAll', 'OneVsOthers', 'OthersVsAll'])
@pytest.mark.parametrize('features_to_eval', ['0-6', '0-6;7-13'], ids=['one_set', 'two_sets'])
@pytest.mark.parametrize('offset', [0, 2])
def test_eval_feature(eval_mode, features_to_eval, offset):
    """Run eval-feature on higgs and canonize the summary plus per-fold error logs and fstr files."""
    output_eval_path = yatest.common.test_output_path('feature.eval')
    test_err_log = 'test_error.log'
    fstr_file = 'fstrs'
    train_dir = yatest.common.test_output_path('')
    fold_count = 2
    cmd = (
        CATBOOST_PATH,
        'eval-feature',
        '--loss-function', 'RMSE',
        '-f', data_file('higgs', 'train_small'),
        '--cd', data_file('higgs', 'train.cd'),
        '--features-to-evaluate', features_to_eval,
        '--feature-eval-mode', eval_mode,
        '-i', '30',
        '-T', '4',
        '-w', '0.7',
        '--feature-eval-output-file', output_eval_path,
        '--offset', str(offset),
        '--fold-count', str(fold_count),
        '--fold-size-unit', 'Object',
        '--fold-size', '20',
        '--test-err-log', test_err_log,
        '--train-dir', train_dir,
        '--fstr-file', fstr_file,
    )
    yatest.common.execute(cmd)
    pj = os.path.join
    # Feature sets are separated by ';' on the command line.
    set_count = len(features_to_eval.split(';'))
    artifacts = [local_canonical_file(output_eval_path, diff_tool=diff_tool())]
    for output_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count):
        artifacts += [
            local_canonical_file(pj(train_dir, output_dir, test_err_log), diff_tool=diff_tool()),
            local_canonical_file(pj(train_dir, output_dir, fstr_file), diff_tool=diff_tool()),
        ]
    return artifacts
@pytest.mark.parametrize('offset', [0, 2])
def test_eval_feature_empty_feature_set(offset):
    """eval-feature without --features-to-evaluate: only baseline fold outputs are produced and canonized."""
    output_eval_path = yatest.common.test_output_path('feature.eval')
    test_err_log = 'test_error.log'
    fstr_file = 'fstrs'
    train_dir = yatest.common.test_output_path('')
    fold_count = 2
    eval_mode = 'OneVsNone'
    cmd = (
        CATBOOST_PATH,
        'eval-feature',
        '--loss-function', 'RMSE',
        '-f', data_file('higgs', 'train_small'),
        '--cd', data_file('higgs', 'train.cd'),
        '--feature-eval-mode', eval_mode,
        '-i', '30',
        '-T', '4',
        '-w', '0.7',
        '--feature-eval-output-file', output_eval_path,
        '--offset', str(offset),
        '--fold-count', str(fold_count),
        '--fold-size-unit', 'Object',
        '--fold-size', '20',
        '--test-err-log', test_err_log,
        '--train-dir', train_dir,
        '--fstr-file', fstr_file,
    )
    yatest.common.execute(cmd)
    pj = os.path.join
    set_count = 1
    artifacts = [local_canonical_file(output_eval_path, diff_tool=diff_tool())]
    for output_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count, only_baseline=True):
        artifacts += [
            local_canonical_file(pj(train_dir, output_dir, test_err_log), diff_tool=diff_tool()),
            local_canonical_file(pj(train_dir, output_dir, fstr_file), diff_tool=diff_tool()),
        ]
    return artifacts
@pytest.mark.parametrize('eval_mode', ['OneVsNone', 'OneVsAll', 'OneVsOthers', 'OthersVsAll'])
@pytest.mark.parametrize('fold_size_unit', ['Object', 'Group'])
def test_eval_feature_timesplit(eval_mode, fold_size_unit):
    """eval-feature with a time split (--learn-timestamps + --timesplit-quantile) on querywise; canonize outputs."""
    output_eval_path = yatest.common.test_output_path('feature.eval')
    test_err_log = 'test_error.log'
    fstr_file = 'fstrs'
    train_dir = yatest.common.test_output_path('')
    fold_count = 2
    features_to_eval = '2-5;10-15'
    offset = 2
    fold_size = 500
    cmd = (
        CATBOOST_PATH,
        'eval-feature',
        '--loss-function', 'RMSE',
        '-f', data_file('querywise', 'train'),
        '--cd', data_file('querywise', 'train.cd'),
        '--features-to-evaluate', features_to_eval,
        '--feature-eval-mode', eval_mode,
        '-i', '30',
        '-T', '4',
        '-w', '0.7',
        '--feature-eval-output-file', output_eval_path,
        '--offset', str(offset),
        '--fold-count', str(fold_count),
        '--fold-size-unit', fold_size_unit,
        '--fold-size', str(fold_size),
        '--test-err-log', test_err_log,
        '--train-dir', train_dir,
        '--fstr-file', fstr_file,
        '--learn-timestamps', data_file('querywise', 'train.timestamps'),
        '--timesplit-quantile', '0.75'
    )
    yatest.common.execute(cmd)
    pj = os.path.join
    # Feature sets are separated by ';' on the command line.
    set_count = len(features_to_eval.split(';'))
    artifacts = [local_canonical_file(output_eval_path, diff_tool=diff_tool())]
    for output_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count):
        artifacts += [
            local_canonical_file(pj(train_dir, output_dir, test_err_log), diff_tool=diff_tool()),
            local_canonical_file(pj(train_dir, output_dir, fstr_file), diff_tool=diff_tool()),
        ]
    return artifacts
@pytest.mark.parametrize('eval_mode', ['OneVsNone', 'OneVsAll', 'OneVsOthers', 'OthersVsAll'])
@pytest.mark.parametrize('features_to_eval', ['2-5', '2-5;10-15'], ids=['one_set', 'two_sets'])
@pytest.mark.parametrize('offset', [0, 2])
@pytest.mark.parametrize('fstr_mode', ['fstr', 'model'])
def test_eval_feature_snapshot(eval_mode, features_to_eval, offset, fstr_mode):
    """An eval-feature run that is repeatedly interrupted and resumed from a snapshot
    must reproduce the outputs of an uninterrupted reference run byte-for-byte
    (modulo volatile model metadata when comparing models)."""
    test_err_log = 'test_error.log'
    fstr_file = 'fstrs'
    model_file = 'model.bin'
    fold_count = 2
    snapshot_interval = 1
    def make_cmd(summary, train_dir):
        # fstr_mode selects whether per-fold fstr files or full models are written.
        cmd = (
            CATBOOST_PATH,
            'eval-feature',
            '--loss-function', 'RMSE',
            '-f', data_file('querywise', 'train'),
            '--cd', data_file('querywise', 'train.cd'),
            '-i', '200',
            '-T', '4',
            '-w', '0.1',
            '--boost-from-average', 'False',
            '--permutations', '1',
            '--snapshot-interval', str(snapshot_interval),
            '--features-to-evaluate', features_to_eval,
            '--feature-eval-mode', eval_mode,
            '--feature-eval-output-file', summary,
            '--offset', str(offset),
            '--fold-count', str(fold_count),
            '--fold-size-unit', 'Group',
            '--fold-size', '40',
            '--test-err-log', test_err_log,
            '--train-dir', train_dir,
        )
        if fstr_mode == 'fstr':
            cmd += ('--fstr-file', fstr_file,)
        else:
            cmd += (
                '--model-file', model_file,
                '--use-best-model', 'False',
            )
        return cmd
    reference_summary = yatest.common.test_output_path('reference_feature.eval')
    reference_dir = yatest.common.test_output_path('reference')
    yatest.common.execute(make_cmd(summary=reference_summary, train_dir=reference_dir))
    snapshot_summary = yatest.common.test_output_path('snapshot_feature.eval')
    snapshot_dir = yatest.common.test_output_path('snapshot')
    snapshot = yatest.common.test_output_path('eval_feature.snapshot')
    eval_with_snapshot_cmd = make_cmd(summary=snapshot_summary, train_dir=snapshot_dir) + ('--snapshot-file', snapshot,)
    def stop_after_timeout(cmd, timeout):
        # Timing out is the intended interruption mechanism; swallow only that.
        try:
            yatest.common.execute(cmd, timeout=timeout)
        except ExecutionTimeoutError:
            pass
    resume_from_snapshot_count = 15
    for idx in range(resume_from_snapshot_count):
        # Alternate short and just-over-snapshot-interval timeouts to interrupt
        # at different points of progress.
        timeout = 0.5 if idx % 2 == 0 else snapshot_interval + 0.1
        stop_after_timeout(cmd=eval_with_snapshot_cmd, timeout=timeout)
        # Wipe the train dir so recovery relies on the snapshot file alone.
        yatest.common.execute(['rm', '-rf', snapshot_dir])
    yatest.common.execute(eval_with_snapshot_cmd)
    assert filecmp.cmp(reference_summary, snapshot_summary)
    pj = os.path.join
    set_count = len(features_to_eval.split(';'))
    for output_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count):
        assert filecmp.cmp(pj(reference_dir, output_dir, test_err_log), pj(snapshot_dir, output_dir, test_err_log))
        if fstr_mode == 'fstr':
            assert filecmp.cmp(pj(reference_dir, output_dir, fstr_file), pj(snapshot_dir, output_dir, fstr_file))
        else:
            def load_json_model(model_path):
                # Convert to JSON and blank out run-specific metadata before comparing.
                model = catboost.CatBoost()
                model.load_model(model_path)
                model.save_model(model_path + '.json', format='json')
                with open(model_path + '.json') as json_model_file:
                    json_model = json.load(json_model_file)
                json_model["model_info"]["output_options"] = ""
                json_model["model_info"]["train_finish_time"] = ""
                json_model["model_info"]["model_guid"] = ""
                json_model["model_info"]["params"]["flat_params"]["snapshot_file"] = ""
                json_model["model_info"]["params"]["flat_params"]["save_snapshot"] = ""
                json_model["model_info"]["params"]["flat_params"]["train_dir"] = ""
                return json_model
            assert load_json_model(pj(reference_dir, output_dir, model_file)) == load_json_model(pj(snapshot_dir, output_dir, model_file))
def test_eval_feature_snapshot_wrong_options():
    """Resuming an eval-feature snapshot with a changed --fold-size must fail."""
    summary = yatest.common.test_output_path('eval_feature_summary')
    snapshot = yatest.common.test_output_path('eval_feature_snapshot')
    def make_cmd(fold_size):
        return (
            CATBOOST_PATH,
            'eval-feature',
            '--loss-function', 'RMSE',
            '-f', data_file('querywise', 'train'),
            '--cd', data_file('querywise', 'train.cd'),
            '-i', '600',
            '-T', '4',
            '-w', '0.1',
            '--permutations', '1',
            '--snapshot-interval', '1',
            '--features-to-evaluate', '2-5',
            '--feature-eval-mode', 'OneVsAll',
            '--feature-eval-output-file', summary,
            '--offset', '0',
            '--fold-count', '5',
            '--fold-size-unit', 'Group',
            '--fold-size', str(fold_size),
            '--snapshot-file', snapshot
        )
    def stop_after_timeout(cmd, timeout):
        # Timing out is the intended way to leave a partial snapshot behind.
        try:
            yatest.common.execute(cmd, timeout=timeout)
        except ExecutionTimeoutError:
            pass
    stop_after_timeout(cmd=make_cmd(fold_size=40), timeout=3)
    # Resuming with different options must be rejected.
    with pytest.raises(yatest.common.ExecutionError):
        yatest.common.execute(make_cmd(fold_size=20))
def test_eval_feature_parse_timestamps():
    """--learn-timestamps must accept a valid timestamps file and reject a non-timestamp file."""
    summary = yatest.common.test_output_path('eval_feature_summary')
    def make_cmd(timestamps_file):
        return (
            CATBOOST_PATH,
            'eval-feature',
            '--loss-function', 'QueryRMSE',
            '-f', data_file('querywise', 'train'),
            '--cd', data_file('querywise', 'train.cd'),
            '-i', '600',
            '-T', '4',
            '-w', '0.1',
            '--permutations', '1',
            '--snapshot-interval', '1',
            '--features-to-evaluate', '2-5',
            '--feature-eval-mode', 'OneVsAll',
            '--feature-eval-output-file', summary,
            '--offset', '0',
            '--fold-count', '5',
            '--fold-size-unit', 'Group',
            '--fold-size', '40',
            '--learn-timestamps', data_file('querywise', timestamps_file),
            '--timesplit-quantile', '0.75'
        )
    yatest.common.execute(make_cmd('train.timestamps'))
    # Feeding a group-weights file as timestamps must fail to parse.
    with pytest.raises(yatest.common.ExecutionError):
        yatest.common.execute(make_cmd('train.group_weights'))
def test_eval_feature_relative_fold_size():
    """--relative-fold-size works on its own but is mutually exclusive with --fold-size."""
    summary = yatest.common.test_output_path('eval_feature_summary')
    def make_cmd():
        return (
            CATBOOST_PATH,
            'eval-feature',
            '--loss-function', 'QueryRMSE',
            '-f', data_file('querywise', 'train'),
            '--cd', data_file('querywise', 'train.cd'),
            '-i', '100',
            '-T', '4',
            '-w', '0.1',
            '--permutations', '1',
            '--snapshot-interval', '1',
            '--features-to-evaluate', '2-5',
            '--feature-eval-mode', 'OneVsAll',
            '--feature-eval-output-file', summary,
            '--offset', '0',
            '--fold-count', '5',
            '--fold-size-unit', 'Group',
            '--relative-fold-size', '0.1',
        )
    yatest.common.execute(make_cmd())
    # Passing both an absolute and a relative fold size must be rejected.
    with pytest.raises(yatest.common.ExecutionError):
        yatest.common.execute(make_cmd() + ('--fold-size', '40',))
# Metrics exercised by test_metric_description (as objective/eval/custom metrics).
TEST_METRIC_DESCRIPTION_METRICS_LIST = ['Logloss', 'Precision', 'AUC']
@pytest.mark.parametrize('dataset_has_weights', [True, False], ids=['dataset_has_weights=True', 'dataset_has_weights=False'])
@pytest.mark.parametrize('eval_metric_loss', TEST_METRIC_DESCRIPTION_METRICS_LIST,
                         ids=['eval_loss=' + mode for mode in TEST_METRIC_DESCRIPTION_METRICS_LIST])
@pytest.mark.parametrize('eval_metric_use_weights', [True, False, None],
                         ids=['eval_weights=' + str(mode) for mode in [True, False, None]])
@pytest.mark.parametrize('custom_metric_loss', TEST_METRIC_DESCRIPTION_METRICS_LIST,
                         ids=['custom_loss=' + mode for mode in TEST_METRIC_DESCRIPTION_METRICS_LIST])
@pytest.mark.parametrize('custom_metric_use_weights', [True, False, None],
                         ids=['custom_weights=' + str(mode) for mode in [True, False, None]])
def test_metric_description(dataset_has_weights, eval_metric_loss, eval_metric_use_weights, custom_metric_loss, custom_metric_use_weights):
    """Metric headers in the error logs must be unique and match the expected descriptions.

    Requesting use_weights on a dataset without weights must make fit fail.
    AUC needs the 'hints=skip_train~false' hint to appear in the learn log, and
    uses ';' instead of ':' before additional parameters.
    """
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    if dataset_has_weights:
        train_pool_filename = data_file('adult_weight', 'train_weight')
        test_pool_filename = data_file('adult_weight', 'test_weight')
        pool_cd_filename = data_file('adult_weight', 'train.cd')
    else:
        train_pool_filename = data_file('adult', 'train_small')
        test_pool_filename = data_file('adult', 'test_small')
        pool_cd_filename = data_file('adult', 'train.cd')
    eval_metric = eval_metric_loss
    if eval_metric == 'AUC':
        eval_metric += ':hints=skip_train~false'
    if eval_metric_use_weights is not None:
        eval_metric += ';' if eval_metric_loss == 'AUC' else ':'
        eval_metric += 'use_weights=' + str(eval_metric_use_weights)
    custom_metric = custom_metric_loss
    if custom_metric == 'AUC':
        custom_metric += ':hints=skip_train~false'
    if custom_metric_use_weights is not None:
        custom_metric += ';' if custom_metric_loss == 'AUC' else ':'
        custom_metric += 'use_weights=' + str(custom_metric_use_weights)
    cmd = (
        '--loss-function', 'Logloss',
        '-f', train_pool_filename,
        '-t', test_pool_filename,
        '--cd', pool_cd_filename,
        '-i', '10',
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '--eval-metric', eval_metric,
        '--custom-metric', custom_metric,
    )
    should_fail = not dataset_has_weights and (eval_metric_use_weights is not None or custom_metric_use_weights is not None)
    try:
        execute_catboost_fit('CPU', cmd)
    except ExecutionError:
        assert should_fail
        return
    for filename in [learn_error_path, test_error_path]:
        with open(filename, 'r') as f:
            metrics_descriptions = f.readline().split('\t')[1:]  # without 'iter' column
            metrics_descriptions[-1] = metrics_descriptions[-1][:-1]  # remove '\n' symbol
            unique_metrics_descriptions = set([s.lower() for s in metrics_descriptions])
            assert len(metrics_descriptions) == len(unique_metrics_descriptions)
            expected_objective_metric_description = 'Logloss'
            if dataset_has_weights:
                expected_eval_metric_description = \
                    eval_metric_loss if eval_metric_use_weights is None else eval_metric_loss + ':use_weights=' + str(eval_metric_use_weights)
                if custom_metric_loss == 'AUC':
                    expected_custom_metrics_descriptions = \
                        ['AUC' if custom_metric_use_weights is None else 'AUC:use_weights=' + str(custom_metric_use_weights)]
                else:
                    expected_custom_metrics_descriptions = (
                        [custom_metric_loss + ':use_weights=False', custom_metric_loss + ':use_weights=True']
                        if custom_metric_use_weights is None
                        else [custom_metric_loss + ':use_weights=' + str(custom_metric_use_weights)])
            else:
                expected_eval_metric_description = eval_metric_loss
                expected_custom_metrics_descriptions = [custom_metric_loss]
            assert unique_metrics_descriptions == set(s.lower() for s in [expected_objective_metric_description] + [expected_eval_metric_description] + expected_custom_metrics_descriptions)
    return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
def test_leafwise_scoring():
    """Enabling --dev-leafwise-scoring must not change the learn-error metrics.

    Trains twice with identical settings (with/without the flag) and compares
    the learn error logs byte-for-byte.
    """
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    cmd = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--cd', data_file('adult', 'train.cd'),
        '-i', '50',
        '-r', '0',
        '--learn-err-log', learn_error_path
    ]
    execute_catboost_fit('CPU', cmd)
    # Use context managers: the original left the file handles unclosed.
    with open(learn_error_path) as log_file:
        learn_errors_log = log_file.read()
    execute_catboost_fit('CPU', cmd + ['--dev-leafwise-scoring'])
    with open(learn_error_path) as log_file:
        new_learn_errors_log = log_file.read()
    assert new_learn_errors_log == learn_errors_log
def test_group_features():
    """Train on adult, then calc Probability predictions; canonize learn errors and predictions."""
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_predictions_path = yatest.common.test_output_path('test_predictions.tsv')
    model_path = yatest.common.test_output_path('model.bin')
    execute_catboost_fit('CPU', [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--cd', data_file('adult', 'train.cd'),
        '-i', '50',
        '-r', '0',
        '-m', model_path,
        '--learn-err-log', learn_error_path
    ])
    yatest.common.execute([
        CATBOOST_PATH,
        'calc',
        '-m', model_path,
        '--input-path', data_file('adult', 'test_small'),
        '--cd', data_file('adult', 'train.cd'),
        '--output-path', test_predictions_path,
        '--output-columns', 'Probability'
    ])
    return [local_canonical_file(learn_error_path), local_canonical_file(test_predictions_path)]
def test_model_sum():
    """model-sum of a model with itself (weights 0.75 and 0.25) must evaluate the same as the original model."""
    model_path = yatest.common.test_output_path('model.bin')
    model_eval = yatest.common.test_output_path('model_eval.txt')
    execute_catboost_fit('CPU', [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--cd', data_file('adult', 'train.cd'),
        '-i', '10',
        '-m', model_path,
        '-t', data_file('adult', 'test_small'),
        '--eval-file', model_eval,
        '--output-columns', 'SampleId,RawFormulaVal',
    ])
    sum_path = yatest.common.test_output_path('sum.bin')
    yatest.common.execute([
        CATBOOST_PATH,
        'model-sum',
        '--model-with-weight', '{}={}'.format(model_path, 0.75),
        '--model-with-weight', '{}={}'.format(model_path, 0.25),
        '--output-path', sum_path,
    ])
    sum_eval = yatest.common.test_output_path('sum_eval.txt')
    yatest.common.execute([
        CATBOOST_PATH,
        'calc',
        '-m', sum_path,
        '--input-path', data_file('adult', 'test_small'),
        '--cd', data_file('adult', 'train.cd'),
        '--output-path', sum_eval,
    ])
    # Diff the two eval files with zero allowed precision difference.
    yatest.common.execute(get_limited_precision_dsv_diff_tool(0) + [model_eval, sum_eval])
def test_external_feature_names():
    """--feature-names-path must produce identical fstr output whether or not the cd file carries feature ids."""
    fstr_cd_with_id_path = yatest.common.test_output_path('fstr_cd_with_id.tsv')
    fstr_cd_without_id_path = yatest.common.test_output_path('fstr_cd_without_id.tsv')
    for cd_has_feature_names in [False, True]:
        if cd_has_feature_names:
            cd_file = data_file('adult', 'train_with_id.cd')
            fstr_path = fstr_cd_with_id_path
        else:
            cd_file = data_file('adult', 'train.cd')
            fstr_path = fstr_cd_without_id_path
        cmd = (
            '--loss-function', 'Logloss',
            '--target-border', '0.5',
            '-f', data_file('adult', 'train_small'),
            '--column-description', cd_file,
            '-i', '10',
            '-T', '4',
            '--feature-names-path', data_file('adult', 'feature_names'),
            '--fstr-type', 'FeatureImportance',
            '--fstr-file', fstr_path
        )
        execute_catboost_fit('CPU', cmd)
    assert filecmp.cmp(fstr_cd_with_id_path, fstr_cd_without_id_path)
    return [local_canonical_file(fstr_cd_with_id_path)]
def test_diffusion_temperature():
    """Langevin boosting with diffusion temperature 1000 must reproduce the canonical eval file."""
    eval_path = yatest.common.test_output_path('test.eval')
    execute_catboost_fit('CPU', [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--cd', data_file('adult', 'train.cd'),
        '-i', '50',
        '-r', '0',
        '--langevin', 'True',
        '--diffusion-temperature', '1000',
        '--eval-file', eval_path,
    ])
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('config', [('Constant', 0.2, 0.1), ('Constant', 2, 0.1), ('Decreasing', 0.2, 0.1)])
def test_model_shrink_correct(config):
    """Valid shrink-mode/rate/learning-rate combinations must train and match the canonical eval."""
    shrink_mode, shrink_rate, learning_rate = config
    eval_path = yatest.common.test_output_path('test.eval')
    execute_catboost_fit('CPU', [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--cd', data_file('adult', 'train.cd'),
        '-i', '50',
        '-r', '0',
        '--eval-file', eval_path,
        '--model-shrink-mode', shrink_mode,
        '--model-shrink-rate', str(shrink_rate),
        '--learning-rate', str(learning_rate),
    ])
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('config', [('Constant', 20, 0.1), ('Constant', 10, 0.1), ('Decreasing', 2, 0.1)])
def test_model_shrink_incorrect(config):
    """Invalid shrink-mode/rate/learning-rate combinations must be rejected by fit."""
    shrink_mode, shrink_rate, learning_rate = config
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--cd', data_file('adult', 'train.cd'),
        '-i', '50',
        '-r', '0',
        '--eval-file', eval_path,
        '--model-shrink-mode', shrink_mode,
        '--model-shrink-rate', str(shrink_rate),
        '--learning-rate', str(learning_rate),
    ]
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', fit_args)
@pytest.mark.parametrize('average', ['Macro', 'Micro', 'Weighted'])
def test_total_f1_params(average):
    """TotalF1 with each averaging mode must produce canonical eval metrics on MultiClass."""
    return do_test_eval_metrics(
        metric='TotalF1:average=' + average,
        metric_period='1',
        train=data_file('cloudness_small', 'train_small'),
        test=data_file('cloudness_small', 'test_small'),
        cd=data_file('cloudness_small', 'train.cd'),
        loss_function='MultiClass'
    )
def test_eval_metrics_with_pairs():
    """PairAccuracy evaluation on a PairLogit model must work with explicit pair files."""
    do_test_eval_metrics(
        metric='PairAccuracy',
        metric_period='1',
        train=data_file('querywise', 'train'),
        test=data_file('querywise', 'test'),
        cd=data_file('querywise', 'train.cd'),
        loss_function='PairLogit',
        additional_train_params=(
            '--learn-pairs', data_file('querywise', 'train.pairs'),
            '--test-pairs', data_file('querywise', 'test.pairs')
        ),
        additional_eval_params=(
            '--input-pairs', data_file('querywise', 'test.pairs')
        )
    )
def test_tweedie():
    """Tweedie loss (variance_power=1.5) training must reproduce the canonical learn error log."""
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    fit_args = (
        '--loss-function', 'Tweedie:variance_power=1.5',
        '-f', data_file('adult_crossentropy', 'train_proba'),
        '--column-description', data_file('adult_crossentropy', 'train.cd'),
        '-i', '100',
        '--learning-rate', '0.5',
        '--learn-err-log', learn_error_path,
    )
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_error_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('separator_type', SEPARATOR_TYPES)
@pytest.mark.parametrize('feature_estimators', TEXT_FEATURE_ESTIMATORS)
def test_fit_binclass_with_text_features(boosting_type, separator_type, feature_estimators):
    """Binary classification with text features: fit must match a standalone calc, and logs are canonized."""
    output_model_path = yatest.common.test_output_path('model.bin')
    learn_error_path = yatest.common.test_output_path('learn.tsv')
    test_error_path = yatest.common.test_output_path('test.tsv')
    test_eval_path = yatest.common.test_output_path('test.eval')
    calc_eval_path = yatest.common.test_output_path('calc.eval')
    # Build the --text-processing JSON: one tokenizer, word/bigram dictionaries,
    # and one feature calcer per entry of feature_estimators.
    tokenizers = [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}]
    dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]
    dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}
    feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer], 'tokenizers_names': [separator_type]} for calcer in feature_estimators.split(',')]
    text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries, 'tokenizers': tokenizers}
    pool_name = 'rotten_tomatoes'
    test_file = data_file(pool_name, 'test')
    cd_file = data_file(pool_name, 'cd_binclass')
    cmd = (
        '--loss-function', 'Logloss',
        '--eval-metric', 'AUC',
        '-f', data_file(pool_name, 'train'),
        '-t', test_file,
        '--text-processing', json.dumps(text_processing),
        '--column-description', cd_file,
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '--eval-file', test_eval_path,
        '--output-columns', 'RawFormulaVal',
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)
    # The model applied standalone must reproduce the eval produced during fit.
    apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])
    assert filecmp.cmp(test_eval_path, calc_eval_path)
    return [
        local_canonical_file(learn_error_path),
        local_canonical_file(test_error_path),
        local_canonical_file(test_eval_path)
    ]
@pytest.mark.parametrize('separator_type', SEPARATOR_TYPES)
@pytest.mark.parametrize('feature_estimators', TEXT_FEATURE_ESTIMATORS)
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
def test_fit_multiclass_with_text_features(separator_type, feature_estimators, loss_function):
    """Multiclass training with text features: fit must match a standalone calc, and logs are canonized."""
    output_model_path = yatest.common.test_output_path('model.bin')
    learn_error_path = yatest.common.test_output_path('learn.tsv')
    test_error_path = yatest.common.test_output_path('test.tsv')
    test_eval_path = yatest.common.test_output_path('test.eval')
    calc_eval_path = yatest.common.test_output_path('calc.eval')
    # Build the --text-processing JSON: one tokenizer, word/bigram dictionaries,
    # and one feature calcer per entry of feature_estimators.
    tokenizers = [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}]
    dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]
    dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}
    feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer], 'tokenizers_names': [separator_type]} for calcer in feature_estimators.split(',')]
    text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries, 'tokenizers': tokenizers}
    pool_name = 'rotten_tomatoes'
    test_file = data_file(pool_name, 'test')
    cd_file = data_file(pool_name, 'cd')
    cmd = (
        '--loss-function', loss_function,
        '--eval-metric', 'Accuracy',
        '-f', data_file(pool_name, 'train'),
        '-t', test_file,
        '--text-processing', json.dumps(text_processing),
        '--column-description', cd_file,
        '--boosting-type', 'Plain',
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '--eval-file', test_eval_path,
        '--output-columns', 'RawFormulaVal',
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)
    # The model applied standalone must reproduce the eval produced during fit.
    apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])
    assert filecmp.cmp(test_eval_path, calc_eval_path)
    return [
        local_canonical_file(learn_error_path),
        local_canonical_file(test_error_path),
        local_canonical_file(test_eval_path)
    ]
@pytest.mark.parametrize('grow_policy', GROW_POLICIES)
def test_shrink_model_with_text_features(grow_policy):
    """Best-model shrinking (--use-best-model true) with text features must match a standalone calc."""
    output_model_path = yatest.common.test_output_path('model.bin')
    learn_error_path = yatest.common.test_output_path('learn.tsv')
    test_error_path = yatest.common.test_output_path('test.tsv')
    test_eval_path = yatest.common.test_output_path('test.eval')
    calc_eval_path = yatest.common.test_output_path('calc.eval')
    loss_function = 'MultiClass'
    feature_estimators = 'BoW,NaiveBayes,BM25'
    # Text processing config: word/bigram dictionaries, one calcer per estimator.
    dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]
    dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}
    feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer]} for calcer in feature_estimators.split(',')]
    text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries}
    pool_name = 'rotten_tomatoes'
    test_file = data_file(pool_name, 'test')
    cd_file = data_file(pool_name, 'cd')
    cmd = (
        '--loss-function', loss_function,
        '--eval-metric', 'Accuracy',
        '-f', data_file(pool_name, 'train'),
        '-t', test_file,
        '--column-description', cd_file,
        '--text-processing', json.dumps(text_processing),
        '--grow-policy', grow_policy,
        '--boosting-type', 'Plain',
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '--eval-file', test_eval_path,
        '--output-columns', 'RawFormulaVal',
        '--use-best-model', 'true',
    )
    execute_catboost_fit('CPU', cmd)
    # The shrunk model applied standalone must reproduce the eval from fit.
    apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])
    assert filecmp.cmp(test_eval_path, calc_eval_path)
    return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
@pytest.mark.parametrize('loss_function', ['RMSE', 'RMSEWithUncertainty', 'Logloss'])
def test_virtual_ensembles(loss_function):
    """With --virtual-ensembles-count 1, 'VirtEnsembles' calc output must match the fit-time eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    # Regression losses use the querywise pool; classification uses adult.
    train_path = data_file('querywise', 'train') if loss_function in REGRESSION_LOSSES else data_file('adult', 'train_small')
    test_path = data_file('querywise', 'test') if loss_function in REGRESSION_LOSSES else data_file('adult', 'test_small')
    cd_path = data_file('querywise', 'train.cd') if loss_function in REGRESSION_LOSSES else data_file('adult', 'train.cd')
    test_eval_path = yatest.common.test_output_path('test.eval')
    cmd = [
        '--use-best-model', 'false',
        '-f', train_path,
        '-t', test_path,
        '--loss-function', loss_function,
        '--column-description', cd_path,
        '--posterior-sampling', 'true',
        '--eval-file', test_eval_path,
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
    ]
    if loss_function == 'RMSEWithUncertainty':
        cmd += ['--prediction-type', 'RMSEWithUncertainty']
    execute_catboost_fit('CPU', cmd)
    formula_predict_path = yatest.common.test_output_path('predict_test.eval')
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', test_path,
        '--column-description', cd_path,
        '-m', output_model_path,
        '--output-path', formula_predict_path,
        '--virtual-ensembles-count', '1',
        '--prediction-type', 'VirtEnsembles',
    )
    yatest.common.execute(calc_cmd)
    assert compare_evals(test_eval_path, formula_predict_path, skip_header=True)
@pytest.mark.parametrize('virtual_ensembles_count', ['1', '10'])
@pytest.mark.parametrize('prediction_type', ['TotalUncertainty', 'VirtEnsembles'])
@pytest.mark.parametrize('loss_function', ['RMSE', 'RMSEWithUncertainty', 'Logloss', 'MultiClass'])
def test_uncertainty_prediction(virtual_ensembles_count, prediction_type, loss_function):
    """CLI 'calc' uncertainty output must match the python package's virtual_ensembles_predict.

    Trains with posterior sampling, runs CLI calc with the requested
    virtual-ensemble prediction type, then compares against
    catboost.CatBoost.virtual_ensembles_predict on the same pool.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    pool_names = {
        'RMSE' : 'querywise',
        'RMSEWithUncertainty' : 'querywise',
        'Logloss' : 'adult',
        'MultiClass' : 'cloudness_small'
    }
    pool_name = pool_names[loss_function]
    # Regression losses use the full querywise pool; others use the *_small files.
    train_path = data_file(pool_name, 'train') if loss_function in REGRESSION_LOSSES else data_file(pool_name, 'train_small')
    test_path = data_file(pool_name, 'test') if loss_function in REGRESSION_LOSSES else data_file(pool_name, 'test_small')
    # Fix: the original conditional here had two identical branches — the cd
    # file name does not depend on the loss function.
    cd_path = data_file(pool_name, 'train.cd')
    cmd = (
        '--use-best-model', 'false',
        '-f', train_path,
        '-t', test_path,
        '--loss-function', loss_function,
        '--column-description', cd_path,
        '--posterior-sampling', 'true',
        '-i', '200',
        '-T', '4',
        '-m', output_model_path,
    )
    execute_catboost_fit('CPU', cmd)
    formula_predict_path = yatest.common.test_output_path('predict_test.eval')
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', test_path,
        '--column-description', cd_path,
        '-m', output_model_path,
        '--output-path', formula_predict_path,
        '--virtual-ensembles-count', virtual_ensembles_count,
        '--prediction-type', prediction_type,
    )
    yatest.common.execute(calc_cmd)
    model = catboost.CatBoost()
    model.load_model(output_model_path)
    pool = catboost.Pool(test_path, column_description=cd_path)
    py_preds = model.virtual_ensembles_predict(
        pool,
        prediction_type=prediction_type,
        virtual_ensembles_count=int(virtual_ensembles_count))
    # Skip the DocId column (index 0) of the CLI output before comparing.
    cli_preds = np.genfromtxt(
        formula_predict_path,
        delimiter='\t',
        dtype=float,
        skip_header=True)
    assert(np.allclose(py_preds.reshape(-1,), cli_preds[:, 1:].reshape(-1,), rtol=1e-10))
    return local_canonical_file(formula_predict_path)
@pytest.mark.parametrize('loss_function', ['RMSE', 'RMSEWithUncertainty'])
def test_uncertainty_prediction_requirements(loss_function):
    """Best-effort check of 'VirtEnsembles' calc on a model trained WITHOUT posterior sampling.

    The hard assertion was deliberately relaxed to a no-op (see trailing
    comments); a calc failure is tolerated by returning early.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    train_path = data_file('querywise', 'train')
    test_path = data_file('querywise', 'test')
    cd_path = data_file('querywise', 'train.cd')
    cmd = (
        '--use-best-model', 'false',
        '-f', train_path,
        '-t', test_path,
        '--loss-function', loss_function,
        '--column-description', cd_path,
        '-i', '200',
        '-T', '4',
        '-m', output_model_path,
    )
    execute_catboost_fit('CPU', cmd)
    formula_predict_path = yatest.common.test_output_path('predict_test.eval')
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', test_path,
        '--column-description', cd_path,
        '-m', output_model_path,
        '--output-path', formula_predict_path,
        '--prediction-type', 'VirtEnsembles'
    )
    try:
        yatest.common.execute(calc_cmd)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; the best-effort early return is kept.
        return
    # assert replaced to warning
    # assert False
# Dictionary configurations for test_text_processing_options: each mapping is
# dictionary_id -> colon-separated dictionary option string passed via --dictionaries.
DICTIONARIES_OPTIONS = [
    {
        "Simple": "token_level_type=Word:occurrence_lower_bound=50"
    },
    {
        "UniGramOccur5": "occurrence_lower_bound=5:token_level_type=Letter",
        "BiGramOccur2": "occurrence_lower_bound=2:gram_order=2:token_level_type=Letter",
        "WordDictOccur1": "occurrence_lower_bound=1:token_level_type=Word",
        "WordDictOccur2": "occurrence_lower_bound=2:token_level_type=Word",
        "WordDictOccur3": "occurrence_lower_bound=3:token_level_type=Word"
    },
    {
        "Unigram": "gram_order=1:token_level_type=Letter:occurrence_lower_bound=50",
        "Bigram": "gram_order=2:token_level_type=Letter:occurrence_lower_bound=50",
        "Trigram": "gram_order=3:token_level_type=Letter:occurrence_lower_bound=50"
    },
    {
        "Letter": "token_level_type=Letter:occurrence_lower_bound=50",
        "Word": "token_level_type=Word:occurrence_lower_bound=50"
    }
]
@pytest.mark.parametrize('dictionaries', DICTIONARIES_OPTIONS)
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
def test_text_processing_options(dictionaries, loss_function):
    """--dictionaries/--feature-calcers text options: fit must match a standalone calc; logs canonized."""
    output_model_path = yatest.common.test_output_path('model.bin')
    learn_error_path = yatest.common.test_output_path('learn.tsv')
    test_error_path = yatest.common.test_output_path('test.tsv')
    test_eval_path = yatest.common.test_output_path('test.eval')
    calc_eval_path = yatest.common.test_output_path('calc.eval')
    # Flatten the id->options mapping into the CLI's 'id:options,...' form.
    dictionaries = ','.join([key + ':' + value for key, value in dictionaries.items()])
    feature_estimators = 'BM25,BoW,NaiveBayes'
    pool_name = 'rotten_tomatoes'
    test_file = data_file(pool_name, 'test')
    cd_file = data_file(pool_name, 'cd')
    cmd = (
        '--loss-function', loss_function,
        '--eval-metric', 'Accuracy',
        '-f', data_file(pool_name, 'train'),
        '-t', test_file,
        '--column-description', cd_file,
        '--dictionaries', dictionaries,
        '--feature-calcers', feature_estimators,
        '--boosting-type', 'Plain',
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '--eval-file', test_eval_path,
        '--output-columns', 'RawFormulaVal',
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)
    # The model applied standalone must reproduce the eval produced during fit.
    apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])
    assert filecmp.cmp(test_eval_path, calc_eval_path)
    return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_fit_with_per_feature_text_options(boosting_type):
    """Train with a per-feature '--text-processing' JSON config (tokenizers,
    dictionaries, and per-feature calcers) and verify that applying the saved
    model reproduces the training-time eval file exactly.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    learn_error_path = yatest.common.test_output_path('learn.tsv')
    test_error_path = yatest.common.test_output_path('test.tsv')
    test_eval_path = yatest.common.test_output_path('test.eval')
    calc_eval_path = yatest.common.test_output_path('calc.eval')
    # Keys under 'feature_processing' are text-feature indices (as strings);
    # each index gets its own tokenizer/dictionary/calcer combinations.
    text_processing = {
        'tokenizers': [
            {'tokenizer_id': 'Space', 'delimiter': ' '},
            {'tokenizer_id': 'Comma', 'delimiter': ','},
        ],
        'dictionaries': [
            {'dictionary_id': 'Word', 'token_level_type': 'Word', 'occurrence_lower_bound': '50'},
            {'dictionary_id': 'Bigram', 'token_level_type': 'Word', 'gram_order': '2', 'occurrence_lower_bound': '50'},
            {'dictionary_id': 'Trigram', 'token_level_type': 'Letter', 'gram_order': '3', 'occurrence_lower_bound': '50'},
        ],
        'feature_processing': {
            '0': [
                {'tokenizers_names': ['Space'], 'dictionaries_names': ['Word'], 'feature_calcers': ['BoW', 'NaiveBayes']},
                {'tokenizers_names': ['Space'], 'dictionaries_names': ['Bigram', 'Trigram'], 'feature_calcers': ['BoW']},
            ],
            '1': [
                {'tokenizers_names': ['Space'], 'dictionaries_names': ['Word'], 'feature_calcers': ['BoW', 'NaiveBayes', 'BM25']},
                {'tokenizers_names': ['Space'], 'dictionaries_names': ['Trigram'], 'feature_calcers': ['BoW', 'BM25']},
            ],
            '2': [
                {'tokenizers_names': ['Space'], 'dictionaries_names': ['Word', 'Bigram', 'Trigram'], 'feature_calcers': ['BoW']},
            ],
        }
    }
    pool_name = 'rotten_tomatoes'
    test_file = data_file(pool_name, 'test')
    cd_file = data_file(pool_name, 'cd_binclass')
    cmd = (
        '--loss-function', 'Logloss',
        '--eval-metric', 'AUC',
        '-f', data_file(pool_name, 'train'),
        '-t', test_file,
        '--text-processing', json.dumps(text_processing),
        '--column-description', cd_file,
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '--eval-file', test_eval_path,
        '--output-columns', 'RawFormulaVal',
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)
    apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])
    # Training-time eval and standalone application must agree bit-for-bit.
    assert filecmp.cmp(test_eval_path, calc_eval_path)
    return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_embeddings_train(boosting_type):
    """Train a binary classifier on embedding-only features and verify that
    applying the saved model reproduces the training-time eval file.

    NOTE(review): the train file is deliberately passed as both '-f' and '-t'
    (and re-applied below) — presumably because no separate embeddings test
    pool exists; confirm before "fixing".
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    learn_error_path = yatest.common.test_output_path('learn.tsv')
    test_error_path = yatest.common.test_output_path('test.tsv')
    test_eval_path = yatest.common.test_output_path('test.eval')
    calc_eval_path = yatest.common.test_output_path('calc.eval')
    cmd = (
        '--loss-function', 'Logloss',
        '--eval-metric', 'AUC',
        '-f', ROTTEN_TOMATOES_WITH_EMBEDDINGS_TRAIN_FILE,
        '-t', ROTTEN_TOMATOES_WITH_EMBEDDINGS_TRAIN_FILE,
        '--column-description', ROTTEN_TOMATOES_ONLY_EMBEDDINGS_CD_BINCLASS_FILE,
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '--eval-file', test_eval_path,
        '--output-columns', 'RawFormulaVal',
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)
    apply_catboost(
        output_model_path,
        ROTTEN_TOMATOES_WITH_EMBEDDINGS_TRAIN_FILE,
        ROTTEN_TOMATOES_ONLY_EMBEDDINGS_CD_BINCLASS_FILE,
        calc_eval_path,
        output_columns=['RawFormulaVal']
    )
    assert filecmp.cmp(test_eval_path, calc_eval_path)
    return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
def test_dump_options():
    """Train with metadata set from free args, then check that the
    'dump-options' mode recovers that metadata from the snapshot."""
    snapshot_path = yatest.common.test_output_path('snapshot.bin')
    meta_key = 'summary'
    meta_value = '{"key1":"value1", "key2":"value2"}'
    fit_args = (
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '20',
        '-T', '4',
        '--snapshot-file', snapshot_path,
        '--use-best-model', 'false',
        '--set-metadata-from-freeargs', '--', meta_key, meta_value,
    )
    execute_catboost_fit('CPU', fit_args)
    options_path = yatest.common.test_output_path('options.json')
    yatest.common.execute((
        get_catboost_binary_path(),
        'dump-options',
        '--input', snapshot_path,
        '--output', options_path
    ))
    with open(options_path) as options_file:
        dumped = json.load(options_file)
    assert dumped['metadata'][meta_key] == meta_value
def prepare_pool_metainfo_with_feature_tags():
    """Build a pool metainfo dict with three (overlapping) feature tags,
    write it to a JSON file, and return (metainfo, path_to_json)."""
    tag_features = {
        'A': [0, 1, 2, 3, 4, 5, 6, 7],
        'B': [12, 13, 14, 15, 16],
        'C': [5, 6, 7, 8, 9, 10, 11, 12, 13],
    }
    pool_metainfo = {
        'tags': {tag: {'features': features} for tag, features in tag_features.items()}
    }
    pool_metainfo_path = yatest.common.test_output_path('pool_metainfo.json')
    with open(pool_metainfo_path, 'w') as out:
        json.dump(pool_metainfo, out)
    return pool_metainfo, pool_metainfo_path
def test_feature_tags_in_ignore_features():
    """Check that ignoring features via '#tag' references (with a pool
    metainfo file) produces the same eval as ignoring the equivalent
    explicit feature indices.
    """
    pool_metainfo, pool_metainfo_path = prepare_pool_metainfo_with_feature_tags()
    base_cmd = (
        CATBOOST_PATH,
        'fit',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '50',
        '-T', '4',
    )
    for ignored_tags in (['A'], ['A', 'B'], ['B', 'C']):
        output_eval_path_1 = yatest.common.test_output_path('1_test.eval')
        # Flatten the tags' feature-index lists into one list.
        ignored_features = sum((pool_metainfo['tags'][tag]['features'] for tag in ignored_tags), [])
        cmd_1 = base_cmd + (
            '--eval-file', output_eval_path_1,
            '--ignore-features', ':'.join(map(str, ignored_features)),
        )
        output_eval_path_2 = yatest.common.test_output_path('2_test.eval')
        cmd_2 = base_cmd + (
            '--eval-file', output_eval_path_2,
            '--ignore-features', ':'.join('#{}'.format(tag) for tag in ignored_tags),
            '--pool-metainfo-path', pool_metainfo_path,
        )
        yatest.common.execute(cmd_1)
        yatest.common.execute(cmd_2)
        assert filecmp.cmp(output_eval_path_1, output_eval_path_2)
def test_feature_tags_in_features_for_select():
    """Check that '--features-for-select' accepts '#tag' references and
    yields the same selection summary as the equivalent explicit indices.
    """
    pool_metainfo, pool_metainfo_path = prepare_pool_metainfo_with_feature_tags()
    base_cmd = (
        CATBOOST_PATH,
        'select-features',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '50',
        '-T', '4',
        '--num-features-to-select', '3',
        '--features-selection-algorithm', 'RecursiveByPredictionValuesChange',
        '--features-selection-steps', '2',
        '--train-final-model',
    )
    for selection_tags in (['A', 'B'], ['A', 'C'], ['B', 'C'], ['A', 'B', 'C']):
        output_summary_path_1 = yatest.common.test_output_path('1_summary.json')
        # Flatten the tags' feature-index lists into one list.
        features_for_select = sum((pool_metainfo['tags'][tag]['features'] for tag in selection_tags), [])
        cmd_1 = base_cmd + (
            '--features-selection-result-path', output_summary_path_1,
            '--features-for-select', ','.join(map(str, features_for_select)),
        )
        output_summary_path_2 = yatest.common.test_output_path('2_summary.json')
        cmd_2 = base_cmd + (
            '--features-selection-result-path', output_summary_path_2,
            '--features-for-select', ','.join('#{}'.format(tag) for tag in selection_tags),
            '--pool-metainfo-path', pool_metainfo_path,
        )
        yatest.common.execute(cmd_1)
        yatest.common.execute(cmd_2)
        assert filecmp.cmp(output_summary_path_1, output_summary_path_2)
def test_feature_tags_in_features_to_evaluate():
    """Check that '--features-to-evaluate' accepts '#tag' references and
    produces the same feature-eval output as explicit feature indices.
    """
    pool_metainfo, pool_metainfo_path = prepare_pool_metainfo_with_feature_tags()
    base_cmd = (
        CATBOOST_PATH,
        'eval-feature',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--feature-eval-mode', 'OneVsAll',
        '-i', '30',
        '-T', '4',
        '--fold-count', '2',
        '--fold-size-unit', 'Object',
        '--fold-size', '50'
    )
    # Build parallel ';'-separated feature-set arguments: one with explicit
    # indices, one with '#tag' references.
    features_to_evaluate_1 = []
    features_to_evaluate_2 = []
    for tags_set in (['A'], ['A', 'B'], ['B', 'C']):
        features_set = sum((pool_metainfo['tags'][tag]['features'] for tag in tags_set), [])
        features_to_evaluate_1.append(','.join(map(str, features_set)))
        features_to_evaluate_2.append(','.join('#{}'.format(tag) for tag in tags_set))
    output_eval_path_1 = yatest.common.test_output_path('1_feature.eval')
    cmd_1 = base_cmd + (
        '--feature-eval-output-file', output_eval_path_1,
        '--features-to-evaluate', ';'.join(map(str, features_to_evaluate_1)),
    )
    output_eval_path_2 = yatest.common.test_output_path('2_feature.eval')
    cmd_2 = base_cmd + (
        '--feature-eval-output-file', output_eval_path_2,
        '--features-to-evaluate', ';'.join(features_to_evaluate_2),
        '--pool-metainfo-path', pool_metainfo_path,
    )
    yatest.common.execute(cmd_1)
    yatest.common.execute(cmd_2)
    assert filecmp.cmp(output_eval_path_1, output_eval_path_2)
def test_feature_tags_in_options_file():
    """Check that pool metainfo (feature tags) passed at training time is
    reflected verbatim in the dumped training-options JSON.
    """
    pool_metainfo, pool_metainfo_path = prepare_pool_metainfo_with_feature_tags()
    training_options_path = yatest.common.test_output_path('training_options.json')
    cmd = (
        CATBOOST_PATH,
        'fit',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '50',
        '-T', '4',
        '--pool-metainfo-path', pool_metainfo_path,
        '--training-options-file', training_options_path,
    )
    yatest.common.execute(cmd)
    with open(training_options_path) as f:
        options = json.load(f)
    assert options['pool_metainfo_options'] == pool_metainfo
from itertools import permutations
import yatest.common
from yatest.common import ExecutionTimeoutError, ExecutionError
import pytest
import os
import filecmp
import numpy as np
import pandas as pd
import timeit
import json
import catboost
from catboost_pytest_lib import (
apply_catboost,
compare_evals_with_precision,
compare_fit_evals_with_precision,
compare_evals,
data_file,
execute_catboost_fit,
execute_dist_train,
format_crossvalidation,
generate_concatenated_random_labeled_dataset,
get_catboost_binary_path,
get_limited_precision_dsv_diff_tool,
local_canonical_file,
permute_dataset_columns,
remove_time_from_json,
)
# Path to the catboost CLI binary under test.
CATBOOST_PATH = yatest.common.binary_path("catboost/app/catboost")
# Parametrization axes shared by the tests below.
BOOSTING_TYPE = ['Ordered', 'Plain']
GROW_POLICIES = ['SymmetricTree', 'Lossguide', 'Depthwise']
# Only SymmetricTree supports Ordered boosting, hence the explicit pairs.
BOOSTING_TYPE_WITH_GROW_POLICIES = [('Ordered', 'SymmetricTree'), ('Plain', 'SymmetricTree'),
                                    ('Plain', 'Lossguide'), ('Plain', 'Depthwise')]
PREDICTION_TYPES = ['Probability', 'RawFormulaVal', 'Class']
# Loss-function groups, combined into progressively larger sets.
BINCLASS_LOSSES = ['Logloss', 'CrossEntropy']
MULTICLASS_LOSSES = ['MultiClass', 'MultiClassOneVsAll']
CLASSIFICATION_LOSSES = BINCLASS_LOSSES + MULTICLASS_LOSSES
REGRESSION_LOSSES = ['MAE', 'MAPE', 'Poisson', 'Quantile', 'RMSE', 'RMSEWithUncertainty', 'LogLinQuantile', 'Lq']
PAIRWISE_LOSSES = ['PairLogit', 'PairLogitPairwise']
GROUPWISE_LOSSES = ['YetiRank', 'YetiRankPairwise', 'QueryRMSE', 'QuerySoftMax']
RANKING_LOSSES = PAIRWISE_LOSSES + GROUPWISE_LOSSES
ALL_LOSSES = CLASSIFICATION_LOSSES + REGRESSION_LOSSES + RANKING_LOSSES
SAMPLING_UNIT_TYPES = ['Object', 'Group']
OVERFITTING_DETECTOR_TYPE = ['IncToDec', 'Iter']
LOSS_FUNCTIONS = ['RMSE', 'RMSEWithUncertainty', 'Logloss', 'MAE', 'CrossEntropy', 'Quantile', 'LogLinQuantile',
                  'Poisson', 'MAPE', 'MultiClass', 'MultiClassOneVsAll']
LEAF_ESTIMATION_METHOD = ['Gradient', 'Newton']
# Values for '--dev-score-calc-obj-block-size' (small vs. effectively unbounded).
SCORE_CALC_OBJ_BLOCK_SIZES = ['60', '5000000']
SCORE_CALC_OBJ_BLOCK_SIZES_IDS = ['calc_block=60', 'calc_block=5000000']
SEPARATOR_TYPES = [
    'ByDelimiter',
    'BySense',
]
# Text feature calcer combinations for '--feature-calcers'.
TEXT_FEATURE_ESTIMATORS = [
    'BoW',
    'NaiveBayes',
    'BM25',
    'BoW,NaiveBayes',
    'BoW,NaiveBayes,BM25'
]
# Rotten-tomatoes pool variants with precomputed embedding features.
ROTTEN_TOMATOES_WITH_EMBEDDINGS_TRAIN_FILE = data_file('rotten_tomatoes_small_with_embeddings', 'train')
ROTTEN_TOMATOES_WITH_EMBEDDINGS_CD_BINCLASS_FILE = data_file(
    'rotten_tomatoes_small_with_embeddings',
    'cd_binclass'
)
ROTTEN_TOMATOES_ONLY_EMBEDDINGS_CD_BINCLASS_FILE = data_file(
    'rotten_tomatoes_small_with_embeddings',
    'cd_binclass_only_embeddings'
)
def diff_tool(threshold=None):
    """Return the limited-precision dsv diff tool command (threshold may be None)."""
    tool = get_limited_precision_dsv_diff_tool(threshold, True)
    return tool
@pytest.mark.parametrize('is_inverted', [False, True], ids=['', 'inverted'])
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_cv_multiregression(is_inverted, boosting_type):
    """Run 10-fold (fold 2) cross-validation training with the MultiRMSE loss
    and canonize the resulting eval file.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'MultiRMSE',
        '-f', data_file('multiregression', 'train'),
        '--column-description', data_file('multiregression', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--cv', format_crossvalidation(is_inverted, 2, 10),
        '--cv-rand', '42',
        '--eval-file', output_eval_path,
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_multiregression(dev_score_calc_obj_block_size):
    """Canonize distributed MultiRMSE training on the multiregression pool.

    NOTE(review): relies on run_dist_train/make_deterministic_train_cmd
    defined elsewhere in this file.
    """
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='MultiRMSE',
        pool='multiregression',
        train='train',
        test='test',
        cd='train.cd',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,
        other_options=('--boost-from-average', '0'))))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_multiregression_single(dev_score_calc_obj_block_size):
    """Same as test_dist_train_multiregression, but with the single-target
    column description (train_single.cd).
    """
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='MultiRMSE',
        pool='multiregression',
        train='train',
        test='test',
        cd='train_single.cd',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,
        other_options=('--boost-from-average', '0'))))]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize('n_trees', [100, 500])
def test_multiregression(boosting_type, grow_policy, n_trees):
    """Train MultiRMSE, then canonize the fit eval, standalone calc output,
    and eval-metrics output for the same model.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    output_calc_path = yatest.common.test_output_path('test.calc')
    output_metric_path = yatest.common.test_output_path('test.metric')
    cmd_fit = (
        '--loss-function', 'MultiRMSE',
        '--boosting-type', boosting_type,
        '-f', data_file('multiregression', 'train'),
        '-t', data_file('multiregression', 'test'),
        '--column-description', data_file('multiregression', 'train.cd'),
        '-i', '{}'.format(n_trees),
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--use-best-model', 'false',
        '--grow-policy', grow_policy
    )
    execute_catboost_fit('CPU', cmd_fit)
    cmd_calc = (
        CATBOOST_PATH,
        'calc',
        '--column-description', data_file('multiregression', 'train.cd'),
        '-T', '4',
        '-m', output_model_path,
        '--input-path', data_file('multiregression', 'test'),
        '-o', output_calc_path
    )
    yatest.common.execute(cmd_calc)
    cmd_metric = (
        CATBOOST_PATH,
        'eval-metrics',
        '--column-description', data_file('multiregression', 'train.cd'),
        '-T', '4',
        '-m', output_model_path,
        '--input-path', data_file('multiregression', 'test'),
        '-o', output_metric_path,
        '--metrics', 'MultiRMSE'
    )
    yatest.common.execute(cmd_metric)
    return [
        local_canonical_file(output_eval_path),
        local_canonical_file(output_calc_path),
        local_canonical_file(output_metric_path)
    ]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('n_trees', [100, 500])
@pytest.mark.parametrize('target_count', [1, 2, 3])
def test_multiregression_target_permutation_invariance(boosting_type, n_trees, target_count):
    """Check MultiRMSE training is invariant to permuting the target columns.

    Trains one model per permutation of the target columns of a synthetic
    dataset; after undoing each permutation on the predictions, all models
    must produce (numerically) identical evals.
    """
    np.random.seed(42)
    X_COUNT = 200
    X_DIM = 5
    x = np.random.randn(X_COUNT, X_DIM)
    y = np.stack([
        np.sin(np.sum([np.pi * x[:, j] * (1 if np.random.randn() > 0 else -1) for j in range(X_DIM)], axis=0))
        for i in range(target_count)
    ], axis=1)
    test_size = X_COUNT // 2
    x_test, y_test = x[:test_size], y[:test_size]
    x_train, y_train = x[test_size:], y[test_size:]
    train_file = yatest.common.test_output_path('train')
    test_file = yatest.common.test_output_path('test')
    get_eval_path = lambda i: yatest.common.test_output_path('test_{}.eval'.format(i))
    get_model_path = lambda i: yatest.common.test_output_path('model_{}.bin'.format(i))
    get_cd_path = lambda i: yatest.common.test_output_path('cd_{}'.format(i))
    with open(get_cd_path(target_count), 'w') as cd:
        cd.write(''.join(('{}\tTarget\tm\n'.format(i) for i in range(target_count))))
    evals = []
    for perm in permutations(range(target_count)):
        # Fixed: range() is immutable on Python 3 — 'inv_perm[j] = i' raised
        # TypeError; materialize the range as a list first.
        inv_perm = list(range(target_count))
        for i, j in enumerate(perm):
            inv_perm[j] = i
        np.savetxt(train_file, np.hstack([y_train[:, perm], x_train]), delimiter='\t')
        np.savetxt(test_file, np.hstack([y_test[:, perm], x_test]), delimiter='\t')
        fit_cmd = (
            '--loss-function', 'MultiRMSE',
            '--boosting-type', boosting_type,
            '-f', train_file,
            '-t', test_file,
            '--column-description', get_cd_path(target_count),
            '-i', '{}'.format(n_trees),
            '-T', '4',
            '-m', get_model_path(target_count),
            '--eval-file', get_eval_path(target_count),
            '--use-best-model', 'false',
        )
        execute_catboost_fit('CPU', fit_cmd)
        # Renamed from 'eval' to avoid shadowing the builtin.
        predictions = np.loadtxt(get_eval_path(target_count), delimiter='\t', skiprows=1, usecols=range(1, target_count + 1)).reshape((-1, target_count))
        # Undo the target permutation so all runs are directly comparable.
        evals.append(predictions[:, inv_perm])
    for eva in evals:
        assert np.allclose(eva, evals[0])
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('n_trees', [10, 100, 1000])
@pytest.mark.parametrize('target_count', [1, 2, 3])
def test_compare_multiregression_with_regression(boosting_type, n_trees, target_count):
    """Check one MultiRMSE model is not much worse than per-target RMSE models.

    Trains a single MultiRMSE model and target_count independent RMSE models
    on the same synthetic data and asserts the MultiRMSE loss is within
    ERR_PERC of the combined per-target RMSE loss.
    """
    np.random.seed(42)
    ERR_PERC = 0.1  # allowed relative degradation of MultiRMSE vs. RMSE
    X_COUNT = 200
    X_DIM = 5
    x = np.random.randn(X_COUNT, X_DIM)
    y = np.stack([
        np.sin(np.sum([np.pi * x[:, j] * (1 if np.random.randn() > 0 else -1) for j in range(X_DIM)], axis=0))
        for i in range(target_count)
    ], axis=1)
    test_size = X_COUNT // 2
    x_test, y_test = x[:test_size], y[:test_size]
    x_train, y_train = x[test_size:], y[test_size:]
    train_file = yatest.common.test_output_path('train')
    test_file = yatest.common.test_output_path('test')
    np.savetxt(train_file, np.hstack([y_train, x_train]), delimiter='\t')
    np.savetxt(test_file, np.hstack([y_test, x_test]), delimiter='\t')
    get_eval_path = lambda i: yatest.common.test_output_path('test_{}.eval'.format(i))
    get_model_path = lambda i: yatest.common.test_output_path('model_{}.bin'.format(i))
    get_cd_path = lambda i: yatest.common.test_output_path('cd_{}'.format(i))
    with open(get_cd_path(target_count), 'w') as cd:
        cd.write(''.join(('{}\tTarget\tm\n'.format(i) for i in range(target_count))))
    fit_cmd = (
        '--loss-function', 'MultiRMSE',
        '--boosting-type', boosting_type,
        '-f', train_file,
        '-t', test_file,
        '--column-description', get_cd_path(target_count),
        '-i', '{}'.format(n_trees),
        '-T', '4',
        '-m', get_model_path(target_count),
        '--eval-file', get_eval_path(target_count),
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', fit_cmd)
    for i in range(target_count):
        # Per-target cd: target i is Target, all other targets are Auxiliary.
        with open(get_cd_path(i), 'w') as cd:
            cd.write(''.join((('{}\tTarget\n'.format(j) if j == i else '{}\tAuxiliary\n'.format(j)) for j in range(target_count))))
        rmse_fit_cmd = (
            '--loss-function', 'RMSE',
            '--boosting-type', boosting_type,
            '-f', train_file,
            '-t', test_file,
            '--column-description', get_cd_path(i),
            '-i', '{}'.format(n_trees),
            '-T', '4',
            '-m', get_model_path(i),
            '--eval-file', get_eval_path(i),
            '--use-best-model', 'false',
        )
        execute_catboost_fit('CPU', rmse_fit_cmd)
    # Fixed: for target_count == 1 np.loadtxt returns a 1-D array, and
    # (multirmse_eval - y_test) would broadcast (n,) - (n, 1) to (n, n),
    # corrupting the loss; reshape to (n, target_count) explicitly.
    multirmse_eval = np.loadtxt(
        get_eval_path(target_count), delimiter='\t', skiprows=1, usecols=range(1, target_count + 1)
    ).reshape((-1, target_count))
    rmse_eval = np.stack([
        np.loadtxt(get_eval_path(i), delimiter='\t', skiprows=1, usecols=1)
        for i in range(target_count)
    ], axis=1)
    multi_rmse_loss = np.mean((multirmse_eval - y_test)**2)
    rmse_loss = np.mean((rmse_eval - y_test)**2)
    assert rmse_loss.shape == multi_rmse_loss.shape
    assert multi_rmse_loss < rmse_loss * (1 + ERR_PERC)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('n_trees', [100, 500])
def test_multiregression_single(boosting_type, n_trees):
    """Train MultiRMSE with a single-target cd file and canonize fit eval,
    calc output, and eval-metrics output.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    output_calc_path = yatest.common.test_output_path('test.calc')
    output_metric_path = yatest.common.test_output_path('test.metric')
    cmd_fit = (
        '--loss-function', 'MultiRMSE',
        '--boosting-type', boosting_type,
        '-f', data_file('multiregression', 'train'),
        '-t', data_file('multiregression', 'test'),
        '--column-description', data_file('multiregression', 'train_single.cd'),
        '-i', '{}'.format(n_trees),
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd_fit)
    cmd_calc = (
        CATBOOST_PATH,
        'calc',
        '--column-description', data_file('multiregression', 'train_single.cd'),
        '-T', '4',
        '-m', output_model_path,
        '--input-path', data_file('multiregression', 'test'),
        '-o', output_calc_path
    )
    yatest.common.execute(cmd_calc)
    cmd_metric = (
        CATBOOST_PATH,
        'eval-metrics',
        '--column-description', data_file('multiregression', 'train_single.cd'),
        '-T', '4',
        '-m', output_model_path,
        '--input-path', data_file('multiregression', 'test'),
        '-o', output_metric_path,
        '--metrics', 'MultiRMSE'
    )
    yatest.common.execute(cmd_metric)
    return [
        local_canonical_file(output_eval_path),
        local_canonical_file(output_calc_path),
        local_canonical_file(output_metric_path)
    ]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('n_trees', [100, 500])
def test_multiregression_with_cat_features(boosting_type, n_trees):
    """Smoke test: MultiRMSE training must succeed on a pool whose cd file
    declares categorical features (no output is canonized)."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = (
        '--loss-function', 'MultiRMSE',
        '--boosting-type', boosting_type,
        '-f', data_file('multiregression', 'train'),
        '-t', data_file('multiregression', 'test'),
        '--column-description', data_file('multiregression', 'train_with_cat_features.cd'),
        '-i', str(n_trees),
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', fit_args)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_queryrmse(boosting_type, grow_policy, dev_score_calc_obj_block_size):
    """Train QueryRMSE on the querywise pool and canonize the eval file."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--use-best-model', 'false',
        '--grow-policy', grow_policy
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_queryrmse_newton_gradient(boosting_type, dev_score_calc_obj_block_size):
    """For QueryRMSE, Newton and Gradient leaf estimation must produce
    identical eval files.
    """
    newton_eval_path = yatest.common.test_output_path('newton.eval')
    gradient_eval_path = yatest.common.test_output_path('gradient.eval')
    def run_catboost(eval_path, leaf_estimation_method):
        # Run one training with the given leaf estimation method.
        cmd = [
            '--loss-function', 'QueryRMSE',
            '-f', data_file('querywise', 'train'),
            '-t', data_file('querywise', 'test'),
            '--column-description', data_file('querywise', 'train.cd'),
            '--boosting-type', boosting_type,
            '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
            '--leaf-estimation-method', leaf_estimation_method,
            '-i', '20',
            '-T', '4',
            '--eval-file', eval_path,
            '--use-best-model', 'false',
        ]
        execute_catboost_fit('CPU', cmd)
    run_catboost(newton_eval_path, 'Newton')
    run_catboost(gradient_eval_path, 'Gradient')
    assert filecmp.cmp(newton_eval_path, gradient_eval_path)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_pool_with_QueryId(boosting_type, grow_policy):
    """Train QueryRMSE using a cd file that declares a QueryId column and
    canonize the eval file.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd.query_id'),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--use-best-model', 'false',
        '--grow-policy', grow_policy
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_rmse_on_qwise_pool(boosting_type, grow_policy, dev_score_calc_obj_block_size):
    """Train a plain RMSE model on the querywise (grouped) pool and canonize
    the eval file.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--loss-function', 'RMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--use-best-model', 'false',
        '--grow-policy', grow_policy
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_averagegain(boosting_type):
    """Train QueryRMSE with the AverageGain custom metric (computed on the
    learn set too via hints) and canonize both error logs.
    """
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    cmd = (
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '--custom-metric', 'AverageGain:top=2;hints=skip_train~false',
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_queryauc(boosting_type):
    """Train QueryRMSE with the QueryAUC custom metric and canonize both
    error logs.
    """
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    cmd = (
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '--custom-metric', 'QueryAUC:hints=skip_train~false',
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_queryaverage(boosting_type):
    """Train QueryRMSE with the QueryAverage custom metric and canonize both
    error logs.
    """
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    cmd = (
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '--custom-metric', 'QueryAverage:top=2;hints=skip_train~false',
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
@pytest.mark.parametrize('sigma', ['sigma=' + str(sigma) for sigma in [0.01, 1, 10]])
@pytest.mark.parametrize('num_estimations', ['num_estimations=' + str(n_estim) for n_estim in [1, 100]])
def test_stochastic_filter(sigma, num_estimations):
    """Train with the StochasticFilter loss on a synthetic grouped pool and
    check the result is independent of the thread count (1 vs. 4 threads);
    canonize the 4-thread error logs.
    """
    model_path = yatest.common.test_output_path('model.bin')
    cd_path = yatest.common.test_output_path('pool.cd')
    train_path = yatest.common.test_output_path('train.txt')
    test_path = yatest.common.test_output_path('test.txt')
    prng = np.random.RandomState(seed=0)
    n_samples_by_query = 20
    n_features = 10
    n_queries = 50
    n_samples = n_samples_by_query * n_queries
    # Synthetic linear target over uniform features.
    features = prng.uniform(0, 1, size=(n_samples, n_features))
    weights = prng.uniform(0, 1, size=n_features)
    labels = np.dot(features, weights)
    # NOTE(review): dividing/modding by n_queries (not n_samples_by_query)
    # yields 20 groups of 50 objects — presumably acceptable test data, but
    # inconsistent with the variable names; confirm intent before changing.
    query_ids = np.arange(0, n_samples) // n_queries
    money = (n_queries - np.arange(0, n_samples) % n_queries) * 10
    labels = labels.reshape((n_samples, 1))
    query_ids = query_ids.reshape((n_samples, 1))
    money = money.reshape((n_samples, 1))
    # Column layout matches the cd file written below: Target, GroupId, then features.
    features = np.hstack((labels, query_ids, money, features))
    n_learn = int(0.7 * n_samples)
    learn = features[:n_learn, :]
    test = features[n_learn:, :]
    np.savetxt(train_path, learn, fmt='%.5f', delimiter='\t')
    np.savetxt(test_path, test, fmt='%.5f', delimiter='\t')
    np.savetxt(cd_path, [[0, 'Target'], [1, 'GroupId']], fmt='%s', delimiter='\t')
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    learn_error_one_thread_path = yatest.common.test_output_path('learn_error_one_thread.tsv')
    test_error_one_thread_path = yatest.common.test_output_path('test_error_one_thread.tsv')
    loss_description = 'StochasticFilter:' + sigma + ';' + num_estimations
    cmd = [
        '--loss-function', loss_description,
        '--leaf-estimation-backtracking', 'No',
        '-f', train_path,
        '-t', test_path,
        '--column-description', cd_path,
        '--boosting-type', 'Plain',
        '-i', '20',
        '-m', model_path,
        '--use-best-model', 'false',
    ]
    cmd_one_thread = cmd + [
        '--learn-err-log', learn_error_one_thread_path,
        '--test-err-log', test_error_one_thread_path,
        '-T', '1'
    ]
    cmd_four_thread = cmd + [
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '-T', '4'
    ]
    execute_catboost_fit('CPU', cmd_one_thread)
    execute_catboost_fit('CPU', cmd_four_thread)
    compare_evals(learn_error_one_thread_path, learn_error_path)
    compare_evals(test_error_one_thread_path, test_error_path)
    return [local_canonical_file(learn_error_path),
            local_canonical_file(test_error_path)]
@pytest.mark.parametrize('metric', ['DCG', 'NDCG'])
@pytest.mark.parametrize('top', [-1, 1, 10])
@pytest.mark.parametrize('dcg_type', ['Base', 'Exp'])
@pytest.mark.parametrize('denominator', ['Position', 'LogPosition'])
def test_stochastic_rank(metric, top, dcg_type, denominator):
    """Train with the StochasticRank loss over (N)DCG variants and canonize
    both error logs.
    """
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    loss = 'StochasticRank:metric={};top={};type={};denominator={};hints=skip_train~false'.format(
        metric, top, dcg_type, denominator)
    cmd = (
        '--loss-function', loss,
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--cd', data_file('querywise', 'train.cd.query_id'),
        '-i', '10',
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(learn_error_path),
            local_canonical_file(test_error_path)]
@pytest.mark.parametrize('top', [-1, 1, 10])
@pytest.mark.parametrize('decay', [1.0, 0.6, 0.0])
def test_stochastic_rank_pfound(top, decay):
    """Train with StochasticRank optimizing PFound and canonize the metric logs."""
    learn_log = yatest.common.test_output_path('learn_error.tsv')
    test_log = yatest.common.test_output_path('test_error.tsv')
    loss_spec = 'StochasticRank:metric=PFound;top={};decay={};hints=skip_train~false'.format(top, decay)
    yatest.common.execute([
        CATBOOST_PATH,
        'fit',
        '--loss-function', loss_spec,
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--cd', data_file('querywise', 'train.cd.query_id'),
        '-i', '10',
        '--learn-err-log', learn_log,
        '--test-err-log', test_log,
    ])
    return [local_canonical_file(learn_log),
            local_canonical_file(test_log)]
@pytest.mark.parametrize('top', [-1, 1, 10])
@pytest.mark.parametrize('decay', [1.0, 0.6, 0.0])
def test_stochastic_rank_pfound_with_many_ones(top, decay):
    """StochasticRank/PFound on a pool where roughly 25% of targets are rewritten to 1.0.

    Exercises tie handling in PFound when many documents share the maximal
    relevance value; the learn-error log is canonized.
    """
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    loss = 'StochasticRank:metric=PFound;top={};decay={};hints=skip_train~false'.format(top, decay)
    # Fixed seed: the random row selection below must be reproducible so the
    # canonical learn-error log stays stable across runs.
    np.random.seed(0)
    train_with_ones = yatest.common.test_output_path('train_with_ones')
    # Index of the target column in the tab-separated querywise train file.
    TARGET_COLUMN = 2
    with open(data_file('querywise', 'train')) as fin:
        with open(train_with_ones, 'w') as fout:
            for line in fin.readlines():
                if np.random.random() < 0.25:
                    parts = line.split('\t')
                    parts[TARGET_COLUMN] = '1.0'
                    line = '\t'.join(parts)
                fout.write(line)
    cmd = (
        CATBOOST_PATH,
        'fit',
        '--loss-function', loss,
        '-f', train_with_ones,
        '--cd', data_file('querywise', 'train.cd.query_id'),
        '-i', '10',
        '--learn-err-log', learn_error_path
    )
    yatest.common.execute(cmd)
    return [local_canonical_file(learn_error_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('top', [2, 100])
def test_averagegain_with_query_weights(boosting_type, top):
    """Track the AverageGain custom metric on a pool with group weights; canonize the logs."""
    learn_log = yatest.common.test_output_path('learn_error.tsv')
    test_log = yatest.common.test_output_path('test_error.tsv')
    fit_args = [
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd.group_weight'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '--custom-metric', 'AverageGain:top={};hints=skip_train~false'.format(top),
        '--learn-err-log', learn_log,
        '--test-err-log', test_log,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_log), local_canonical_file(test_log)]
@pytest.mark.parametrize('top_size', [2, 5, 10, -1])
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('cd_file', ['train.cd', 'train.cd.subgroup_id'])
def test_pfound(top_size, boosting_type, cd_file):
    """Track the PFound custom metric (with and without subgroup ids); canonize the logs."""
    learn_log = yatest.common.test_output_path('learn_error.tsv')
    test_log = yatest.common.test_output_path('test_error.tsv')
    execute_catboost_fit('CPU', [
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', cd_file),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '--custom-metric', 'PFound:top={};hints=skip_train~false'.format(top_size),
        '--learn-err-log', learn_log,
        '--test-err-log', test_log,
        '--use-best-model', 'false',
    ])
    return [local_canonical_file(learn_log), local_canonical_file(test_log)]
def test_params_ordering():
    """Parameter order inside a --custom-metric spec must be preserved in the error-log header."""
    learn_log = yatest.common.test_output_path('learn_error.tsv')
    learn_log_reversed = yatest.common.test_output_path('learn_error_reversed.tsv')
    ignored_log = yatest.common.test_output_path('ignored.tsv')

    def make_cmd(custom_metric, err_log):
        # Identical training setup; only the metric spec and learn log differ.
        return (
            '--loss-function', 'QueryRMSE',
            '-f', data_file('querywise', 'train'),
            '-t', data_file('querywise', 'test'),
            '--column-description', data_file('querywise', 'train.cd'),
            '--boosting-type', 'Ordered',
            '-i', '20',
            '-T', '4',
            '--custom-metric', custom_metric,
            '--learn-err-log', err_log,
            '--test-err-log', ignored_log,
            '--use-best-model', 'false',
        )

    execute_catboost_fit('CPU', make_cmd("PFound:top=1;decay=0.6;hints=skip_train~false", learn_log))
    execute_catboost_fit('CPU', make_cmd("PFound:decay=0.6;top=1;hints=skip_train~false", learn_log_reversed))
    with open(learn_log) as log:
        assert 'PFound:top=1;decay=0.6' in log.read()
    with open(learn_log_reversed) as log:
        assert 'PFound:decay=0.6;top=1' in log.read()
def test_recall_at_k():
    """Track the RecallAt:top=3 custom metric during QueryRMSE training; canonize the metric logs."""
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    cmd = (
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', 'Ordered',
        '-i', '10',
        '-T', '4',
        '--custom-metric', 'RecallAt:top=3',
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
def test_precision_at_k():
    """Track the PrecisionAt:top=3 custom metric during QueryRMSE training; canonize the metric logs."""
    learn_log = yatest.common.test_output_path('learn_error.tsv')
    test_log = yatest.common.test_output_path('test_error.tsv')
    fit_args = [
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', 'Ordered',
        '-i', '10',
        '-T', '4',
        '--custom-metric', 'PrecisionAt:top=3',
        '--learn-err-log', learn_log,
        '--test-err-log', test_log,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_log), local_canonical_file(test_log)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_mapk(boosting_type):
    """Track the MAP:top=10 custom metric during QueryRMSE training; canonize the metric logs."""
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    cmd = (
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '--custom-metric', 'MAP:top={}'.format(10),
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('ndcg_power_mode', ['Base', 'Exp'])
@pytest.mark.parametrize('metric_type', ['DCG', 'NDCG'])
@pytest.mark.parametrize('ndcg_denominator', ['None', 'LogPosition', 'Position'])
def test_ndcg(boosting_type, ndcg_power_mode, metric_type, ndcg_denominator):
    """Track DCG/NDCG custom metrics in every power/denominator variant; canonize the logs."""
    learn_log = yatest.common.test_output_path('learn_error.tsv')
    test_log = yatest.common.test_output_path('test_error.tsv')
    # 'None' means "omit the denominator parameter" so the metric default is exercised.
    denom_suffix = '' if ndcg_denominator == 'None' else ';denominator={}'.format(ndcg_denominator)
    metric_spec = '{}:top={};type={};hints=skip_train~false{}'.format(
        metric_type, 10, ndcg_power_mode, denom_suffix)
    fit_args = [
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '--custom-metric', metric_spec,
        '--learn-err-log', learn_log,
        '--test-err-log', test_log,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_log), local_canonical_file(test_log)]
def test_queryrmse_approx_on_full_history():
    """QueryRMSE with --approx-on-full-history under Ordered boosting; canonize the test eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--approx-on-full-history',
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--use-best-model', 'false',
        '--boosting-type', 'Ordered',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_pairlogit(boosting_type, dev_score_calc_obj_block_size):
    """Train PairLogit with explicit pair files; canonize metric logs and the test eval."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    test_log = yatest.common.test_output_path('test_error.tsv')
    learn_log = yatest.common.test_output_path('learn_error.tsv')

    def fit(out_eval, learn_pairs_name):
        # Runs one fit with the given learn-pairs file and eval destination.
        execute_catboost_fit('CPU', [
            '--loss-function', 'PairLogit',
            '--eval-metric', 'PairAccuracy',
            '-f', data_file('querywise', 'train'),
            '-t', data_file('querywise', 'test'),
            '--column-description', data_file('querywise', 'train.cd'),
            '--learn-pairs', data_file('querywise', learn_pairs_name),
            '--test-pairs', data_file('querywise', 'test.pairs'),
            '--boosting-type', boosting_type,
            '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
            '--ctr', 'Borders,Counter',
            '--l2-leaf-reg', '0',
            '-i', '20',
            '-T', '4',
            '-m', model_path,
            '--eval-file', out_eval,
            '--learn-err-log', learn_log,
            '--test-err-log', test_log,
            '--use-best-model', 'false',
        ])

    fit(eval_path, 'train.pairs')
    return [local_canonical_file(learn_log),
            local_canonical_file(test_log),
            local_canonical_file(eval_path)]
def test_pairs_generation():
    """PairLogit without explicit pair files: pairs are generated internally from targets.

    Canonizes the metric logs and the test eval.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')

    def run_catboost(eval_path):
        # Note: no --learn-pairs / --test-pairs here, unlike test_pairlogit.
        cmd = [
            '--loss-function', 'PairLogit',
            '--eval-metric', 'PairAccuracy',
            '-f', data_file('querywise', 'train'),
            '-t', data_file('querywise', 'test'),
            '--column-description', data_file('querywise', 'train.cd'),
            '--ctr', 'Borders,Counter',
            '--l2-leaf-reg', '0',
            '-i', '20',
            '-T', '4',
            '-m', output_model_path,
            '--eval-file', eval_path,
            '--learn-err-log', learn_error_path,
            '--test-err-log', test_error_path,
            '--use-best-model', 'false',
        ]
        execute_catboost_fit('CPU', cmd)

    run_catboost(output_eval_path)
    return [local_canonical_file(learn_error_path),
            local_canonical_file(test_error_path),
            local_canonical_file(output_eval_path)]
def test_pairs_generation_with_max_pairs():
    """PairLogit with auto-generated pairs capped via max_pairs=30.

    Canonizes the metric logs, the test eval, and the feature-strength output.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    output_fstr_path = yatest.common.test_output_path('fstr.tsv')

    def run_catboost(eval_path):
        cmd = [
            '--loss-function', 'PairLogit:max_pairs=30',
            '--eval-metric', 'PairLogit:max_pairs=30',
            '-f', data_file('querywise', 'train'),
            '-t', data_file('querywise', 'test'),
            '--column-description', data_file('querywise', 'train.cd'),
            '--ctr', 'Borders,Counter',
            '--l2-leaf-reg', '0',
            '-i', '20',
            '-T', '4',
            '-m', output_model_path,
            '--eval-file', eval_path,
            '--learn-err-log', learn_error_path,
            '--test-err-log', test_error_path,
            '--use-best-model', 'false',
            '--fstr-file', output_fstr_path,
        ]
        execute_catboost_fit('CPU', cmd)

    run_catboost(output_eval_path)
    return [local_canonical_file(learn_error_path),
            local_canonical_file(test_error_path),
            local_canonical_file(output_eval_path),
            local_canonical_file(output_fstr_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_pairlogit_no_target(boosting_type):
    """PairLogit must train from pair files alone when the pool has no target column."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'PairLogit',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd.no_target'),
        '--learn-pairs', data_file('querywise', 'train.pairs'),
        '--test-pairs', data_file('querywise', 'test.pairs'),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_path)]
def test_pairlogit_approx_on_full_history():
    """PairLogit with --approx-on-full-history under Ordered boosting; canonize the test eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--loss-function', 'PairLogit',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--learn-pairs', data_file('querywise', 'train.pairs'),
        '--test-pairs', data_file('querywise', 'test.pairs'),
        '--approx-on-full-history',
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--use-best-model', 'false',
        '--boosting-type', 'Ordered',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
@pytest.mark.parametrize('pairs_file', ['train.pairs', 'train.pairs.weighted'])
def test_pairlogit_pairwise(pairs_file, dev_score_calc_obj_block_size):
    """Train PairLogitPairwise with plain and weighted pair files; canonize the test eval.

    Bug fix: the parametrized ``pairs_file`` was previously ignored — the
    command always passed the hard-coded 'train.pairs', so both parameter
    values ran an identical test.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--loss-function', 'PairLogitPairwise',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        # Use the parametrized pairs file so the weighted variant is actually exercised.
        '--learn-pairs', data_file('querywise', pairs_file),
        '--test-pairs', data_file('querywise', 'test.pairs'),
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_yetirank(boosting_type, dev_score_calc_obj_block_size):
    """Train the YetiRank ranking loss on the querywise pool; canonize the test eval."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    execute_catboost_fit('CPU', [
        '--loss-function', 'YetiRank',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
    ])
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('loss_function', ['QueryRMSE', 'PairLogit', 'YetiRank', 'PairLogitPairwise', 'YetiRankPairwise'])
def test_pairwise_reproducibility(loss_function):
    """Training must be deterministic w.r.t. thread count: 1- and 4-thread evals match exactly."""

    def fit(thread_count, model_path, eval_path):
        execute_catboost_fit('CPU', [
            '--use-best-model', 'false',
            '--loss-function', loss_function,
            '-f', data_file('querywise', 'train'),
            '-t', data_file('querywise', 'test'),
            '--learn-pairs', data_file('querywise', 'train.pairs'),
            '--test-pairs', data_file('querywise', 'test.pairs'),
            '--cd', data_file('querywise', 'train.cd'),
            '-i', '5',
            '-T', str(thread_count),
            '-m', model_path,
            '--eval-file', eval_path,
        ])

    eval_single = yatest.common.test_output_path('test_1.eval')
    fit(1, yatest.common.test_output_path('model_1.bin'), eval_single)
    eval_multi = yatest.common.test_output_path('test_4.eval')
    fit(4, yatest.common.test_output_path('model_4.bin'), eval_multi)
    assert filecmp.cmp(eval_single, eval_multi)
def test_pairs_vs_grouped_pairs():
    """Plain dsv pairs and dsv-grouped pairs describing the same data must give identical evals."""
    output_model_path = yatest.common.test_output_path('model.bin')

    def run_catboost(learn_pairs_path_with_scheme, test_pairs_path_with_scheme, eval_path):
        # Pair paths may carry a scheme prefix (e.g. 'dsv-grouped://'), hence the name.
        cmd = [
            '--loss-function', 'PairLogit',
            '--eval-metric', 'PairAccuracy',
            '-f', data_file('querywise', 'train'),
            '-t', data_file('querywise', 'test'),
            '--column-description', data_file('querywise', 'train.cd'),
            '--learn-pairs', learn_pairs_path_with_scheme,
            '--test-pairs', test_pairs_path_with_scheme,
            '-i', '20',
            '-T', '4',
            '-m', output_model_path,
            '--eval-file', eval_path,
            '--use-best-model', 'false',
        ]
        execute_catboost_fit('CPU', cmd)

    output_eval_path_ungrouped = yatest.common.test_output_path('test_eval_ungrouped')
    run_catboost(
        data_file('querywise', 'train.pairs'),
        data_file('querywise', 'test.pairs'),
        output_eval_path_ungrouped
    )
    output_eval_path_grouped = yatest.common.test_output_path('test_eval_grouped')
    run_catboost(
        'dsv-grouped://' + data_file('querywise', 'train.grouped_pairs'),
        'dsv-grouped://' + data_file('querywise', 'test.grouped_pairs'),
        output_eval_path_grouped
    )
    # Both representations of the same pairs must train byte-identical evals.
    assert filecmp.cmp(output_eval_path_ungrouped, output_eval_path_grouped)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_yetirank_with_params(boosting_type, dev_score_calc_obj_block_size):
    """YetiRank with explicit loss parameters (permutations, decay); canonize the test eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--loss-function', 'YetiRank:permutations=5;decay=0.9',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_yetirank_pairwise(dev_score_calc_obj_block_size):
    """Train the YetiRankPairwise loss on the querywise pool; canonize the test eval."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    execute_catboost_fit('CPU', [
        '--loss-function', 'YetiRankPairwise',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
    ])
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('loss_function', ('YetiRank', 'YetiRankPairwise'))
def test_yetirank_default_metric(loss_function):
    """Train YetiRank/YetiRankPairwise without an explicit eval metric; canonize the test log."""
    output_model_path = yatest.common.test_output_path('model.bin')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    cmd = (
        '--loss-function', loss_function,
        '--has-header',
        '-f', data_file('black_friday', 'train'),
        '-t', data_file('black_friday', 'test'),
        '--column-description', data_file('black_friday', 'cd'),
        '--model-file', output_model_path,
        '--boosting-type', 'Plain',
        '-i', '5',
        '-T', '4',
        '--test-err-log', test_error_path,
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(test_error_path)]
@pytest.mark.parametrize('eval_metric', ['MRR', 'MRR:top=1', 'ERR', 'ERR:top=1'])
def test_reciprocal_rank_metrics(eval_metric):
    """Evaluate MRR/ERR (with and without a top cutoff) while training YetiRank; canonize the logs."""
    learn_log = yatest.common.test_output_path('learn_error.tsv')
    test_log = yatest.common.test_output_path('test_error.tsv')
    execute_catboost_fit('CPU', [
        '--loss-function', 'YetiRank',
        '--eval-metric', eval_metric,
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd.query_id'),
        '--boosting-type', 'Plain',
        '-i', '20',
        '-T', '4',
        '--learn-err-log', learn_log,
        '--test-err-log', test_log,
    ])
    return [local_canonical_file(learn_log), local_canonical_file(test_log)]
# --nan-mode values exercised by test_nan_mode ('Forbidden' is covered separately below).
NAN_MODE = ['Min', 'Max']
@pytest.mark.parametrize('nan_mode', NAN_MODE)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_nan_mode(nan_mode, boosting_type):
    """Train on a pool containing NaN features under each --nan-mode.

    Also verifies that standalone model application ('calc') reproduces the
    eval file written during training; the eval is canonized.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '-f', data_file('adult_nan', 'train_small'),
        '-t', data_file('adult_nan', 'test_small'),
        '--column-description', data_file('adult_nan', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--nan-mode', nan_mode,
    )
    execute_catboost_fit('CPU', cmd)
    formula_predict_path = yatest.common.test_output_path('predict_test.eval')
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult_nan', 'test_small'),
        '--column-description', data_file('adult_nan', 'train.cd'),
        '-m', output_model_path,
        '--output-path', formula_predict_path,
        '--prediction-type', 'RawFormulaVal'
    )
    yatest.common.execute(calc_cmd)
    # Applying the saved model must reproduce the training-time eval.
    assert (compare_evals(output_eval_path, formula_predict_path))
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('border_count', [64, 255, 350, 1000, 2500])
def test_different_border_count(border_count):
    """Train with various quantization border counts; fit and calc predictions must agree."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    train_path = data_file('querywise', 'train')
    test_path = data_file('querywise', 'test')
    cd_path = data_file('querywise', 'train.cd')
    execute_catboost_fit('CPU', [
        '--use-best-model', 'false',
        '-f', train_path,
        '-t', test_path,
        '--column-description', cd_path,
        '-i', '20',
        '-T', '4',
        '-x', str(border_count),
        '-m', model_path,
        '--eval-file', eval_path,
    ])
    calc_eval_path = yatest.common.test_output_path('predict_test.eval')
    yatest.common.execute([
        CATBOOST_PATH,
        'calc',
        '--input-path', test_path,
        '--column-description', cd_path,
        '-m', model_path,
        '--output-path', calc_eval_path,
        '--prediction-type', 'RawFormulaVal'
    ])
    # Standalone application must reproduce the eval written during training.
    assert compare_evals(eval_path, calc_eval_path)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_nan_mode_forbidden(boosting_type):
    """--nan-mode Forbidden must train successfully on a pool without NaNs; canonize the eval."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = [
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--nan-mode', 'Forbidden',
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_overfit_detector_iter(boosting_type, grow_policy):
    """Overfitting detector in Iter mode stops a long (2000-iteration) run early; canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-i', '2000',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '-x', '1',
        '-n', '8',
        '-w', '0.5',
        '--rsm', '1',
        '--od-type', 'Iter',
        '--od-wait', '2',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_overfit_detector_inc_to_dec(boosting_type, grow_policy):
    """Overfitting detector in IncToDec mode (od-pval + od-wait) stops early; canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-i', '2000',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '-x', '1',
        '-n', '8',
        '-w', '0.5',
        '--rsm', '1',
        '--od-pval', '0.5',
        '--od-type', 'IncToDec',
        '--od-wait', '2',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize('overfitting_detector_type', OVERFITTING_DETECTOR_TYPE)
def test_overfit_detector_with_resume_from_snapshot(boosting_type, grow_policy, overfitting_detector_type):
    """The overfitting detector must keep working when training resumes from a snapshot."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    snapshot_path = yatest.common.test_output_path('snapshot')
    base_args = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '-x', '1',
        '-n', '8',
        '-w', '0.5',
        '--rsm', '1',
        '--leaf-estimation-iterations', '10',
        '--max-ctr-complexity', '4',
        '--snapshot-file', snapshot_path,
        '--od-type', overfitting_detector_type
    )
    # IncToDec needs a wait period and a p-value; Iter only the wait period.
    if overfitting_detector_type == 'IncToDec':
        base_args += ('--od-wait', '2', '--od-pval', '0.5')
    elif overfitting_detector_type == 'Iter':
        base_args += ('--od-wait', '2')
    # First a short run that writes the snapshot, then a long run resuming from it.
    execute_catboost_fit('CPU', base_args + ('-i', '10'))
    execute_catboost_fit('CPU', base_args + ('-i', '2000'))
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('leaf_estimation_method', LEAF_ESTIMATION_METHOD)
def test_per_object_approx_on_full_history(leaf_estimation_method):
    """Ordered boosting with --approx-on-full-history across leaf-estimation methods; canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', 'Ordered',
        '--approx-on-full-history',
        '-i', '100',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '-x', '1',
        '-w', '0.5',
        '--od-pval', '0.99',
        '--rsm', '1',
        '--leaf-estimation-method', leaf_estimation_method,
        '--leaf-estimation-iterations', '20',
        '--use-best-model', 'false')
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_shrink_model(boosting_type, grow_policy):
    """With --use-best-model true the model is shrunk to its best iteration; canonize the eval."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-i', '100',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '-x', '1',
        '-n', '8',
        '-w', '1',
        '--od-pval', '0.99',
        '--rsm', '1',
        '--use-best-model', 'true'
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('leaf_estimation_method', LEAF_ESTIMATION_METHOD)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_multi_leaf_estimation_method(leaf_estimation_method, boosting_type, grow_policy, dev_score_calc_obj_block_size):
    """MultiClass training across leaf-estimation methods and boosting configurations.

    Verifies that standalone application ('calc') reproduces the training-time
    eval; the eval is canonized.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--loss-function', 'MultiClass',
        '-f', data_file('cloudness_small', 'train_small'),
        '-t', data_file('cloudness_small', 'test_small'),
        '--column-description', data_file('cloudness_small', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--leaf-estimation-method', leaf_estimation_method,
        '--leaf-estimation-iterations', '2',
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)
    formula_predict_path = yatest.common.test_output_path('predict_test.eval')
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('cloudness_small', 'test_small'),
        '--column-description', data_file('cloudness_small', 'train.cd'),
        '-m', output_model_path,
        '--output-path', formula_predict_path,
        '--prediction-type', 'RawFormulaVal'
    )
    yatest.common.execute(calc_cmd)
    # Applying the saved model must reproduce the training-time eval.
    assert(compare_evals(output_eval_path, formula_predict_path))
    return [local_canonical_file(output_eval_path)]
# Reduced loss-function set for tests where only binary vs. multiclass matters.
LOSS_FUNCTIONS_SHORT = ['Logloss', 'MultiClass']
@pytest.mark.parametrize(
    'loss_function',
    LOSS_FUNCTIONS_SHORT,
    ids=['loss_function=%s' % loss_function for loss_function in LOSS_FUNCTIONS_SHORT]
)
@pytest.mark.parametrize(
    'column_name',
    ['doc_id', 'sample_id'],
    ids=['column_name=doc_id', 'column_name=sample_id']
)
def test_sample_id(loss_function, column_name):
    """Pools with a DocId/SampleId column train and apply consistently.

    NOTE(review): the train/test pool files always come from 'adult_doc_id';
    only the column-description file varies with `column_name` — presumably
    the data files are shared and only the cd differs; verify.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    column_description = data_file('adult_' + column_name, 'train.cd')
    cmd = (
        '--loss-function', loss_function,
        '-f', data_file('adult_doc_id', 'train'),
        '-t', data_file('adult_doc_id', 'test'),
        '--column-description', column_description,
        '--boosting-type', 'Plain',
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)
    formula_predict_path = yatest.common.test_output_path('predict_test.eval')
    cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult_doc_id', 'test'),
        '--column-description', column_description,
        '-m', output_model_path,
        '--output-path', formula_predict_path,
        '--prediction-type', 'RawFormulaVal'
    )
    yatest.common.execute(cmd)
    # Applying the saved model must reproduce the training-time eval.
    assert(compare_evals(output_eval_path, formula_predict_path))
    return [local_canonical_file(output_eval_path)]
# NOTE(review): not referenced by the tests in this vicinity — presumably used elsewhere in the file; verify before removing.
POOLS = ['amazon', 'adult']
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_apply_missing_vals(boosting_type, grow_policy):
    """Apply a model trained on adult to rows with missing values; canonize the predictions."""
    model_path = yatest.common.test_output_path('adult_model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    execute_catboost_fit('CPU', [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_path,
        '--use-best-model', 'false',
    ])
    yatest.common.execute([
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('test_adult_missing_val.tsv'),
        '--column-description', data_file('adult', 'train.cd'),
        '-m', model_path,
        '--output-path', eval_path
    ])
    return local_canonical_file(eval_path)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_crossentropy(boosting_type, dev_score_calc_obj_block_size):
    """Train the CrossEntropy loss on a pool with probabilistic targets; canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--loss-function', 'CrossEntropy',
        '-f', data_file('adult_crossentropy', 'train_proba'),
        '-t', data_file('adult_crossentropy', 'test_proba'),
        '--column-description', data_file('adult_crossentropy', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_permutation_block(boosting_type, dev_score_calc_obj_block_size):
    """Train with a non-default --fold-permutation-block and canonize the
    eval output."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    fit_args = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--fold-permutation-block', '239',
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)

    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_ignored_features(boosting_type):
    """Train while ignoring features by index range (including an
    out-of-range index, which must be tolerated) and canonize the eval."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    fit_args = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_path,
        '-I', '0:1:3:5-7:10000',
        '--eval-file', eval_path,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)

    return [local_canonical_file(eval_path)]
def test_ignored_features_names():
    """Train while ignoring features referenced by name and canonize the
    eval output."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    fit_args = [
        '--loss-function', 'RMSE',
        '--has-header',
        '--learn-set', data_file('black_friday', 'train'),
        '--test-set', data_file('black_friday', 'test'),
        '--column-description', data_file('black_friday', 'cd'),
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '-I', 'Stay_In_Current_City_Years:Product_Category_2:Gender',
    ]
    execute_catboost_fit('CPU', fit_args)

    return [local_canonical_file(eval_path)]
def test_ignored_features_not_read():
    """Check that features ignored via '-I' are not read at all.

    Rewrites the column description so that columns 5 and 7 (presumably the
    features ignored below by '-I 4:6' — feature indices appear to be column
    indices shifted by the target column; TODO confirm) are declared Num while
    the data file still holds categorical strings in them.  Training must
    succeed because ignored features must never be parsed.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    input_cd_path = data_file('adult', 'train.cd')
    cd_path = yatest.common.test_output_path('train.cd')

    with open(input_cd_path, "rt") as f:
        cd_lines = f.readlines()
    with open(cd_path, "wt") as f:
        for cd_line in cd_lines:
            # BUG FIX: str.split() returns a list; the original compared it
            # against tuples ('5', 'Categ') / ('7', 'Categ'), which can never
            # be equal to a list, so the cd file was copied unchanged and the
            # intended scenario was never exercised.
            if cd_line.split() == ['5', 'Categ']:
                cd_line = cd_line.replace('Categ', 'Num')
            if cd_line.split() == ['7', 'Categ']:
                cd_line = cd_line.replace('Categ', 'Num')
            f.write(cd_line)

    cmd = (
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', cd_path,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '-I', '4:6',
        '--eval-file', output_eval_path,
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)
def test_ignored_features_not_read_names():
    """Check that features ignored by name via '-I' are not read at all.

    Rewrites the column description so that the 'Gender' and
    'Product_Category_3' columns are declared Num while the data file still
    holds categorical strings in them.  Training must succeed because the
    features are ignored by name and must never be parsed.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    input_cd_path = data_file('black_friday', 'cd')
    cd_path = yatest.common.test_output_path('cd')

    with open(input_cd_path, "rt") as f:
        cd_lines = f.readlines()
    with open(cd_path, "wt") as f:
        for cd_line in cd_lines:
            # BUG FIX: two defects in the original.  (1) str.split() returns
            # a list but was compared against tuples, which never match.
            # (2) str.replace was called with three string arguments — its
            # third parameter is an integer count, so the call would have
            # raised TypeError had the branch ever been reached; the intent
            # is to flip the column type 'Categ' to 'Num'.
            if cd_line.split() == ['2', 'Categ', 'Gender']:
                cd_line = cd_line.replace('Categ', 'Num')
            if cd_line.split() == ['10', 'Categ', 'Product_Category_3']:
                cd_line = cd_line.replace('Categ', 'Num')
            f.write(cd_line)

    cmd = (
        '--loss-function', 'RMSE',
        '--has-header',
        '--learn-set', data_file('black_friday', 'train'),
        '--test-set', data_file('black_friday', 'test'),
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '-I', 'Gender:Product_Category_3',
    )
    execute_catboost_fit('CPU', cmd)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_baseline(boosting_type, grow_policy):
    """Train with a baseline column and verify that standalone `calc`
    reproduces the training-time eval output."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    fit_args = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult_weight', 'train_weight'),
        '-t', data_file('adult_weight', 'test_weight'),
        '--column-description', data_file('train_adult_baseline.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--use-best-model', 'false',
    ]
    execute_catboost_fit('CPU', fit_args)

    predictions_path = yatest.common.test_output_path('predict_test.eval')
    calc_args = [
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult_weight', 'test_weight'),
        '--column-description', data_file('train_adult_baseline.cd'),
        '-m', model_path,
        '--output-path', predictions_path,
        '--prediction-type', 'RawFormulaVal',
    ]
    yatest.common.execute(calc_args)

    assert compare_evals(eval_path, predictions_path)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
def test_multiclass_baseline(boosting_type, loss_function):
    """Train a multiclass model with per-class baseline columns and verify
    that standalone `calc` reproduces the training-time eval output."""
    class_labels = ['0', '1', '2', '3']
    model_path = yatest.common.test_output_path('model.bin')

    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline'], [3, 'Baseline'], [4, 'Baseline']], fmt='%s', delimiter='\t')

    # Deterministic synthetic pools: same generator seed for reproducibility.
    prng = np.random.RandomState(seed=0)
    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, class_labels, prng=prng), fmt='%s', delimiter='\t')
    test_path = yatest.common.test_output_path('test.txt')
    np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, class_labels, prng=prng), fmt='%s', delimiter='\t')

    eval_path = yatest.common.test_output_path('eval.txt')
    fit_args = [
        '--loss-function', loss_function,
        '-f', train_path,
        '-t', test_path,
        '--column-description', cd_path,
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--use-best-model', 'false',
        '--classes-count', '4',
    ]
    execute_catboost_fit('CPU', fit_args)

    predictions_path = yatest.common.test_output_path('predict_test.eval')
    calc_args = [
        CATBOOST_PATH,
        'calc',
        '--input-path', test_path,
        '--column-description', cd_path,
        '-m', model_path,
        '--output-path', predictions_path,
        '--prediction-type', 'RawFormulaVal',
    ]
    yatest.common.execute(calc_args)

    assert compare_evals(eval_path, predictions_path)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
def test_multiclass_baseline_lost_class(boosting_type, loss_function):
    """Multiclass training with baselines is expected to fail here
    (presumably because only two baseline columns are provided while four
    classes are declared — TODO confirm exact failure reason)."""
    model_path = yatest.common.test_output_path('model.bin')

    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline']], fmt='%s', delimiter='\t')

    # Train pool contains only labels {1, 2}; test pool contains all four.
    prng = np.random.RandomState(seed=0)
    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, [1, 2], prng=prng), fmt='%s', delimiter='\t')
    test_path = yatest.common.test_output_path('test.txt')
    np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, [0, 1, 2, 3], prng=prng), fmt='%s', delimiter='\t')

    eval_path = yatest.common.test_output_path('eval.txt')
    fit_args = [
        '--loss-function', loss_function,
        '-f', train_path,
        '-t', test_path,
        '--column-description', cd_path,
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--use-best-model', 'false',
        '--classes-count', '4',
    ]
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', fit_args)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_weights(boosting_type, grow_policy, dev_score_calc_obj_block_size):
    """Train on a pool with per-object weights and canonize the eval."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult_weight', 'train_weight'),
        '-t', data_file('adult_weight', 'test_weight'),
        '--column-description', data_file('adult_weight', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)

    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_weights_no_bootstrap(boosting_type, dev_score_calc_obj_block_size):
    """Train on a weighted pool with bootstrap disabled and canonize the
    eval output."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult_weight', 'train_weight'),
        '-t', data_file('adult_weight', 'test_weight'),
        '--column-description', data_file('adult_weight', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '--bootstrap-type', 'No',
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)

    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_weights_gradient(boosting_type, dev_score_calc_obj_block_size):
    """Train on a weighted pool with Gradient leaf estimation and canonize
    the eval output."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult_weight', 'train_weight'),
        '-t', data_file('adult_weight', 'test_weight'),
        '--column-description', data_file('adult_weight', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--leaf-estimation-method', 'Gradient',
    ]
    execute_catboost_fit('CPU', fit_args)

    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_logloss_with_not_binarized_target(boosting_type, dev_score_calc_obj_block_size):
    """Train Logloss on a non-binary target using --target-border to
    binarize it, and canonize the eval output."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult_not_binarized', 'train_small'),
        '-t', data_file('adult_not_binarized', 'test_small'),
        '--column-description', data_file('adult_not_binarized', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_path,
        '--target-border', '0.5',
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)

    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('loss_function', LOSS_FUNCTIONS)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_all_targets(loss_function, boosting_type, grow_policy, dev_score_calc_obj_block_size):
    """For every loss function, check that models trained with and without a
    test set produce identical predictions when applied via `calc`."""
    model_path = yatest.common.test_output_path('model.bin')
    model_path_no_test = yatest.common.test_output_path('model_without_test.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    shared_fit_args = [
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '--counter-calc-method', 'SkipTest',
        '-w', '0.03',
        '-T', '4',
    ]
    execute_catboost_fit('CPU', shared_fit_args + [
        '-t', data_file('adult', 'test_small'),
        '-m', model_path,
        '--eval-file', eval_path,
    ])
    execute_catboost_fit('CPU', shared_fit_args + ['-m', model_path_no_test])

    predict_path = yatest.common.test_output_path('predict_test.eval')
    predict_path_no_test = yatest.common.test_output_path('predict_without_test.eval')
    shared_calc_args = [
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--prediction-type', 'RawFormulaVal',
    ]
    yatest.common.execute(shared_calc_args + ['-m', model_path, '--output-path', predict_path])
    yatest.common.execute(shared_calc_args + ['-m', model_path_no_test, '--output-path', predict_path_no_test])

    if loss_function == 'MAPE':
        # NOTE(review): MAPE is canonized instead of compared — presumably its
        # train-time eval and calc output differ; confirm before unifying.
        return [local_canonical_file(eval_path), local_canonical_file(predict_path)]
    else:
        assert compare_evals(eval_path, predict_path)
        assert filecmp.cmp(predict_path_no_test, predict_path)
        return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('is_inverted', [False, True], ids=['', 'inverted'])
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_cv(is_inverted, boosting_type, grow_policy):
    """Train with a --cv fold specification and canonize the eval output."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_path,
        '--cv', format_crossvalidation(is_inverted, 2, 10),
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)

    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('is_inverted', [False, True], ids=['', 'inverted'])
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_cv_for_query(is_inverted, boosting_type):
    """Cross-validation on a querywise pool (QueryRMSE); canonize eval."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--cv', format_crossvalidation(is_inverted, 2, 7),
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)

    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('is_inverted', [False, True], ids=['', 'inverted'])
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_cv_for_pairs(is_inverted, boosting_type):
    """Cross-validation on a pairwise pool (PairLogit); canonize eval."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'PairLogit',
        '-f', data_file('querywise', 'train'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--learn-pairs', data_file('querywise', 'train.pairs'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--cv', format_crossvalidation(is_inverted, 2, 7),
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)

    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('bad_cv_params', ['XX', 'YY', 'XY'])
def test_multiple_cv_spec(bad_cv_params):
    """Passing --cv more than once must make the fit command fail,
    regardless of the inversion flags of the two specifications."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    cmd = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
    ]

    # Map each case id to the (first, second) --cv inversion flags.
    inversion_by_case = {'XX': (False, False), 'XY': (False, True), 'YY': (True, True)}
    if bad_cv_params not in inversion_by_case:
        raise Exception('bad bad_cv_params value:' + bad_cv_params)
    first_inverted, second_inverted = inversion_by_case[bad_cv_params]
    cmd += ['--cv', format_crossvalidation(is_inverted=first_inverted, n=2, k=10),
            '--cv', format_crossvalidation(is_inverted=second_inverted, n=4, k=7)]

    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', cmd)
@pytest.mark.parametrize('is_inverted', [False, True], ids=['', 'inverted'])
@pytest.mark.parametrize('error_type', ['0folds', 'fold_idx_overflow'])
def test_bad_fold_cv_spec(is_inverted, error_type):
    """Malformed --cv fold specifications (zero folds, fold index out of
    range) must make the fit command fail."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    cv_option = '--cv:Inverted' if is_inverted else '--cv:Classical'
    bad_fold_spec = {'0folds': '0/0', 'fold_idx_overflow': '3/2'}[error_type]

    cmd = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        cv_option,
        bad_fold_spec,
        '--eval-file', eval_path,
    ]
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', cmd)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_empty_eval(boosting_type):
    """Train without a test set and canonize the resulting eval file
    (expected to be empty — no test objects to evaluate)."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)

    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_time(boosting_type):
    """Train with --has-time (respect the given object order) and canonize
    the eval output."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_path,
        '--has-time',
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)

    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_gradient(boosting_type, dev_score_calc_obj_block_size):
    """Train with Gradient leaf estimation and canonize the eval output."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--leaf-estimation-method', 'Gradient',
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)

    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize(
    'loss_function',
    LOSS_FUNCTIONS_SHORT,
    ids=['loss_function=%s' % loss_function for loss_function in LOSS_FUNCTIONS_SHORT]
)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_gradient_with_leafwise_approxes(loss_function, dev_score_calc_obj_block_size):
    """--dev-leafwise-approxes must not change Gradient-estimation results:
    run the same training with and without the flag and compare eval files."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    eval_path_leafwise = yatest.common.test_output_path('test_dev_approxes.eval')

    # Shared arguments; each run appends its own eval-file path.
    shared_args = [
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', 'Plain',
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--leaf-estimation-method', 'Gradient',
        '--eval-file',
    ]
    execute_catboost_fit('CPU', shared_args + [eval_path])
    execute_catboost_fit('CPU', shared_args + [eval_path_leafwise, '--dev-leafwise-approxes'])

    assert filecmp.cmp(eval_path, eval_path_leafwise)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_newton(boosting_type, dev_score_calc_obj_block_size):
    """Train with a single Newton leaf-estimation iteration and canonize
    the eval output."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--leaf-estimation-iterations', '1',
        '--leaf-estimation-method', 'Newton',
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)

    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_newton_with_leafwise_approxes(dev_score_calc_obj_block_size):
    """--dev-leafwise-approxes must not change Newton-estimation results:
    run the same training with and without the flag and compare eval files."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    eval_path_leafwise = yatest.common.test_output_path('test_dev_approxes.eval')

    # Shared arguments; each run appends its own eval-file path.
    shared_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', 'Plain',
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--leaf-estimation-iterations', '1',
        '--leaf-estimation-method', 'Newton',
        '--eval-file',
    ]
    execute_catboost_fit('CPU', shared_args + [eval_path])
    execute_catboost_fit('CPU', shared_args + [eval_path_leafwise, '--dev-leafwise-approxes'])

    assert filecmp.cmp(eval_path, eval_path_leafwise)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_newton_on_pool_with_weights(boosting_type, dev_score_calc_obj_block_size):
    """Train with multiple Newton leaf-estimation iterations on a weighted
    pool and canonize the eval output."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult_weight', 'train_weight'),
        '-t', data_file('adult_weight', 'test_weight'),
        '--column-description', data_file('adult_weight', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '40',
        '-T', '4',
        '-m', model_path,
        '--leaf-estimation-method', 'Newton',
        '--leaf-estimation-iterations', '7',
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)

    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_custom_priors(boosting_type):
    """Train with explicit global and per-feature ctr prior lists and
    canonize the eval output."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_path,
        '--ctr', 'Borders:Prior=-2:Prior=0:Prior=8:Prior=1:Prior=-1:Prior=3,Counter:Prior=0',
        '--per-feature-ctr', '4:Borders:Prior=0.444,Counter:Prior=0.444;6:Borders:Prior=0.666,Counter:Prior=0.666;8:Borders:Prior=-0.888:Prior=0.888,Counter:Prior=-0.888:Prior=0.888',
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)

    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_ctr_buckets(boosting_type, dev_score_calc_obj_block_size):
    """Train MultiClass with the Buckets ctr and canonize the eval output."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'MultiClass',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--ctr', 'Buckets',
    ]
    execute_catboost_fit('CPU', fit_args)

    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_fold_len_multiplier(boosting_type, dev_score_calc_obj_block_size):
    """Train with a non-default --fold-len-multiplier and canonize eval."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'MultiClass',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--fold-len-multiplier', '1.5',
    ]
    execute_catboost_fit('CPU', fit_args)

    return [local_canonical_file(eval_path)]
# Feature-importance (fstr) types exercised by the tests below; the
# DATASET_DEPENDENT subset is used by the target-border/weights variants.
FSTR_TYPES = ['PredictionValuesChange', 'InternalFeatureImportance', 'InternalInteraction', 'Interaction', 'ShapValues', 'PredictionDiff']
DATASET_DEPENDENT_FSTR_TYPES = ['PredictionValuesChange', 'InternalFeatureImportance', 'LossFunctionChange', 'ShapValues', 'PredictionDiff']
@pytest.mark.parametrize('fstr_type', FSTR_TYPES)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_fstr(fstr_type, boosting_type, grow_policy):
    """Canonize every supported fstr type on a binary-classification pool."""
    # PredictionDiff uses the higgs pool instead of adult (presumably it
    # needs a different feature layout — TODO confirm).
    pool = 'higgs' if fstr_type == 'PredictionDiff' else 'adult'
    extra_train_params = ('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ()
    return do_test_fstr(
        fstr_type,
        loss_function='Logloss',
        input_path=data_file(pool, 'train_small'),
        cd_path=data_file(pool, 'train.cd'),
        boosting_type=boosting_type,
        grow_policy=grow_policy,
        normalize=False,
        additional_train_params=extra_train_params
    )
@pytest.mark.parametrize('fstr_type', ['PredictionValuesChange', 'InternalFeatureImportance', 'InternalInteraction', 'Interaction'])
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_fstr_with_text_features(fstr_type, boosting_type, grow_policy):
    """Canonize fstr types on a pool containing text features."""
    separator_type = 'ByDelimiter'
    calcers = ['BoW', 'NaiveBayes', 'BM25']
    calcer_dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}
    # Key insertion order matters: json.dumps output must stay stable.
    feature_processing = []
    for calcer in calcers:
        feature_processing.append({
            'feature_calcers': [calcer],
            'dictionaries_names': calcer_dicts[calcer],
            'tokenizers_names': [separator_type],
        })
    text_processing = {
        'feature_processing': {'default': feature_processing},
        'dictionaries': [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}],
        'tokenizers': [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}],
    }
    return do_test_fstr(
        fstr_type,
        loss_function='Logloss',
        input_path=data_file('rotten_tomatoes', 'train'),
        cd_path=data_file('rotten_tomatoes', 'cd_binclass'),
        boosting_type=boosting_type,
        grow_policy=grow_policy,
        normalize=False,
        additional_train_params=('--text-processing', json.dumps(text_processing)) +
                                (('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ())
    )
@pytest.mark.parametrize('fstr_type', ['LossFunctionChange', 'ShapValues'])
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_fstr_with_text_features_shap(fstr_type, boosting_type, grow_policy):
    """Canonize dataset-dependent fstr types on a pool with text features
    (random strength disabled for determinism)."""
    separator_type = 'ByDelimiter'
    calcers = ['NaiveBayes']
    calcer_dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}
    # Key insertion order matters: json.dumps output must stay stable.
    feature_processing = []
    for calcer in calcers:
        feature_processing.append({
            'feature_calcers': [calcer],
            'dictionaries_names': calcer_dicts[calcer],
            'tokenizers_names': [separator_type],
        })
    text_processing = {
        'feature_processing': {'default': feature_processing},
        'dictionaries': [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}],
        'tokenizers': [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}],
    }
    return do_test_fstr(
        fstr_type,
        loss_function='Logloss',
        input_path=data_file('rotten_tomatoes', 'train'),
        cd_path=data_file('rotten_tomatoes', 'cd_binclass'),
        boosting_type=boosting_type,
        grow_policy=grow_policy,
        normalize=False,
        additional_train_params=('--random-strength', '0', '--text-processing', json.dumps(text_processing)) +
                                (('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ())
    )
@pytest.mark.parametrize('fstr_type', FSTR_TYPES)
@pytest.mark.parametrize('grow_policy', GROW_POLICIES)
def test_fstr_normalized_model(fstr_type, grow_policy):
    """Canonize every fstr type computed from a normalized model."""
    pool = 'higgs' if fstr_type == 'PredictionDiff' else 'adult'
    extra_train_params = ('--max-ctr-complexity', '1') if fstr_type == 'ShapValues' else ()
    return do_test_fstr(
        fstr_type,
        loss_function='Logloss',
        input_path=data_file(pool, 'train_small'),
        cd_path=data_file(pool, 'train.cd'),
        boosting_type='Plain',
        grow_policy=grow_policy,
        normalize=True,
        additional_train_params=extra_train_params
    )
@pytest.mark.parametrize('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)
@pytest.mark.parametrize('grow_policy', GROW_POLICIES)
def test_fstr_with_target_border(fstr_type, grow_policy):
    """Canonize dataset-dependent fstr types when the target is binarized
    via --target-border."""
    if fstr_type == 'PredictionDiff':
        pool_dir = 'querywise'
        train_file = 'train'
        cd_file = 'train.cd'
    else:
        pool_dir = 'adult_not_binarized'
        train_file = 'train_small'
        cd_file = 'train.cd'
    return do_test_fstr(
        fstr_type,
        loss_function='Logloss',
        input_path=data_file(pool_dir, train_file),
        cd_path=data_file(pool_dir, cd_file),
        boosting_type='Plain',
        grow_policy=grow_policy,
        normalize=False,
        additional_train_params=('--target-border', '0.4')
    )
@pytest.mark.parametrize('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)
@pytest.mark.parametrize('grow_policy', GROW_POLICIES)
def test_fstr_with_weights(fstr_type, grow_policy):
    """Feature importances on a pool that carries per-object weights."""
    return do_test_fstr(
        fstr_type,
        loss_function='RMSE',
        input_path=data_file('querywise', 'train'),
        cd_path=data_file('querywise', 'train.cd.weight'),
        boosting_type='Plain',
        grow_policy=grow_policy,
        normalize=False
    )
@pytest.mark.parametrize('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)
@pytest.mark.parametrize('grow_policy', GROW_POLICIES)
def test_fstr_with_class_weights(fstr_type, grow_policy):
    """Feature importances on a model trained with per-class weights."""
    if fstr_type == 'PredictionDiff':
        dataset = 'higgs'
    else:
        dataset = 'adult'
    return do_test_fstr(
        fstr_type,
        loss_function='Logloss',
        input_path=data_file(dataset, 'train_small'),
        cd_path=data_file(dataset, 'train.cd'),
        boosting_type='Plain',
        grow_policy=grow_policy,
        normalize=False,
        additional_train_params=('--class-weights', '0.25,0.75')
    )
@pytest.mark.parametrize('fstr_type', DATASET_DEPENDENT_FSTR_TYPES)
def test_fstr_with_target_border_and_class_weights(fstr_type):
    """Feature importances when both --target-border and --class-weights are set."""
    if fstr_type == 'PredictionDiff':
        dataset, pool_file = 'querywise', 'train'
    else:
        dataset, pool_file = 'adult_not_binarized', 'train_small'
    return do_test_fstr(
        fstr_type,
        loss_function='Logloss',
        input_path=data_file(dataset, pool_file),
        cd_path=data_file(dataset, 'train.cd'),
        boosting_type='Plain',
        grow_policy='SymmetricTree',
        normalize=False,
        additional_train_params=('--target-border', '0.4', '--class-weights', '0.25,0.75')
    )
def do_test_fstr(
        fstr_type,
        loss_function,
        input_path,
        cd_path,
        boosting_type,
        grow_policy,
        normalize,
        additional_train_params=()
):
    """Train a model, then compute feature importances of ``fstr_type`` via
    the standalone `fstr` tool.

    When ``normalize`` is true, the model's scale/bias are changed before the
    fstr call; only the fstr types valid for a normalized model are expected
    to succeed — for every other type the `fstr` tool must fail.

    Returns the canonical-file fixture for the fstr output, or None when the
    expected-failure path was exercised.
    """
    model_path = yatest.common.test_output_path('model.bin')
    output_fstr_path = yatest.common.test_output_path('fstr.tsv')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', input_path,
        '--column-description', cd_path,
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '--one-hot-max-size', '10',
        '-m', model_path
    ) + additional_train_params
    execute_catboost_fit('CPU', cmd)
    if fstr_type == 'PredictionDiff':
        # PredictionDiff compares a pair of documents: take the first two rows.
        # NOTE: renamed from `input`, which shadowed the builtin.
        with open(input_path) as src:
            fstr_pool_path = yatest.common.test_output_path('input.tsv')
            with open(fstr_pool_path, "w") as output:
                output.write(src.readline())
                output.write(src.readline())
            input_path = fstr_pool_path
    fstr_cmd = (
        CATBOOST_PATH,
        'fstr',
        '--input-path', input_path,
        '--column-description', cd_path,
        '-m', model_path,
        '-o', output_fstr_path,
        '--fstr-type', fstr_type
    )
    if normalize:
        make_model_normalized(model_path)
        # Explicit parens: only PredictionValuesChange, or
        # InternalFeatureImportance for non-ranking losses, is defined on a
        # normalized model; everything else must make the tool fail.
        if not (
            fstr_type == 'PredictionValuesChange' or
            (fstr_type == 'InternalFeatureImportance' and loss_function not in RANKING_LOSSES)
        ):
            with pytest.raises(yatest.common.ExecutionError):
                yatest.common.execute(fstr_cmd)
            return

    yatest.common.execute(fstr_cmd)
    return local_canonical_file(output_fstr_path)
def make_model_normalized(model_path):
    """Rescale the model in place: scale 0.5, bias 0.125."""
    normalize_cmd = [
        CATBOOST_PATH,
        'normalize-model',
        '--model-path', model_path,
        '--output-model', model_path,
        '--set-scale', '0.5',
        '--set-bias', '0.125',
    ]
    yatest.common.execute(normalize_cmd)
@pytest.mark.parametrize('loss_function', ['QueryRMSE', 'PairLogit', 'YetiRank', 'PairLogitPairwise', 'YetiRankPairwise'])
def test_loss_change_fstr(loss_function):
    """LossFunctionChange importances for ranking losses, unnormalized model."""
    return do_test_loss_change_fstr(loss_function, normalize=False)
def test_loss_change_fstr_normalized():
    """LossFunctionChange on a normalized model is expected to fail."""
    return do_test_loss_change_fstr('QueryRMSE', normalize=True)
def do_test_loss_change_fstr(loss_function, normalize):
    """Check that LossFunctionChange importances computed during training
    (--fstr-file) match those recomputed by the standalone `fstr` tool.

    With ``normalize`` the model's scale/bias are changed first, and the
    standalone `fstr` call is then expected to fail.
    """
    model_path = yatest.common.test_output_path('model.bin')
    output_fstr_path = yatest.common.test_output_path('fstr.tsv')
    train_fstr_path = yatest.common.test_output_path('t_fstr.tsv')
    def add_loss_specific_params(cmd, fstr_mode):
        # Pairwise losses need an explicit pairs file and a cd without target;
        # the pairs flag differs between fit (--learn-pairs) and fstr
        # (--input-pairs) modes.
        if loss_function in ['PairLogit', 'PairLogitPairwise']:
            cmd += ('--column-description', data_file('querywise', 'train.cd.no_target'))
            if fstr_mode:
                cmd += ('--input-pairs', data_file('querywise', 'train.pairs'))
            else:
                cmd += ('--learn-pairs', data_file('querywise', 'train.pairs'))
        else:
            cmd += ('--column-description', data_file('querywise', 'train.cd'))
        return cmd
    cmd_prefix = (
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '--learn-set', data_file('querywise', 'train'),
        '--boosting-type', 'Plain',
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '--one-hot-max-size', '10',
        '--fstr-file', train_fstr_path,
        '--fstr-type', 'LossFunctionChange',
        '--model-file', model_path
    )
    cmd = add_loss_specific_params(cmd_prefix, fstr_mode=False)
    execute_catboost_fit('CPU', cmd)
    fstr_cmd_prefix = (
        CATBOOST_PATH,
        'fstr',
        '--input-path', data_file('querywise', 'train'),
        '--model-file', model_path,
        '--output-path', output_fstr_path,
        '--fstr-type', 'LossFunctionChange',
    )
    fstr_cmd = add_loss_specific_params(fstr_cmd_prefix, fstr_mode=True)
    if normalize:
        make_model_normalized(model_path)
        # LossFunctionChange is not defined for a rescaled model.
        with pytest.raises(yatest.common.ExecutionError):
            yatest.common.execute(fstr_cmd)
        return

    yatest.common.execute(fstr_cmd)

    # Importances from training and from the standalone tool must agree.
    fit_output = np.loadtxt(train_fstr_path, dtype='float', delimiter='\t')
    fstr_output = np.loadtxt(output_fstr_path, dtype='float', delimiter='\t')
    assert(np.allclose(fit_output, fstr_output, rtol=1e-6))
    return [local_canonical_file(output_fstr_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('ranking_parameters', [
    {'loss-function': 'PairLogit', 'fstr-type': 'LossFunctionChange'},
    {'loss-function': 'Logloss', 'fstr-type': 'PredictionValuesChange'}
])
def test_fstr_feature_importance_default_value(boosting_type, ranking_parameters):
    """'FeatureImportance' must resolve to the loss-specific default fstr type:
    LossFunctionChange for ranking losses, PredictionValuesChange otherwise —
    both during training and in the standalone `fstr` tool.
    """
    model_path = yatest.common.test_output_path('model.bin')
    fstr_path_0 = yatest.common.test_output_path('fstr_0.tsv')
    fstr_path_1 = yatest.common.test_output_path('fstr_1.tsv')
    internal_fstr_path_0 = yatest.common.test_output_path('internal_fstr_0.tsv')
    internal_fstr_path_1 = yatest.common.test_output_path('internal_fstr_1.tsv')
    # Dataset and its file layout depend on the loss under test.
    pool = 'adult' if ranking_parameters['loss-function'] == 'Logloss' else 'black_friday'
    pool_path = data_file(pool, 'train_small' if pool == 'adult' else 'train')
    cd_path = data_file(pool, 'train.cd' if pool == 'adult' else 'cd')
    has_header_suffix = ('--has-header',) if pool == 'black_friday' else ()
    cmd = (
        '--use-best-model', 'false',
        '--learn-set', pool_path,
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '--one-hot-max-size', '10',
        '--model-file', model_path,
        '--loss-function', ranking_parameters['loss-function']
    ) + has_header_suffix

    if ranking_parameters['loss-function'] == 'Logloss':
        cmd += ('--target-border', '0.5')

    # Train once with the generic 'FeatureImportance' type...
    execute_catboost_fit(
        'CPU',
        cmd + ('--fstr-file', fstr_path_0,
               '--fstr-internal-file', internal_fstr_path_0,
               '--fstr-type', 'FeatureImportance')
    )
    # ...and once with the explicit loss-specific type; outputs must match.
    execute_catboost_fit(
        'CPU',
        cmd + ('--fstr-file', fstr_path_1,
               '--fstr-internal-file', internal_fstr_path_1,
               '--fstr-type', ranking_parameters['fstr-type'])
    )

    assert filecmp.cmp(fstr_path_0, fstr_path_1)
    assert filecmp.cmp(internal_fstr_path_0, internal_fstr_path_1)

    # Repeat the check with the standalone `fstr` tool.
    fstr_cmd = (
        CATBOOST_PATH,
        'fstr',
        '--input-path', pool_path,
        '--column-description', cd_path,
        '--model-file', model_path,
    ) + has_header_suffix
    yatest.common.execute(
        fstr_cmd + ('--output-path', fstr_path_1,
                    '--fstr-type', 'FeatureImportance')
    )
    yatest.common.execute(
        fstr_cmd + ('--output-path', internal_fstr_path_1,
                    '--fstr-type', 'InternalFeatureImportance')
    )

    assert filecmp.cmp(fstr_path_0, fstr_path_1)
    assert filecmp.cmp(internal_fstr_path_0, internal_fstr_path_1)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_loss_change_fstr_without_pairs(boosting_type):
    """LossFunctionChange for PairLogit: works when pairs can be generated
    from the target, and must fail when the cd has no target column.
    """
    model_path = yatest.common.test_output_path('model.bin')
    output_fstr_path = yatest.common.test_output_path('fstr.tsv')

    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'PairLogit',
        '--learn-set', data_file('querywise', 'train'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--learn-pairs', data_file('querywise', 'train.pairs'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '--learning-rate', '0.03',
        '-T', '4',
        '--one-hot-max-size', '10',
        '--model-file', model_path
    )
    execute_catboost_fit('CPU', cmd)

    # With a target column fstr can generate pairs itself — must succeed.
    fstr_cmd = (
        CATBOOST_PATH,
        'fstr',
        '--input-path', data_file('querywise', 'train'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--model-file', model_path,
        '--output-path', output_fstr_path,
        '--fstr-type', 'LossFunctionChange',
    )
    yatest.common.execute(fstr_cmd)

    # Without a target there is no way to build pairs — must fail.
    # (Was a bare `try/except ... assert False`; pytest.raises is both
    # stricter and consistent with the rest of this file.)
    fstr_cmd = (
        CATBOOST_PATH,
        'fstr',
        '--input-path', data_file('querywise', 'train'),
        '--column-description', data_file('querywise', 'train.cd.no_target'),
        '--model-file', model_path,
        '--fstr-type', 'LossFunctionChange',
    )
    with pytest.raises(yatest.common.ExecutionError):
        yatest.common.execute(fstr_cmd)

    return [local_canonical_file(output_fstr_path)]
def test_loss_change_fstr_on_different_pool_type():
    """LossFunctionChange importances must agree across pool representations:
    dsv input, quantized input, and the values computed during training.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_dsv_fstr_path = yatest.common.test_output_path('fstr.tsv')
    output_quantized_fstr_path = yatest.common.test_output_path('fstr.tsv.quantized')
    train_fstr_path = yatest.common.test_output_path('train_fstr.tsv')

    def get_pool_path(set_name, is_quantized=False):
        # The 'quantized://' scheme makes catboost read a pre-quantized pool.
        path = data_file('querywise', set_name)
        return 'quantized://' + path + '.quantized' if is_quantized else path

    cd_file = data_file('querywise', 'train.cd')
    # Train on the quantized pool, writing fit-time importances.
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'PairLogit',
        '--learn-set', get_pool_path('train', True),
        '--learn-pairs', data_file('querywise', 'train.pairs'),
        '-i', '10',
        '-T', '4',
        '--fstr-file', train_fstr_path,
        '--fstr-type', 'LossFunctionChange',
        '--model-file', output_model_path,
    )
    execute_catboost_fit('CPU', cmd)

    # Recompute importances from the plain dsv pool...
    cmd = (
        CATBOOST_PATH, 'fstr',
        '--input-path', get_pool_path('train'),
        '--column-description', cd_file,
        '--input-pairs', data_file('querywise', 'train.pairs'),
        '--model-file', output_model_path,
        '--output-path', output_dsv_fstr_path,
        '--fstr-type', 'LossFunctionChange',
    )
    yatest.common.execute(cmd)

    # ...and from the quantized pool (no cd needed in that case).
    cmd = (
        CATBOOST_PATH, 'fstr',
        '--input-path', get_pool_path('train', True),
        '--input-pairs', data_file('querywise', 'train.pairs'),
        '--model-file', output_model_path,
        '--output-path', output_quantized_fstr_path,
        '--fstr-type', 'LossFunctionChange',
    )
    yatest.common.execute(cmd)

    fstr_dsv = np.loadtxt(output_dsv_fstr_path, dtype='float', delimiter='\t')
    fstr_quantized = np.loadtxt(output_quantized_fstr_path, dtype='float', delimiter='\t')
    train_fstr = np.loadtxt(train_fstr_path, dtype='float', delimiter='\t')
    assert(np.allclose(fstr_dsv, fstr_quantized, rtol=1e-6))
    assert(np.allclose(fstr_dsv, train_fstr, rtol=1e-6))
@pytest.mark.parametrize('loss_function', LOSS_FUNCTIONS)
@pytest.mark.parametrize('grow_policy', GROW_POLICIES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_reproducibility(loss_function, grow_policy, dev_score_calc_obj_block_size):
    """Training results must not depend on the number of threads."""
    def fit_with_threads(thread_count, model_file, eval_file):
        execute_catboost_fit('CPU', [
            '--use-best-model', 'false',
            '--loss-function', loss_function,
            '-f', data_file('adult', 'train_small'),
            '-t', data_file('adult', 'test_small'),
            '--column-description', data_file('adult', 'train.cd'),
            '--grow-policy', grow_policy,
            '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
            '-i', '25',
            '-T', str(thread_count),
            '-m', model_file,
            '--eval-file', eval_file,
        ])

    eval_single = yatest.common.test_output_path('test_1.eval')
    fit_with_threads(1, yatest.common.test_output_path('model_1.bin'), eval_single)
    eval_multi = yatest.common.test_output_path('test_4.eval')
    fit_with_threads(4, yatest.common.test_output_path('model_4.bin'), eval_multi)
    assert filecmp.cmp(eval_single, eval_multi)
# Numeric-feature border selection algorithms exercised by test_feature_border_types.
BORDER_TYPES = ['Median', 'GreedyLogSum', 'UniformAndQuantiles', 'MinEntropy', 'MaxLogSum', 'Uniform']
@pytest.mark.parametrize('border_type', BORDER_TYPES)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_feature_border_types(border_type, boosting_type):
    """Every feature-border selection algorithm must train and match canon."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--feature-border-type', border_type,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('depth', [4, 8])
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_deep_tree_classification(depth, boosting_type):
    """Classification with non-default tree depths."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '--depth', str(depth),
        '-m', model_file,
        '--eval-file', eval_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_regularization(boosting_type, grow_policy, dev_score_calc_obj_block_size):
    """L2 leaf regularization combined with Newton leaf estimation."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--leaf-estimation-method', 'Newton',
        '--eval-file', eval_file,
        '--l2-leaf-reg', '5',
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
# Regression losses covered by test_reg_targets.
REG_LOSS_FUNCTIONS = ['RMSE', 'RMSEWithUncertainty', 'MAE', 'Lq:q=1', 'Lq:q=1.5', 'Lq:q=3', 'Quantile', 'LogLinQuantile', 'Poisson', 'MAPE',
                      'Huber:delta=1.0']
@pytest.mark.parametrize('loss_function', REG_LOSS_FUNCTIONS)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_reg_targets(loss_function, boosting_type, dev_score_calc_obj_block_size):
    """Each regression loss must train on probability targets and match canon."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('adult_crossentropy', 'train_proba'),
        '-t', data_file('adult_crossentropy', 'test_proba'),
        '--column-description', data_file('adult_crossentropy', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_multi_targets(loss_function, boosting_type, dev_score_calc_obj_block_size):
    """Multiclass losses: train, verify --dev-leafwise-approxes equivalence
    (Plain only), and verify `calc` reproduces the fit-time eval."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    eval_file_dev_approxes = yatest.common.test_output_path('test_dev_approxes.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('cloudness_small', 'train_small'),
        '-t', data_file('cloudness_small', 'test_small'),
        '--column-description', data_file('cloudness_small', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file
    ]
    execute_catboost_fit('CPU', fit_args)

    if boosting_type == 'Plain':
        # Same run with leafwise approxes must produce a bit-identical eval.
        dev_args = fit_args[:-1] + [eval_file_dev_approxes, '--dev-leafwise-approxes']
        execute_catboost_fit('CPU', dev_args)
        assert filecmp.cmp(eval_file, eval_file_dev_approxes)

    formula_predict_path = yatest.common.test_output_path('predict_test.eval')
    calc_args = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('cloudness_small', 'test_small'),
        '--column-description', data_file('cloudness_small', 'train.cd'),
        '-m', model_file,
        '--output-path', formula_predict_path,
        '--prediction-type', 'RawFormulaVal'
    )
    yatest.common.execute(calc_args)
    assert compare_evals(eval_file, formula_predict_path)
    return [local_canonical_file(eval_file)]
# NOTE(review): rebinds the BORDER_TYPES defined earlier (same values, different
# order); used by the CTR target quantization tests below.
BORDER_TYPES = ['MinEntropy', 'Median', 'UniformAndQuantiles', 'MaxLogSum', 'GreedyLogSum', 'Uniform']
@pytest.mark.parametrize(
    'border_type',
    BORDER_TYPES,
    ids=lambda border_type: 'border_type=%s' % border_type
)
@pytest.mark.parametrize(
    'border_count',
    [1, 3, 10],
    ids=lambda border_count: 'border_count=%d' % border_count
)
@pytest.mark.parametrize(
    'boosting_type',
    BOOSTING_TYPE,
    ids=lambda boosting_type: 'boosting_type=%s' % boosting_type
)
def test_ctr_target_quantization(border_type, border_count, boosting_type):
    """CTR target quantization with each border algorithm / border count."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'RMSE',
        '-f', data_file('adult_crossentropy', 'train_proba'),
        '-t', data_file('adult_crossentropy', 'test_proba'),
        '--column-description', data_file('adult_crossentropy', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '3',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--ctr', 'Borders:TargetBorderType=' + border_type,
        '--ctr-target-border-count', str(border_count),
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
# Counter CTR calculation modes exercised by test_counter_calc.
COUNTER_METHODS = ['Full', 'SkipTest']
@pytest.mark.parametrize('counter_calc_method', COUNTER_METHODS)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_counter_calc(counter_calc_method, boosting_type):
    """Both counter CTR calculation methods must train and match canon."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'RMSE',
        '-f', data_file('adult_crossentropy', 'train_proba'),
        '-t', data_file('adult_crossentropy', 'test_proba'),
        '--column-description', data_file('adult_crossentropy', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '60',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--counter-calc-method', counter_calc_method,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
# CTR type combinations (single and comma-separated) exercised by test_ctr_type.
CTR_TYPES = ['Borders', 'Buckets', 'BinarizedTargetMeanValue:TargetBorderCount=10', 'Borders,BinarizedTargetMeanValue:TargetBorderCount=10', 'Buckets,Borders']
@pytest.mark.parametrize('ctr_type', CTR_TYPES)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_ctr_type(ctr_type, boosting_type, dev_score_calc_obj_block_size):
    """Every supported --ctr combination must train and match canon."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'RMSE',
        '-f', data_file('adult_crossentropy', 'train_proba'),
        '-t', data_file('adult_crossentropy', 'test_proba'),
        '--column-description', data_file('adult_crossentropy', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '3',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--ctr', ctr_type,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_custom_overfitting_detector_metric(boosting_type):
    """AUC as the overfitting-detector metric; error logs go to canon."""
    model_file = yatest.common.test_output_path('adult_model.bin')
    test_err_log = yatest.common.test_output_path('test_error.tsv')
    learn_err_log = yatest.common.test_output_path('learn_error.tsv')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '--eval-metric', 'AUC:hints=skip_train~false',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--learn-err-log', learn_err_log,
        '--test-err-log', test_err_log,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_err_log),
            local_canonical_file(test_err_log)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_same_metric_skip_different(boosting_type):
    """Learn-set AUC must be identical whether it is tracked via --eval-metric
    or via --custom-metric (with skip_train flipped on the eval metric)."""
    model_file = yatest.common.test_output_path('adult_model.bin')
    test_err_plain = yatest.common.test_output_path('test_error.tsv')
    learn_err_plain = yatest.common.test_output_path('learn_error.tsv')
    test_err_custom = yatest.common.test_output_path('test_error_with_custom_metric.tsv')
    learn_err_custom = yatest.common.test_output_path('learn_error_with_custom_metric.tsv')
    base_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
    ]
    execute_catboost_fit('CPU', base_args + [
        '--eval-metric', 'AUC:hints=skip_train~false',
        '--learn-err-log', learn_err_plain,
        '--test-err-log', test_err_plain,
    ])
    execute_catboost_fit('CPU', base_args + [
        '--eval-metric', 'AUC:hints=skip_train~true',
        '--custom-metric', 'AUC:hints=skip_train~false',
        '--learn-err-log', learn_err_custom,
        '--test-err-log', test_err_custom,
    ])
    assert filecmp.cmp(learn_err_custom, learn_err_plain)
@pytest.mark.parametrize('loss_function', BINCLASS_LOSSES)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_custom_loss_for_classification(loss_function, boosting_type):
    """All binary-classification metrics can be tracked as custom metrics."""
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    all_metrics = [
        'AUC:hints=skip_train~false',
        'Logloss',
        'CrossEntropy',
        'Accuracy',
        'Precision',
        'Recall',
        'F1',
        'TotalF1',
        'MCC',
        'BalancedAccuracy',
        'BalancedErrorRate',
        'Kappa',
        'WKappa',
        'BrierScore',
        'ZeroOneLoss',
        'HammingLoss',
        'HingeLoss',
        'NormalizedGini',
    ]
    # The training loss is logged anyway, so leave it out of the custom list.
    custom_metrics = [metric for metric in all_metrics if metric != loss_function]
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('adult_crossentropy', 'train_proba'),
        '-t', data_file('adult_crossentropy', 'test_proba'),
        '--column-description', data_file('adult_crossentropy', 'train.cd'),
        '--boosting-type', boosting_type,
        '-w', '0.03',
        '-i', '10',
        '-T', '4',
        '--custom-metric', ','.join(custom_metrics),
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
    ]
    if loss_function == 'Logloss':
        fit_args += ['--target-border', '0.5']
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_loglikelihood_of_prediction(boosting_type):
    """LogLikelihoodOfPrediction tracked as a custom metric on a weighted pool."""
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult_weight', 'train_weight'),
        '-t', data_file('adult_weight', 'test_weight'),
        '--column-description', data_file('adult_weight', 'train.cd'),
        '--boosting-type', boosting_type,
        '-w', '0.03',
        '-i', '10',
        '-T', '4',
        '--custom-metric', 'LogLikelihoodOfPrediction',
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
    ]
    execute_catboost_fit('CPU', fit_args)
    # Loose tolerance: the metric value is a float accumulated over the pool.
    return [local_canonical_file(learn_error_path, diff_tool(1e-7)), local_canonical_file(test_error_path, diff_tool(1e-7))]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_custom_loss_for_multiclassification(boosting_type):
    """Multiclass metrics can all be tracked as custom metrics."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'MultiClass',
        '-f', data_file('cloudness_small', 'train_small'),
        '-t', data_file('cloudness_small', 'test_small'),
        '--column-description', data_file('cloudness_small', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--custom-metric',
        'AUC:hints=skip_train~false;type=OneVsAll,Accuracy,Precision,Recall,F1,TotalF1,MCC,Kappa,WKappa,ZeroOneLoss,HammingLoss,HingeLoss,NormalizedGini',
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_calc_prediction_type(boosting_type):
    """`calc` with --prediction-type Probability on a trained Logloss model."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
    ]
    execute_catboost_fit('CPU', fit_args)

    calc_args = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-m', model_file,
        '--output-path', eval_file,
        '--prediction-type', 'Probability'
    )
    yatest.common.execute(calc_args)
    return local_canonical_file(eval_file)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_calc_no_target(boosting_type):
    """`calc` on a target-less cd must reproduce the fit-time test eval."""
    model_file = yatest.common.test_output_path('adult_model.bin')
    fit_eval_path = yatest.common.test_output_path('fit_test.eval')
    calc_eval_path = yatest.common.test_output_path('calc_test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--counter-calc-method', 'SkipTest',
        '--eval-file', fit_eval_path,
    ]
    execute_catboost_fit('CPU', fit_args)

    calc_args = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('train_notarget.cd'),
        '-m', model_file,
        '--output-path', calc_eval_path
    )
    yatest.common.execute(calc_args)
    assert compare_evals(fit_eval_path, calc_eval_path)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_classification_progress_restore(boosting_type):
    """Training resumed from a snapshot must produce the same eval as an
    uninterrupted run of the same total length.

    Fix: the random-name generator used Python 2 `xrange`, which raises
    NameError under the python3 interpreter this file targets.
    """
    def run_catboost(iters, model_path, eval_path, additional_params=None):
        import random
        import shutil
        import string
        letters = string.ascii_lowercase
        # Copy the train set under a random name so snapshot resume cannot
        # depend on the input file path staying the same between runs.
        train_random_name = ''.join(random.choice(letters) for _ in range(8))
        shutil.copy(data_file('adult', 'train_small'), train_random_name)
        cmd = [
            '--loss-function', 'Logloss',
            '--learning-rate', '0.5',
            '-f', train_random_name,
            '-t', data_file('adult', 'test_small'),
            '--column-description', data_file('adult', 'train.cd'),
            '--boosting-type', boosting_type,
            '-i', str(iters),
            '-T', '4',
            '-m', model_path,
            '--eval-file', eval_path,
        ]
        if additional_params:
            cmd += additional_params
        execute_catboost_fit('CPU', cmd)
    canon_model_path = yatest.common.test_output_path('canon_model.bin')
    canon_eval_path = yatest.common.test_output_path('canon_test.eval')
    run_catboost(30, canon_model_path, canon_eval_path)
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    progress_path = yatest.common.test_output_path('test.cbp')
    # Run 15 iterations with a snapshot, then resume the same snapshot to 30.
    run_catboost(15, model_path, eval_path, additional_params=['--snapshot-file', progress_path])
    run_catboost(30, model_path, eval_path, additional_params=['--snapshot-file', progress_path])
    assert filecmp.cmp(canon_eval_path, eval_path)
@pytest.mark.parametrize('loss_function', CLASSIFICATION_LOSSES)
@pytest.mark.parametrize('prediction_type', PREDICTION_TYPES)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_prediction_type(prediction_type, loss_function, boosting_type):
    """Every classification loss x prediction type combination must train."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
        '--prediction-type', prediction_type,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_const_feature(boosting_type, dev_score_calc_obj_block_size):
    """Training must cope with a feature column that is constant."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    train_path = yatest.common.test_output_path('train_small')
    test_path = yatest.common.test_output_path('test_small')
    train_dataset = np.loadtxt(data_file('adult', 'train_small'), dtype=str, delimiter='\t')
    test_dataset = np.loadtxt(data_file('adult', 'test_small'), dtype=str, delimiter='\t')
    # Zero out column 14 in both splits so it carries no information.
    train_dataset[:, 14] = '0'
    test_dataset[:, 14] = '0'
    np.savetxt(train_path, train_dataset, fmt='%s', delimiter='\t')
    np.savetxt(test_path, test_dataset[:10, :], fmt='%s', delimiter='\t')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', 'RMSE',
        '-f', train_path,
        '-t', test_path,
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
# Quantile-family losses exercised by test_quantile_targets.
QUANTILE_LOSS_FUNCTIONS = ['Quantile', 'LogLinQuantile']
@pytest.mark.parametrize('loss_function', QUANTILE_LOSS_FUNCTIONS)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
def test_quantile_targets(loss_function, boosting_type, grow_policy):
    """Quantile-family losses with alpha=0.9."""
    model_file = yatest.common.test_output_path('model.bin')
    eval_file = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--use-best-model', 'false',
        '--loss-function', loss_function + ':alpha=0.9',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-i', '5',
        '-T', '4',
        '-m', model_file,
        '--eval-file', eval_file,
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_quantile_targets_exact(boosting_type):
    """Train Quantile:alpha=0.9 with the 'Exact' leaf estimation method and canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Quantile:alpha=0.9',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '5',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--leaf-estimation-method', 'Exact'
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_quantile_weights(boosting_type):
    """Train Quantile:alpha=0.9 with Exact leaf estimation on a weighted pool (higgs) and canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Quantile:alpha=0.9',
        '-f', data_file('higgs', 'train_small'),
        '-t', data_file('higgs', 'test_small'),
        '--column-description', data_file('higgs', 'train_weight.cd'),
        '--boosting-type', boosting_type,
        '-i', '5',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--leaf-estimation-method', 'Exact'
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_quantile_categorical(boosting_type):
    """Train Quantile:alpha=0.9 with Exact leaf estimation on a pool with categorical features and canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Quantile:alpha=0.9',
        '-f', data_file('adult_crossentropy', 'train_proba'),
        '-t', data_file('adult_crossentropy', 'test_proba'),
        '--column-description', data_file('adult_crossentropy', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '5',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--leaf-estimation-method', 'Exact'
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
def test_quantile_exact_distributed():
    """Run distributed training with Exact leaf estimation and canonize its output.

    NOTE(review): despite the test name, the loss here is 'MAE', not a
    Quantile loss — confirm whether that is intentional (MAE is the
    alpha=0.5 quantile) or a leftover.
    """
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='MAE',
        pool='higgs',
        train='train_small',
        test='test_small',
        cd='train.cd',
        other_options=(
            '--leaf-estimation-method', 'Exact',
            '--boost-from-average', 'False'
        )
    )))]
# Custom-metric specifications (single metrics, comma-separated lists, and
# parameterized metrics) passed to '--custom-metric' in test_custom_loss.
CUSTOM_LOSS_FUNCTIONS = ['RMSE,MAE', 'Quantile:alpha=0.9', 'MSLE,MedianAbsoluteError,SMAPE',
                         'NumErrors:greater_than=0.01,NumErrors:greater_than=0.1,NumErrors:greater_than=0.5',
                         'FairLoss:smoothness=0.9']
@pytest.mark.parametrize('custom_loss_function', CUSTOM_LOSS_FUNCTIONS)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_custom_loss(custom_loss_function, boosting_type):
    """Train RMSE while tracking extra '--custom-metric' metrics; canonize learn/test error logs.

    MSLE values are compared with a small tolerance, all other metrics exactly.
    """
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'RMSE',
        '-f', data_file('adult_crossentropy', 'train_proba'),
        '-t', data_file('adult_crossentropy', 'test_proba'),
        '--column-description', data_file('adult_crossentropy', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '50',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--custom-metric', custom_loss_function,
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
    )
    execute_catboost_fit('CPU', cmd)
    # MSLE is numerically sensitive, so allow a tiny diff for it.
    if 'MSLE' in custom_loss_function:
        tolerance = 1e-9
    else:
        tolerance = 0
    return [
        local_canonical_file(error_log, diff_tool=diff_tool(tolerance))
        for error_log in (learn_error_path, test_error_path)
    ]
def test_train_dir():
    """Check that '--train-dir' redirects all training artifacts into the given directory.

    Asserts the presence of logs, model, eval and both fstr files under trainDir.
    """
    output_model_path = 'model.bin'
    output_eval_path = 'test.eval'
    train_dir_path = 'trainDir'
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'RMSE',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '2',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--train-dir', train_dir_path,
        '--fstr-file', 'fstr.tsv',
        '--fstr-internal-file', 'ifstr.tsv'
    )
    execute_catboost_fit('CPU', cmd)
    outputs = ['time_left.tsv', 'learn_error.tsv', 'test_error.tsv', output_model_path, output_eval_path, 'fstr.tsv', 'ifstr.tsv']
    for output in outputs:
        assert os.path.isfile(train_dir_path + '/' + output)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('qwise_loss', ['QueryRMSE', 'RMSE'])
def test_train_on_binarized_equal_train_on_float(boosting_type, qwise_loss):
    """Verify that training with precomputed borders reproduces training on raw floats.

    First run trains normally and dumps its borders; second run feeds those
    borders back via '--input-borders-file'.  Predictions of both models on
    learn and test pools must be byte-identical.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_model_path_binarized = yatest.common.test_output_path('model_binarized.bin')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    borders_file = yatest.common.test_output_path('borders.tsv')
    borders_file_output = borders_file + '.out'
    predictions_path_learn = yatest.common.test_output_path('predictions_learn.tsv')
    predictions_path_learn_binarized = yatest.common.test_output_path('predictions_learn_binarized.tsv')
    predictions_path_test = yatest.common.test_output_path('predictions_test.tsv')
    predictions_path_test_binarized = yatest.common.test_output_path('predictions_test_binarized.tsv')

    learn_file = data_file('querywise', 'train')
    cd_file = data_file('querywise', 'train.cd')
    test_file = data_file('querywise', 'test')
    params = {"--loss-function": qwise_loss,
              "-f": learn_file,
              "-t": test_file,
              '--column-description': cd_file,
              '--boosting-type': boosting_type,
              '-i': '100',
              '-T': '4',
              '-m': output_model_path,
              '--learn-err-log': learn_error_path,
              '--test-err-log': test_error_path,
              '--use-best-model': 'false',
              '--output-borders-file': borders_file_output,
              }

    params_binarized = dict(params)
    # Second run consumes the borders produced by the first run.
    params_binarized['--input-borders-file'] = borders_file_output
    params_binarized['--output-borders-file'] = borders_file
    params_binarized['-m'] = output_model_path_binarized

    execute_catboost_fit(task_type='CPU', params=params)

    apply_catboost(output_model_path, learn_file, cd_file, predictions_path_learn)
    apply_catboost(output_model_path, test_file, cd_file, predictions_path_test)

    execute_catboost_fit(
        task_type='CPU',
        params=params_binarized,
    )

    apply_catboost(output_model_path_binarized, learn_file, cd_file, predictions_path_learn_binarized)
    apply_catboost(output_model_path_binarized, test_file, cd_file, predictions_path_test_binarized)

    assert (filecmp.cmp(predictions_path_learn, predictions_path_learn_binarized))
    assert (filecmp.cmp(predictions_path_test, predictions_path_test_binarized))

    return [local_canonical_file(learn_error_path),
            local_canonical_file(test_error_path),
            local_canonical_file(predictions_path_test),
            local_canonical_file(predictions_path_learn),
            local_canonical_file(borders_file)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_feature_id_fstr(boosting_type):
    """Train a Logloss model, then run 'fstr' with a cd file carrying feature ids; canonize the fstr output."""
    model_path = yatest.common.test_output_path('adult_model.bin')
    output_fstr_path = yatest.common.test_output_path('fstr.tsv')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_path,
    )
    execute_catboost_fit('CPU', cmd)
    # fstr uses a different cd file ('train_with_id.cd') so feature ids
    # appear in the importance output.
    fstr_cmd = (
        CATBOOST_PATH,
        'fstr',
        '--input-path', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train_with_id.cd'),
        '-m', model_path,
        '-o', output_fstr_path,
    )
    yatest.common.execute(fstr_cmd)
    return local_canonical_file(output_fstr_path)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_class_names_logloss(boosting_type):
    """Train Logloss with explicit '--class-names 1,0' (reversed order) and canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--class-names', '1,0'
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_class_names_multiclass(loss_function, boosting_type):
    """Train a multiclass loss with five explicit float-like class names and canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('precipitation_small', 'train_small'),
        '-t', data_file('precipitation_small', 'test_small'),
        '--column-description', data_file('precipitation_small', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', output_model_path,
        '--prediction-type', 'RawFormulaVal,Class',
        '--eval-file', output_eval_path,
        '--class-names', '0.,0.5,1.,0.25,0.75'
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_class_names_multiclass_last_class_missed(loss_function, boosting_type):
    """Same as test_class_names_multiclass but with a class-name order whose last name ('1.') is absent from the data ordering; canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('precipitation_small', 'train_small'),
        '-t', data_file('precipitation_small', 'test_small'),
        '--column-description', data_file('precipitation_small', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', output_model_path,
        '--prediction-type', 'RawFormulaVal,Class',
        '--eval-file', output_eval_path,
        '--class-names', '0.,0.5,0.25,0.75,1.',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_class_weight_logloss(boosting_type):
    """Train Logloss with '--class-weights 0.5,2' and canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--class-weights', '0.5,2'
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_class_weight_multiclass(loss_function, boosting_type):
    """Train each multiclass loss with '--class-weights 0.5,2' and canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--class-weights', '0.5,2'
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_params_from_file(boosting_type):
    """Train with extra options loaded via '--params-file params.json' and canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '6',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--params-file', data_file('params.json')
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
def test_lost_class(boosting_type, loss_function):
    """Train on a pool where one of the '--classes-count 3' classes is absent from learn data; canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('cloudness_lost_class', 'train_small'),
        '-t', data_file('cloudness_lost_class', 'test_small'),
        '--column-description', data_file('cloudness_lost_class', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--classes-count', '3',
        '--prediction-type', 'RawFormulaVal,Class',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_class_weight_with_lost_class(boosting_type):
    """Combine '--classes-count 3' with per-class weights on a pool missing one class; canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'MultiClass',
        '-f', data_file('cloudness_lost_class', 'train_small'),
        '-t', data_file('cloudness_lost_class', 'test_small'),
        '--column-description', data_file('cloudness_lost_class', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--classes-count', '3',
        '--class-weights', '0.5,2,2',
        '--prediction-type', 'RawFormulaVal,Class',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_one_hot(boosting_type, dev_score_calc_obj_block_size):
    """Train with '--one-hot-max-size 10', then verify 'calc' reproduces the fit-time eval; canonize it."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    calc_eval_path = yatest.common.test_output_path('calc.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '100',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '-x', '1',
        '-n', '8',
        '-w', '0.1',
        '--one-hot-max-size', '10'
    )
    execute_catboost_fit('CPU', cmd)
    # Standalone 'calc' on the same test pool must match the fit-time eval.
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-m', output_model_path,
        '--output-path', calc_eval_path
    )
    yatest.common.execute(calc_cmd)
    assert(compare_evals(output_eval_path, calc_eval_path))
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_random_strength(boosting_type, dev_score_calc_obj_block_size):
    """Train with a large '--random-strength 100' and canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '100',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '-x', '1',
        '-n', '8',
        '-w', '0.1',
        '--random-strength', '100'
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_only_categorical_features(boosting_type, dev_score_calc_obj_block_size):
    """Train on a cd file that marks every feature categorical; canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult_all_categorical.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '100',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '-x', '1',
        '-n', '8',
        '-w', '0.1',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_weight_sampling_per_tree(boosting_type, dev_score_calc_obj_block_size):
    """Train with '--sampling-frequency PerTree' and canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '--sampling-frequency', 'PerTree',
    )
    execute_catboost_fit('CPU', cmd)
    return local_canonical_file(output_eval_path)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('used_ram_limit', ['1Kb', '4Gb'])
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    ['600', '5000000'],
    ids=['calc_block=600', 'calc_block=5000000']
)
def test_allow_writing_files_and_used_ram_limit(boosting_type, used_ram_limit, dev_score_calc_obj_block_size):
    """Train with '--allow-writing-files false' under different '--used-ram-limit' values (including a tiny 1Kb) and canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--allow-writing-files', 'false',
        '--used-ram-limit', used_ram_limit,
        '--loss-function', 'Logloss',
        '--max-ctr-complexity', '5',
        '--depth', '7',
        '-f', data_file('airlines_5K', 'train'),
        '-t', data_file('airlines_5K', 'test'),
        '--column-description', data_file('airlines_5K', 'cd'),
        '--has-header',
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-w', '0.03',
        '-T', '6',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize(
    'ignored_features',
    [True, False],
    ids=['ignored_features=True', 'ignored_features=False']
)
def test_apply_with_permuted_columns(ignored_features):
    """Check that model application is invariant to dataset column order.

    Trains a model, permutes the test-set columns (with a matching permuted
    cd file) and asserts that 'calc' output equals the fit-time eval file.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--loss-function', 'Logloss',
        '-f', data_file('airlines_5K', 'train'),
        '-t', data_file('airlines_5K', 'test'),
        '--column-description', data_file('airlines_5K', 'cd'),
        '--has-header',
        '-i', '20',
        '-w', '0.03',
        '-T', '6',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
    )
    if ignored_features:
        cmd += ('--ignore-features', '0:2:5')
    execute_catboost_fit('CPU', cmd)
    permuted_test_path, permuted_cd_path = permute_dataset_columns(
        data_file('airlines_5K', 'test'),
        data_file('airlines_5K', 'cd'),
        seed=123)
    permuted_predict_path = yatest.common.test_output_path('permuted_predict.eval')
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', permuted_test_path,
        '--has-header',
        '--column-description', permuted_cd_path,
        '-m', output_model_path,
        '--output-path', permuted_predict_path,
        '--output-columns', 'SampleId,RawFormulaVal,Label'
    )
    yatest.common.execute(calc_cmd)
    assert filecmp.cmp(output_eval_path, permuted_predict_path)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_subsample_per_tree(boosting_type, grow_policy, dev_score_calc_obj_block_size):
    """Train with Bernoulli bootstrap, subsample 0.5, sampled per tree; canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '--sampling-frequency', 'PerTree',
        '--bootstrap-type', 'Bernoulli',
        '--subsample', '0.5',
    )
    execute_catboost_fit('CPU', cmd)
    return local_canonical_file(output_eval_path)
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_subsample_per_tree_level(boosting_type, grow_policy, dev_score_calc_obj_block_size):
    """Train with per-tree-level Bernoulli subsampling; Lossguide is expected to reject it.

    NOTE(review): on the Lossguide path the fit fails inside pytest.raises,
    yet the function still returns local_canonical_file(output_eval_path) —
    confirm the eval file is expected to exist (or be ignored) in that case.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '--sampling-frequency', 'PerTreeLevel',
        '--bootstrap-type', 'Bernoulli',
        '--subsample', '0.5',
    )
    if grow_policy == 'Lossguide':
        # PerTreeLevel sampling is incompatible with Lossguide growth.
        with pytest.raises(yatest.common.ExecutionError):
            execute_catboost_fit('CPU', cmd)
    else:
        execute_catboost_fit('CPU', cmd)
    return local_canonical_file(output_eval_path)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_bagging_per_tree_level(boosting_type, dev_score_calc_obj_block_size):
    """Train with '--bagging-temperature 0.5' (default Bayesian bootstrap) and canonize the eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '--bagging-temperature', '0.5',
    )
    execute_catboost_fit('CPU', cmd)
    return local_canonical_file(output_eval_path)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_plain(boosting_type, dev_score_calc_obj_block_size):
    """Train with boosting type forced to 'Plain' and canonize the eval.

    NOTE(review): '--boosting-type' is passed twice — once with the
    parametrized value and again hard-coded to 'Plain' — which makes the
    boosting_type parametrization either redundant or a conflict depending
    on which occurrence the CLI honors; confirm intent.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--boosting-type', 'Plain',
        '--eval-file', output_eval_path,
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_bootstrap(boosting_type, dev_score_calc_obj_block_size):
    """Check that 'No', degenerate Bayesian (temperature 0) and degenerate
    Bernoulli (subsample 1.0) bootstraps all yield identical evals; canonize
    the 'No' variant.
    """
    bootstrap_option = {
        'no': ('--bootstrap-type', 'No',),
        'bayes': ('--bootstrap-type', 'Bayesian', '--bagging-temperature', '0.0',),
        'bernoulli': ('--bootstrap-type', 'Bernoulli', '--subsample', '1.0',)
    }
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
    )
    # Run one fit per bootstrap configuration.
    for name, extra_options in bootstrap_option.items():
        model_path = yatest.common.test_output_path('model_' + name + '.bin')
        eval_path = yatest.common.test_output_path('test_' + name + '.eval')
        run_cmd = cmd + ('-m', model_path, '--eval-file', eval_path,) + extra_options
        execute_catboost_fit('CPU', run_cmd)

    # The degenerate bootstraps must be equivalent to no bootstrap at all.
    ref_eval_path = yatest.common.test_output_path('test_no.eval')
    for other in ('bayes', 'bernoulli'):
        other_eval_path = yatest.common.test_output_path('test_' + other + '.eval')
        assert filecmp.cmp(ref_eval_path, other_eval_path)

    return [local_canonical_file(ref_eval_path)]
def test_json_logging():
    """Train with '--json-log' and canonize the JSON log (timestamps stripped)."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    json_path = yatest.common.test_output_path('catboost_training.json')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-w', '0.03',
        '-i', '10',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--json-log', json_path,
    )
    execute_catboost_fit('CPU', cmd)
    # Times are nondeterministic, so they are removed before canonization.
    return [local_canonical_file(remove_time_from_json(json_path))]
def test_json_logging_metric_period():
    """Same as test_json_logging but with '--metric-period 2'; canonize the time-stripped JSON log."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    json_path = yatest.common.test_output_path('catboost_training.json')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--json-log', json_path,
        '--metric-period', '2',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(remove_time_from_json(json_path))]
def test_output_columns_format():
    """Exercise '--output-columns' (including the '#<index>' column syntax) in both fit and calc; canonize both outputs."""
    model_path = yatest.common.test_output_path('adult_model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        # Don't pass a test file: the eval is produced from the learn set here.
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--output-columns', 'SampleId,RawFormulaVal,#2,Label',
        '--eval-file', output_eval_path
    )
    execute_catboost_fit('CPU', cmd)
    formula_predict_path = yatest.common.test_output_path('predict_test.eval')
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-m', model_path,
        '--output-path', formula_predict_path,
        '--output-columns', 'SampleId,RawFormulaVal'
    )
    yatest.common.execute(calc_cmd)
    return local_canonical_file(output_eval_path, formula_predict_path)
def test_eval_period():
    """Run 'calc' with '--eval-period 2' (staged predictions every 2 trees) and canonize the output."""
    model_path = yatest.common.test_output_path('adult_model.bin')
    cmd = (
        '--use-best-model', 'false',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '10',
        '-T', '4',
        '-m', model_path,
    )
    execute_catboost_fit('CPU', cmd)
    formula_predict_path = yatest.common.test_output_path('predict_test.eval')
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-m', model_path,
        '--output-path', formula_predict_path,
        '--eval-period', '2'
    )
    yatest.common.execute(calc_cmd)
    return local_canonical_file(formula_predict_path)
def test_weights_output():
    """Train on a weighted pool and include the 'Weight' column in the eval output; canonize it."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult_weight', 'train_weight'),
        '-t', data_file('adult_weight', 'test_weight'),
        '--column-description', data_file('adult_weight', 'train.cd'),
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--output-columns', 'SampleId,RawFormulaVal,Weight,Label',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
def test_baseline_output():
    """Train with a baseline column in the cd file and include 'Baseline' in the eval output; canonize it."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult_weight', 'train_weight'),
        '-t', data_file('adult_weight', 'test_weight'),
        '--column-description', data_file('train_adult_baseline.cd'),
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--output-columns', 'SampleId,RawFormulaVal,Baseline,Label',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
def test_baseline_from_file_output():
    """Check that baselines given via separate files match in-pool baselines.

    First run: pool whose cd file contains a baseline column.  Second run:
    baseline-free cd file plus '--learn-baseline'/'--test-baseline' files
    (with feature 0 ignored to compensate for the shifted columns).  The
    two evals must be equivalent.

    Fix: the final compare_evals() result was silently discarded, so the
    equivalence was never actually checked; it is now asserted, matching the
    `assert(compare_evals(...))` usage elsewhere in this file.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    eval_0_path = yatest.common.test_output_path('test_0.eval')
    eval_1_path = yatest.common.test_output_path('test_1.eval')

    # Run 1: baseline taken from a column inside the pool.
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '--learn-set', data_file('higgs', 'train_small'),
        '--test-set', data_file('higgs', 'test_small'),
        '--column-description', data_file('higgs', 'train_baseline.cd'),
        '-i', '10',
        '--learning-rate', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', eval_0_path,
        '--output-columns', 'SampleId,RawFormulaVal',
    )
    execute_catboost_fit('CPU', cmd)

    # Run 2: same baselines supplied through external baseline files.
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '--learn-set', data_file('higgs', 'train_small'),
        '--test-set', data_file('higgs', 'test_small'),
        '--column-description', data_file('higgs', 'train_weight.cd'),
        '--learn-baseline', data_file('higgs', 'train_baseline'),
        '--test-baseline', data_file('higgs', 'test_baseline'),
        '-i', '10',
        '--ignore-features', '0',  # column 0 differs between the two cd layouts
        '--learning-rate', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', eval_1_path,
        '--output-columns', 'SampleId,RawFormulaVal',
    )
    execute_catboost_fit('CPU', cmd)

    assert compare_evals(eval_0_path, eval_1_path)
def test_group_weight_output():
    """The GroupWeight output column of both 'fit' and 'calc' must match column 0
    of the raw test pool (where train.cd.group_weight places the group weight)."""
    model_path = yatest.common.test_output_path('model.bin')
    fit_eval_path = yatest.common.test_output_path('test_0.eval')
    calc_eval_path = yatest.common.test_output_path('test_1.eval')
    fit_cmd = (
        CATBOOST_PATH,
        'fit',
        '--loss-function', 'QueryRMSE',
        '--learn-set', data_file('querywise', 'train'),
        '--test-set', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd.group_weight'),
        '-i', '10',
        '-m', model_path,
        '--eval-file', fit_eval_path,
        '--output-columns', 'SampleId,RawFormulaVal,GroupWeight'
    )
    yatest.common.execute(fit_cmd)
    fit_eval = pd.read_csv(fit_eval_path, sep='\t')
    # Column 0 of the test pool holds the expected group weights.
    test_group_weight = pd.read_csv(data_file('querywise', 'test'), sep='\t', header=None)[0]
    assert 'GroupWeight' in fit_eval.columns
    assert np.allclose(fit_eval['GroupWeight'], test_group_weight)
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '-m', model_path,
        '--input-path', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd.group_weight'),
        '--output-path', calc_eval_path,
        '--output-columns', 'SampleId,RawFormulaVal,GroupWeight'
    )
    yatest.common.execute(calc_cmd)
    calc_eval = pd.read_csv(calc_eval_path, sep='\t')
    assert 'GroupWeight' in calc_eval.columns
    assert np.allclose(calc_eval['GroupWeight'], test_group_weight)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
def test_multiclass_baseline_from_file(boosting_type, loss_function):
    """Multiclass training must accept file baselines whose class names match the
    model's, and must fail when the --class-names list disagrees with them.

    The third fit intentionally passes a permuted/garbled class-name list
    ('0.5,0.25,0.75.,0.') and is expected to error out; reaching `assert False`
    means the mismatch was silently accepted.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path_0 = yatest.common.test_output_path('test_0.eval')
    output_eval_path_1 = yatest.common.test_output_path('test_1.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('precipitation_small', 'train_small'),
        '-t', data_file('precipitation_small', 'train_small'),
        '--column-description', data_file('precipitation_small', 'train.cd'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', output_model_path,
        '--prediction-type', 'RawFormulaVal,Class',
        '--eval-file', output_eval_path_0,
    )
    execute_catboost_fit('CPU', cmd)
    # Feed the first run's raw predictions back as baselines with matching class names.
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', data_file('precipitation_small', 'train_small'),
        '-t', data_file('precipitation_small', 'train_small'),
        '--column-description', data_file('precipitation_small', 'train.cd'),
        '--learn-baseline', output_eval_path_0,
        '--test-baseline', output_eval_path_0,
        '--boosting-type', boosting_type,
        '-i', '10',
        '-T', '4',
        '-m', output_model_path,
        '--prediction-type', 'RawFormulaVal,Class',
        '--class-names', '0.,0.25,0.5,0.75',
        '--eval-file', output_eval_path_1,
    )
    execute_catboost_fit('CPU', cmd)
    try:
        cmd = (
            '--use-best-model', 'false',
            '--loss-function', loss_function,
            '-f', data_file('precipitation_small', 'train_small'),
            '-t', data_file('precipitation_small', 'train_small'),
            '--column-description', data_file('precipitation_small', 'train.cd'),
            '--learn-baseline', output_eval_path_0,
            '--test-baseline', output_eval_path_0,
            '--boosting-type', boosting_type,
            '-i', '10',
            '-T', '4',
            '-m', output_model_path,
            '--prediction-type', 'RawFormulaVal,Class',
            '--class-names', '0.5,0.25,0.75.,0.',
            '--eval-file', output_eval_path_1,
        )
        execute_catboost_fit('CPU', cmd)
    # Narrowed from a bare `except:` — only a failed catboost run is the
    # expected outcome; anything else (KeyboardInterrupt, typos in this test)
    # should still propagate.
    except yatest.common.ExecutionError:
        return [local_canonical_file(output_eval_path_0), local_canonical_file(output_eval_path_1)]
    assert False
def test_baseline_from_file_output_on_quantized_pool():
    """On a quantized pool, 10 iterations continued with the first run's eval as a
    baseline must equal a single 20-iteration run."""
    output_model_path = yatest.common.test_output_path('model.bin')
    eval_0_path = yatest.common.test_output_path('test_0.eval')
    eval_1_path = yatest.common.test_output_path('test_1.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '--learn-set', 'quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),
        '--test-set', 'quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),
        '--column-description', data_file('higgs', 'train_baseline.cd'),
        '--learning-rate', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', eval_0_path,
    )
    execute_catboost_fit('CPU', cmd + ('-i', '10'))
    # Continue for 10 more trees, seeding from the first eval; eval_0_path is
    # read as a baseline and then overwritten with the combined result.
    execute_catboost_fit('CPU', cmd + (
        '-i', '10',
        '--learn-baseline', eval_0_path,
        '--test-baseline', eval_0_path,
        '--eval-file', eval_0_path))
    execute_catboost_fit('CPU', cmd + (
        '-i', '20',
        '--eval-file', eval_1_path))
    compare_evals(eval_0_path, eval_1_path)
def test_query_output():
    """Canonize a QueryRMSE eval file that includes the GroupId output column."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--output-columns', 'SampleId,Label,RawFormulaVal,GroupId',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
def test_subgroup_output():
    """Canonize a QueryRMSE eval file that includes GroupId and SubgroupId columns."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd.subgroup_id'),
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--output-columns', 'GroupId,SubgroupId,SampleId,Label,RawFormulaVal',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_without_cat_features(boosting_type, dev_score_calc_obj_block_size):
    """RMSE on adult with one-hot size 102 so no ctrs are built; canonize eval."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'RMSE',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '10',
        '-T', '4',
        '-w', '0.1',
        '--one-hot-max-size', '102',
        '--bootstrap-type', 'No',
        '--random-strength', '0',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
def make_deterministic_train_cmd(loss_function, pool, train, test, cd, schema='', test_schema='', dev_score_calc_obj_block_size=None, other_options=()):
    """Build a deterministic `catboost fit` argument tuple for dist-train tests.

    Determinism comes from fixed iteration count/learning rate, zero random
    strength, no bootstrap, Plain boosting and --has-time (no row shuffling),
    so a local and a distributed run can be compared bit-for-bit.
    `schema`/`test_schema` prefix the pool paths (e.g. 'quantized://').
    """
    learn_path = schema + data_file(pool, train)
    eval_path = test_schema + data_file(pool, test)
    args = [
        '--loss-function', loss_function,
        '-f', learn_path,
        '-t', eval_path,
        '--column-description', data_file(pool, cd),
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '--random-strength', '0',
        '--has-time',
        '--bootstrap-type', 'No',
        '--boosting-type', 'Plain',
    ]
    if dev_score_calc_obj_block_size:
        args += ['--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size]
    return tuple(args) + tuple(other_options)
def run_dist_train(cmd, output_file_switch='--eval-file'):
    """Run `cmd` both locally and distributed, assert the two output files agree
    within 1e-5, and return the path of the distributed run's output."""
    local_out = yatest.common.test_output_path('test_0.eval')
    execute_catboost_fit('CPU', cmd + (output_file_switch, local_out,))
    dist_out = yatest.common.test_output_path('test_1.eval')
    execute_dist_train(cmd + (output_file_switch, dist_out,))
    local_scores = np.loadtxt(local_out, dtype='float', delimiter='\t', skiprows=1)
    dist_scores = np.loadtxt(dist_out, dtype='float', delimiter='\t', skiprows=1)
    assert np.allclose(local_scores, dist_scores, atol=1e-5)
    return dist_out
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train(dev_score_calc_obj_block_size):
    """Distributed Logloss training on higgs must match local; canonize eval."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='Logloss',
        pool='higgs',
        train='train_small',
        test='test_small',
        cd='train.cd',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_with_weights(dev_score_calc_obj_block_size):
    """Distributed training with a Weight column must match local; canonize eval."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='Logloss',
        pool='higgs',
        train='train_small',
        test='test_small',
        cd='train_weight.cd',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_with_baseline(dev_score_calc_obj_block_size):
    """Distributed training with a Baseline column must match local; canonize eval."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='Logloss',
        pool='higgs',
        train='train_small',
        test='test_small',
        cd='train_baseline.cd',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_multiclass(dev_score_calc_obj_block_size):
    """Distributed MultiClass training must match local; canonize eval."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='MultiClass',
        pool='cloudness_small',
        train='train_small',
        test='test_small',
        cd='train_float.cd',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_multiclass_weight(dev_score_calc_obj_block_size):
    """Distributed weighted MultiClass training must match local; canonize eval."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='MultiClass',
        pool='cloudness_small',
        train='train_small',
        test='test_small',
        cd='train_float_weight.cd',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_quantized(dev_score_calc_obj_block_size):
    """Distributed training on a quantized learn pool must match local.

    Border options mirror how the .bin pool was quantized (x128, GreedyLogSum).
    """
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='Logloss',
        pool='higgs',
        train='train_small_x128_greedylogsum.bin',
        test='test_small',
        cd='train.cd',
        schema='quantized://',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,
        other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum'))))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
@pytest.mark.parametrize('pairs_file', ['train.pairs', 'train.pairs.weighted'])
@pytest.mark.parametrize('target', ['PairLogitPairwise', 'QuerySoftMax'])
def test_dist_train_quantized_groupid(dev_score_calc_obj_block_size, pairs_file, target):
    """Distributed pairwise/querywise training on a quantized grouped pool with
    (optionally weighted) pairs must match local; canonize eval."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function=target,
        pool='querywise',
        train='train_x128_greedylogsum_aqtaa.bin',
        test='test',
        cd='train.cd.query_id',
        schema='quantized://',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,
        other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum',
                       '--learn-pairs', data_file('querywise', pairs_file)))))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_quantized_group_weights(dev_score_calc_obj_block_size):
    """Distributed QueryRMSE on a quantized pool with external group weights
    must match local; canonize eval."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='QueryRMSE',
        pool='querywise',
        train='train.quantized',
        test='test',
        cd='train.cd.query_id',
        schema='quantized://',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,
        other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum',
                       '--learn-group-weights', data_file('querywise', 'train.group_weights')))))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_quantized_baseline(dev_score_calc_obj_block_size):
    """Distributed training on quantized learn+test pools with external baseline
    files must match local; canonize eval."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='Logloss',
        pool='higgs',
        train='train_small_x128_greedylogsum.bin',
        test='train_small_x128_greedylogsum.bin',
        cd='train_baseline.cd',
        schema='quantized://',
        test_schema='quantized://',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,
        other_options=('-x', '128', '--feature-border-type', 'GreedyLogSum',
                       '--test-baseline', data_file('higgs', 'test_baseline'),
                       '--learn-baseline', data_file('higgs', 'train_baseline')))))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_queryrmse(dev_score_calc_obj_block_size):
    """Distributed QueryRMSE training must match local; canonize eval."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='QueryRMSE',
        pool='querywise',
        train='train',
        test='test',
        cd='train.cd.subgroup_id',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_subgroup(dev_score_calc_obj_block_size):
    """Distributed QueryRMSE with PFound eval metric: compare the test-error log
    (not the eval file) between local and distributed runs; canonize it."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='QueryRMSE',
        pool='querywise',
        train='train',
        test='test',
        cd='train.cd.subgroup_id',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,
        other_options=('--eval-metric', 'PFound')
    ), output_file_switch='--test-err-log'))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_pairlogit(dev_score_calc_obj_block_size):
    """Distributed PairLogit training with a pairs file must match local."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='PairLogit',
        pool='querywise',
        train='train',
        test='test',
        cd='train.cd.query_id',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,
        other_options=('--learn-pairs', data_file('querywise', 'train.pairs'))
    )))]
@pytest.mark.parametrize('pairs_file', ['train.pairs', 'train.pairs.weighted'])
def test_dist_train_pairlogitpairwise(pairs_file):
    """Distributed PairLogitPairwise with (optionally weighted) pairs must match local."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='PairLogitPairwise',
        pool='querywise',
        train='train',
        test='test',
        cd='train.cd',
        other_options=('--learn-pairs', data_file('querywise', pairs_file))
    )))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_dist_train_querysoftmax(dev_score_calc_obj_block_size):
    """Distributed QuerySoftMax training must match local; canonize eval."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='QuerySoftMax',
        pool='querywise',
        train='train',
        test='test',
        cd='train.cd.subgroup_id',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size)))]
@pytest.mark.parametrize('loss_func', ['Logloss', 'RMSE'])
def test_dist_train_auc(loss_func):
    """Distributed training with AUC as eval metric: compare test-error logs."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function=loss_func,
        pool='higgs',
        train='train_small',
        test='test_small',
        cd='train_baseline.cd',
        other_options=('--eval-metric', 'AUC')
    ), output_file_switch='--test-err-log'))]
@pytest.mark.parametrize('loss_func', ['Logloss', 'RMSE'])
def test_dist_train_auc_weight(loss_func):
    """Distributed weighted training with AUC eval metric: compare test-error logs.

    boost-from-average is disabled explicitly — presumably unsupported or
    non-deterministic in distributed mode (see xfail below) — TODO confirm.
    """
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function=loss_func,
        pool='higgs',
        train='train_small',
        test='test_small',
        cd='train_weight.cd',
        other_options=('--eval-metric', 'AUC', '--boost-from-average', '0')
    ), output_file_switch='--test-err-log'))]
@pytest.mark.xfail(reason='Boost from average for distributed training')
@pytest.mark.parametrize('schema,train', [('quantized://', 'train_small_x128_greedylogsum.bin'), ('', 'train_small')])
def test_dist_train_snapshot(schema, train):
    """Resuming distributed training from a snapshot (5 + 5 trees) must produce
    byte-identical eval to a straight local 10-tree run."""
    train_cmd = make_deterministic_train_cmd(
        loss_function='RMSE',
        pool='higgs',
        train=train,
        test='test_small',
        schema=schema,
        cd='train.cd')
    eval_10_trees_path = yatest.common.test_output_path('10_trees.eval')
    execute_catboost_fit('CPU', train_cmd + ('-i', '10', '--eval-file', eval_10_trees_path,))
    snapshot_path = yatest.common.test_output_path('snapshot')
    # First 5 trees write the snapshot; the second run resumes it up to 10.
    execute_dist_train(train_cmd + ('-i', '5', '--snapshot-file', snapshot_path,))
    eval_5_plus_5_trees_path = yatest.common.test_output_path('5_plus_5_trees.eval')
    execute_dist_train(train_cmd + ('-i', '10', '--eval-file', eval_5_plus_5_trees_path, '--snapshot-file', snapshot_path,))
    assert(filecmp.cmp(eval_10_trees_path, eval_5_plus_5_trees_path))
    return [local_canonical_file(eval_5_plus_5_trees_path)]
def test_dist_train_yetirank():
    """Distributed YetiRank on a single repeated query: compare test-error logs."""
    return [local_canonical_file(run_dist_train(make_deterministic_train_cmd(
        loss_function='YetiRank',
        pool='querywise',
        train='repeat_same_query_8_times',
        test='repeat_same_query_8_times',
        cd='train.cd'
    ), output_file_switch='--test-err-log'))]
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
@pytest.mark.parametrize(
    'one_hot_max_size',
    [2, 255],
    ids=['one_hot_max_size=2', 'one_hot_max_size=255']
)
def test_dist_train_with_cat_features(dev_score_calc_obj_block_size, one_hot_max_size):
    """With one_hot_max_size=2 some cat features need ctrs, which this distributed
    setup is expected to reject; with 255 everything is one-hot and must match local."""
    cmd = make_deterministic_train_cmd(
        loss_function='Logloss',
        pool='adult',
        train='train_small',
        test='test_small',
        cd='train.cd',
        dev_score_calc_obj_block_size=dev_score_calc_obj_block_size,
        other_options=('--one-hot-max-size', str(one_hot_max_size))
    )
    if one_hot_max_size == 2:
        with pytest.raises(yatest.common.ExecutionError):
            run_dist_train(cmd)
    else:
        return [local_canonical_file(run_dist_train(cmd))]
def test_no_target():
    """Fitting a pool whose column description declares no Target must fail."""
    learn_file = yatest.common.test_output_path('train')
    cd_file = yatest.common.test_output_path('train.cd')
    pairs_file = yatest.common.test_output_path('pairs')
    np.savetxt(learn_file, [[0], [1], [2], [3], [4]], delimiter='\t', fmt='%.4f')
    np.savetxt(cd_file, [('0', 'Num')], delimiter='\t', fmt='%s')
    np.savetxt(pairs_file, [[0, 1], [0, 2], [0, 3], [2, 4]], delimiter='\t', fmt='%i')
    fit_args = (
        '-f', learn_file,
        '--cd', cd_file,
        '--learn-pairs', pairs_file
    )
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', fit_args)
@pytest.mark.parametrize('loss_function', ALL_LOSSES)
def test_const_target(loss_function):
    """A pool whose target is constant must be rejected for every loss function."""
    learn_file = yatest.common.test_output_path('train')
    cd_file = yatest.common.test_output_path('train.cd')
    # Target (column 0) is always 0; column 2 only makes the rows distinct.
    rows = [[0, 0, tail] for tail in range(5)]
    np.savetxt(learn_file, rows, delimiter='\t', fmt='%.4f')
    np.savetxt(cd_file, [('0', 'Target'), ('1', 'GroupId')], delimiter='\t', fmt='%s')
    fit_args = (
        '--loss-function', loss_function,
        '-f', learn_file,
        '--cd', cd_file,
    )
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', fit_args)
def test_negative_weights():
    """A pool containing a negative object weight must make training fail."""
    train_path = yatest.common.test_output_path('train')
    cd_path = yatest.common.test_output_path('train.cd')
    # Use a context manager so the cd file is flushed and closed before catboost
    # reads it (the original `open(...).write(...)` leaked the handle).
    with open(cd_path, 'wt') as cd_file:
        cd_file.write('0\tNum\n1\tWeight\n2\tTarget\n')
    np.savetxt(train_path, [
        [0, 1, 2],
        [1, -1, 1]], delimiter='\t', fmt='%.4f')  # second row: weight -1
    cmd = ('-f', train_path,
           '--cd', cd_path,
           )
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', cmd)
def test_zero_learning_rate():
    """--learning-rate 0.0 must be rejected by the fit command."""
    train_path = yatest.common.test_output_path('train')
    cd_path = yatest.common.test_output_path('train.cd')
    # Use a context manager so the cd file is flushed and closed before catboost
    # reads it (the original `open(...).write(...)` leaked the handle).
    with open(cd_path, 'wt') as cd_file:
        cd_file.write(
            '0\tNum\n'
            '1\tNum\n'
            '2\tTarget\n')
    np.savetxt(train_path, [
        [0, 1, 2],
        [1, 1, 1]], delimiter='\t', fmt='%.4f')
    cmd = ('-f', train_path,
           '--cd', cd_path,
           '--learning-rate', '0.0',
           )
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', cmd)
def do_test_eval_metrics(metric, metric_period, train, test, cd, loss_function, additional_train_params=(), additional_eval_params=()):
    """Train with `metric` as eval metric, then recompute it via `eval-metrics`;
    the per-period values from both paths must agree to 8 decimals.

    Returns the eval-metrics output as a canonical file.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    eval_path = yatest.common.test_output_path('output.tsv')
    cmd = (
        '--loss-function', loss_function,
        '--eval-metric', metric,
        '-f', train,
        '-t', test,
        '--column-description', cd,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
        '--metric-period', metric_period
    ) + additional_train_params
    execute_catboost_fit('CPU', cmd)
    cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', metric,
        '--input-path', test,
        '--column-description', cd,
        '-m', output_model_path,
        '-o', eval_path,
        '--block-size', '100',
        '--eval-period', metric_period,
        '--save-stats'
    ) + additional_eval_params
    yatest.common.execute(cmd)
    # Column 1 holds the metric values in both files; compare after rounding.
    first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1], 8)
    second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)
    assert np.all(first_metrics == second_metrics)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('metric_period', ['1', '2'])
@pytest.mark.parametrize('metric', ['Logloss', 'F1', 'Accuracy', 'PFound', 'TotalF1', 'MCC', 'PairAccuracy'])
def test_eval_metrics(metric, metric_period):
    """Pick the pool/loss appropriate for the metric, then delegate the
    fit-vs-eval-metrics comparison to do_test_eval_metrics."""
    special_losses = {'PFound': 'QueryRMSE', 'PairAccuracy': 'PairLogitPairwise'}
    if metric in special_losses:
        loss_function = special_losses[metric]
        train = data_file('querywise', 'train')
        test = data_file('querywise', 'test')
        cd = data_file('querywise', 'train.cd')
    else:
        loss_function = 'Logloss'
        train = data_file('adult', 'train_small')
        test = data_file('adult', 'test_small')
        cd = data_file('adult', 'train.cd')
    return do_test_eval_metrics(metric, metric_period, train, test, cd, loss_function)
def test_eval_metrics_with_target_border():
    """eval-metrics must reproduce fit's Logloss when training binarizes a
    real-valued target with --target-border."""
    return do_test_eval_metrics(
        metric='Logloss',
        metric_period='1',
        train=data_file('adult_not_binarized', 'train_small'),
        test=data_file('adult_not_binarized', 'test_small'),
        cd=data_file('adult_not_binarized', 'train.cd'),
        loss_function='Logloss',
        additional_train_params=('--target-border', '0.4')
    )
def test_eval_metrics_with_class_weights():
    """eval-metrics must reproduce fit's Logloss when training uses class weights."""
    return do_test_eval_metrics(
        metric='Logloss',
        metric_period='1',
        train=data_file('adult', 'train_small'),
        test=data_file('adult', 'test_small'),
        cd=data_file('adult', 'train.cd'),
        loss_function='Logloss',
        additional_train_params=('--class-weights', '0.25,0.75')
    )
def test_eval_metrics_with_target_border_and_class_weights():
    """eval-metrics must reproduce fit's Logloss when training combines a target
    border with class weights."""
    return do_test_eval_metrics(
        metric='Logloss',
        metric_period='1',
        train=data_file('adult_not_binarized', 'train_small'),
        test=data_file('adult_not_binarized', 'test_small'),
        cd=data_file('adult_not_binarized', 'train.cd'),
        loss_function='Logloss',
        additional_train_params=('--target-border', '0.4', '--class-weights', '0.25,0.75')
    )
@pytest.mark.parametrize('config', [('Constant', 0.2, 0.1), ('Constant', 2, 0.1), ('Decreasing', 0.2, 0.1)])
def test_eval_metrics_with_boost_from_average_and_model_shrinkage(config):
    """With model shrinkage + boost-from-average, eval-metrics must reproduce the
    final-iteration Logloss of fit's learn and test error logs.

    Only the last row is compared: with shrinkage the intermediate per-period
    values may legitimately differ between the two computation paths.
    """
    mode, rate, lr = config
    train = data_file('higgs', 'train_small')
    test = data_file('higgs', 'test_small')
    cd = data_file('higgs', 'train.cd')
    loss_function = 'Logloss'
    output_model_path = yatest.common.test_output_path('model.bin')
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    cmd = (
        '--loss-function', loss_function,
        '--eval-metric', 'Logloss',
        '-f', train,
        '-t', test,
        '--column-description', cd,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
        '--metric-period', '10',
        '--learn-err-log', learn_error_path,
        '--model-shrink-mode', mode,
        '--model-shrink-rate', str(rate),
        '--boost-from-average', 'true'
    )
    execute_catboost_fit('CPU', cmd)
    test_eval_path = yatest.common.test_output_path('test_output.tsv')
    learn_eval_path = yatest.common.test_output_path('learn_output.tsv')
    cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', 'Logloss',
        '--input-path', train,
        '--column-description', cd,
        '-m', output_model_path,
        '-o', learn_eval_path,
        '--block-size', '100',
        '--eval-period', '10',
        '--save-stats',
    )
    yatest.common.execute(cmd)
    cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', 'Logloss',
        '--input-path', test,
        '--column-description', cd,
        '-m', output_model_path,
        '-o', test_eval_path,
        '--block-size', '100',
        '--eval-period', '10',
        '--save-stats',
    )
    yatest.common.execute(cmd)
    test_first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1:], 8)
    test_second_metrics = np.round(np.loadtxt(test_eval_path, skiprows=1)[:, 1:], 8)
    learn_first_metrics = np.round(np.loadtxt(learn_error_path, skiprows=1)[:, 1:], 8)
    learn_second_metrics = np.round(np.loadtxt(learn_eval_path, skiprows=1)[:, 1:], 8)
    # The [-1] rows are numpy arrays; a bare `==` assert raises "truth value is
    # ambiguous" once there is more than one metric column, so reduce with np.all.
    assert np.all(test_first_metrics[-1] == test_second_metrics[-1])
    assert np.all(learn_first_metrics[-1] == learn_second_metrics[-1])
@pytest.mark.parametrize('metrics', ['AUC', 'AUC,Precision'])
def test_eval_metrics_with_binarized_target(metrics):
    """Custom metrics computed during fit on a --target-border-binarized target
    must match a post-hoc eval-metrics run to 8 decimals."""
    train = data_file('adult', 'train_small')
    test = data_file('adult', 'test_small')
    cd = data_file('adult', 'train.cd')
    loss_function = 'Logloss'
    output_model_path = yatest.common.test_output_path('model.bin')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    cmd = (
        '--loss-function', loss_function,
        '-f', train,
        '-t', test,
        '--column-description', cd,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
        '--target-border', '0.25',
        '--custom-metric', metrics,
    )
    execute_catboost_fit('CPU', cmd)
    eval_path = yatest.common.test_output_path('output.tsv')
    cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', metrics,
        '--input-path', test,
        '--column-description', cd,
        '-m', output_model_path,
        '-o', eval_path,
        '--block-size', '100',
        '--save-stats',
    )
    yatest.common.execute(cmd)
    # Fit's log has the loss in column 1 and custom metrics from column 2 on;
    # eval-metrics starts its metric columns at 1, hence the offset difference.
    first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 2:], 8)
    second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1:], 8)
    assert np.all(first_metrics == second_metrics)
@pytest.mark.parametrize('metric_period', ['1', '2'])
@pytest.mark.parametrize('metric', ['MultiClass', 'MultiClassOneVsAll', 'F1', 'Accuracy', 'TotalF1', 'MCC', 'Precision', 'Recall'])
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
@pytest.mark.parametrize('dataset', ['cloudness_small', 'cloudness_lost_class'])
def test_eval_metrics_multiclass(metric, loss_function, dataset, metric_period):
    """Multiclass custom metrics from fit must match a post-hoc eval-metrics run."""
    # Skip redundant combinations where the custom metric is itself a multiclass
    # loss different from the training loss.
    if metric in MULTICLASS_LOSSES and metric != loss_function:
        return
    train, test, cd = data_file(dataset, 'train_small'), data_file(dataset, 'test_small'), data_file(dataset, 'train.cd')
    output_model_path = yatest.common.test_output_path('model.bin')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    eval_path = yatest.common.test_output_path('output.tsv')
    cmd = (
        '--loss-function', loss_function,
        '--custom-metric', metric,
        '-f', train,
        '-t', test,
        '--column-description', cd,
        '-i', '10',
        '-T', '4',
        '-m', output_model_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
        '--classes-count', '3',
        '--metric-period', metric_period
    )
    execute_catboost_fit('CPU', cmd)
    cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', metric,
        '--input-path', test,
        '--column-description', cd,
        '-m', output_model_path,
        '-o', eval_path,
        '--block-size', '100',
        '--eval-period', metric_period,
        '--save-stats'
    )
    yatest.common.execute(cmd)
    # When the custom metric equals the loss it sits in column 1 of fit's log;
    # otherwise the loss occupies column 1 and the metric starts at column 2.
    start_index = 1 if metric == loss_function else 2
    first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, start_index:], 8)
    second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1:], 8)
    assert np.all(first_metrics == second_metrics)
    return [local_canonical_file(eval_path)]
def test_eval_metrics_class_names():
    """With string class names, AUC variants and TotalF1 computed during fit must
    match a post-hoc eval-metrics run to 8 decimals."""
    labels = ['a', 'b', 'c', 'd']
    model_path = yatest.common.test_output_path('model.bin')
    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')
    prng = np.random.RandomState(seed=0)
    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')
    test_path = yatest.common.test_output_path('test.txt')
    np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')
    eval_path = yatest.common.test_output_path('eval.txt')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    cmd = (
        '--loss-function', 'MultiClass',
        '--custom-metric', 'TotalF1,AUC:type=OneVsAll,AUC:type=Mu,AUC:misclass_cost_matrix=0/0.239/1/-1/0.5/0/1.5/-1.2/1/0.67/0/1.3/-0.5/1/0.5/0',
        '-f', train_path,
        '-t', test_path,
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
        '--class-names', ','.join(labels),
    )
    eval_cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', 'TotalF1,AUC:type=OneVsAll,AUC:type=Mu,AUC:misclass_cost_matrix=0/0.239/1/-1/0.5/0/1.5/-1.2/1/0.67/0/1.3/-0.5/1/0.5/0',
        '--input-path', test_path,
        '--column-description', cd_path,
        '-m', model_path,
        '-o', eval_path,
        '--block-size', '100',
        '--save-stats'
    )
    # Train once, then evaluate (the original ran the identical fit twice,
    # retraining and overwriting the same model for no effect).
    execute_catboost_fit('CPU', cmd)
    yatest.common.execute(eval_cmd)
    first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 2], 8)
    second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)
    assert np.all(first_metrics == second_metrics)
@pytest.mark.parametrize('metric_period', ['1', '2'])
@pytest.mark.parametrize('metric', ['Accuracy', 'AUC:type=Ranking'])
def test_eval_metrics_with_baseline(metric_period, metric):
    """Eval metrics on a pool with a Baseline column: fit's test-error log and a
    post-hoc eval-metrics run must agree to 8 decimals; canonize the latter."""
    train = data_file('adult_weight', 'train_weight')
    test = data_file('adult_weight', 'test_weight')
    cd = data_file('train_adult_baseline.cd')
    output_model_path = yatest.common.test_output_path('model.bin')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    eval_path = yatest.common.test_output_path('output.tsv')
    cmd = (
        '--loss-function', 'Logloss',
        '--eval-metric', metric,
        '-f', train,
        '-t', test,
        '--column-description', cd,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
        '--metric-period', metric_period
    )
    execute_catboost_fit('CPU', cmd)
    cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', metric,
        '--input-path', test,
        '--column-description', cd,
        '-m', output_model_path,
        '-o', eval_path,
        '--block-size', '100',
        '--eval-period', metric_period,
        '--save-stats'
    )
    yatest.common.execute(cmd)
    first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1], 8)
    second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)
    assert np.all(first_metrics == second_metrics)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('metric_period', ['1', '2'])
@pytest.mark.parametrize('metric', ['Accuracy'])
def test_eval_metrics_multiclass_with_baseline(metric_period, metric):
    """Multiclass eval metrics on a synthetic pool with per-class Baseline
    columns: fit's log and eval-metrics must agree; canonize the eval output."""
    labels = [0, 1, 2, 3]
    cd_path = yatest.common.test_output_path('cd.txt')
    # One Baseline column per class (4 classes).
    np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline'], [3, 'Baseline'], [4, 'Baseline']], fmt='%s', delimiter='\t')
    prng = np.random.RandomState(seed=0)
    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')
    test_path = yatest.common.test_output_path('test.txt')
    np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')
    output_model_path = yatest.common.test_output_path('model.bin')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    eval_path = yatest.common.test_output_path('output.tsv')
    cmd = (
        '--loss-function', 'MultiClass',
        '--eval-metric', metric,
        '-f', train_path,
        '-t', test_path,
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '-m', output_model_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
        '--classes-count', '4',
        '--metric-period', metric_period
    )
    execute_catboost_fit('CPU', cmd)
    cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', metric,
        '--input-path', test_path,
        '--column-description', cd_path,
        '-m', output_model_path,
        '-o', eval_path,
        '--block-size', '100',
        '--eval-period', metric_period,
        '--save-stats'
    )
    yatest.common.execute(cmd)
    first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 1], 8)
    second_metrics = np.round(np.loadtxt(eval_path, skiprows=1)[:, 1], 8)
    assert np.all(first_metrics == second_metrics)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_ctr_leaf_count_limit(boosting_type, dev_score_calc_obj_block_size):
    """Smoke-test fitting with --ctr-leaf-count-limit; canonize the eval file."""
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '--ctr-leaf-count-limit', '10',
        '-i', '30',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize('loss_function', ['RMSE', 'Logloss', 'CrossEntropy'])
def test_boost_from_average(boosting_type, grow_policy, loss_function):
    """--boost-from-average must be equivalent to supplying the mean target
    (link-transformed for Logloss/CrossEntropy) as an explicit Baseline column.

    Trains twice — once with a baselined pool and boost-from-average off, once
    with the original pool and boost-from-average on — and requires the eval
    outputs to match up to precision; also checks 'calc' agrees with fit eval.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_calc_eval_path = yatest.common.test_output_path('test_calc.eval')
    output_eval_path_with_avg = yatest.common.test_output_path('test_avg.eval')
    output_eval_path_with_baseline = yatest.common.test_output_path('test_baseline.eval')

    baselined_train = yatest.common.test_output_path('baselined_train')
    baselined_test = yatest.common.test_output_path('baselined_test')
    baselined_cd = yatest.common.test_output_path('baselined.cd')

    train_path = data_file('adult', 'train_small')
    test_path = data_file('adult', 'test_small')
    original_cd = data_file('adult', 'train.cd')

    # Accumulate in float32 to mirror CatBoost's internal target precision.
    sum_target = np.float32(0)
    obj_count = np.float32(0)
    with open(train_path) as train_f:
        for line in train_f:
            obj_count += 1
            sum_target += np.float32(line.split()[1])

    mean_target = sum_target / obj_count
    if loss_function in ['Logloss', 'CrossEntropy']:
        # Logit transform: baseline lives in raw-formula (log-odds) space.
        mean_target = -np.log(1 / mean_target - 1)
    mean_target_str = str(mean_target)

    def append_baseline_to_pool(source, target):
        # Append the constant baseline as an extra tab-separated column.
        with open(source) as source_f, open(target, 'w') as target_f:
            for line in source_f:
                target_f.write(line.rstrip('\n') + '\t' + mean_target_str + '\n')

    append_baseline_to_pool(train_path, baselined_train)
    append_baseline_to_pool(test_path, baselined_test)

    with open(baselined_cd, 'w') as cd_output, open(original_cd) as cd_input:
        for line in cd_input:
            cd_output.write(line)
        # Declare the appended column (index 18) as the Baseline.
        cd_output.write('18\tBaseline\n')

    base_cmd = (
        '--loss-function', loss_function,
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '-i', '30',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
    )
    execute_catboost_fit('CPU', base_cmd + (
        '-f', baselined_train,
        '-t', baselined_test,
        '--boost-from-average', '0',
        '--column-description', baselined_cd,
        '--eval-file', output_eval_path_with_baseline,
    ))
    execute_catboost_fit('CPU', base_cmd + (
        '-f', train_path,
        '-t', test_path,
        '--boost-from-average', '1',
        '--column-description', original_cd,
        '--eval-file', output_eval_path_with_avg,
    ))
    yatest.common.execute((
        CATBOOST_PATH, 'calc',
        '--cd', original_cd,
        '--input-path', test_path,
        '-m', output_model_path,
        '-T', '1',
        '--output-path', output_calc_eval_path,
    ))
    assert compare_fit_evals_with_precision(output_eval_path_with_avg, output_eval_path_with_baseline)
    assert compare_evals(output_eval_path_with_avg, output_calc_eval_path)
    return [local_canonical_file(output_eval_path_with_avg)]
@pytest.mark.parametrize('eval_period', ['1', '2'])
def test_eval_non_additive_metric(eval_period):
    """Evaluating a non-additive metric (AUC) must give identical results with
    and without --calc-on-parts (block-wise processing of the input pool).
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', output_model_path,
    )
    execute_catboost_fit('CPU', cmd)

    cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', 'AUC:hints=skip_train~false',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-m', output_model_path,
        '-o', output_eval_path,
        '--eval-period', eval_period,
        '--block-size', '10'
    )
    yatest.common.execute(cmd)

    output_eval_in_parts = yatest.common.test_output_path('eval_in_parts.eval')
    cmd = (
        CATBOOST_PATH,
        'eval-metrics',
        '--metrics', 'AUC:hints=skip_train~false',
        '--input-path', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-m', output_model_path,
        '-o', output_eval_in_parts,
        '--eval-period', eval_period,
        '--calc-on-parts',
        '--block-size', '10'
    )
    yatest.common.execute(cmd)

    first_metrics = np.loadtxt(output_eval_path, skiprows=1)
    second_metrics = np.loadtxt(output_eval_in_parts, skiprows=1)
    assert np.all(first_metrics == second_metrics)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize('max_ctr_complexity', [1, 2])
def test_eval_eq_calc(boosting_type, grow_policy, max_ctr_complexity):
    """Predictions from the fit-time --eval-file must equal 'calc' output on a
    tiny synthetic pool with categorical features (the test pool deliberately
    contains category values unseen during training).
    """
    one_hot_max_size = 2
    cd_path = yatest.common.test_output_path('cd.txt')
    train_path = yatest.common.test_output_path('train.txt')
    test_path = yatest.common.test_output_path('test.txt')
    model_path = yatest.common.test_output_path('model.bin')
    test_eval_path = yatest.common.test_output_path('test.eval')
    calc_eval_path = yatest.common.test_output_path('calc.eval')

    np.savetxt(cd_path, [['0', 'Target'],
                         ['1', 'Categ'],
                         ['2', 'Categ']
                         ], fmt='%s', delimiter='\t')
    np.savetxt(train_path, [['1', 'A', 'X'],
                            ['1', 'B', 'Y'],
                            ['1', 'C', 'Y'],
                            ['0', 'A', 'Z'],
                            ['0', 'B', 'Z'],
                            ], fmt='%s', delimiter='\t')
    # 'D' and 'U' never occur in the train pool.
    np.savetxt(test_path, [['1', 'A', 'Y'],
                           ['1', 'D', 'U'],
                           ['1', 'D', 'U']
                           ], fmt='%s', delimiter='\t')

    cmd_fit = ('--loss-function', 'Logloss',
               '--boosting-type', boosting_type,
               '--grow-policy', grow_policy,
               '--cd', cd_path,
               '-f', train_path,
               '-t', test_path,
               '-m', model_path,
               '--eval-file', test_eval_path,
               '-i', '5',
               '-T', '1',
               '--max-ctr-complexity', str(max_ctr_complexity),
               '--one-hot-max-size', str(one_hot_max_size),
               )
    cmd_calc = (CATBOOST_PATH, 'calc',
                '--cd', cd_path,
                '--input-path', test_path,
                '-m', model_path,
                '-T', '1',
                '--output-path', calc_eval_path,
                )
    execute_catboost_fit('CPU', cmd_fit)
    yatest.common.execute(cmd_calc)
    assert(compare_evals(test_eval_path, calc_eval_path))
def do_test_object_importances(pool, loss_function, additional_train_params):
    """Train on `pool`, run the 'ostr' (object importances) mode, and canonize
    its output.

    additional_train_params: extra CLI arguments appended to the fit command.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    object_importances_path = yatest.common.test_output_path('object_importances.tsv')
    cmd = (
        '--loss-function', loss_function,
        '-f', data_file(pool, 'train_small'),
        '-t', data_file(pool, 'test_small'),
        '--column-description', data_file(pool, 'train.cd'),
        '-i', '10',
        '--boosting-type', 'Plain',
        '-T', '4',
        '-m', output_model_path,
        '--use-best-model', 'false'
    ) + additional_train_params
    execute_catboost_fit('CPU', cmd)

    cmd = (
        CATBOOST_PATH,
        'ostr',
        '-f', data_file(pool, 'train_small'),
        '-t', data_file(pool, 'test_small'),
        '--column-description', data_file(pool, 'train.cd'),
        '-m', output_model_path,
        '-o', object_importances_path,
    )
    yatest.common.execute(cmd)

    return [local_canonical_file(object_importances_path)]
@pytest.mark.parametrize('loss_function', ['RMSE', 'Logloss', 'Poisson'])
@pytest.mark.parametrize('leaf_estimation_iteration', ['1', '2'])
def test_object_importances(loss_function, leaf_estimation_iteration):
    """Object importances with Gradient leaf estimation over several losses."""
    additional_train_params = (
        '--leaf-estimation-method', 'Gradient',
        '--leaf-estimation-iterations', leaf_estimation_iteration
    )
    return do_test_object_importances(
        pool='adult',
        loss_function=loss_function,
        additional_train_params=additional_train_params
    )
def test_object_importances_with_target_border():
    """Object importances when the target is binarized via --target-border."""
    return do_test_object_importances(
        pool='adult_not_binarized',
        loss_function='Logloss',
        additional_train_params=('--target-border', '0.4')
    )
def test_object_importances_with_class_weights():
    """Object importances with unequal per-class weights."""
    return do_test_object_importances(
        pool='adult',
        loss_function='Logloss',
        additional_train_params=('--class-weights', '0.25,0.75')
    )
def test_object_importances_with_target_border_and_class_weights():
    """Object importances with --target-border combined with class weights."""
    return do_test_object_importances(
        pool='adult_not_binarized',
        loss_function='Logloss',
        additional_train_params=('--target-border', '0.4', '--class-weights', '0.25,0.75')
    )
def split_test_to(num_tests, test_input_path):
    """Split the lines of test_input_path round-robin into num_tests files.

    File i receives lines i, i+num_tests, i+2*num_tests, ... of the input.
    Returns the list of output file paths.
    """
    # Use context managers: the original left both the input and every output
    # file handle unclosed, risking unflushed writes before the files are read
    # back by the caller.
    with open(test_input_path) as test_input_f:
        test_input_lines = test_input_f.readlines()
    test_paths = [yatest.common.test_output_path('test{}'.format(i)) for i in range(num_tests)]
    for testno, test_path in enumerate(test_paths):
        with open(test_path, 'wt') as test_f:
            test_f.write(''.join(test_input_lines[testno::num_tests]))
    return test_paths
def create_test_shuffles(test_paths, seed=20181219, prng=None):
    """Return len(test_paths) distinct orderings of test_paths, each encoded
    as a comma-joined string (the CLI's multi-eval-set syntax).
    """
    rng = np.random.RandomState(seed=seed) if prng is None else prng
    wanted = len(test_paths)
    distinct_orders = set()
    # Keep drawing random permutations until enough distinct ones are found.
    while len(distinct_orders) < wanted:
        distinct_orders.add(tuple(rng.permutation(test_paths)))
    return [','.join(order) for order in distinct_orders]
def fit_calc_cksum(fit_stem, calc_stem, test_shuffles):
    """Fit and apply a model for every eval-set shuffle and assert that all
    resulting eval files are byte-identical (via MD5 checksums).

    fit_stem / calc_stem: common CLI argument tuples to extend per shuffle.
    """
    import hashlib
    last_cksum = None
    for i, shuffle in enumerate(test_shuffles):
        model_path = yatest.common.test_output_path('model{}.bin'.format(i))
        eval_path = yatest.common.test_output_path('eval{}.txt'.format(i))
        execute_catboost_fit('CPU', fit_stem + (
            '-t', shuffle,
            '-m', model_path,
        ))
        yatest.common.execute(calc_stem + (
            '-m', model_path,
            '--output-path', eval_path,
        ))
        # hashlib.md5 requires bytes in Python 3 — the original passed the
        # decoded str (a TypeError) and also leaked the file handle.
        with open(eval_path, 'rb') as eval_f:
            cksum = hashlib.md5(eval_f.read()).hexdigest()
        if last_cksum is None:
            last_cksum = cksum
            continue
        assert(last_cksum == cksum)
@pytest.mark.parametrize('num_tests', [3, 4])
@pytest.mark.parametrize('boosting_type', ['Plain', 'Ordered'])
def test_multiple_eval_sets_order_independent(boosting_type, num_tests):
    """Training output must not depend on the order in which multiple eval
    sets are listed: fit/calc over several shuffles must produce identical
    eval files (checked by fit_calc_cksum).
    """
    train_path = data_file('adult', 'train_small')
    cd_path = data_file('adult', 'train.cd')
    test_input_path = data_file('adult', 'test_small')
    fit_stem = (
        '--loss-function', 'RMSE',
        '-f', train_path,
        '--cd', cd_path,
        '--boosting-type', boosting_type,
        '-i', '5',
        '-T', '4',
        '--use-best-model', 'false',
    )
    calc_stem = (
        CATBOOST_PATH, 'calc',
        '--cd', cd_path,
        '--input-path', test_input_path,
        '-T', '4',
    )
    # The order in which tests are listed in the '-t' option is permuted.
    prng = np.random.RandomState(seed=20181219)
    test_shuffles = create_test_shuffles(split_test_to(num_tests, test_input_path), prng=prng)
    fit_calc_cksum(fit_stem, calc_stem, test_shuffles)
@pytest.mark.parametrize('num_tests', [3, 4])
@pytest.mark.parametrize('boosting_type', ['Plain', 'Ordered'])
def test_multiple_eval_sets_querywise_order_independent(boosting_type, num_tests):
    """Same eval-set order-independence check as above, but for a querywise
    loss (QueryRMSE) with grouped data.
    """
    train_path = data_file('querywise', 'train')
    cd_path = data_file('querywise', 'train.cd.query_id')
    test_input_path = data_file('querywise', 'test')
    fit_stem = (
        '--loss-function', 'QueryRMSE',
        '-f', train_path,
        '--cd', cd_path,
        '--boosting-type', boosting_type,
        '-i', '5',
        '-T', '4',
        '--use-best-model', 'false',
    )
    calc_stem = (CATBOOST_PATH, 'calc',
                 '--cd', cd_path,
                 '--input-path', test_input_path,
                 '-T', '4',
                 )
    # The order in which tests are listed in the '-t' option is permuted.
    prng = np.random.RandomState(seed=20181219)
    test_shuffles = create_test_shuffles(split_test_to(num_tests, test_input_path), prng=prng)
    fit_calc_cksum(fit_stem, calc_stem, test_shuffles)
def test_multiple_eval_sets_no_empty():
    """Supplying an empty file among multiple eval sets must fail the fit."""
    train_path = data_file('adult', 'train_small')
    cd_path = data_file('adult', 'train.cd')
    test_input_path = data_file('adult', 'test_small')
    fit_stem = ('--loss-function', 'RMSE',
                '-f', train_path,
                '--cd', cd_path,
                '-i', '5',
                '-T', '4',
                '--use-best-model', 'false',
                )
    test0_path = yatest.common.test_output_path('test0.txt')
    # Create an empty eval-set file; use a context manager so the file is
    # closed (the original leaked the handle returned by open()).
    with open(test0_path, 'wt') as test0_f:
        test0_f.write('')
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', fit_stem + (
            '-t', ','.join((test_input_path, test0_path))
        ))
@pytest.mark.parametrize('loss_function', ['RMSE', 'QueryRMSE'])
def test_multiple_eval_sets(loss_function):
    """Fit with five eval sets (listed in reverse order) and canonize the
    combined eval output.
    """
    num_tests = 5
    train_path = data_file('querywise', 'train')
    cd_path = data_file('querywise', 'train.cd.query_id')
    test_input_path = data_file('querywise', 'test')
    eval_path = yatest.common.test_output_path('test.eval')
    test_paths = list(reversed(split_test_to(num_tests, test_input_path)))
    cmd = ('--loss-function', loss_function,
           '-f', train_path,
           '-t', ','.join(test_paths),
           '--column-description', cd_path,
           '-i', '5',
           '-T', '4',
           '--use-best-model', 'false',
           '--eval-file', eval_path,
           )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(eval_path)]
def test_multiple_eval_sets_err_log():
    """Fit with three eval sets and canonize the test-error log and the JSON
    log (with timing data stripped).
    """
    num_tests = 3
    train_path = data_file('querywise', 'train')
    cd_path = data_file('querywise', 'train.cd.query_id')
    test_input_path = data_file('querywise', 'test')
    test_err_log_path = yatest.common.test_output_path('test-err.log')
    json_log_path = yatest.common.test_output_path('json.log')
    test_paths = reversed(split_test_to(num_tests, test_input_path))
    cmd = ('--loss-function', 'RMSE',
           '-f', train_path,
           '-t', ','.join(test_paths),
           '--column-description', cd_path,
           '-i', '5',
           '-T', '4',
           '--test-err-log', test_err_log_path,
           '--json-log', json_log_path,
           )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(test_err_log_path),
            local_canonical_file(remove_time_from_json(json_log_path))]
@pytest.mark.parametrize('cat_value', ['Normal', 'Quvena', 'Sineco'])
def test_const_cat_feature(cat_value):
    """Training on a pool whose only categorical feature is constant must
    fail with an execution error.
    """
    def make_a_set(nrows, value, seed=20181219, prng=None):
        # Random integer labels plus a single constant byte-string feature.
        if prng is None:
            prng = np.random.RandomState(seed=seed)
        label = prng.randint(0, nrows, [nrows, 1])
        feature = np.full([nrows, 1], value, dtype='|S{}'.format(len(value)))
        return np.concatenate([label, feature], axis=1)

    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target'], [1, 'Categ']], fmt='%s', delimiter='\t')

    prng = np.random.RandomState(seed=20181219)

    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, make_a_set(10, cat_value, prng=prng), fmt='%s', delimiter='\t')

    test_path = yatest.common.test_output_path('test.txt')
    np.savetxt(test_path, make_a_set(10, cat_value, prng=prng), fmt='%s', delimiter='\t')

    eval_path = yatest.common.test_output_path('eval.txt')

    cmd = ('--loss-function', 'RMSE',
           '-f', train_path,
           '-t', test_path,
           '--column-description', cd_path,
           '-i', '5',
           '-T', '4',
           '--eval-file', eval_path,
           )
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', cmd)
def test_model_metadata():
    """Set model metadata from fit free-args, overwrite one key via the
    'metadata set' CLI mode, and verify the final key/value pairs through the
    Python API.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '2',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '-w', '0.1',
        '--set-metadata-from-freeargs',
        'A', 'A',
        'BBB', 'BBB',
        'CCC', 'A'
    )
    execute_catboost_fit('CPU', cmd)

    calc_cmd = (
        CATBOOST_PATH,
        'metadata', 'set',
        '-m', output_model_path,
        '--key', 'CCC',
        '--value', 'CCC'
    )
    yatest.common.execute(calc_cmd)

    # NOTE(review): the identical 'metadata set' command is executed a second
    # time — presumably to check the operation is idempotent; confirm intent
    # before removing the duplicate.
    calc_cmd = (
        CATBOOST_PATH,
        'metadata', 'set',
        '-m', output_model_path,
        '--key', 'CCC',
        '--value', 'CCC'
    )
    yatest.common.execute(calc_cmd)

    py_catboost = catboost.CatBoost()
    py_catboost.load_model(output_model_path)

    assert 'A' == py_catboost.get_metadata()['A']
    assert 'BBB' == py_catboost.get_metadata()['BBB']
    assert 'CCC' == py_catboost.get_metadata()['CCC']
def test_fit_multiclass_with_class_names():
    """Fit MultiClass with explicit --class-names and canonize predictions in
    both RawFormulaVal and Class forms.
    """
    labels = ['a', 'b', 'c', 'd']

    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')

    prng = np.random.RandomState(seed=0)

    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')

    test_path = yatest.common.test_output_path('test.txt')
    np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')

    eval_path = yatest.common.test_output_path('eval.txt')

    fit_cmd = (
        '--loss-function', 'MultiClass',
        '--class-names', ','.join(labels),
        '-f', train_path,
        '-t', test_path,
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '--use-best-model', 'false',
        '--prediction-type', 'RawFormulaVal,Class',
        '--eval-file', eval_path
    )

    execute_catboost_fit('CPU', fit_cmd)

    return [local_canonical_file(eval_path)]
def test_extract_multiclass_labels_from_class_names():
    """Class labels passed via --class-names must be stored in the model's
    metadata ('class_params' and training params) in the given order.
    """
    labels = ['a', 'b', 'c', 'd']

    model_path = yatest.common.test_output_path('model.bin')

    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')

    prng = np.random.RandomState(seed=0)

    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')

    test_path = yatest.common.test_output_path('test.txt')
    np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')

    eval_path = yatest.common.test_output_path('eval.txt')

    fit_cmd = (
        '--loss-function', 'MultiClass',
        '--class-names', ','.join(labels),
        '-f', train_path,
        '-t', test_path,
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--use-best-model', 'false',
    )
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', test_path,
        '--column-description', cd_path,
        '-T', '4',
        '-m', model_path,
        '--output-path', eval_path,
        '--prediction-type', 'RawFormulaVal,Class',
    )

    execute_catboost_fit('CPU', fit_cmd)
    yatest.common.execute(calc_cmd)

    py_catboost = catboost.CatBoost()
    py_catboost.load_model(model_path)

    assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'
    assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3]
    assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == ['a', 'b', 'c', 'd']
    # classes_count is 0 when classes come from names rather than a count.
    assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0

    assert json.loads(py_catboost.get_metadata()['params'])['data_processing_options']['class_names'] == ['a', 'b', 'c', 'd']

    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('loss_function', ['MultiClass', 'MultiClassOneVsAll', 'Logloss', 'RMSE'])
def test_save_class_labels_from_data(loss_function):
    """Class labels inferred from the data must be saved into the model's
    'class_params' metadata for classification losses, and omitted for
    regression (RMSE).
    """
    labels = [10000000, 7, 0, 9999]

    model_path = yatest.common.test_output_path('model.bin')

    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')

    prng = np.random.RandomState(seed=0)

    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')

    cmd = (
        '--loss-function', loss_function,
        '-f', train_path,
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--use-best-model', 'false',
    )

    if loss_function == 'Logloss':
        cmd += ('--target-border', '0.5')

    execute_catboost_fit('CPU', cmd)

    py_catboost = catboost.CatBoost()
    py_catboost.load_model(model_path)

    if loss_function in MULTICLASS_LOSSES:
        # Multiclass: labels are sorted and stored as strings.
        assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'
        assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3]
        assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == ['0.0', '7.0', '9999.0', '10000000.0']
        assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0
    elif loss_function == 'Logloss':
        # Binary classification after target binarization: integer labels 0/1.
        assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'Integer'
        assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1]
        assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == []
        assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0
    else:
        assert 'class_params' not in py_catboost.get_metadata()
@pytest.mark.parametrize('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])
def test_apply_multiclass_labels_from_data(prediction_type):
    """Applying a multiclass model must emit column headers (or Class values)
    built from the labels that were inferred from the training data.
    """
    labels = [10000000, 7, 0, 9999]

    model_path = yatest.common.test_output_path('model.bin')

    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')

    prng = np.random.RandomState(seed=0)

    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')

    test_path = yatest.common.test_output_path('test.txt')
    np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\t')

    eval_path = yatest.common.test_output_path('eval.txt')

    fit_cmd = (
        '--loss-function', 'MultiClass',
        '-f', train_path,
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--use-best-model', 'false',
    )
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', test_path,
        '--column-description', cd_path,
        '-m', model_path,
        '--output-path', eval_path,
        '--prediction-type', prediction_type,
    )

    execute_catboost_fit('CPU', fit_cmd)
    yatest.common.execute(calc_cmd)

    py_catboost = catboost.CatBoost()
    py_catboost.load_model(model_path)

    assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'
    assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3]
    assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == ['0.0', '7.0', '9999.0', '10000000.0']
    assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0

    if prediction_type in ['Probability', 'RawFormulaVal']:
        # Only the header row is checked ('break' after the first line).
        with open(eval_path, "rt") as f:
            for line in f:
                assert line[:-1] == 'SampleId\t{}:Class=0.0\t{}:Class=7.0\t{}:Class=9999.0\t{}:Class=10000000.0' \
                    .format(prediction_type, prediction_type, prediction_type, prediction_type)
                break
    else:  # Class
        with open(eval_path, "rt") as f:
            for i, line in enumerate(f):
                if not i:
                    assert line[:-1] == 'SampleId\tClass'
                else:
                    assert float(line[:-1].split()[1]) in labels

    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
@pytest.mark.parametrize('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])
def test_save_and_apply_multiclass_labels_from_classes_count(loss_function, prediction_type):
    """With --classes-count 4 but only labels {1, 2} present in the train set,
    the model must still expose all four classes; the absent classes 0 and 3
    must get -inf raw values / (near-)zero probabilities, and 'Class' output
    must stay within the seen labels.
    """
    model_path = yatest.common.test_output_path('model.bin')

    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')

    prng = np.random.RandomState(seed=0)

    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, [1, 2], prng=prng), fmt='%s', delimiter='\t')

    test_path = yatest.common.test_output_path('test.txt')
    np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, [0, 1, 2, 3], prng=prng), fmt='%s', delimiter='\t')

    eval_path = yatest.common.test_output_path('eval.txt')

    fit_cmd = (
        '--loss-function', loss_function,
        '--classes-count', '4',
        '-f', train_path,
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--use-best-model', 'false',
    )

    execute_catboost_fit('CPU', fit_cmd)

    py_catboost = catboost.CatBoost()
    py_catboost.load_model(model_path)

    assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'Integer'
    # Only the labels actually present in the train data map to leaf indices.
    assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [1, 2]
    assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 4
    assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == []

    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', test_path,
        '--column-description', cd_path,
        '-m', model_path,
        '--output-path', eval_path,
        '--prediction-type', prediction_type
    )

    yatest.common.execute(calc_cmd)

    if prediction_type == 'RawFormulaVal':
        with open(eval_path, "rt") as f:
            for i, line in enumerate(f):
                if i == 0:
                    assert line[:-1] == 'SampleId\t{}:Class=0\t{}:Class=1\t{}:Class=2\t{}:Class=3' \
                        .format(prediction_type, prediction_type, prediction_type, prediction_type)
                else:
                    # Columns 1 and 4 correspond to the unseen classes 0 and 3.
                    assert float(line[:-1].split()[1]) == float('-inf') and float(line[:-1].split()[4]) == float('-inf')

    if prediction_type == 'Probability':
        with open(eval_path, "rt") as f:
            for i, line in enumerate(f):
                if i == 0:
                    assert line[:-1] == 'SampleId\t{}:Class=0\t{}:Class=1\t{}:Class=2\t{}:Class=3' \
                        .format(prediction_type, prediction_type, prediction_type, prediction_type)
                else:
                    assert (abs(float(line[:-1].split()[1])) < 1e-307
                            and abs(float(line[:-1].split()[4])) < 1e-307)

    if prediction_type == 'Class':
        with open(eval_path, "rt") as f:
            for i, line in enumerate(f):
                if i == 0:
                    assert line[:-1] == 'SampleId\tClass'
                else:
                    assert float(line[:-1].split()[1]) in [1, 2]

    return [local_canonical_file(eval_path)]
def test_set_class_names_implicitly():
    """When --class-names is not given, string labels are collected from the
    data and stored sorted; the calc output header and Class values must use
    that sorted order.
    """
    INPUT_CLASS_LABELS = ['a', 'bc', '7.', '8.0', '19.2']
    # Expected model order: lexicographically sorted input labels.
    SAVED_CLASS_LABELS = ['19.2', '7.', '8.0', 'a', 'bc']

    model_path = yatest.common.test_output_path('model.bin')

    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\t')

    prng = np.random.RandomState(seed=0)

    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, INPUT_CLASS_LABELS, prng=prng), fmt='%s', delimiter='\t')

    test_path = yatest.common.test_output_path('test.txt')
    np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, INPUT_CLASS_LABELS, prng=prng), fmt='%s', delimiter='\t')

    eval_path = yatest.common.test_output_path('eval.txt')

    fit_cmd = (
        '--loss-function', 'MultiClass',
        '-f', train_path,
        '--column-description', cd_path,
        '-i', '10',
        '-T', '4',
        '-m', model_path,
        '--use-best-model', 'false',
    )
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', test_path,
        '--column-description', cd_path,
        '-m', model_path,
        '--output-path', eval_path,
        '--prediction-type', 'RawFormulaVal,Class',
    )

    execute_catboost_fit('CPU', fit_cmd)

    py_catboost = catboost.CatBoost()
    py_catboost.load_model(model_path)

    assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'
    assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3, 4]
    assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == SAVED_CLASS_LABELS
    assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0

    yatest.common.execute(calc_cmd)

    with open(eval_path, "rt") as f:
        for i, line in enumerate(f):
            if not i:
                assert line[:-1] == 'SampleId\t{}:Class=19.2\t{}:Class=7.\t{}:Class=8.0\t{}:Class=a\t{}:Class=bc\tClass' \
                    .format(*(['RawFormulaVal'] * 5))
            else:
                label = line[:-1].split()[-1]
                assert label in SAVED_CLASS_LABELS

    return [local_canonical_file(eval_path)]
# Pre-built multiclass model checked into the test data; used to verify
# backward compatibility with models saved before 'class_params' metadata.
CANONICAL_CLOUDNESS_MINI_MULTICLASS_MODEL_PATH = data_file('', 'multiclass_model.bin')
@pytest.mark.parametrize('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])
def test_multiclass_model_backward_compatibility(prediction_type):
    """A legacy multiclass model (saved without 'class_params' metadata) must
    still be loadable, apply cleanly via the Python API, and produce canonical
    calc output.
    """
    model = catboost.CatBoost()
    model.load_model(CANONICAL_CLOUDNESS_MINI_MULTICLASS_MODEL_PATH)

    assert 'class_params' not in model.get_metadata()

    pool = catboost.Pool(data_file('cloudness_small', 'train_small'),
                         column_description=data_file('cloudness_small', 'train.cd'))
    model.predict(data=pool, prediction_type='Class')
    model.eval_metrics(data=pool, metrics=['Accuracy'])

    output_path = yatest.common.test_output_path('out.txt')
    calc_cmd = (
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('cloudness_small', 'train_small'),
        '--column-description', data_file('cloudness_small', 'train.cd'),
        '-m', CANONICAL_CLOUDNESS_MINI_MULTICLASS_MODEL_PATH,
        '--prediction-type', prediction_type,
        '--output-path', output_path,
    )
    yatest.common.execute(calc_cmd)
    return [local_canonical_file(output_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('use_best_model', ['true', 'false'])
def test_learning_rate_auto_set(boosting_type, use_best_model):
    """Train without an explicit learning rate or iteration count (Iter
    overfitting detector supplies stopping) and canonize the eval output.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--use-best-model', use_best_model,
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', boosting_type,
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--od-type', 'Iter',
        '--od-wait', '2',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
def test_paths_with_dsv_scheme():
    """Input paths prefixed with the explicit 'dsv://' scheme must be accepted
    by the CLI; canonize the eval output.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')
    cmd = (
        '--loss-function', 'QueryRMSE',
        '-f', 'dsv://' + data_file('querywise', 'train'),
        '-t', 'dsv://' + data_file('querywise', 'test'),
        '--column-description', 'dsv://' + data_file('querywise', 'train.cd'),
        '--boosting-type', 'Ordered',
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
        '--eval-file', output_eval_path,
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)
    return [local_canonical_file(output_eval_path)]
def test_skip_train():
    """A custom metric with hints=skip_train~true must be computed only on the
    test set; canonize learn/test error logs and the JSON log (timing data
    stripped).
    """
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    json_log_path = yatest.common.test_output_path('json_log.json')
    cmd = (
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '-i', '20',
        '-T', '4',
        '--custom-metric', 'AverageGain:top=2;hints=skip_train~true',
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '--use-best-model', 'false',
        '--json-log', json_log_path
    )
    execute_catboost_fit('CPU', cmd)

    return [local_canonical_file(learn_error_path),
            local_canonical_file(test_error_path),
            local_canonical_file(remove_time_from_json(json_log_path))]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_group_weight(boosting_type, dev_score_calc_obj_block_size):
    """YetiRank with constant group weights must match training without group
    weights; then canonize a run with non-constant group weights.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    output_eval_path = yatest.common.test_output_path('test.eval')

    def run_catboost(train_path, test_path, cd_path, eval_path):
        cmd = (
            '--loss-function', 'YetiRank',
            '-f', data_file('querywise', train_path),
            '-t', data_file('querywise', test_path),
            '--column-description', data_file('querywise', cd_path),
            '--boosting-type', boosting_type,
            '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
            '-i', '10',
            '-T', '4',
            '-m', output_model_path,
            '--eval-file', eval_path,
        )
        execute_catboost_fit('CPU', cmd)

    output_eval_path_first = yatest.common.test_output_path('test_first.eval')
    output_eval_path_second = yatest.common.test_output_path('test_second.eval')
    run_catboost('train', 'test', 'train.cd', output_eval_path_first)
    run_catboost('train.const_group_weight', 'test.const_group_weight', 'train.cd.group_weight', output_eval_path_second)
    assert filecmp.cmp(output_eval_path_first, output_eval_path_second)

    run_catboost('train', 'test', 'train.cd.group_weight', output_eval_path)
    return [local_canonical_file(output_eval_path)]
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize('loss_function', ['QueryRMSE', 'RMSE'])
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_group_weight_and_object_weight(boosting_type, grow_policy, loss_function, dev_score_calc_obj_block_size):
    """Declaring the same weight column as GroupWeight vs per-object Weight
    must produce identical eval output for these losses.
    """
    def run_catboost(train_path, test_path, cd_path, eval_path):
        cmd = (
            '--loss-function', loss_function,
            '-f', data_file('querywise', train_path),
            '-t', data_file('querywise', test_path),
            '--column-description', data_file('querywise', cd_path),
            '--boosting-type', boosting_type,
            '--grow-policy', grow_policy,
            '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
            '-i', '10',
            '-T', '4',
            '--eval-file', eval_path,
        )
        execute_catboost_fit('CPU', cmd)

    output_eval_path_first = yatest.common.test_output_path('test_first.eval')
    output_eval_path_second = yatest.common.test_output_path('test_second.eval')
    run_catboost('train', 'test', 'train.cd.group_weight', output_eval_path_first)
    run_catboost('train', 'test', 'train.cd.weight', output_eval_path_second)
    assert filecmp.cmp(output_eval_path_first, output_eval_path_second)
def test_snapshot_without_random_seed():
    """Resuming from a snapshot must reuse the originally auto-chosen random seed:
    the final eval matches a fresh run started with that seed explicitly."""

    def fit_and_count_stdout_lines(iterations, eval_file, extra_args=None):
        # Runs one fit, capturing stdout into a scratch file, and returns the
        # number of lines it printed.
        cmd = [
            '--loss-function', 'Logloss',
            '--learning-rate', '0.5',
            '-f', data_file('adult', 'train_small'),
            '-t', data_file('adult', 'test_small'),
            '--column-description', data_file('adult', 'train.cd'),
            '-i', str(iterations),
            '-T', '4',
            '--use-best-model', 'False',
            '--eval-file', eval_file,
        ] + (extra_args or [])
        dump_name = 'test_data_dumps'
        with open(dump_name, 'w') as out:
            execute_catboost_fit('CPU', cmd, stdout=out)
        with open(dump_name, 'r') as dumped:
            return sum(1 for _ in dumped)

    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    progress_path = yatest.common.test_output_path('test.cbp')
    snapshot_args = ['--snapshot-file', progress_path, '-m', model_path]
    # Each resumed run continues from the snapshot, so every call should print
    # the same amount of fresh output.
    line_counts = [
        fit_and_count_stdout_lines(iters, eval_path, extra_args=snapshot_args)
        for iters in (15, 30, 45)
    ]
    assert len(set(line_counts)) == 1
    canon_eval_path = yatest.common.test_output_path('canon_test.eval')
    cb_model = catboost.CatBoost()
    cb_model.load_model(model_path)
    # Replay all 45 iterations in one go with the seed the snapshot run picked.
    fit_and_count_stdout_lines(45, canon_eval_path, extra_args=['-r', str(cb_model.random_seed_)])
    assert filecmp.cmp(canon_eval_path, eval_path)
def test_snapshot_with_interval():
    """A training repeatedly killed by timeouts and resumed from its periodic
    snapshots must end with the same eval as one uninterrupted run."""

    def times_out(cmd, timeout):
        # True when the fit was killed by the timeout, i.e. training has not
        # finished yet and must be resumed from the snapshot.
        try:
            execute_catboost_fit('CPU', cmd, timeout=timeout)
        except ExecutionTimeoutError:
            return True
        return False

    base_cmd = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-T', '4',
    ]
    measure_time_iters = 100
    exec_time = timeit.timeit(lambda: execute_catboost_fit('CPU', base_cmd + ['-i', str(measure_time_iters)]), number=1)

    SNAPSHOT_INTERVAL = 1
    TIMEOUT = 5
    TOTAL_TIME = 25
    # Scale the iteration count so a full run takes roughly TOTAL_TIME seconds.
    iters = int(TOTAL_TIME / (exec_time / measure_time_iters))

    canon_eval_path = yatest.common.test_output_path('canon_test.eval')
    execute_catboost_fit('CPU', base_cmd + ['--eval-file', canon_eval_path, '-i', str(iters)])

    eval_path = yatest.common.test_output_path('test.eval')
    progress_path = yatest.common.test_output_path('test.cbp')
    model_path = yatest.common.test_output_path('model.bin')
    snapshot_cmd = base_cmd + [
        '--snapshot-file', progress_path,
        '--snapshot-interval', str(SNAPSHOT_INTERVAL),
        '-m', model_path,
        '--eval-file', eval_path,
        '-i', str(iters),
    ]
    was_timeout = False
    while times_out(snapshot_cmd, TIMEOUT):
        was_timeout = True
    # The test is only meaningful when at least one resume actually happened.
    assert was_timeout
    assert filecmp.cmp(canon_eval_path, eval_path)
def test_snapshot_with_different_params():
    """Resuming from a snapshot with changed training parameters must fail."""
    base_cmd = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-T', '4',
        '-i', '10',
        '--snapshot-file', 'snapshot.cbp'
    ]
    execute_catboost_fit('CPU', base_cmd + ['--eval-metric', 'Logloss'])
    try:
        # Same snapshot file, different eval metric: the fit must be rejected.
        execute_catboost_fit('CPU', base_cmd + ['--eval-metric', 'Accuracy'])
    except ExecutionError:
        return
    assert False
@pytest.mark.parametrize('boosting_type, grow_policy', BOOSTING_TYPE_WITH_GROW_POLICIES)
@pytest.mark.parametrize('leaf_estimation_method', LEAF_ESTIMATION_METHOD)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_querysoftmax(boosting_type, grow_policy, leaf_estimation_method, dev_score_calc_obj_block_size):
    """Canonize QuerySoftMax training on the querywise pool over the full
    boosting/leaf-estimation parameter grid."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    execute_catboost_fit('CPU', [
        '--loss-function', 'QuerySoftMax',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--boosting-type', boosting_type,
        '--grow-policy', grow_policy,
        '--leaf-estimation-method', leaf_estimation_method,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--use-best-model', 'false',
    ])
    return [local_canonical_file(eval_path)]
def test_shap_verbose():
    """`fstr --fstr-type ShapValues --verbose 12` on a 250-tree model must
    produce exactly 5 log lines."""
    model_path = yatest.common.test_output_path('model.bin')
    values_path = yatest.common.test_output_path('shapval')
    log_path = yatest.common.test_output_path('log')
    execute_catboost_fit('CPU', [
        '--loss-function', 'Logloss',
        '--learning-rate', '0.5',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '250',
        '-T', '4',
        '-m', model_path,
    ])
    fstr_cmd = [
        CATBOOST_PATH,
        'fstr',
        '-o', values_path,
        '--input-path', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--verbose', '12',
        '--fstr-type', 'ShapValues',
        '-T', '4',
        '-m', model_path,
    ]
    with open(log_path, 'w') as log:
        yatest.common.execute(fstr_cmd, stdout=log)
    with open(log_path, 'r') as log:
        assert sum(1 for _ in log) == 5
def test_shap_approximate():
    """Canonize SHAP values computed with the 'Approximate' calculation type."""
    model_path = yatest.common.test_output_path('model.bin')
    values_path = yatest.common.test_output_path('shapval')
    execute_catboost_fit('CPU', [
        '--loss-function', 'Logloss',
        '--learning-rate', '0.5',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '250',
        '-T', '4',
        '-m', model_path,
    ])
    yatest.common.execute([
        CATBOOST_PATH,
        'fstr',
        '-o', values_path,
        '--input-path', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--verbose', '0',
        '--fstr-type', 'ShapValues',
        '--shap-calc-type', 'Approximate',
        '-T', '4',
        '-m', model_path,
    ])
    return [local_canonical_file(values_path)]
def test_shap_exact():
    """Canonize SHAP values computed with the 'Exact' calculation type."""
    model_path = yatest.common.test_output_path('model.bin')
    values_path = yatest.common.test_output_path('shapval')
    # NOTE: unlike sibling tests this one invokes 'fit' through the binary
    # directly rather than via execute_catboost_fit.
    yatest.common.execute([
        CATBOOST_PATH,
        'fit',
        '--loss-function', 'Logloss',
        '--learning-rate', '0.5',
        '-f', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '250',
        '-T', '4',
        '-m', model_path,
    ])
    yatest.common.execute([
        CATBOOST_PATH,
        'fstr',
        '-o', values_path,
        '--input-path', data_file('adult', 'train_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--verbose', '0',
        '--fstr-type', 'ShapValues',
        '--shap-calc-type', 'Exact',
        '-T', '4',
        '-m', model_path,
    ])
    return [local_canonical_file(values_path)]
@pytest.mark.parametrize('bagging_temperature', ['0', '1'])
@pytest.mark.parametrize('sampling_unit', SAMPLING_UNIT_TYPES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_querywise_bayesian_bootstrap(bagging_temperature, sampling_unit, dev_score_calc_obj_block_size):
    """Canonize RMSE training with Bayesian bootstrap over sampling-unit and
    bagging-temperature settings."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    execute_catboost_fit('CPU', [
        '--loss-function', 'RMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--bootstrap-type', 'Bayesian',
        '--sampling-unit', sampling_unit,
        '--bagging-temperature', bagging_temperature,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--use-best-model', 'false',
    ])
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('subsample', ['0.5', '1'])
@pytest.mark.parametrize('sampling_unit', SAMPLING_UNIT_TYPES)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_querywise_bernoulli_bootstrap(subsample, sampling_unit, dev_score_calc_obj_block_size):
    """Canonize RMSE training with Bernoulli bootstrap over sampling-unit and
    subsample settings."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    execute_catboost_fit('CPU', [
        '--loss-function', 'RMSE',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--bootstrap-type', 'Bernoulli',
        '--sampling-unit', sampling_unit,
        '--subsample', subsample,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--use-best-model', 'false',
    ])
    return [local_canonical_file(eval_path)]
# Loss functions exercised by the pairwise-bootstrap tests below.
# NOTE(review): 'SCORRING' is a typo, kept as-is because the name is referenced
# by several tests in this file.
LOSS_FUNCTIONS_WITH_PAIRWISE_SCORRING = ['YetiRankPairwise', 'PairLogitPairwise']
@pytest.mark.parametrize('bagging_temperature', ['0', '1'])
@pytest.mark.parametrize('sampling_unit', SAMPLING_UNIT_TYPES)
@pytest.mark.parametrize('loss_function', LOSS_FUNCTIONS_WITH_PAIRWISE_SCORRING)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_pairwise_bayesian_bootstrap(bagging_temperature, sampling_unit, loss_function, dev_score_calc_obj_block_size):
    """Canonize pairwise-scoring losses with Bayesian bootstrap on explicit pairs."""
    # Known-broken combination, tracked separately.
    if loss_function == 'YetiRankPairwise' and sampling_unit == 'Group' and bagging_temperature == '1':
        return pytest.xfail(reason='MLTOOLS-1801')
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    execute_catboost_fit('CPU', [
        '--loss-function', loss_function,
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--learn-pairs', data_file('querywise', 'train.pairs'),
        '--test-pairs', data_file('querywise', 'test.pairs'),
        '--bootstrap-type', 'Bayesian',
        '--sampling-unit', sampling_unit,
        '--bagging-temperature', bagging_temperature,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--use-best-model', 'false',
    ])
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('subsample', ['0.5', '1'])
@pytest.mark.parametrize('sampling_unit', SAMPLING_UNIT_TYPES)
@pytest.mark.parametrize('loss_function', LOSS_FUNCTIONS_WITH_PAIRWISE_SCORRING)
@pytest.mark.parametrize(
    'dev_score_calc_obj_block_size',
    SCORE_CALC_OBJ_BLOCK_SIZES,
    ids=SCORE_CALC_OBJ_BLOCK_SIZES_IDS
)
def test_pairwise_bernoulli_bootstrap(subsample, sampling_unit, loss_function, dev_score_calc_obj_block_size):
    """Canonize pairwise-scoring losses with Bernoulli bootstrap on explicit pairs."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    # MKL_CBWR pins MKL's code path for reproducible floating-point results.
    execute_catboost_fit('CPU', [
        '--loss-function', loss_function,
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '--learn-pairs', data_file('querywise', 'train.pairs'),
        '--test-pairs', data_file('querywise', 'test.pairs'),
        '--bootstrap-type', 'Bernoulli',
        '--sampling-unit', sampling_unit,
        '--subsample', subsample,
        '--dev-score-calc-obj-block-size', dev_score_calc_obj_block_size,
        '-i', '20',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--use-best-model', 'false',
    ], env=dict(MKL_CBWR='SSE4_2'))
    # Sanitized builds get a small tolerance; normal builds must match exactly.
    eps = 0 if yatest.common.context.sanitize is None else 0.1
    return [local_canonical_file(eval_path, diff_tool=diff_tool(eps))]
@pytest.mark.parametrize('loss_function', ['Logloss', 'RMSE', 'MultiClass', 'QuerySoftMax', 'QueryRMSE'])
@pytest.mark.parametrize('metric', ['Logloss', 'RMSE', 'MultiClass', 'QuerySoftMax', 'AUC', 'PFound'])
def test_bad_metrics_combination(loss_function, metric):
    """A fit must fail exactly when the custom metric is incompatible with the
    loss function, and succeed otherwise."""
    # Per loss function: custom metrics that must be rejected.
    incompatible = {
        'Logloss': ['RMSE', 'MultiClass'],
        'RMSE': ['Logloss', 'MultiClass'],
        'MultiClass': ['Logloss', 'RMSE', 'QuerySoftMax', 'PFound'],
        'QuerySoftMax': ['RMSE', 'MultiClass', 'QueryRMSE'],
        'QueryRMSE': ['Logloss', 'MultiClass', 'QuerySoftMax'],
        'YetiRank': ['Logloss', 'RMSE', 'MultiClass']
    }

    cd_path = yatest.common.test_output_path('cd.txt')
    np.savetxt(cd_path, [[0, 'Target'], [1, 'QueryId']], fmt='%s', delimiter='\t')

    # Tiny synthetic dataset: target, query id, one feature.
    data = np.array([[0, 1, 0, 1, 0], [0, 0, 1, 1, 2], [1, 2, 3, 4, 5]]).T
    train_path = yatest.common.test_output_path('train.txt')
    np.savetxt(train_path, data, fmt='%s', delimiter='\t')
    test_path = yatest.common.test_output_path('test.txt')
    np.savetxt(test_path, data, fmt='%s', delimiter='\t')

    cmd = [
        '--loss-function', loss_function,
        '--custom-metric', metric,
        '-f', train_path,
        '-t', test_path,
        '--column-description', cd_path,
        '-i', '4',
        '-T', '4',
    ]
    try:
        execute_catboost_fit('CPU', cmd)
    except Exception:
        assert metric in incompatible[loss_function]
        return
    assert metric not in incompatible[loss_function]
@pytest.mark.parametrize('metric', [('good', ',AUC,'), ('bad', ',')])
def test_extra_commas(metric):
    """Stray commas around a metric name are tolerated; a bare comma is rejected."""
    verdict, metric_spec = metric
    cmd = [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-w', '0.03',
        '-i', '10',
        '-T', '4',
        '--custom-metric', metric_spec
    ]
    if verdict == 'good':
        execute_catboost_fit('CPU', cmd)
    elif verdict == 'bad':
        with pytest.raises(yatest.common.ExecutionError):
            execute_catboost_fit('CPU', cmd)
def execute_fit_for_test_quantized_pool(loss_function, pool_path, test_path, cd_path, eval_path,
                                        border_count=128, other_options=()):
    """Helper for the quantized-pool tests: run one CPU fit with fixed settings
    (10 iterations, GreedyLogSum borders) and write predictions to eval_path.

    other_options is an extra sequence of CLI arguments appended to the command.
    """
    model_path = yatest.common.test_output_path('model.bin')
    cmd = [
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', pool_path,
        '-t', test_path,
        '--cd', cd_path,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-x', str(border_count),
        '--feature-border-type', 'GreedyLogSum',
        '-m', model_path,
        '--eval-file', eval_path,
    ]
    execute_catboost_fit('CPU', cmd + list(other_options))
def test_quantized_pool():
    """Training on a pre-quantized higgs pool must reproduce the tsv-pool eval."""
    common = dict(
        loss_function='Logloss',
        test_path=data_file('higgs', 'test_small'),
        cd_path=data_file('higgs', 'train.cd'),
    )
    tsv_eval = yatest.common.test_output_path('tsv.eval')
    execute_fit_for_test_quantized_pool(
        pool_path=data_file('higgs', 'train_small'),
        eval_path=tsv_eval,
        **common
    )
    quantized_eval = yatest.common.test_output_path('quantized.eval')
    execute_fit_for_test_quantized_pool(
        pool_path='quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),
        eval_path=quantized_eval,
        **common
    )
    assert filecmp.cmp(tsv_eval, quantized_eval)
def test_quantized_pool_ignored_features():
    """Ignoring a feature (-I 5) must work identically for tsv and quantized pools."""
    common = dict(
        loss_function='Logloss',
        test_path=data_file('higgs', 'test_small'),
        cd_path=data_file('higgs', 'train.cd'),
        other_options=('-I', '5',),
    )
    tsv_eval = yatest.common.test_output_path('tsv.eval')
    execute_fit_for_test_quantized_pool(
        pool_path=data_file('higgs', 'train_small'),
        eval_path=tsv_eval,
        **common
    )
    quantized_eval = yatest.common.test_output_path('quantized.eval')
    execute_fit_for_test_quantized_pool(
        pool_path='quantized://' + data_file('higgs', 'train_small_x128_greedylogsum.bin'),
        eval_path=quantized_eval,
        **common
    )
    assert filecmp.cmp(tsv_eval, quantized_eval)
def test_quantized_pool_groupid():
    """Group ids stored in a quantized querywise pool must match tsv behavior."""
    common = dict(
        loss_function='PairLogitPairwise',
        test_path=data_file('querywise', 'test'),
        cd_path=data_file('querywise', 'train.cd.query_id'),
    )
    tsv_eval = yatest.common.test_output_path('tsv.eval')
    execute_fit_for_test_quantized_pool(
        pool_path=data_file('querywise', 'train'),
        eval_path=tsv_eval,
        **common
    )
    quantized_eval = yatest.common.test_output_path('quantized.eval')
    execute_fit_for_test_quantized_pool(
        pool_path='quantized://' + data_file('querywise', 'train_x128_greedylogsum_aqtaa.bin'),
        eval_path=quantized_eval,
        **common
    )
    assert filecmp.cmp(tsv_eval, quantized_eval)
def test_quantized_pool_ignored_during_quantization():
    """Features ignored at quantization time must give the same eval as -I on tsv."""
    common = dict(
        loss_function='PairLogitPairwise',
        test_path=data_file('querywise', 'test'),
        cd_path=data_file('querywise', 'train.cd.query_id'),
    )
    tsv_eval = yatest.common.test_output_path('tsv.eval')
    # tsv run ignores features 18-36 via CLI ...
    execute_fit_for_test_quantized_pool(
        pool_path=data_file('querywise', 'train'),
        eval_path=tsv_eval,
        other_options=('-I', '18-36',),
        **common
    )
    quantized_eval = yatest.common.test_output_path('quantized.eval')
    # ... while the quantized pool was built with those features already dropped.
    execute_fit_for_test_quantized_pool(
        pool_path='quantized://' + data_file('querywise', 'train_x128_greedylogsum_aqtaa_ignore_18_36.bin'),
        eval_path=quantized_eval,
        **common
    )
    assert filecmp.cmp(tsv_eval, quantized_eval)
def test_quantized_pool_quantized_test():
    """Using a quantized test set (borders taken from train) must match tsv eval."""
    common = dict(
        loss_function='PairLogitPairwise',
        cd_path=data_file('querywise', 'train.cd.query_id'),
    )
    tsv_eval = yatest.common.test_output_path('tsv.eval')
    execute_fit_for_test_quantized_pool(
        pool_path=data_file('querywise', 'train'),
        test_path=data_file('querywise', 'test'),
        eval_path=tsv_eval,
        **common
    )
    quantized_eval = yatest.common.test_output_path('quantized.eval')
    execute_fit_for_test_quantized_pool(
        pool_path='quantized://' + data_file('querywise', 'train_x128_greedylogsum_aqtaa.bin'),
        test_path='quantized://' + data_file('querywise', 'test_borders_from_train_aqtaa.bin'),
        eval_path=quantized_eval,
        **common
    )
    assert filecmp.cmp(tsv_eval, quantized_eval)
def test_quantized_pool_with_large_grid():
    """A 1024-border grid must give the same eval whether quantization happens
    at fit time (tsv) or ahead of time (quantized pools)."""
    common = dict(
        loss_function='PairLogitPairwise',
        cd_path=data_file('querywise', 'train.cd.query_id'),
    )
    tsv_eval = yatest.common.test_output_path('tsv.eval')
    execute_fit_for_test_quantized_pool(
        pool_path=data_file('querywise', 'train'),
        test_path=data_file('querywise', 'test'),
        eval_path=tsv_eval,
        border_count=1024,
        **common
    )
    quantized_eval = yatest.common.test_output_path('quantized.eval')
    execute_fit_for_test_quantized_pool(
        pool_path='quantized://' + data_file('querywise', 'train.quantized_x1024'),
        test_path='quantized://' + data_file('querywise', 'test.quantized_x1024'),
        eval_path=quantized_eval,
        **common
    )
    assert filecmp.cmp(tsv_eval, quantized_eval)
def test_learn_without_header_eval_with_header():
    """A model trained on a headerless copy of the pool must still be applicable
    to the original pool with --has-header."""
    headerless_train_path = yatest.common.test_output_path('airlines_without_header')
    # Copy the training pool, dropping its first (header) line.
    with open(data_file('airlines_5K', 'train'), 'r') as src, open(headerless_train_path, 'w') as dst:
        dst.writelines(src.readlines()[1:])
    model_path = yatest.common.test_output_path('model.bin')
    execute_catboost_fit('CPU', [
        '--loss-function', 'Logloss',
        '-f', headerless_train_path,
        '--cd', data_file('airlines_5K', 'cd'),
        '-i', '10',
        '-m', model_path
    ])
    yatest.common.execute([
        CATBOOST_PATH,
        'calc',
        '--input-path', data_file('airlines_5K', 'test'),
        '--cd', data_file('airlines_5K', 'cd'),
        '-m', model_path,
        '--has-header'
    ])
def test_group_weights_file():
    """Group weights supplied via separate files must match weights read from a
    GroupWeight column in the cd file."""

    def fit(eval_file, cd_name, with_weight_files):
        cmd = [
            '--use-best-model', 'false',
            '--loss-function', 'QueryRMSE',
            '-f', data_file('querywise', 'train'),
            '-t', data_file('querywise', 'test'),
            '--column-description', data_file('querywise', cd_name),
            '-i', '5',
            '-T', '4',
            '--eval-file', eval_file,
        ]
        if with_weight_files:
            cmd += [
                '--learn-group-weights', data_file('querywise', 'train.group_weights'),
                '--test-group-weights', data_file('querywise', 'test.group_weights'),
            ]
        execute_catboost_fit('CPU', cmd)

    eval_from_files = yatest.common.test_output_path('first.eval')
    eval_from_column = yatest.common.test_output_path('second.eval')
    fit(eval_from_files, 'train.cd', True)
    fit(eval_from_column, 'train.cd.group_weight', False)
    assert filecmp.cmp(eval_from_files, eval_from_column)
    return [local_canonical_file(eval_from_files)]
def test_group_weights_file_quantized():
    """Same as test_group_weights_file, but on quantized pools: external group
    weight files must match weights baked into the quantized pool."""

    def fit(eval_file, train_name, test_name, with_weight_files):
        cmd = [
            '--use-best-model', 'false',
            '--loss-function', 'QueryRMSE',
            '-f', 'quantized://' + data_file('querywise', train_name),
            '-t', 'quantized://' + data_file('querywise', test_name),
            '-i', '5',
            '-T', '4',
            '--eval-file', eval_file,
        ]
        if with_weight_files:
            cmd += [
                '--learn-group-weights', data_file('querywise', 'train.group_weights'),
                '--test-group-weights', data_file('querywise', 'test.group_weights'),
            ]
        execute_catboost_fit('CPU', cmd)

    eval_from_files = yatest.common.test_output_path('first.eval')
    eval_from_pool = yatest.common.test_output_path('second.eval')
    fit(eval_from_files, 'train.quantized', 'test.quantized', True)
    fit(eval_from_pool, 'train.quantized.group_weight', 'test.quantized.group_weight', False)
    assert filecmp.cmp(eval_from_files, eval_from_pool)
    return [local_canonical_file(eval_from_files)]
def test_mode_roc():
    """Canonize the output of the 'roc' mode run on a Logloss eval file."""
    eval_path = yatest.common.test_output_path('eval.tsv')
    roc_path = yatest.common.test_output_path('test.eval')
    execute_catboost_fit('CPU', [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '10',
        '-T', '4',
        '--counter-calc-method', 'SkipTest',
        '--eval-file', eval_path,
        '--use-best-model', 'false',
    ])
    yatest.common.execute([
        CATBOOST_PATH,
        'roc',
        '--eval-file', eval_path,
        '--output-path', roc_path
    ])
    return local_canonical_file(roc_path)
@pytest.mark.parametrize('pool', ['adult', 'higgs', 'adult_nan'])
def test_convert_model_to_json(pool):
    """A model exported both as CatboostBinary and Json must give predictions
    matching the training-time eval (within precision)."""
    model_path = yatest.common.test_output_path('model')
    fit_eval_path = yatest.common.test_output_path('test.eval')
    execute_catboost_fit('CPU', [
        '--use-best-model', 'false',
        '-f', data_file(pool, 'train_small'),
        '-t', data_file(pool, 'test_small'),
        '--column-description', data_file(pool, 'train.cd'),
        '-i', '20',
        '-T', '4',
        '--eval-file', fit_eval_path,
        '-m', model_path,
        # Only the NaN pool actually contains missing values.
        '--nan-mode', 'Max' if pool == 'adult_nan' else 'Forbidden',
        '--model-format', 'CatboostBinary,Json'
    ])

    def calc(model_file, format_args, out_path):
        yatest.common.execute([
            CATBOOST_PATH,
            'calc',
            '--input-path', data_file(pool, 'test_small'),
            '--column-description', data_file(pool, 'train.cd'),
            '-m', model_file,
        ] + format_args + [
            '--output-path', out_path
        ])

    json_predict_path = yatest.common.test_output_path('predict_test_json.eval')
    bin_predict_path = yatest.common.test_output_path('predict_test_bin.eval')
    calc(model_path + '.json', ['--model-format', 'Json'], json_predict_path)
    calc(model_path + '.bin', [], bin_predict_path)
    assert compare_evals_with_precision(fit_eval_path, bin_predict_path)
    assert compare_evals_with_precision(fit_eval_path, json_predict_path)
# Loss-function grid for the quantized-pool tests; as the name says, MAPE is excluded.
LOSS_FUNCTIONS_NO_MAPE = ['RMSE', 'RMSEWithUncertainty', 'Logloss', 'MAE', 'CrossEntropy', 'Quantile', 'LogLinQuantile', 'Poisson']
@pytest.mark.parametrize('loss_function', LOSS_FUNCTIONS_NO_MAPE)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_quantized_adult_pool(loss_function, boosting_type):
    """Train on the pre-quantized adult pool, then apply the model to the raw
    tsv test set and canonize the predictions."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    execute_catboost_fit('CPU', [
        '--use-best-model', 'false',
        '--loss-function', loss_function,
        '-f', 'quantized://' + data_file('quantized_adult', 'train.qbin'),
        '-t', 'quantized://' + data_file('quantized_adult', 'test.qbin'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '4',
        '-m', model_path,
    ])
    apply_catboost(
        model_path,
        data_file('quantized_adult', 'test_small.tsv'),
        data_file('quantized_adult', 'pool.cd'),
        eval_path
    )
    return [local_canonical_file(eval_path, diff_tool=diff_tool())]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_quantized_with_one_thread(boosting_type):
    """Smoke test: single-threaded (-T 1) training on a quantized pool must run."""
    model_path = yatest.common.test_output_path('model.bin')
    # Keep cmd a tuple so the debug print below keeps its original format.
    cmd = (
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '-f', 'quantized://' + data_file('querywise', 'train.quantized'),
        '--boosting-type', boosting_type,
        '-i', '10',
        '-w', '0.03',
        '-T', '1',
        '-m', model_path,
        '--target-border', '0.5',
    )
    print(cmd)
    execute_catboost_fit('CPU', cmd)
def test_eval_result_on_different_pool_type():
    """Eval results must not depend on whether the pools are raw tsv or quantized."""

    def pool_path(set_name, quantized=False):
        path = data_file('querywise', set_name)
        if quantized:
            return 'quantized://' + path + '.quantized'
        return path

    def fit(train, test, eval_file):
        execute_catboost_fit('CPU', [
            '--use-best-model', 'false',
            '--loss-function', 'Logloss',
            '--border-count', '128',
            '-f', train,
            '-t', test,
            '--cd', data_file('querywise', 'train.cd'),
            '-i', '10',
            '-T', '4',
            '--target-border', '0.5',
            '--eval-file', eval_file,
        ])

    raw_eval_path = yatest.common.test_output_path('test.eval')
    quantized_eval_path = yatest.common.test_output_path('test.eval.quantized')
    fit(pool_path('train'), pool_path('test'), raw_eval_path)
    fit(pool_path('train', True), pool_path('test', True), quantized_eval_path)
    assert filecmp.cmp(raw_eval_path, quantized_eval_path)
    return [local_canonical_file(raw_eval_path)]
def test_apply_on_different_pool_type():
    """A model trained on quantized pools must produce identical predictions when
    applied to the raw tsv test set and to the quantized test set."""

    def pool_path(set_name, quantized=False):
        path = data_file('querywise', set_name)
        if quantized:
            return 'quantized://' + path + '.quantized'
        return path

    model_path = yatest.common.test_output_path('model.bin')
    raw_eval_path = yatest.common.test_output_path('test.eval')
    quantized_eval_path = yatest.common.test_output_path('test.eval.quantized')
    cd_file = data_file('querywise', 'train.cd')
    execute_catboost_fit('CPU', [
        '--use-best-model', 'false',
        '--loss-function', 'Logloss',
        '--learn-set', pool_path('train', True),
        '--test-set', pool_path('test', True),
        '--column-description', cd_file,
        '-i', '10',
        '-T', '4',
        '--target-border', '0.5',
        '--model-file', model_path,
    ])
    # Apply to the raw tsv test set (needs the cd file)...
    yatest.common.execute([
        CATBOOST_PATH, 'calc',
        '--input-path', pool_path('test'),
        '--column-description', cd_file,
        '--model-file', model_path,
        '--output-path', raw_eval_path,
        '--prediction-type', 'RawFormulaVal'
    ])
    # ...and to the quantized test set (schema is embedded in the pool).
    yatest.common.execute([
        CATBOOST_PATH, 'calc',
        '--input-path', pool_path('test', True),
        '--model-file', model_path,
        '--output-path', quantized_eval_path,
        '--prediction-type', 'RawFormulaVal'
    ])
    assert filecmp.cmp(raw_eval_path, quantized_eval_path)
def test_apply_output_column_by_idx():
    """'--output-columns #idx:name' must copy input columns through to the eval
    file unchanged."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    learn = data_file('black_friday', 'train')
    test = data_file('black_friday', 'test')
    cd = data_file('black_friday', 'cd')
    execute_catboost_fit('CPU', [
        '--use-best-model', 'false',
        '--loss-function', 'RMSE',
        '--learn-set', learn,
        '--test-set', test,
        '--column-description', cd,
        '-i', '10',
        '-T', '4',
        '--model-file', model_path,
        '--has-header'
    ])
    column_names = [
        'Gender',
        'Age',
        'Occupation',
        'City_Category',
        'Stay_In_Current_City_Years',
        'Marital_Status',
        'Product_Category_1',
        'Product_Category_2',
        'Product_Category_3',
    ]
    indexed_columns = ['#{}:{}'.format(idx, name) for idx, name in enumerate(column_names)]
    output_columns = ','.join(['RawFormulaVal', 'GroupId', 'SampleId'] + indexed_columns + ['Label'])
    yatest.common.execute([
        CATBOOST_PATH, 'calc',
        '--input-path', test,
        '--column-description', cd,
        '--model-file', model_path,
        '--output-path', eval_path,
        '--output-columns', output_columns,
        '--has-header'
    ])
    # Skip the header line of both files, then compare column-by-column.
    with open(eval_path, 'r') as f:
        eval_lines = f.readlines()[1:]
    with open(test, 'r') as f:
        test_lines = f.readlines()[1:]
    assert len(eval_lines) == len(test_lines)
    for eval_line, test_line in zip(eval_lines, test_lines):
        # First eval column is the prediction; the rest must mirror the input.
        for eval_column, test_column in zip(eval_line.split('\t')[1:], test_line.split('\t')):
            assert eval_column == test_column
@pytest.mark.parametrize(
    'dataset_name,loss_function,has_pairs,has_group_weights',
    [
        ('adult_small_broken_features', 'Logloss', False, False),
        ('querywise_broken_pairs', 'RMSE', True, False),
        ('querywise_broken_group_weights', 'RMSE', False, True),
    ]
)
def test_broken_dsv_format(dataset_name, loss_function, has_pairs, has_group_weights):
    """Malformed dsv pools (broken features / pairs / group weights) must make
    the fit fail."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')

    def broken(file_name):
        return data_file('broken_format', dataset_name, file_name)

    cmd = [
        '--loss-function', loss_function,
        '--learn-set', broken('train'),
        '--test-set', broken('test'),
        '--column-description', broken('train.cd'),
        '-i', '1',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
    ]
    if has_pairs:
        cmd += [
            '--learn-pairs', broken('train.pairs'),
            '--test-pairs', broken('test.pairs'),
        ]
    if has_group_weights:
        cmd += [
            '--learn-group-weights', broken('train.group_weights'),
            '--test-group-weights', broken('test.group_weights'),
        ]
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', cmd)
@pytest.mark.use_fixtures('compressed_data')
@pytest.mark.parametrize(
    'loss_function,eval_metric,boosting_type',
    [
        ('QueryRMSE', 'NDCG', 'Plain'),
        ('QueryRMSE', 'NDCG', 'Ordered'),
        ('YetiRankPairwise', 'NDCG', 'Plain'),
        ('PairLogit:max_pairs=30', 'PairLogit:max_pairs=30', 'Plain'),
        ('PairLogitPairwise:max_pairs=30', 'NDCG', 'Plain'),
        ('PairLogitPairwise:max_pairs=30', 'PairLogit:max_pairs=30', 'Plain'),
    ],
    ids=[
        'loss_function=QueryRMSE,eval_metric=NDCG,boosting_type=Plain',
        'loss_function=QueryRMSE,eval_metric=NDCG,boosting_type=Ordered',
        'loss_function=YetiRankPairwise,eval_metric=NDCG,boosting_type=Plain',
        'loss_function=PairLogit:max_pairs=30,eval_metric=PairLogit:max_pairs=30,boosting_type=Plain',
        'loss_function=PairLogitPairwise:max_pairs=30,eval_metric=NDCG,boosting_type=Plain',
        'loss_function=PairLogitPairwise:max_pairs=30,eval_metric=PairLogit:max_pairs=30,boosting_type=Plain'
    ]
)
def test_groupwise_with_cat_features(compressed_data, loss_function, eval_metric, boosting_type):
    """Canonize groupwise losses on the mslr_web1k dataset with cat features."""
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    dataset_dir = os.path.join(compressed_data.name, 'mslr_web1k')
    execute_catboost_fit('CPU', [
        '--loss-function', loss_function,
        '-f', os.path.join(dataset_dir, 'train'),
        '-t', os.path.join(dataset_dir, 'test'),
        '--column-description', os.path.join(dataset_dir, 'cd.with_cat_features'),
        '--boosting-type', boosting_type,
        '-i', '100',
        '-T', '8',
        '--eval-metric', eval_metric,
        '--metric-period', '100',
        '--use-best-model', 'false',
        '--test-err-log', test_error_path,
    ])
    return [local_canonical_file(test_error_path, diff_tool=diff_tool(1e-5))]
def test_gradient_walker():
    """Smoke-test Ordered boosting with AnyImprovement leaf-estimation backtracking; canonize the eval."""
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = (
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '20',
        '-T', '4',
        '--eval-file', eval_path,
        '--use-best-model', 'false',
        '--boosting-type', 'Ordered',
        '--max-ctr-complexity', '4',
        '--leaf-estimation-iterations', '10',
        '--leaf-estimation-backtracking', 'AnyImprovement',
    )
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize(
    'loss_function', ['YetiRankPairwise', 'PairLogitPairwise'],
    ids=['loss_function=YetiRankPairwise', 'loss_function=PairLogitPairwise']
)
def test_groupwise_with_bad_one_hot_max_size(loss_function):
    """Pairwise groupwise losses with --one_hot_max_size 10 on black_friday must fail to fit."""
    fit_args = (
        '--loss-function', loss_function,
        '--has-header',
        '-f', data_file('black_friday', 'train'),
        '-t', data_file('black_friday', 'test'),
        '--column-description', data_file('black_friday', 'cd'),
        '--boosting-type', 'Plain',
        '-i', '10',
        '-T', '4',
        '--eval-metric', 'NDCG',
        '--one_hot_max_size', '10'
    )
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', fit_args)
def test_load_quantized_pool_with_double_baseline():
    """Fitting from a quantized pool carrying a double baseline must succeed."""
    execute_catboost_fit('CPU', (
        '-f', 'quantized://' + data_file('quantized_with_baseline', 'dataset.qbin'),
        '-i', '10'))
def test_write_predictions_to_streams():
    """`calc` must be able to write predictions to stream://stdout and stream://stderr.

    Each redirected stream is captured to a file and compared with the eval
    produced by the fit itself.
    """
    model_path = yatest.common.test_output_path('model.bin')
    fit_eval_path = yatest.common.test_output_path('test.eval')
    redirected_eval_path = yatest.common.test_output_path('calc_test.eval')
    execute_catboost_fit('CPU', (
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--eval-file', fit_eval_path,
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '10',
        '-m', model_path
    ))
    # Same check for both supported streams; only the redirection target differs.
    for stream_name in ('stdout', 'stderr'):
        calc_cmd = (
            CATBOOST_PATH,
            'calc',
            '--input-path', data_file('adult', 'test_small'),
            '--column-description', data_file('adult', 'train.cd'),
            '-m', model_path,
            '--output-path', 'stream://' + stream_name,
        )
        with open(redirected_eval_path, 'w') as redirected:
            if stream_name == 'stdout':
                yatest.common.execute(calc_cmd, stdout=redirected)
            else:
                yatest.common.execute(calc_cmd, stderr=redirected)
        assert compare_evals(fit_eval_path, redirected_eval_path)
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_mvs_bootstrap(boosting_type):
    """MVS bootstrap: different --subsample rates must produce different evals.

    Trains a reference with subsample=0.5, checks rates 0.1 and 0.9 produce
    evals that differ from the reference, and canonizes the reference eval.
    """
    def run_catboost(eval_path, mvs_sample_rate):
        # One fit with the given MVS subsample rate; all other options fixed.
        cmd = [
            '--use-best-model', 'false',
            '--allow-writing-files', 'false',
            '--loss-function', 'Logloss',
            '--max-ctr-complexity', '5',
            '-f', data_file('airlines_5K', 'train'),
            '-t', data_file('airlines_5K', 'test'),
            '--column-description', data_file('airlines_5K', 'cd'),
            '--has-header',
            '--boosting-type', boosting_type,
            '--bootstrap-type', 'MVS',
            '--subsample', mvs_sample_rate,
            '-i', '50',
            '-w', '0.03',
            '-T', '6',
            '-r', '0',
            '--leaf-estimation-iterations', '10',
            '--eval-file', eval_path,
        ]
        execute_catboost_fit('CPU', cmd)

    ref_eval_path = yatest.common.test_output_path('test.eval')
    run_catboost(ref_eval_path, '0.5')
    for sample_rate in ('0.1', '0.9'):
        eval_path = yatest.common.test_output_path('test_{}.eval'.format(sample_rate))
        run_catboost(eval_path, sample_rate)
        # Idiomatic negation instead of the original `... is False` identity test.
        assert not filecmp.cmp(ref_eval_path, eval_path)

    return [local_canonical_file(ref_eval_path)]
def test_simple_ctr():
    """Train with a custom --simple-ctr description and canonize the eval."""
    model_path = yatest.common.test_output_path('model.bin')
    eval_path = yatest.common.test_output_path('test.eval')
    ctr_descriptions = [
        'Borders:TargetBorderCount=15',
        'Buckets:TargetBorderCount=15',
        'Borders:TargetBorderType=MinEntropy',
        'Counter:CtrBorderCount=20',
    ]
    execute_catboost_fit('CPU', (
        '--loss-function', 'RMSE',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '--boosting-type', 'Ordered',
        '-i', '20',
        '-T', '4',
        '-m', model_path,
        '--eval-file', eval_path,
        '--simple-ctr', ','.join(ctr_descriptions),
    ))
    return [local_canonical_file(eval_path)]
def test_output_options():
    """Canonize the training-options JSON emitted into the train dir."""
    options_filename = 'training_options.json'
    train_dir = 'catboost_info'
    execute_catboost_fit('CPU', (
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--column-description', data_file('adult', 'train.cd'),
        '-i', '10',
        '-T', '4',
        '--train-dir', train_dir,
        '--training-options-file', options_filename,
    ))
    return local_canonical_file(os.path.join(train_dir, options_filename))
def test_target_border():
    """Logloss on a real-valued target binarized via --target-border; canonize the eval."""
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = (
        '--loss-function', 'Logloss',
        '-f', data_file('querywise', 'train'),
        '-t', data_file('querywise', 'test'),
        '--column-description', data_file('querywise', 'train.cd'),
        '-i', '20',
        '-T', '4',
        '--eval-file', eval_path,
        '--use-best-model', 'false',
        '--target-border', '0.3'
    )
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_path)]
def test_monotonic_constraint():
    """Fitted predictions must respect per-feature monotone constraints.

    Trains a regressor on higgs with a fixed constraint vector, then for each
    constrained feature sweeps the feature value across its split-border grid
    and asserts the predictions move monotonically in the prescribed direction.
    """
    train_pool = catboost.Pool(
        data_file('higgs', 'train_small'),
        column_description=data_file('higgs', 'train.cd')
    )
    test_pool = catboost.Pool(
        data_file('higgs', 'test_small'),
        column_description=data_file('higgs', 'train.cd')
    )
    # Per feature index: 1 = non-decreasing, -1 = non-increasing, 0 = unconstrained.
    monotone_constraints = [0, 0, 1, -1, 0, 0, 1, 0, -1, 1, 1, -1, 0, 1, 0, 0, -1, 1, 1, -1, 0, 0, 0, 0, 0, -1, 0, -1]
    model = catboost.CatBoostRegressor(
        n_estimators=100,
        learning_rate=0.2,
        monotone_constraints=monotone_constraints,
        verbose=False
    ).fit(train_pool, eval_set=test_pool)
    # Dummy data/target are only needed to extract the per-feature split borders.
    dummy_data = np.zeros((1, test_pool.num_col()))
    dummy_target = np.zeros(len(dummy_data))
    feature_stats = model.calc_feature_statistics(dummy_data, dummy_target, plot=False)
    for feature_index, feature_name in enumerate(model.feature_names_):
        monotonicity = monotone_constraints[feature_index]
        if monotonicity == 0:
            continue
        feature_borders = feature_stats[feature_name]['borders']
        if len(feature_borders) == 0:
            continue
        # Probe one value inside every border bucket plus one point past each end.
        mid_values = (feature_borders[:-1] + feature_borders[1:]) / 2
        min_value = feature_borders[0] - 1
        max_value = feature_borders[-1] + 1
        feature_values = np.array([min_value] + list(mid_values) + [max_value])
        for obj in test_pool.get_features():
            # Vary only this feature; keep every other column of the object fixed.
            obj_variations = np.zeros((len(feature_values), test_pool.num_col()))
            obj_variations[:] = obj.reshape((1, -1))
            obj_variations[:, feature_index] = feature_values
            model_predicts = model.predict(obj_variations)
            prediction_deltas = model_predicts[1:] - model_predicts[:-1]
            # Consecutive deltas must share the constraint's sign (or be zero).
            assert np.all(prediction_deltas * monotonicity >= 0)
def test_different_formats_of_monotone_constraints():
    """Every supported monotone-constraints spelling must yield identical evals.

    Trains a baseline without constraints and a reference with the tuple
    syntax, then checks the index/name CLI forms and several params-file
    forms against the reference eval.
    """
    eval_path = yatest.common.test_output_path('eval.tsv')
    eval_path_with_monotone1 = yatest.common.test_output_path('eval_monotone1.tsv')
    eval_path_with_monotone2 = yatest.common.test_output_path('eval_monotone2.tsv')
    cmd = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--cd', data_file('adult', 'train_with_id.cd'),
        '-i', '20'
    ]
    execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path])
    execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path_with_monotone1, '--monotone-constraints', '(0,0,0,1,0,-1)'])
    # Constraints must actually change the predictions.
    assert not filecmp.cmp(eval_path_with_monotone1, eval_path)

    for constraints in ['3:1,5:-1', 'F0:1,F1:-1']:
        execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path_with_monotone2, '--monotone-constraints', constraints])
        assert filecmp.cmp(eval_path_with_monotone1, eval_path_with_monotone2)

    params_file = yatest.common.test_output_path("params.json")
    for constraints in ['3:1,5:-1', 'F0:1,F1:-1', [0, 0, 0, 1, 0, -1], {3: 1, 5: -1}, {'F0': 1, 'F1': -1}]:
        # Write via a context manager so the handle is flushed and closed before
        # catboost reads the file (the original leaked it via json.dump(..., open(...))).
        with open(params_file, 'w') as params_out:
            json.dump({'monotone_constraints': constraints}, params_out)
        execute_catboost_fit('CPU', cmd + ['--eval-file', eval_path_with_monotone2, '--params-file', params_file])
        assert filecmp.cmp(eval_path_with_monotone1, eval_path_with_monotone2)
class TestModelWithoutParams(object):
    """Behaviour of tools on JSON models stripped of 'model_info' or its 'params'."""

    @pytest.fixture(
        params=[
            ('cut-info', 'RMSE'),
            ('cut-params', 'RMSE'),
            ('cut-info', 'QueryRMSE'),
            ('cut-params', 'QueryRMSE'),
        ],
        ids=lambda param: '-'.join(param),
    )
    def model_etc(self, request):
        """Train a small JSON model and strip part of its metadata.

        Returns (model_json_path, learn_set_path, test_set_path, cd_path).
        """
        cut, loss = request.param
        model_json = yatest.common.test_output_path('model.json')
        learn_set = data_file('querywise', 'train')
        test_set = data_file('querywise', 'test')
        cd = data_file('querywise', 'train.cd')
        cmd = (
            '--loss-function', loss,
            '--learn-set', learn_set,
            '--test-set', test_set,
            '--column-description', cd,
            '--iterations', '10',
            '--model-file', model_json,
            '--model-format', 'Json',
            '--use-best-model', 'false'
        )
        execute_catboost_fit('CPU', cmd)
        # Use context managers so the handles are closed deterministically (the
        # original leaked them via json.load(open(...)) / json.dump(..., open(...))).
        with open(model_json) as model_in:
            model = json.load(model_in)
        if cut == 'cut-info':
            model.pop('model_info')
        if cut == 'cut-params':
            model['model_info'].pop('params')
        with open(model_json, 'wt') as model_out:
            json.dump(model, model_out)
        return model_json, learn_set, test_set, cd

    def test_ostr(self, model_etc):
        """ostr on a model with stripped metadata must fail."""
        model_json, train_set, test_set, cd = model_etc
        ostr_result = yatest.common.test_output_path('result.txt')
        ostr_cmd = (
            CATBOOST_PATH, 'ostr',
            '--learn-set', train_set,
            '--test-set', test_set,
            '--column-description', cd,
            '--model-file', model_json,
            '--model-format', 'Json',
            '--output-path', ostr_result,
        )
        with pytest.raises(yatest.common.ExecutionError):
            yatest.common.execute(ostr_cmd)

    @pytest.mark.parametrize('should_fail,fstr_type', [
        (False, 'FeatureImportance'),
        (False, 'PredictionValuesChange'),
        (True, 'LossFunctionChange'),
        (False, 'ShapValues'),
    ])
    def test_fstr(self, model_etc, fstr_type, should_fail):
        """Per the parametrization, only LossFunctionChange must fail on a cut model."""
        model_json, train_set, _, cd = model_etc
        fstr_result = yatest.common.test_output_path('result.txt')
        fstr_cmd = (
            CATBOOST_PATH, 'fstr',
            '--input-path', train_set,
            '--column-description', cd,
            '--model-file', model_json,
            '--model-format', 'Json',
            '--output-path', fstr_result,
            '--fstr-type', fstr_type,
        )
        if should_fail:
            with pytest.raises(yatest.common.ExecutionError):
                yatest.common.execute(fstr_cmd)
        else:
            yatest.common.execute(fstr_cmd)
def test_equal_feature_names():
    """Training must fail when the column description declares duplicate feature names."""
    fit_args = (
        '--loss-function', 'RMSE',
        '-f', data_file('querywise', 'train'),
        '--column-description', data_file('querywise', 'train.cd.equal_names'),
    )
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', fit_args)
def enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count, only_baseline=False):
    """List per-fold output directory names produced by eval-feature.

    Baseline directories are shared between feature sets unless eval_mode is
    'OneVsOthers' (then they carry the set index too), so duplicates are
    emitted only once. Testing directories are always per set and fold and are
    skipped entirely when only_baseline is True.
    """
    if eval_mode == 'OneVsOthers':
        baseline_template = 'Baseline_set_{set_idx}_fold_{fold_idx}'
    else:
        baseline_template = 'Baseline_fold_{fold_idx}'
    result = []
    for set_idx in range(set_count):
        for fold_idx in range(offset, offset + fold_count):
            baseline_dir = baseline_template.format(set_idx=set_idx, fold_idx=fold_idx)
            if baseline_dir not in result:
                result.append(baseline_dir)
            if not only_baseline:
                result.append('Testing_set_{}_fold_{}'.format(set_idx, fold_idx))
    return result
@pytest.mark.parametrize('eval_mode', ['OneVsNone', 'OneVsAll', 'OneVsOthers', 'OthersVsAll'])
@pytest.mark.parametrize('features_to_eval', ['0-6', '0-6;7-13'], ids=['one_set', 'two_sets'])
@pytest.mark.parametrize('offset', [0, 2])
def test_eval_feature(eval_mode, features_to_eval, offset):
    """Run eval-feature on higgs; canonize the summary plus per-fold error logs and fstrs."""
    summary_path = yatest.common.test_output_path('feature.eval')
    test_err_log = 'test_error.log'
    fstr_file = 'fstrs'
    train_dir = yatest.common.test_output_path('')
    fold_count = 2
    yatest.common.execute((
        CATBOOST_PATH,
        'eval-feature',
        '--loss-function', 'RMSE',
        '-f', data_file('higgs', 'train_small'),
        '--cd', data_file('higgs', 'train.cd'),
        '--features-to-evaluate', features_to_eval,
        '--feature-eval-mode', eval_mode,
        '-i', '30',
        '-T', '4',
        '-w', '0.7',
        '--feature-eval-output-file', summary_path,
        '--offset', str(offset),
        '--fold-count', str(fold_count),
        '--fold-size-unit', 'Object',
        '--fold-size', '20',
        '--test-err-log', test_err_log,
        '--train-dir', train_dir,
        '--fstr-file', fstr_file,
    ))
    set_count = len(features_to_eval.split(';'))
    artifacts = [local_canonical_file(summary_path, diff_tool=diff_tool())]
    for fold_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count):
        artifacts.append(local_canonical_file(os.path.join(train_dir, fold_dir, test_err_log), diff_tool=diff_tool()))
        artifacts.append(local_canonical_file(os.path.join(train_dir, fold_dir, fstr_file), diff_tool=diff_tool()))
    return artifacts
@pytest.mark.parametrize('offset', [0, 2])
def test_eval_feature_empty_feature_set(offset):
    """eval-feature without --features-to-evaluate trains baseline folds only."""
    summary_path = yatest.common.test_output_path('feature.eval')
    test_err_log = 'test_error.log'
    fstr_file = 'fstrs'
    train_dir = yatest.common.test_output_path('')
    fold_count = 2
    eval_mode = 'OneVsNone'
    yatest.common.execute((
        CATBOOST_PATH,
        'eval-feature',
        '--loss-function', 'RMSE',
        '-f', data_file('higgs', 'train_small'),
        '--cd', data_file('higgs', 'train.cd'),
        '--feature-eval-mode', eval_mode,
        '-i', '30',
        '-T', '4',
        '-w', '0.7',
        '--feature-eval-output-file', summary_path,
        '--offset', str(offset),
        '--fold-count', str(fold_count),
        '--fold-size-unit', 'Object',
        '--fold-size', '20',
        '--test-err-log', test_err_log,
        '--train-dir', train_dir,
        '--fstr-file', fstr_file,
    ))
    artifacts = [local_canonical_file(summary_path, diff_tool=diff_tool())]
    # Only baseline dirs exist: there is no feature set to test against.
    for fold_dir in enumerate_eval_feature_output_dirs(eval_mode, 1, offset, fold_count, only_baseline=True):
        artifacts.append(local_canonical_file(os.path.join(train_dir, fold_dir, test_err_log), diff_tool=diff_tool()))
        artifacts.append(local_canonical_file(os.path.join(train_dir, fold_dir, fstr_file), diff_tool=diff_tool()))
    return artifacts
@pytest.mark.parametrize('eval_mode', ['OneVsNone', 'OneVsAll', 'OneVsOthers', 'OthersVsAll'])
@pytest.mark.parametrize('fold_size_unit', ['Object', 'Group'])
def test_eval_feature_timesplit(eval_mode, fold_size_unit):
    """eval-feature with a time split (learn timestamps + quantile); canonize outputs."""
    summary_path = yatest.common.test_output_path('feature.eval')
    test_err_log = 'test_error.log'
    fstr_file = 'fstrs'
    train_dir = yatest.common.test_output_path('')
    fold_count = 2
    features_to_eval = '2-5;10-15'
    offset = 2
    fold_size = 500
    yatest.common.execute((
        CATBOOST_PATH,
        'eval-feature',
        '--loss-function', 'RMSE',
        '-f', data_file('querywise', 'train'),
        '--cd', data_file('querywise', 'train.cd'),
        '--features-to-evaluate', features_to_eval,
        '--feature-eval-mode', eval_mode,
        '-i', '30',
        '-T', '4',
        '-w', '0.7',
        '--feature-eval-output-file', summary_path,
        '--offset', str(offset),
        '--fold-count', str(fold_count),
        '--fold-size-unit', fold_size_unit,
        '--fold-size', str(fold_size),
        '--test-err-log', test_err_log,
        '--train-dir', train_dir,
        '--fstr-file', fstr_file,
        '--learn-timestamps', data_file('querywise', 'train.timestamps'),
        '--timesplit-quantile', '0.75'
    ))
    set_count = len(features_to_eval.split(';'))
    artifacts = [local_canonical_file(summary_path, diff_tool=diff_tool())]
    for fold_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count):
        artifacts.append(local_canonical_file(os.path.join(train_dir, fold_dir, test_err_log), diff_tool=diff_tool()))
        artifacts.append(local_canonical_file(os.path.join(train_dir, fold_dir, fstr_file), diff_tool=diff_tool()))
    return artifacts
@pytest.mark.parametrize('eval_mode', ['OneVsNone', 'OneVsAll', 'OneVsOthers', 'OthersVsAll'])
@pytest.mark.parametrize('features_to_eval', ['2-5', '2-5;10-15'], ids=['one_set', 'two_sets'])
@pytest.mark.parametrize('offset', [0, 2])
@pytest.mark.parametrize('fstr_mode', ['fstr', 'model'])
def test_eval_feature_snapshot(eval_mode, features_to_eval, offset, fstr_mode):
    """Interrupting eval-feature repeatedly and resuming from --snapshot-file must
    reproduce an uninterrupted run exactly: identical summary, per-fold test
    error logs, and either fstr files or saved models (depending on fstr_mode).
    """
    test_err_log = 'test_error.log'
    fstr_file = 'fstrs'
    model_file = 'model.bin'
    fold_count = 2
    snapshot_interval = 1
    def make_cmd(summary, train_dir):
        # Common eval-feature invocation; only output locations differ per run.
        cmd = (
            CATBOOST_PATH,
            'eval-feature',
            '--loss-function', 'RMSE',
            '-f', data_file('querywise', 'train'),
            '--cd', data_file('querywise', 'train.cd'),
            '-i', '200',
            '-T', '4',
            '-w', '0.1',
            '--boost-from-average', 'False',
            '--permutations', '1',
            '--snapshot-interval', str(snapshot_interval),
            '--features-to-evaluate', features_to_eval,
            '--feature-eval-mode', eval_mode,
            '--feature-eval-output-file', summary,
            '--offset', str(offset),
            '--fold-count', str(fold_count),
            '--fold-size-unit', 'Group',
            '--fold-size', '40',
            '--test-err-log', test_err_log,
            '--train-dir', train_dir,
        )
        if fstr_mode == 'fstr':
            cmd += ('--fstr-file', fstr_file,)
        else:
            cmd += (
                '--model-file', model_file,
                '--use-best-model', 'False',
            )
        return cmd
    # Reference: one uninterrupted run.
    reference_summary = yatest.common.test_output_path('reference_feature.eval')
    reference_dir = yatest.common.test_output_path('reference')
    yatest.common.execute(make_cmd(summary=reference_summary, train_dir=reference_dir))
    snapshot_summary = yatest.common.test_output_path('snapshot_feature.eval')
    snapshot_dir = yatest.common.test_output_path('snapshot')
    snapshot = yatest.common.test_output_path('eval_feature.snapshot')
    eval_with_snapshot_cmd = make_cmd(summary=snapshot_summary, train_dir=snapshot_dir) + ('--snapshot-file', snapshot,)
    def stop_after_timeout(cmd, timeout):
        # Kill the run after `timeout` seconds; a snapshot file is left behind.
        try:
            yatest.common.execute(cmd, timeout=timeout)
        except ExecutionTimeoutError:
            pass
    resume_from_snapshot_count = 15
    for idx in range(resume_from_snapshot_count):
        # Alternate short and just-over-one-interval timeouts to interrupt
        # training at different phases.
        timeout = 0.5 if idx % 2 == 0 else snapshot_interval + 0.1
        stop_after_timeout(cmd=eval_with_snapshot_cmd, timeout=timeout)
        # Drop partial outputs; only the snapshot file carries the resume state.
        yatest.common.execute(['rm', '-rf', snapshot_dir])
    # Final run to completion, resumed from the last snapshot.
    yatest.common.execute(eval_with_snapshot_cmd)
    assert filecmp.cmp(reference_summary, snapshot_summary)
    pj = os.path.join
    set_count = len(features_to_eval.split(';'))
    for output_dir in enumerate_eval_feature_output_dirs(eval_mode, set_count, offset, fold_count):
        assert filecmp.cmp(pj(reference_dir, output_dir, test_err_log), pj(snapshot_dir, output_dir, test_err_log))
        if fstr_mode == 'fstr':
            assert filecmp.cmp(pj(reference_dir, output_dir, fstr_file), pj(snapshot_dir, output_dir, fstr_file))
        else:
            def load_json_model(model_path):
                # Convert to JSON and wipe run-specific metadata so models from
                # different runs compare equal on structure and values only.
                model = catboost.CatBoost()
                model.load_model(model_path)
                model.save_model(model_path + '.json', format='json')
                with open(model_path + '.json') as json_model_file:
                    json_model = json.load(json_model_file)
                json_model["model_info"]["output_options"] = ""
                json_model["model_info"]["train_finish_time"] = ""
                json_model["model_info"]["model_guid"] = ""
                json_model["model_info"]["params"]["flat_params"]["snapshot_file"] = ""
                json_model["model_info"]["params"]["flat_params"]["save_snapshot"] = ""
                json_model["model_info"]["params"]["flat_params"]["train_dir"] = ""
                return json_model
            assert load_json_model(pj(reference_dir, output_dir, model_file)) == load_json_model(pj(snapshot_dir, output_dir, model_file))
def test_eval_feature_snapshot_wrong_options():
    """Resuming eval-feature from a snapshot with a changed fold size must fail."""
    summary = yatest.common.test_output_path('eval_feature_summary')
    snapshot = yatest.common.test_output_path('eval_feature_snapshot')

    def make_cmd(fold_size):
        return (
            CATBOOST_PATH,
            'eval-feature',
            '--loss-function', 'RMSE',
            '-f', data_file('querywise', 'train'),
            '--cd', data_file('querywise', 'train.cd'),
            '-i', '600',
            '-T', '4',
            '-w', '0.1',
            '--permutations', '1',
            '--snapshot-interval', '1',
            '--features-to-evaluate', '2-5',
            '--feature-eval-mode', 'OneVsAll',
            '--feature-eval-output-file', summary,
            '--offset', '0',
            '--fold-count', '5',
            '--fold-size-unit', 'Group',
            '--fold-size', str(fold_size),
            '--snapshot-file', snapshot
        )

    # Start training and interrupt it so that a snapshot file is left behind.
    try:
        yatest.common.execute(make_cmd(fold_size=40), timeout=3)
    except ExecutionTimeoutError:
        pass
    # Resuming with an inconsistent fold size must be rejected.
    with pytest.raises(yatest.common.ExecutionError):
        yatest.common.execute(make_cmd(fold_size=20))
def test_eval_feature_parse_timestamps():
    """eval-feature accepts a proper timestamps file and rejects a malformed one."""
    summary = yatest.common.test_output_path('eval_feature_summary')

    def cmd_with_timestamps(timestamps_file):
        return (
            CATBOOST_PATH,
            'eval-feature',
            '--loss-function', 'QueryRMSE',
            '-f', data_file('querywise', 'train'),
            '--cd', data_file('querywise', 'train.cd'),
            '-i', '600',
            '-T', '4',
            '-w', '0.1',
            '--permutations', '1',
            '--snapshot-interval', '1',
            '--features-to-evaluate', '2-5',
            '--feature-eval-mode', 'OneVsAll',
            '--feature-eval-output-file', summary,
            '--offset', '0',
            '--fold-count', '5',
            '--fold-size-unit', 'Group',
            '--fold-size', '40',
            '--learn-timestamps', data_file('querywise', timestamps_file),
            '--timesplit-quantile', '0.75'
        )

    # A valid timestamps file parses fine.
    yatest.common.execute(cmd_with_timestamps('train.timestamps'))
    # A group-weights file is not a timestamps file: parsing must fail.
    with pytest.raises(yatest.common.ExecutionError):
        yatest.common.execute(cmd_with_timestamps('train.group_weights'))
def test_eval_feature_relative_fold_size():
    """--relative-fold-size works alone but conflicts with an explicit --fold-size."""
    summary = yatest.common.test_output_path('eval_feature_summary')
    base_cmd = (
        CATBOOST_PATH,
        'eval-feature',
        '--loss-function', 'QueryRMSE',
        '-f', data_file('querywise', 'train'),
        '--cd', data_file('querywise', 'train.cd'),
        '-i', '100',
        '-T', '4',
        '-w', '0.1',
        '--permutations', '1',
        '--snapshot-interval', '1',
        '--features-to-evaluate', '2-5',
        '--feature-eval-mode', 'OneVsAll',
        '--feature-eval-output-file', summary,
        '--offset', '0',
        '--fold-count', '5',
        '--fold-size-unit', 'Group',
        '--relative-fold-size', '0.1',
    )
    yatest.common.execute(base_cmd)
    # Passing both fold-size flavours is contradictory and must be rejected.
    with pytest.raises(yatest.common.ExecutionError):
        yatest.common.execute(base_cmd + ('--fold-size', '40',))
# Metric names exercised for both --eval-metric and --custom-metric below.
TEST_METRIC_DESCRIPTION_METRICS_LIST = ['Logloss', 'Precision', 'AUC']
@pytest.mark.parametrize('dataset_has_weights', [True, False], ids=['dataset_has_weights=True', 'dataset_has_weights=False'])
@pytest.mark.parametrize('eval_metric_loss', TEST_METRIC_DESCRIPTION_METRICS_LIST,
                         ids=['eval_loss=' + mode for mode in TEST_METRIC_DESCRIPTION_METRICS_LIST])
@pytest.mark.parametrize('eval_metric_use_weights', [True, False, None],
                         ids=['eval_weights=' + str(mode) for mode in [True, False, None]])
@pytest.mark.parametrize('custom_metric_loss', TEST_METRIC_DESCRIPTION_METRICS_LIST,
                         ids=['custom_loss=' + mode for mode in TEST_METRIC_DESCRIPTION_METRICS_LIST])
@pytest.mark.parametrize('custom_metric_use_weights', [True, False, None],
                         ids=['custom_weights=' + str(mode) for mode in [True, False, None]])
def test_metric_description(dataset_has_weights, eval_metric_loss, eval_metric_use_weights, custom_metric_loss, custom_metric_use_weights):
    """Check metric column headers of learn/test error logs for every combination
    of eval/custom metric and explicit use_weights, on weighted and unweighted pools.

    On an unweighted pool any explicit use_weights must make the fit fail;
    otherwise the header set must be unique and match the expected descriptions.
    """
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    test_error_path = yatest.common.test_output_path('test_error.tsv')
    if dataset_has_weights:
        train_pool_filename = data_file('adult_weight', 'train_weight')
        test_pool_filename = data_file('adult_weight', 'test_weight')
        pool_cd_filename = data_file('adult_weight', 'train.cd')
    else:
        train_pool_filename = data_file('adult', 'train_small')
        test_pool_filename = data_file('adult', 'test_small')
        pool_cd_filename = data_file('adult', 'train.cd')
    eval_metric = eval_metric_loss
    if eval_metric == 'AUC':
        # AUC is skipped on train by default; force it so the learn log has the column.
        eval_metric += ':hints=skip_train~false'
    if eval_metric_use_weights is not None:
        # AUC already carries a ':'-introduced param, so further params are ';'-separated.
        eval_metric += ';' if eval_metric_loss == 'AUC' else ':'
        eval_metric += 'use_weights=' + str(eval_metric_use_weights)
    custom_metric = custom_metric_loss
    if custom_metric == 'AUC':
        custom_metric += ':hints=skip_train~false'
    if custom_metric_use_weights is not None:
        custom_metric += ';' if custom_metric_loss == 'AUC' else ':'
        custom_metric += 'use_weights=' + str(custom_metric_use_weights)
    cmd = (
        '--loss-function', 'Logloss',
        '-f', train_pool_filename,
        '-t', test_pool_filename,
        '--cd', pool_cd_filename,
        '-i', '10',
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '--eval-metric', eval_metric,
        '--custom-metric', custom_metric,
    )
    should_fail = not dataset_has_weights and (eval_metric_use_weights is not None or custom_metric_use_weights is not None)
    # NOTE(review): bare `ExecutionError` here vs `yatest.common.ExecutionError`
    # elsewhere in this file — presumably imported at module top; confirm.
    try:
        execute_catboost_fit('CPU', cmd)
    except ExecutionError:
        assert should_fail
        return
    for filename in [learn_error_path, test_error_path]:
        with open(filename, 'r') as f:
            # Header row: first column is the iteration index, then metric names.
            metrics_descriptions = f.readline().split('\t')[1:]
            # Strip the trailing newline from the last column name.
            metrics_descriptions[-1] = metrics_descriptions[-1][:-1]
            unique_metrics_descriptions = set([s.lower() for s in metrics_descriptions])
            # No duplicate metric columns may appear.
            assert len(metrics_descriptions) == len(unique_metrics_descriptions)
            expected_objective_metric_description = 'Logloss'
            if dataset_has_weights:
                expected_eval_metric_description = \
                    eval_metric_loss if eval_metric_use_weights is None else eval_metric_loss + ':use_weights=' + str(eval_metric_use_weights)
                if custom_metric_loss == 'AUC':
                    expected_custom_metrics_descriptions = \
                        ['AUC' if custom_metric_use_weights is None else 'AUC:use_weights=' + str(custom_metric_use_weights)]
                else:
                    # Without an explicit use_weights a custom metric expands to both variants.
                    expected_custom_metrics_descriptions = (
                        [custom_metric_loss + ':use_weights=False', custom_metric_loss + ':use_weights=True']
                        if custom_metric_use_weights is None
                        else [custom_metric_loss + ':use_weights=' + str(custom_metric_use_weights)])
            else:
                expected_eval_metric_description = eval_metric_loss
                expected_custom_metrics_descriptions = [custom_metric_loss]
            assert unique_metrics_descriptions == set(s.lower() for s in [expected_objective_metric_description] + [expected_eval_metric_description] + expected_custom_metrics_descriptions)
    return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
def test_leafwise_scoring():
    """--dev-leafwise-scoring must not change learn errors.

    Runs the same fit with and without the flag and compares the learn error
    logs byte for byte.
    """
    learn_error_path = yatest.common.test_output_path('learn_error.tsv')
    cmd = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--cd', data_file('adult', 'train.cd'),
        '-i', '50',
        '-r', '0',
        '--learn-err-log', learn_error_path
    ]
    execute_catboost_fit('CPU', cmd)
    # Read via context managers so the handles are closed deterministically
    # (the original leaked them via open(...).read()).
    with open(learn_error_path) as learn_errors_file:
        learn_errors_log = learn_errors_file.read()
    execute_catboost_fit('CPU', cmd + ['--dev-leafwise-scoring'])
    with open(learn_error_path) as learn_errors_file:
        new_learn_errors_log = learn_errors_file.read()
    assert new_learn_errors_log == learn_errors_log
def test_group_features():
    """Fit on adult, then calc Probability predictions; canonize learn errors and predictions."""
    learn_err_path = yatest.common.test_output_path('learn_error.tsv')
    predictions_path = yatest.common.test_output_path('test_predictions.tsv')
    model_path = yatest.common.test_output_path('model.bin')
    execute_catboost_fit('CPU', [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--cd', data_file('adult', 'train.cd'),
        '-i', '50',
        '-r', '0',
        '-m', model_path,
        '--learn-err-log', learn_err_path
    ])
    yatest.common.execute([
        CATBOOST_PATH,
        'calc',
        '-m', model_path,
        '--input-path', data_file('adult', 'test_small'),
        '--cd', data_file('adult', 'train.cd'),
        '--output-path', predictions_path,
        '--output-columns', 'Probability'
    ])
    return [local_canonical_file(learn_err_path), local_canonical_file(predictions_path)]
def test_model_sum():
    """A 0.75/0.25 weighted sum of a model with itself must predict exactly like the model."""
    model_path = yatest.common.test_output_path('model.bin')
    model_eval = yatest.common.test_output_path('model_eval.txt')
    execute_catboost_fit('CPU', [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '--cd', data_file('adult', 'train.cd'),
        '-i', '10',
        '-m', model_path,
        '-t', data_file('adult', 'test_small'),
        '--eval-file', model_eval,
        '--output-columns', 'SampleId,RawFormulaVal',
    ])
    # Sum the model with itself; the weights add up to 1.
    sum_path = yatest.common.test_output_path('sum.bin')
    sum_cmd = [CATBOOST_PATH, 'model-sum']
    for weight in (0.75, 0.25):
        sum_cmd += ['--model-with-weight', '{}={}'.format(model_path, weight)]
    sum_cmd += ['--output-path', sum_path]
    yatest.common.execute(sum_cmd)
    sum_eval = yatest.common.test_output_path('sum_eval.txt')
    yatest.common.execute([
        CATBOOST_PATH,
        'calc',
        '-m', sum_path,
        '--input-path', data_file('adult', 'test_small'),
        '--cd', data_file('adult', 'train.cd'),
        '--output-path', sum_eval,
    ])
    # Predictions must match to full precision (0 tolerated differing digits).
    yatest.common.execute(get_limited_precision_dsv_diff_tool(0) + [model_eval, sum_eval])
def test_external_feature_names():
    """FStr must be identical whether or not the cd file already carries feature ids,
    as long as --feature-names-path supplies the names externally."""
    fstr_paths = {
        True: yatest.common.test_output_path('fstr_cd_with_id.tsv'),
        False: yatest.common.test_output_path('fstr_cd_without_id.tsv'),
    }
    for cd_has_feature_names in (False, True):
        cd_file = data_file('adult', 'train_with_id.cd' if cd_has_feature_names else 'train.cd')
        execute_catboost_fit('CPU', (
            '--loss-function', 'Logloss',
            '--target-border', '0.5',
            '-f', data_file('adult', 'train_small'),
            '--column-description', cd_file,
            '-i', '10',
            '-T', '4',
            '--feature-names-path', data_file('adult', 'feature_names'),
            '--fstr-type', 'FeatureImportance',
            '--fstr-file', fstr_paths[cd_has_feature_names]
        ))
    assert filecmp.cmp(fstr_paths[True], fstr_paths[False])
    return [local_canonical_file(fstr_paths[True])]
def test_diffusion_temperature():
    """Langevin boosting with a diffusion temperature; canonize the eval file."""
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--cd', data_file('adult', 'train.cd'),
        '-i', '50',
        '-r', '0',
        '--langevin', 'True',
        '--diffusion-temperature', '1000',
        '--eval-file', eval_path
    ]
    execute_catboost_fit('CPU', fit_args)
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('config', [('Constant', 0.2, 0.1), ('Constant', 2, 0.1), ('Decreasing', 0.2, 0.1)])
def test_model_shrink_correct(config):
    """Valid model-shrink configurations must train successfully; canonize the eval."""
    shrink_mode, shrink_rate, learning_rate = config
    eval_path = yatest.common.test_output_path('test.eval')
    execute_catboost_fit('CPU', [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--cd', data_file('adult', 'train.cd'),
        '-i', '50',
        '-r', '0',
        '--eval-file', eval_path,
        '--model-shrink-mode', shrink_mode,
        '--model-shrink-rate', str(shrink_rate),
        '--learning-rate', str(learning_rate)
    ])
    return [local_canonical_file(eval_path)]
@pytest.mark.parametrize('config', [('Constant', 20, 0.1), ('Constant', 10, 0.1), ('Decreasing', 2, 0.1)])
def test_model_shrink_incorrect(config):
    """Invalid model-shrink rate/mode combinations must make the fit fail."""
    shrink_mode, shrink_rate, learning_rate = config
    eval_path = yatest.common.test_output_path('test.eval')
    fit_args = [
        '--loss-function', 'Logloss',
        '-f', data_file('adult', 'train_small'),
        '-t', data_file('adult', 'test_small'),
        '--cd', data_file('adult', 'train.cd'),
        '-i', '50',
        '-r', '0',
        '--eval-file', eval_path,
        '--model-shrink-mode', shrink_mode,
        '--model-shrink-rate', str(shrink_rate),
        '--learning-rate', str(learning_rate)
    ]
    with pytest.raises(yatest.common.ExecutionError):
        execute_catboost_fit('CPU', fit_args)
@pytest.mark.parametrize('average', ['Macro', 'Micro', 'Weighted'])
def test_total_f1_params(average):
    """Evaluate TotalF1 with each averaging mode on the multiclass cloudness pool."""
    return do_test_eval_metrics(
        metric='TotalF1:average={}'.format(average),
        metric_period='1',
        train=data_file('cloudness_small', 'train_small'),
        test=data_file('cloudness_small', 'test_small'),
        cd=data_file('cloudness_small', 'train.cd'),
        loss_function='MultiClass'
    )
def test_eval_metrics_with_pairs():
    """Evaluate PairAccuracy on querywise using explicit train/test pairs files."""
    # NOTE(review): unlike test_total_f1_params, the result of
    # do_test_eval_metrics is dropped here, so any canonical artifacts it
    # returns are never registered -- confirm whether a `return` was intended.
    do_test_eval_metrics(
        metric='PairAccuracy',
        metric_period='1',
        train=data_file('querywise', 'train'),
        test=data_file('querywise', 'test'),
        cd=data_file('querywise', 'train.cd'),
        loss_function='PairLogit',
        additional_train_params=(
            '--learn-pairs', data_file('querywise', 'train.pairs'),
            '--test-pairs', data_file('querywise', 'test.pairs')
        ),
        additional_eval_params=(
            '--input-pairs', data_file('querywise', 'test.pairs')
        )
    )
def test_tweedie():
    """Tweedie loss with variance_power=1.5 on the crossentropy pool; canonize learn errors."""
    learn_err_path = yatest.common.test_output_path('learn_error.tsv')
    execute_catboost_fit('CPU', (
        '--loss-function', 'Tweedie:variance_power=1.5',
        '-f', data_file('adult_crossentropy', 'train_proba'),
        '--column-description', data_file('adult_crossentropy', 'train.cd'),
        '-i', '100',
        '--learning-rate', '0.5',
        '--learn-err-log', learn_err_path
    ))
    return [local_canonical_file(learn_err_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
@pytest.mark.parametrize('separator_type', SEPARATOR_TYPES)
@pytest.mark.parametrize('feature_estimators', TEXT_FEATURE_ESTIMATORS)
def test_fit_binclass_with_text_features(boosting_type, separator_type, feature_estimators):
    """Binary classification with text features: fit with a custom
    --text-processing config, check that standalone `calc` reproduces the
    fit-time eval, and canonize learn/test error logs and the eval itself.
    """
    output_model_path = yatest.common.test_output_path('model.bin')
    learn_error_path = yatest.common.test_output_path('learn.tsv')
    test_error_path = yatest.common.test_output_path('test.tsv')
    test_eval_path = yatest.common.test_output_path('test.eval')
    calc_eval_path = yatest.common.test_output_path('calc.eval')
    # One tokenizer per separator type, producing word tokens only.
    tokenizers = [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}]
    # Unigram and bigram dictionaries; `dicts` maps each estimator to the
    # dictionaries it consumes.
    dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]
    dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}
    feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer], 'tokenizers_names': [separator_type]} for calcer in feature_estimators.split(',')]
    text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries, 'tokenizers': tokenizers}
    pool_name = 'rotten_tomatoes'
    test_file = data_file(pool_name, 'test')
    cd_file = data_file(pool_name, 'cd_binclass')
    cmd = (
        '--loss-function', 'Logloss',
        '--eval-metric', 'AUC',
        '-f', data_file(pool_name, 'train'),
        '-t', test_file,
        '--text-processing', json.dumps(text_processing),
        '--column-description', cd_file,
        '--boosting-type', boosting_type,
        '-i', '20',
        '-T', '4',
        '-m', output_model_path,
        '--learn-err-log', learn_error_path,
        '--test-err-log', test_error_path,
        '--eval-file', test_eval_path,
        '--output-columns', 'RawFormulaVal',
        '--use-best-model', 'false',
    )
    execute_catboost_fit('CPU', cmd)
    # The saved model must apply identically outside of training.
    apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])
    assert filecmp.cmp(test_eval_path, calc_eval_path)
    return [
        local_canonical_file(learn_error_path),
        local_canonical_file(test_error_path),
        local_canonical_file(test_eval_path)
    ]
@pytest.mark.parametrize('separator_type', SEPARATOR_TYPES)
@pytest.mark.parametrize('feature_estimators', TEXT_FEATURE_ESTIMATORS)
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
def test_fit_multiclass_with_text_features(separator_type, feature_estimators, loss_function):
output_model_path = yatest.common.test_output_path('model.bin')
learn_error_path = yatest.common.test_output_path('learn.tsv')
test_error_path = yatest.common.test_output_path('test.tsv')
test_eval_path = yatest.common.test_output_path('test.eval')
calc_eval_path = yatest.common.test_output_path('calc.eval')
tokenizers = [{'tokenizer_id': separator_type, 'separator_type': separator_type, 'token_types': ['Word']}]
dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]
dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}
feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer], 'tokenizers_names': [separator_type]} for calcer in feature_estimators.split(',')]
text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries, 'tokenizers': tokenizers}
pool_name = 'rotten_tomatoes'
test_file = data_file(pool_name, 'test')
cd_file = data_file(pool_name, 'cd')
cmd = (
'--loss-function', loss_function,
'--eval-metric', 'Accuracy',
'-f', data_file(pool_name, 'train'),
'-t', test_file,
'--text-processing', json.dumps(text_processing),
'--column-description', cd_file,
'--boosting-type', 'Plain',
'-i', '20',
'-T', '4',
'-m', output_model_path,
'--learn-err-log', learn_error_path,
'--test-err-log', test_error_path,
'--eval-file', test_eval_path,
'--output-columns', 'RawFormulaVal',
'--use-best-model', 'false',
)
execute_catboost_fit('CPU', cmd)
apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])
assert filecmp.cmp(test_eval_path, calc_eval_path)
return [
local_canonical_file(learn_error_path),
local_canonical_file(test_error_path),
local_canonical_file(test_eval_path)
]
@pytest.mark.parametrize('grow_policy', GROW_POLICIES)
def test_shrink_model_with_text_features(grow_policy):
output_model_path = yatest.common.test_output_path('model.bin')
learn_error_path = yatest.common.test_output_path('learn.tsv')
test_error_path = yatest.common.test_output_path('test.tsv')
test_eval_path = yatest.common.test_output_path('test.eval')
calc_eval_path = yatest.common.test_output_path('calc.eval')
loss_function = 'MultiClass'
feature_estimators = 'BoW,NaiveBayes,BM25'
dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]
dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}
feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer]} for calcer in feature_estimators.split(',')]
text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries}
pool_name = 'rotten_tomatoes'
test_file = data_file(pool_name, 'test')
cd_file = data_file(pool_name, 'cd')
cmd = (
'--loss-function', loss_function,
'--eval-metric', 'Accuracy',
'-f', data_file(pool_name, 'train'),
'-t', test_file,
'--column-description', cd_file,
'--text-processing', json.dumps(text_processing),
'--grow-policy', grow_policy,
'--boosting-type', 'Plain',
'-i', '20',
'-T', '4',
'-m', output_model_path,
'--learn-err-log', learn_error_path,
'--test-err-log', test_error_path,
'--eval-file', test_eval_path,
'--output-columns', 'RawFormulaVal',
'--use-best-model', 'true',
)
execute_catboost_fit('CPU', cmd)
apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])
assert filecmp.cmp(test_eval_path, calc_eval_path)
return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
@pytest.mark.parametrize('loss_function', ['RMSE', 'RMSEWithUncertainty', 'Logloss'])
def test_virtual_ensembles(loss_function):
output_model_path = yatest.common.test_output_path('model.bin')
train_path = data_file('querywise', 'train') if loss_function in REGRESSION_LOSSES else data_file('adult', 'train_small')
test_path = data_file('querywise', 'test') if loss_function in REGRESSION_LOSSES else data_file('adult', 'test_small')
cd_path = data_file('querywise', 'train.cd') if loss_function in REGRESSION_LOSSES else data_file('adult', 'train.cd')
test_eval_path = yatest.common.test_output_path('test.eval')
cmd = [
'--use-best-model', 'false',
'-f', train_path,
'-t', test_path,
'--loss-function', loss_function,
'--column-description', cd_path,
'--posterior-sampling', 'true',
'--eval-file', test_eval_path,
'-i', '20',
'-T', '4',
'-m', output_model_path,
]
if loss_function == 'RMSEWithUncertainty':
cmd += ['--prediction-type', 'RMSEWithUncertainty']
execute_catboost_fit('CPU', cmd)
formula_predict_path = yatest.common.test_output_path('predict_test.eval')
calc_cmd = (
CATBOOST_PATH,
'calc',
'--input-path', test_path,
'--column-description', cd_path,
'-m', output_model_path,
'--output-path', formula_predict_path,
'--virtual-ensembles-count', '1',
'--prediction-type', 'VirtEnsembles',
)
yatest.common.execute(calc_cmd)
assert compare_evals(test_eval_path, formula_predict_path, skip_header=True)
@pytest.mark.parametrize('virtual_ensembles_count', ['1', '10'])
@pytest.mark.parametrize('prediction_type', ['TotalUncertainty', 'VirtEnsembles'])
@pytest.mark.parametrize('loss_function', ['RMSE', 'RMSEWithUncertainty', 'Logloss', 'MultiClass'])
def test_uncertainty_prediction(virtual_ensembles_count, prediction_type, loss_function):
output_model_path = yatest.common.test_output_path('model.bin')
pool_names = {
'RMSE' : 'querywise',
'RMSEWithUncertainty' : 'querywise',
'Logloss' : 'adult',
'MultiClass' : 'cloudness_small'
}
pool_name = pool_names[loss_function]
train_path = data_file(pool_name, 'train') if loss_function in REGRESSION_LOSSES else data_file(pool_name, 'train_small')
test_path = data_file(pool_name, 'test') if loss_function in REGRESSION_LOSSES else data_file(pool_name, 'test_small')
cd_path = data_file(pool_name, 'train.cd') if loss_function in REGRESSION_LOSSES else data_file(pool_name, 'train.cd')
cmd = (
'--use-best-model', 'false',
'-f', train_path,
'-t', test_path,
'--loss-function', loss_function,
'--column-description', cd_path,
'--posterior-sampling', 'true',
'-i', '200',
'-T', '4',
'-m', output_model_path,
)
execute_catboost_fit('CPU', cmd)
formula_predict_path = yatest.common.test_output_path('predict_test.eval')
calc_cmd = (
CATBOOST_PATH,
'calc',
'--input-path', test_path,
'--column-description', cd_path,
'-m', output_model_path,
'--output-path', formula_predict_path,
'--virtual-ensembles-count', virtual_ensembles_count,
'--prediction-type', prediction_type,
)
yatest.common.execute(calc_cmd)
model = catboost.CatBoost()
model.load_model(output_model_path)
pool = catboost.Pool(test_path, column_description=cd_path)
py_preds = model.virtual_ensembles_predict(
pool,
prediction_type=prediction_type,
virtual_ensembles_count=int(virtual_ensembles_count))
cli_preds = np.genfromtxt(
formula_predict_path,
delimiter='\t',
dtype=float,
skip_header=True)
assert(np.allclose(py_preds.reshape(-1,), cli_preds[:, 1:].reshape(-1,), rtol=1e-10))
return local_canonical_file(formula_predict_path)
@pytest.mark.parametrize('loss_function', ['RMSE', 'RMSEWithUncertainty'])
def test_uncertainty_prediction_requirements(loss_function):
output_model_path = yatest.common.test_output_path('model.bin')
train_path = data_file('querywise', 'train')
test_path = data_file('querywise', 'test')
cd_path = data_file('querywise', 'train.cd')
cmd = (
'--use-best-model', 'false',
'-f', train_path,
'-t', test_path,
'--loss-function', loss_function,
'--column-description', cd_path,
'-i', '200',
'-T', '4',
'-m', output_model_path,
)
execute_catboost_fit('CPU', cmd)
formula_predict_path = yatest.common.test_output_path('predict_test.eval')
calc_cmd = (
CATBOOST_PATH,
'calc',
'--input-path', test_path,
'--column-description', cd_path,
'-m', output_model_path,
'--output-path', formula_predict_path,
'--prediction-type', 'VirtEnsembles'
)
try:
yatest.common.execute(calc_cmd)
except:
return
DICTIONARIES_OPTIONS = [
{
"Simple": "token_level_type=Word:occurrence_lower_bound=50"
},
{
"UniGramOccur5": "occurrence_lower_bound=5:token_level_type=Letter",
"BiGramOccur2": "occurrence_lower_bound=2:gram_order=2:token_level_type=Letter",
"WordDictOccur1": "occurrence_lower_bound=1:token_level_type=Word",
"WordDictOccur2": "occurrence_lower_bound=2:token_level_type=Word",
"WordDictOccur3": "occurrence_lower_bound=3:token_level_type=Word"
},
{
"Unigram": "gram_order=1:token_level_type=Letter:occurrence_lower_bound=50",
"Bigram": "gram_order=2:token_level_type=Letter:occurrence_lower_bound=50",
"Trigram": "gram_order=3:token_level_type=Letter:occurrence_lower_bound=50"
},
{
"Letter": "token_level_type=Letter:occurrence_lower_bound=50",
"Word": "token_level_type=Word:occurrence_lower_bound=50"
}
]
@pytest.mark.parametrize('dictionaries', DICTIONARIES_OPTIONS)
@pytest.mark.parametrize('loss_function', MULTICLASS_LOSSES)
def test_text_processing_options(dictionaries, loss_function):
output_model_path = yatest.common.test_output_path('model.bin')
learn_error_path = yatest.common.test_output_path('learn.tsv')
test_error_path = yatest.common.test_output_path('test.tsv')
test_eval_path = yatest.common.test_output_path('test.eval')
calc_eval_path = yatest.common.test_output_path('calc.eval')
dictionaries = ','.join([key + ':' + value for key, value in dictionaries.items()])
feature_estimators = 'BM25,BoW,NaiveBayes'
pool_name = 'rotten_tomatoes'
test_file = data_file(pool_name, 'test')
cd_file = data_file(pool_name, 'cd')
cmd = (
'--loss-function', loss_function,
'--eval-metric', 'Accuracy',
'-f', data_file(pool_name, 'train'),
'-t', test_file,
'--column-description', cd_file,
'--dictionaries', dictionaries,
'--feature-calcers', feature_estimators,
'--boosting-type', 'Plain',
'-i', '20',
'-T', '4',
'-m', output_model_path,
'--learn-err-log', learn_error_path,
'--test-err-log', test_error_path,
'--eval-file', test_eval_path,
'--output-columns', 'RawFormulaVal',
'--use-best-model', 'false',
)
execute_catboost_fit('CPU', cmd)
apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])
assert filecmp.cmp(test_eval_path, calc_eval_path)
return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_fit_with_per_feature_text_options(boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
learn_error_path = yatest.common.test_output_path('learn.tsv')
test_error_path = yatest.common.test_output_path('test.tsv')
test_eval_path = yatest.common.test_output_path('test.eval')
calc_eval_path = yatest.common.test_output_path('calc.eval')
text_processing = {
'tokenizers': [
{'tokenizer_id': 'Space', 'delimiter': ' '},
{'tokenizer_id': 'Comma', 'delimiter': ','},
],
'dictionaries': [
{'dictionary_id': 'Word', 'token_level_type': 'Word', 'occurrence_lower_bound': '50'},
{'dictionary_id': 'Bigram', 'token_level_type': 'Word', 'gram_order': '2', 'occurrence_lower_bound': '50'},
{'dictionary_id': 'Trigram', 'token_level_type': 'Letter', 'gram_order': '3', 'occurrence_lower_bound': '50'},
],
'feature_processing': {
'0': [
{'tokenizers_names': ['Space'], 'dictionaries_names': ['Word'], 'feature_calcers': ['BoW', 'NaiveBayes']},
{'tokenizers_names': ['Space'], 'dictionaries_names': ['Bigram', 'Trigram'], 'feature_calcers': ['BoW']},
],
'1': [
{'tokenizers_names': ['Space'], 'dictionaries_names': ['Word'], 'feature_calcers': ['BoW', 'NaiveBayes', 'BM25']},
{'tokenizers_names': ['Space'], 'dictionaries_names': ['Trigram'], 'feature_calcers': ['BoW', 'BM25']},
],
'2': [
{'tokenizers_names': ['Space'], 'dictionaries_names': ['Word', 'Bigram', 'Trigram'], 'feature_calcers': ['BoW']},
],
}
}
pool_name = 'rotten_tomatoes'
test_file = data_file(pool_name, 'test')
cd_file = data_file(pool_name, 'cd_binclass')
cmd = (
'--loss-function', 'Logloss',
'--eval-metric', 'AUC',
'-f', data_file(pool_name, 'train'),
'-t', test_file,
'--text-processing', json.dumps(text_processing),
'--column-description', cd_file,
'--boosting-type', boosting_type,
'-i', '20',
'-T', '4',
'-m', output_model_path,
'--learn-err-log', learn_error_path,
'--test-err-log', test_error_path,
'--eval-file', test_eval_path,
'--output-columns', 'RawFormulaVal',
'--use-best-model', 'false',
)
execute_catboost_fit('CPU', cmd)
apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])
assert filecmp.cmp(test_eval_path, calc_eval_path)
return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
@pytest.mark.parametrize('boosting_type', BOOSTING_TYPE)
def test_embeddings_train(boosting_type):
output_model_path = yatest.common.test_output_path('model.bin')
learn_error_path = yatest.common.test_output_path('learn.tsv')
test_error_path = yatest.common.test_output_path('test.tsv')
test_eval_path = yatest.common.test_output_path('test.eval')
calc_eval_path = yatest.common.test_output_path('calc.eval')
cmd = (
'--loss-function', 'Logloss',
'--eval-metric', 'AUC',
'-f', ROTTEN_TOMATOES_WITH_EMBEDDINGS_TRAIN_FILE,
'-t', ROTTEN_TOMATOES_WITH_EMBEDDINGS_TRAIN_FILE,
'--column-description', ROTTEN_TOMATOES_ONLY_EMBEDDINGS_CD_BINCLASS_FILE,
'--boosting-type', boosting_type,
'-i', '20',
'-T', '4',
'-m', output_model_path,
'--learn-err-log', learn_error_path,
'--test-err-log', test_error_path,
'--eval-file', test_eval_path,
'--output-columns', 'RawFormulaVal',
'--use-best-model', 'false',
)
execute_catboost_fit('CPU', cmd)
apply_catboost(
output_model_path,
ROTTEN_TOMATOES_WITH_EMBEDDINGS_TRAIN_FILE,
ROTTEN_TOMATOES_ONLY_EMBEDDINGS_CD_BINCLASS_FILE,
calc_eval_path,
output_columns=['RawFormulaVal']
)
assert filecmp.cmp(test_eval_path, calc_eval_path)
return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]
def test_dump_options():
snapshot_path = yatest.common.test_output_path('snapshot.bin')
key = 'summary'
value = '{"key1":"value1", "key2":"value2"}'
cmd = (
'--loss-function', 'Logloss',
'-f', data_file('adult', 'train_small'),
'--column-description', data_file('adult', 'train.cd'),
'-i', '20',
'-T', '4',
'--snapshot-file', snapshot_path,
'--use-best-model', 'false',
'--set-metadata-from-freeargs', '--', key, value,
)
execute_catboost_fit('CPU', cmd)
options_path = yatest.common.test_output_path('options.json')
dump_options_cmd = (
get_catboost_binary_path(),
'dump-options',
'--input', snapshot_path,
'--output', options_path
)
yatest.common.execute(dump_options_cmd)
with open(options_path) as options:
options_json = json.load(options)
assert options_json['metadata'][key] == value
def prepare_pool_metainfo_with_feature_tags():
pool_metainfo = {
'tags': {
'A': {
'features': [0, 1, 2, 3, 4, 5, 6, 7]
},
'B': {
'features': [12, 13, 14, 15, 16]
},
'C': {
'features': [5, 6, 7, 8, 9, 10, 11, 12, 13]
}
}
}
pool_metainfo_path = yatest.common.test_output_path('pool_metainfo.json')
with open(pool_metainfo_path, 'w') as f:
json.dump(pool_metainfo, f)
return pool_metainfo, pool_metainfo_path
def test_feature_tags_in_ignore_features():
pool_metainfo, pool_metainfo_path = prepare_pool_metainfo_with_feature_tags()
base_cmd = (
CATBOOST_PATH,
'fit',
'--loss-function', 'Logloss',
'-f', data_file('adult', 'train_small'),
'-t', data_file('adult', 'test_small'),
'--column-description', data_file('adult', 'train.cd'),
'-i', '50',
'-T', '4',
)
for ignored_tags in (['A'], ['A', 'B'], ['B', 'C']):
output_eval_path_1 = yatest.common.test_output_path('1_test.eval')
ignored_features = sum((pool_metainfo['tags'][tag]['features'] for tag in ignored_tags), [])
cmd_1 = base_cmd + (
'--eval-file', output_eval_path_1,
'--ignore-features', ':'.join(map(str, ignored_features)),
)
output_eval_path_2 = yatest.common.test_output_path('2_test.eval')
cmd_2 = base_cmd + (
'--eval-file', output_eval_path_2,
'--ignore-features', ':'.join('#{}'.format(tag) for tag in ignored_tags),
'--pool-metainfo-path', pool_metainfo_path,
)
yatest.common.execute(cmd_1)
yatest.common.execute(cmd_2)
assert filecmp.cmp(output_eval_path_1, output_eval_path_2)
def test_feature_tags_in_features_for_select():
pool_metainfo, pool_metainfo_path = prepare_pool_metainfo_with_feature_tags()
base_cmd = (
CATBOOST_PATH,
'select-features',
'--loss-function', 'Logloss',
'-f', data_file('adult', 'train_small'),
'-t', data_file('adult', 'test_small'),
'--column-description', data_file('adult', 'train.cd'),
'-i', '50',
'-T', '4',
'--num-features-to-select', '3',
'--features-selection-algorithm', 'RecursiveByPredictionValuesChange',
'--features-selection-steps', '2',
'--train-final-model',
)
for selection_tags in (['A', 'B'], ['A', 'C'], ['B', 'C'], ['A', 'B', 'C']):
output_summary_path_1 = yatest.common.test_output_path('1_summary.json')
features_for_select = sum((pool_metainfo['tags'][tag]['features'] for tag in selection_tags), [])
cmd_1 = base_cmd + (
'--features-selection-result-path', output_summary_path_1,
'--features-for-select', ','.join(map(str, features_for_select)),
)
output_summary_path_2 = yatest.common.test_output_path('2_summary.json')
cmd_2 = base_cmd + (
'--features-selection-result-path', output_summary_path_2,
'--features-for-select', ','.join('#{}'.format(tag) for tag in selection_tags),
'--pool-metainfo-path', pool_metainfo_path,
)
yatest.common.execute(cmd_1)
yatest.common.execute(cmd_2)
assert filecmp.cmp(output_summary_path_1, output_summary_path_2)
def test_feature_tags_in_features_to_evaluate():
pool_metainfo, pool_metainfo_path = prepare_pool_metainfo_with_feature_tags()
base_cmd = (
CATBOOST_PATH,
'eval-feature',
'--loss-function', 'Logloss',
'-f', data_file('adult', 'train_small'),
'--column-description', data_file('adult', 'train.cd'),
'--feature-eval-mode', 'OneVsAll',
'-i', '30',
'-T', '4',
'--fold-count', '2',
'--fold-size-unit', 'Object',
'--fold-size', '50'
)
features_to_evaluate_1 = []
features_to_evaluate_2 = []
for tags_set in (['A'], ['A', 'B'], ['B', 'C']):
features_set = sum((pool_metainfo['tags'][tag]['features'] for tag in tags_set), [])
features_to_evaluate_1.append(','.join(map(str, features_set)))
features_to_evaluate_2.append(','.join('#{}'.format(tag) for tag in tags_set))
output_eval_path_1 = yatest.common.test_output_path('1_feature.eval')
cmd_1 = base_cmd + (
'--feature-eval-output-file', output_eval_path_1,
'--features-to-evaluate', ';'.join(map(str, features_to_evaluate_1)),
)
output_eval_path_2 = yatest.common.test_output_path('2_feature.eval')
cmd_2 = base_cmd + (
'--feature-eval-output-file', output_eval_path_2,
'--features-to-evaluate', ';'.join(features_to_evaluate_2),
'--pool-metainfo-path', pool_metainfo_path,
)
yatest.common.execute(cmd_1)
yatest.common.execute(cmd_2)
assert filecmp.cmp(output_eval_path_1, output_eval_path_2)
def test_feature_tags_in_options_file():
pool_metainfo, pool_metainfo_path = prepare_pool_metainfo_with_feature_tags()
training_options_path = yatest.common.test_output_path('training_options.json')
cmd = (
CATBOOST_PATH,
'fit',
'--loss-function', 'Logloss',
'-f', data_file('adult', 'train_small'),
'-t', data_file('adult', 'test_small'),
'--column-description', data_file('adult', 'train.cd'),
'-i', '50',
'-T', '4',
'--pool-metainfo-path', pool_metainfo_path,
'--training-options-file', training_options_path,
)
yatest.common.execute(cmd)
with open(training_options_path) as f:
options = json.load(f)
assert options['pool_metainfo_options'] == pool_metainfo
| true | true |
f73484985ce0517435ef3853beb01472fde5ca7c | 3,076 | py | Python | syntropy_sdk/models/pointtotag_response.py | SyntropyNet/syntropy-python-sdk | 27b7756b136f83886fd2a6e342fa4d4073779ff7 | [
"MIT"
] | 1 | 2020-12-17T17:30:12.000Z | 2020-12-17T17:30:12.000Z | syntropy_sdk/models/pointtotag_response.py | SyntropyNet/syntropy-python-sdk | 27b7756b136f83886fd2a6e342fa4d4073779ff7 | [
"MIT"
] | null | null | null | syntropy_sdk/models/pointtotag_response.py | SyntropyNet/syntropy-python-sdk | 27b7756b136f83886fd2a6e342fa4d4073779ff7 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Syntropy Rule service
Syntropy Rule service # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PointtotagResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {"data": "PointtotagPointToTag"}
attribute_map = {"data": "data"}
def __init__(self, data=None): # noqa: E501
"""PointtotagResponse - a model defined in Swagger""" # noqa: E501
self._data = None
self.discriminator = None
if data is not None:
self.data = data
@property
def data(self):
"""Gets the data of this PointtotagResponse. # noqa: E501
:return: The data of this PointtotagResponse. # noqa: E501
:rtype: PointtotagPointToTag
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this PointtotagResponse.
:param data: The data of this PointtotagResponse. # noqa: E501
:type: PointtotagPointToTag
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(PointtotagResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PointtotagResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.711712 | 85 | 0.548765 |
import pprint
import re
import six
class PointtotagResponse(object):
swagger_types = {"data": "PointtotagPointToTag"}
attribute_map = {"data": "data"}
def __init__(self, data=None):
self._data = None
self.discriminator = None
if data is not None:
self.data = data
@property
def data(self):
return self._data
@data.setter
def data(self, data):
self._data = data
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(PointtotagResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, PointtotagResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f734850affb2692f86a822ffb78159d7900c28c8 | 1,758 | py | Python | utils/callbacks.py | hz512/Smart-Parking-Enforcement-System | e990903de545693ad6e2536bf167c69ab672d16a | [
"MIT"
] | null | null | null | utils/callbacks.py | hz512/Smart-Parking-Enforcement-System | e990903de545693ad6e2536bf167c69ab672d16a | [
"MIT"
] | null | null | null | utils/callbacks.py | hz512/Smart-Parking-Enforcement-System | e990903de545693ad6e2536bf167c69ab672d16a | [
"MIT"
] | null | null | null | import tensorflow.keras as tfk
import tensorflow as tf
import tensorflow.keras.layers as layers
import json
import collections
from datetime import datetime
import os
class LrStepDecay(tfk.callbacks.Callback):
def __init__(self,
decay_rate,
decay_at):
super(LrStepDecay, self).__init__()
self.decay_rate = decay_rate
self.decay_at = decay_at
self.counter = 0
def on_epoch_end(self, epoch, logs=None):
if self.counter >= len(self.decay_at):
return
if epoch >= self.decay_at[self.counter]:
self.counter += 1
new_lr = float(tfk.backend.get_value(self.model.optimizer.learning_rate)) * self.decay_rate
tf.keras.backend.set_value(self.model.optimizer.lr, new_lr)
print("\nEpoch %05d: Learning rate is %3.6f." % (epoch, new_lr))
class Logger(tfk.callbacks.Callback):
def __init__(self,
name,
log_dir):
super(Logger, self).__init__()
self.name = name
self.log_dir = log_dir
self.log = collections.defaultdict(list)
self.start_time = datetime.now()
if not os.path.isdir(self.log_dir):
os.mkdir(self.log_dir)
def on_epoch_begin(self, epoch, logs=None):
self.start_time = datetime.now()
def on_epoch_end(self, epoch, logs=None):
file = open('{}/{}.json'.format(self.log_dir, self.name), 'w')
for key in logs:
self.log[key].append(logs[key])
self.log['epoch'].append(epoch)
self.log['walltime'].append((datetime.now() - self.start_time).seconds)
json.dump(self.log, file)
file.close() | 33.169811 | 104 | 0.59727 | import tensorflow.keras as tfk
import tensorflow as tf
import tensorflow.keras.layers as layers
import json
import collections
from datetime import datetime
import os
class LrStepDecay(tfk.callbacks.Callback):
def __init__(self,
decay_rate,
decay_at):
super(LrStepDecay, self).__init__()
self.decay_rate = decay_rate
self.decay_at = decay_at
self.counter = 0
def on_epoch_end(self, epoch, logs=None):
if self.counter >= len(self.decay_at):
return
if epoch >= self.decay_at[self.counter]:
self.counter += 1
new_lr = float(tfk.backend.get_value(self.model.optimizer.learning_rate)) * self.decay_rate
tf.keras.backend.set_value(self.model.optimizer.lr, new_lr)
print("\nEpoch %05d: Learning rate is %3.6f." % (epoch, new_lr))
class Logger(tfk.callbacks.Callback):
def __init__(self,
name,
log_dir):
super(Logger, self).__init__()
self.name = name
self.log_dir = log_dir
self.log = collections.defaultdict(list)
self.start_time = datetime.now()
if not os.path.isdir(self.log_dir):
os.mkdir(self.log_dir)
def on_epoch_begin(self, epoch, logs=None):
self.start_time = datetime.now()
def on_epoch_end(self, epoch, logs=None):
file = open('{}/{}.json'.format(self.log_dir, self.name), 'w')
for key in logs:
self.log[key].append(logs[key])
self.log['epoch'].append(epoch)
self.log['walltime'].append((datetime.now() - self.start_time).seconds)
json.dump(self.log, file)
file.close() | true | true |
f7348651855a7867e68bf36cfae9cc9c668012bc | 708 | py | Python | modules/post-exploitation/pykek.py | BustedSec/ptf | 48ecc6f17befd7fda7159f38e44bb88a7b1b3a45 | [
"FTL"
] | 5 | 2019-03-14T10:17:22.000Z | 2019-10-23T14:04:12.000Z | modules/post-exploitation/pykek.py | yeyintminthuhtut/ptf | 53d90661b9e1c372fb6965fb22c63033103d0c13 | [
"FTL"
] | 3 | 2017-12-06T00:45:04.000Z | 2017-12-06T00:49:05.000Z | modules/post-exploitation/pykek.py | yeyintminthuhtut/ptf | 53d90661b9e1c372fb6965fb22c63033103d0c13 | [
"FTL"
] | 14 | 2019-03-14T10:34:02.000Z | 2021-10-31T17:34:13.000Z | #!/usr/bin/env python
#####################################
# Installation module for PyKek
#####################################
# AUTHOR OF MODULE NAME
AUTHOR="David Kennedy (ReL1K)"
# DESCRIPTION OF THE MODULE
DESCRIPTION="This module will install/update PyKEK - Kerberos exploitation kit"
# INSTALL TYPE GIT, SVN, FILE DOWNLOAD
# OPTIONS = GIT, SVN, FILE
INSTALL_TYPE="GIT"
# LOCATION OF THE FILE OR GIT/SVN REPOSITORY
REPOSITORY_LOCATION="https://github.com/bidord/pykek"
# WHERE DO YOU WANT TO INSTALL IT
INSTALL_LOCATION="pykek"
# DEPENDS FOR DEBIAN INSTALLS
DEBIAN="git,rdate,krb5-user"
# DEPENDS FOR FEDORA INSTALLS
FEDORA="git,openrdate,krb5-libs"
# COMMANDS TO RUN AFTER
AFTER_COMMANDS=""
| 23.6 | 79 | 0.683616 | true | true | |
f734874472ed3a67638edda330549a063d170d6f | 9,101 | py | Python | mynt/processors.py | Mindiell/mynt3 | 340b526f143b5506e3b8457113728ff15e48523a | [
"BSD-3-Clause"
] | null | null | null | mynt/processors.py | Mindiell/mynt3 | 340b526f143b5506e3b8457113728ff15e48523a | [
"BSD-3-Clause"
] | null | null | null | mynt/processors.py | Mindiell/mynt3 | 340b526f143b5506e3b8457113728ff15e48523a | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from calendar import timegm
from datetime import datetime
from importlib import import_module
from os import path as op
import re
from pkg_resources import DistributionNotFound, iter_entry_points, load_entry_point
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name
from pygments.util import ClassNotFound
from mynt.containers import Config, Container, Item, Items, Posts
from mynt.exceptions import ConfigException, ContentException, ParserException, RendererException
from mynt.fs import File
from mynt.utils import get_logger, normpath, Timer, unescape, Url
logger = get_logger('mynt')
class Reader(object):
    """Walks the source tree and parses posts, container items, and
    miscellaneous pages into renderable :class:`Item` objects.

    Markup parsers are discovered through the ``mynt.parsers`` entry
    point and are selected per file by extension (or explicitly via the
    ``parser`` frontmatter/config key).
    """
    def __init__(self, src, dest, site, writer):
        # writer: a Writer used to pre-render item bodies (templates in
        # the body are expanded before the markup parser runs).
        self._writer = writer
        self._parsers = {}     # parser name -> Parser class (entry points)
        self._extensions = {}  # file extension -> [parser names]
        self._cache = {}       # parser name -> instantiated Parser
        self.src = src
        self.dest = dest
        self.site = site
        self._find_parsers()
    def _find_parsers(self):
        """Populate the parser registries from ``mynt.parsers`` entry points.

        Parsers whose requirements are not installed are skipped with a
        debug message rather than aborting.
        """
        for parser in iter_entry_points('mynt.parsers'):
            name = parser.name
            try:
                Parser = parser.load()
            except DistributionNotFound as e:
                logger.debug('@@ The %s parser could not be loaded due to a missing requirement: %s.', name, e)
                continue
            # A parser may accept several extensions; the first parser
            # registered for an extension wins in _get_parser.
            for extension in Parser.accepts:
                if extension in self._extensions:
                    self._extensions[extension].append(name)
                else:
                    self._extensions[extension] = [name]
            self._parsers[name] = Parser
        # Dead Python 2 code (`unicode` no longer exists); kept disabled.
        #for parsers in self._extensions.values():
        #    parsers.sort(key = unicode.lower)
    def _get_date(self, mtime, date):
        """Combine a filename date string with the file's mtime.

        ``date`` is ``YYYY-MM-DD`` or ``YYYY-MM-DD-HH-MM`` (or None, in
        which case the mtime is returned unchanged).  Missing hour and
        minute fall back to the mtime's time of day.
        """
        if not date:
            return mtime
        d = [None, None, None, 0, 0]
        for i, v in enumerate(date.split('-')):
            d[i] = v
        if not d[3]:
            # Date only: borrow hour and minute from the file's mtime.
            d[3], d[4] = mtime.strftime('%H %M').split()
        elif not d[4]:
            # NOTE(review): _parse_filename's regex only yields 3- or
            # 5-part dates, so this 4-part branch appears unreachable
            # from there; it formats the default 0 as '00'.
            d[4] = '{0:02d}'.format(d[4])
        return datetime.strptime('-'.join(d), '%Y-%m-%d-%H-%M')
    def _get_parser(self, f, parser = None):
        """Return an instantiated parser for file ``f``.

        If ``parser`` is not given, it is chosen from the file extension.
        Instances are cached per parser name; options come from the site
        config section named after the parser.  Raises ParserException
        when no suitable parser exists.
        """
        if not parser:
            try:
                parser = self._extensions[f.extension][0]
            except KeyError:
                raise ParserException('No parser found that accepts \'{0}\' files.'.format(f.extension),
                    'src: {0}'.format(f.path))
        if parser in self._cache:
            return self._cache[parser]
        options = self.site.get(parser, None)
        if parser in self._parsers:
            Parser = self._parsers[parser](options)
        else:
            # Not registered via entry points; fall back to the bundled
            # mynt.parsers package.
            try:
                Parser = import_module('mynt.parsers.{0}'.format(parser)).Parser(options)
            except ImportError:
                raise ParserException('The {0} parser could not be found.'.format(parser))
        self._cache[parser] = Parser
        return Parser
    def _parse_filename(self, f):
        """Split a filename into (slug text, datetime).

        An optional leading ``YYYY-MM-DD-`` or ``YYYY-MM-DD-HH-MM-``
        prefix supplies the date; otherwise the file mtime is used.
        """
        date, text = re.match(r'(?:(\d{4}(?:-\d{2}-\d{2}){1,2})-)?(.+)', f.name).groups()
        return (
            text,
            self._get_date(f.mtime, date)
        )
    def _parse_container(self, container):
        """Parse every file in ``container`` and finalize its indexes."""
        for f in container.path:
            container.add(self._parse_item(container.config, f))
        container.sort()
        container.tag()
        container.archive()
        return container
    def _parse_item(self, config, f, simple = False):
        """Parse a single source file into an Item.

        The file must start with a ``---`` delimited YAML frontmatter
        block containing at least ``layout``.  With ``simple`` set (used
        for miscellaneous pages) the URL is derived from the file path
        and no excerpt/tags are computed.
        """
        Timer.start()
        item = Item(f.path)
        try:
            # \A---\s+^(.+?)$\s+---\s*(.*)\Z splits frontmatter from body;
            # a missing match yields None and therefore AttributeError.
            frontmatter, bodymatter = re.search(r'\A---\s+^(.+?)$\s+---\s*(.*)\Z', f.content, re.M | re.S).groups()
            frontmatter = Config(frontmatter)
        except AttributeError:
            raise ContentException('Invalid frontmatter.',
                'src: {0}'.format(f.path),
                'frontmatter must not be empty')
        except ConfigException:
            raise ConfigException('Invalid frontmatter.',
                'src: {0}'.format(f.path),
                'fontmatter contains invalid YAML')
        if 'layout' not in frontmatter:
            raise ContentException('Invalid frontmatter.',
                'src: {0}'.format(f.path),
                'layout must be set')
        # The URL is always derived, never taken from frontmatter.
        frontmatter.pop('url', None)
        parser = self._get_parser(f, frontmatter.get('parser', config.get('parser', None)))
        text, date = self._parse_filename(f)
        # Render template constructs in the body first, then the markup.
        content = parser.parse(self._writer.from_string(bodymatter, frontmatter))
        item['content'] = content
        item['date'] = date.strftime(self.site['date_format'])
        item['timestamp'] = timegm(date.utctimetuple())
        if simple:
            item['url'] = Url.from_path(f.root.path.replace(self.src.path, ''), text)
        else:
            # Excerpt: the first <p>…</p> of the rendered content, if any.
            item['excerpt'] = re.search(r'\A.*?(?:<p>(.+?)</p>)?', content, re.M | re.S).group(1)
            item['tags'] = []
            item['url'] = Url.from_format(config['url'], text, date, frontmatter)
        item.update(frontmatter)
        logger.debug('.. (%.3fs) %s', Timer.stop(), f.path.replace(self.src.path, ''))
        return item
    def parse(self):
        """Parse the whole site.

        Returns a (posts, containers, pages) tuple where ``containers``
        maps container name to its parsed Items and ``pages`` is the flat
        list of everything that must be rendered.
        """
        posts = self._parse_container(Posts(self.src, self.site))
        containers = {}
        miscellany = Container('miscellany', self.src, None)
        pages = posts.pages
        for name, config in self.site['containers'].items():
            container = self._parse_container(Items(name, self.src, config))
            containers[name] = container
            pages.extend(container.pages)
        for f in miscellany.path:
            if f.extension in self._extensions:
                miscellany.add(self._parse_item(miscellany.config, f, True))
            elif f.extension in ('.html', '.htm', '.xml'):
                # Raw templates are rendered as-is without an Item.
                pages.append((f.path.replace(self.src.path, ''), None, None))
        pages.extend(miscellany.pages)
        return (posts, containers, pages)
class Writer(object):
    """Renders templates into files under the destination directory.

    Wraps a pluggable renderer (resolved through the ``mynt.renderers``
    entry point, falling back to the bundled ``mynt.renderers`` package)
    and optionally post-processes rendered HTML with Pygments.
    """
    def __init__(self, src, dest, site):
        self.src = src
        self.dest = dest
        self.site = site
        self._renderer = self._get_renderer()
    def _get_path(self, url):
        """Map a site URL to an absolute destination path.

        Directory-style URLs (ending in ``/``) resolve to an
        ``index.html`` inside that directory.  Raises ConfigException if
        the resolved path would escape the destination root.
        """
        parts = [self.dest.path] + url.split('/')
        if url.endswith('/'):
            parts.append('index.html')
        path = normpath(*parts)
        # Guard against path traversal (e.g. '../' segments in the URL).
        if op.commonprefix((self.dest.path, path)) != self.dest.path:
            raise ConfigException('Invalid URL.',
                'url: {0}'.format(url),
                'path traversal is not allowed')
        return path
    def _get_renderer(self):
        """Instantiate the renderer named in the site config.

        Raises RendererException when the renderer's requirements are
        missing or the renderer cannot be located at all.
        """
        renderer = self.site['renderer']
        options = self.site.get(renderer, None)
        try:
            Renderer = load_entry_point('mynt', 'mynt.renderers', renderer)
        except DistributionNotFound as e:
            # Fixed: the original called unicode(e), which is a NameError
            # on Python 3; str(e) yields the missing requirement text.
            raise RendererException('The {0} renderer requires {1}.'.format(renderer, str(e)))
        except ImportError:
            try:
                Renderer = import_module('mynt.renderers.{0}'.format(renderer)).Renderer
            except ImportError:
                raise RendererException('The {0} renderer could not be found.'.format(renderer))
        return Renderer(self.src.path, options)
    def _highlight(self, match):
        """Highlight one fenced code block (a regex match of language, code)."""
        language, code = match.groups()
        formatter = HtmlFormatter(linenos = 'table')
        # The body was HTML-escaped during rendering; undo before lexing.
        code = unescape(code)
        try:
            code = highlight(code, get_lexer_by_name(language), formatter)
        except ClassNotFound:
            # Unknown language: fall back to plain-text highlighting.
            code = highlight(code, get_lexer_by_name('text'), formatter)
        return '<div class="code"><div>{0}</div></div>'.format(code)
    def _pygmentize(self, html):
        """Replace every ``<pre><code data-lang="…">`` block with
        Pygments-highlighted markup."""
        return re.sub(r'<pre><code[^>]+data-lang="([^>]+)"[^>]*>(.+?)</code></pre>', self._highlight, html, flags = re.S)
    def from_string(self, string, data = None):
        """Render a template given as a string with optional data."""
        return self._renderer.from_string(string, data)
    def register(self, data):
        """Expose ``data`` globally to all subsequently rendered templates."""
        self._renderer.register(data)
    def render(self, template, data = None, url = None):
        """Render ``template`` and return the destination File.

        ``url`` defaults to the template name and determines the output
        path.  Rendering errors are re-raised with the template context
        attached.
        """
        url = url if url is not None else template
        path = self._get_path(url)
        try:
            Timer.start()
            content = self._renderer.render(template, data)
            if self.site['pygmentize']:
                content = self._pygmentize(content)
            logger.debug('.. (%.3fs) %s', Timer.stop(), path.replace(self.dest.path, ''))
        except RendererException as e:
            # NOTE(review): relies on RendererException exposing .message;
            # confirm against mynt.exceptions.
            # Fixed: data may be None here, which used to crash with
            # AttributeError while building the error message.
            raise RendererException(e.message,
                '{0} in container item {1}'.format(template, (data or {}).get('item', url)))
        return File(path, content)
| 33.707407 | 121 | 0.545325 |
from calendar import timegm
from datetime import datetime
from importlib import import_module
from os import path as op
import re
from pkg_resources import DistributionNotFound, iter_entry_points, load_entry_point
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name
from pygments.util import ClassNotFound
from mynt.containers import Config, Container, Item, Items, Posts
from mynt.exceptions import ConfigException, ContentException, ParserException, RendererException
from mynt.fs import File
from mynt.utils import get_logger, normpath, Timer, unescape, Url
logger = get_logger('mynt')
class Reader(object):
def __init__(self, src, dest, site, writer):
self._writer = writer
self._parsers = {}
self._extensions = {}
self._cache = {}
self.src = src
self.dest = dest
self.site = site
self._find_parsers()
def _find_parsers(self):
for parser in iter_entry_points('mynt.parsers'):
name = parser.name
try:
Parser = parser.load()
except DistributionNotFound as e:
logger.debug('@@ The %s parser could not be loaded due to a missing requirement: %s.', name, e)
continue
for extension in Parser.accepts:
if extension in self._extensions:
self._extensions[extension].append(name)
else:
self._extensions[extension] = [name]
self._parsers[name] = Parser
def _get_date(self, mtime, date):
if not date:
return mtime
d = [None, None, None, 0, 0]
for i, v in enumerate(date.split('-')):
d[i] = v
if not d[3]:
d[3], d[4] = mtime.strftime('%H %M').split()
elif not d[4]:
d[4] = '{0:02d}'.format(d[4])
return datetime.strptime('-'.join(d), '%Y-%m-%d-%H-%M')
def _get_parser(self, f, parser = None):
if not parser:
try:
parser = self._extensions[f.extension][0]
except KeyError:
raise ParserException('No parser found that accepts \'{0}\' files.'.format(f.extension),
'src: {0}'.format(f.path))
if parser in self._cache:
return self._cache[parser]
options = self.site.get(parser, None)
if parser in self._parsers:
Parser = self._parsers[parser](options)
else:
try:
Parser = import_module('mynt.parsers.{0}'.format(parser)).Parser(options)
except ImportError:
raise ParserException('The {0} parser could not be found.'.format(parser))
self._cache[parser] = Parser
return Parser
def _parse_filename(self, f):
date, text = re.match(r'(?:(\d{4}(?:-\d{2}-\d{2}){1,2})-)?(.+)', f.name).groups()
return (
text,
self._get_date(f.mtime, date)
)
def _parse_container(self, container):
for f in container.path:
container.add(self._parse_item(container.config, f))
container.sort()
container.tag()
container.archive()
return container
def _parse_item(self, config, f, simple = False):
Timer.start()
item = Item(f.path)
try:
frontmatter, bodymatter = re.search(r'\A---\s+^(.+?)$\s+---\s*(.*)\Z', f.content, re.M | re.S).groups()
frontmatter = Config(frontmatter)
except AttributeError:
raise ContentException('Invalid frontmatter.',
'src: {0}'.format(f.path),
'frontmatter must not be empty')
except ConfigException:
raise ConfigException('Invalid frontmatter.',
'src: {0}'.format(f.path),
'fontmatter contains invalid YAML')
if 'layout' not in frontmatter:
raise ContentException('Invalid frontmatter.',
'src: {0}'.format(f.path),
'layout must be set')
frontmatter.pop('url', None)
parser = self._get_parser(f, frontmatter.get('parser', config.get('parser', None)))
text, date = self._parse_filename(f)
content = parser.parse(self._writer.from_string(bodymatter, frontmatter))
item['content'] = content
item['date'] = date.strftime(self.site['date_format'])
item['timestamp'] = timegm(date.utctimetuple())
if simple:
item['url'] = Url.from_path(f.root.path.replace(self.src.path, ''), text)
else:
item['excerpt'] = re.search(r'\A.*?(?:<p>(.+?)</p>)?', content, re.M | re.S).group(1)
item['tags'] = []
item['url'] = Url.from_format(config['url'], text, date, frontmatter)
item.update(frontmatter)
logger.debug('.. (%.3fs) %s', Timer.stop(), f.path.replace(self.src.path, ''))
return item
def parse(self):
posts = self._parse_container(Posts(self.src, self.site))
containers = {}
miscellany = Container('miscellany', self.src, None)
pages = posts.pages
for name, config in self.site['containers'].items():
container = self._parse_container(Items(name, self.src, config))
containers[name] = container
pages.extend(container.pages)
for f in miscellany.path:
if f.extension in self._extensions:
miscellany.add(self._parse_item(miscellany.config, f, True))
elif f.extension in ('.html', '.htm', '.xml'):
pages.append((f.path.replace(self.src.path, ''), None, None))
pages.extend(miscellany.pages)
return (posts, containers, pages)
class Writer(object):
    """Renders templates into files under the destination directory.

    Wraps a pluggable renderer (resolved through the ``mynt.renderers``
    entry point, falling back to the bundled ``mynt.renderers`` package)
    and optionally post-processes rendered HTML with Pygments.
    """
    def __init__(self, src, dest, site):
        self.src = src
        self.dest = dest
        self.site = site
        self._renderer = self._get_renderer()
    def _get_path(self, url):
        """Map a site URL to an absolute destination path.

        Directory-style URLs (ending in ``/``) resolve to an
        ``index.html`` inside that directory.  Raises ConfigException if
        the resolved path would escape the destination root.
        """
        parts = [self.dest.path] + url.split('/')
        if url.endswith('/'):
            parts.append('index.html')
        path = normpath(*parts)
        # Guard against path traversal (e.g. '../' segments in the URL).
        if op.commonprefix((self.dest.path, path)) != self.dest.path:
            raise ConfigException('Invalid URL.',
                'url: {0}'.format(url),
                'path traversal is not allowed')
        return path
    def _get_renderer(self):
        """Instantiate the renderer named in the site config.

        Raises RendererException when the renderer's requirements are
        missing or the renderer cannot be located at all.
        """
        renderer = self.site['renderer']
        options = self.site.get(renderer, None)
        try:
            Renderer = load_entry_point('mynt', 'mynt.renderers', renderer)
        except DistributionNotFound as e:
            # Fixed: the original called unicode(e), which is a NameError
            # on Python 3; str(e) yields the missing requirement text.
            raise RendererException('The {0} renderer requires {1}.'.format(renderer, str(e)))
        except ImportError:
            try:
                Renderer = import_module('mynt.renderers.{0}'.format(renderer)).Renderer
            except ImportError:
                raise RendererException('The {0} renderer could not be found.'.format(renderer))
        return Renderer(self.src.path, options)
    def _highlight(self, match):
        """Highlight one fenced code block (a regex match of language, code)."""
        language, code = match.groups()
        formatter = HtmlFormatter(linenos = 'table')
        # The body was HTML-escaped during rendering; undo before lexing.
        code = unescape(code)
        try:
            code = highlight(code, get_lexer_by_name(language), formatter)
        except ClassNotFound:
            # Unknown language: fall back to plain-text highlighting.
            code = highlight(code, get_lexer_by_name('text'), formatter)
        return '<div class="code"><div>{0}</div></div>'.format(code)
    def _pygmentize(self, html):
        """Replace every ``<pre><code data-lang="…">`` block with
        Pygments-highlighted markup."""
        return re.sub(r'<pre><code[^>]+data-lang="([^>]+)"[^>]*>(.+?)</code></pre>', self._highlight, html, flags = re.S)
    def from_string(self, string, data = None):
        """Render a template given as a string with optional data."""
        return self._renderer.from_string(string, data)
    def register(self, data):
        """Expose ``data`` globally to all subsequently rendered templates."""
        self._renderer.register(data)
    def render(self, template, data = None, url = None):
        """Render ``template`` and return the destination File.

        ``url`` defaults to the template name and determines the output
        path.  Rendering errors are re-raised with the template context
        attached.
        """
        url = url if url is not None else template
        path = self._get_path(url)
        try:
            Timer.start()
            content = self._renderer.render(template, data)
            if self.site['pygmentize']:
                content = self._pygmentize(content)
            logger.debug('.. (%.3fs) %s', Timer.stop(), path.replace(self.dest.path, ''))
        except RendererException as e:
            # NOTE(review): relies on RendererException exposing .message;
            # confirm against mynt.exceptions.
            # Fixed: data may be None here, which used to crash with
            # AttributeError while building the error message.
            raise RendererException(e.message,
                '{0} in container item {1}'.format(template, (data or {}).get('item', url)))
        return File(path, content)
| true | true |
f73488401ecad308310e06fd79318c6a62a25cf2 | 2,967 | py | Python | code/venv/lib/python3.6/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/casts/tests/test_cast_get.py | jhkuang11/UniTrade | 5f68b853926e167936b58c8543b8f95ebd6f5211 | [
"MIT"
] | null | null | null | code/venv/lib/python3.6/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/casts/tests/test_cast_get.py | jhkuang11/UniTrade | 5f68b853926e167936b58c8543b8f95ebd6f5211 | [
"MIT"
] | 10 | 2020-06-05T19:42:26.000Z | 2022-03-11T23:38:35.000Z | code/venv/lib/python3.6/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/casts/tests/test_cast_get.py | jhkuang11/UniTrade | 5f68b853926e167936b58c8543b8f95ebd6f5211 | [
"MIT"
] | null | null | null | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
from __future__ import print_function
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as cast_utils
class CastsGetTestCase(BaseTestGenerator):
    """Verify that a cast created under a database node can be fetched."""

    scenarios = [
        # Default URL of the cast node.
        ('Check Cast Node', {'url': '/browser/cast/obj/'})
    ]

    def setUp(self):
        """Create the money -> bigint cast used by the test."""
        self.default_db = self.server["db"]
        self.database_info = parent_node_dict['database'][-1]
        self.db_name = self.database_info['db_name']
        self.server["db"] = self.db_name
        self.source_type = 'money'
        self.target_type = 'bigint'
        self.cast_id = cast_utils.create_cast(
            self.server, self.source_type, self.target_type)

    def runTest(self):
        """Fetch the cast created in setUp and expect HTTP 200."""
        self.server_id = self.database_info["server_id"]
        self.db_id = self.database_info['db_id']
        db_con = database_utils.connect_database(
            self, utils.SERVER_GROUP, self.server_id, self.db_id)
        if db_con["info"] != "Database connected.":
            raise Exception("Could not connect to database.")
        endpoint = '{0}{1}/{2}/{3}/{4}'.format(
            self.url, utils.SERVER_GROUP, self.server_id,
            self.db_id, self.cast_id)
        response = self.tester.get(endpoint, content_type='html/json')
        self.assertEquals(response.status_code, 200)

    def tearDown(self):
        """Drop the cast, then disconnect and restore the test database."""
        connection = utils.get_db_connection(self.server['db'],
                                             self.server['username'],
                                             self.server['db_password'],
                                             self.server['host'],
                                             self.server['port'],
                                             self.server['sslmode'])
        cast_utils.drop_cast(connection, self.source_type, self.target_type)
        database_utils.disconnect_database(self, self.server_id, self.db_id)
        self.server['db'] = self.default_db
f73489645d5715e5a6b3c7e65b656254dbaa3841 | 614 | py | Python | py/next-greater-element-iii.py | ckclark/leetcode | 844c6f18d06dcb397db76436e5f4b8ddcb1beddc | [
"Apache-2.0"
] | null | null | null | py/next-greater-element-iii.py | ckclark/leetcode | 844c6f18d06dcb397db76436e5f4b8ddcb1beddc | [
"Apache-2.0"
] | null | null | null | py/next-greater-element-iii.py | ckclark/leetcode | 844c6f18d06dcb397db76436e5f4b8ddcb1beddc | [
"Apache-2.0"
] | null | null | null | class Solution(object):
def nextGreaterElement(self, n):
"""
:type n: int
:rtype: int
"""
s = str(n)
for i, n in enumerate(reversed(s[:-1]), 1):
if n < s[-i]:
x, j = min((x, k) for k, x in enumerate(s[-i:]) if x > n)
ans = s[:-i - 1]
ans += x
l = list(s[-i:])
l[j] = n
ans += ''.join(sorted(l))
ans = int(ans)
if ans >= 1 << 31:
return -1
return ans
else:
return -1
| 27.909091 | 73 | 0.333876 | class Solution(object):
def nextGreaterElement(self, n):
s = str(n)
for i, n in enumerate(reversed(s[:-1]), 1):
if n < s[-i]:
x, j = min((x, k) for k, x in enumerate(s[-i:]) if x > n)
ans = s[:-i - 1]
ans += x
l = list(s[-i:])
l[j] = n
ans += ''.join(sorted(l))
ans = int(ans)
if ans >= 1 << 31:
return -1
return ans
else:
return -1
| true | true |
f7348b51ae3998a5d2ba1a4df557df2269b43936 | 4,602 | py | Python | tests/ImpactPacket/test_TCP.py | StanHardy/impacket | 769c3196124af64d7bc08d51ae4b651e61a87037 | [
"Apache-1.1"
] | 8 | 2022-03-23T13:02:37.000Z | 2022-03-27T04:30:16.000Z | tests/ImpactPacket/test_TCP.py | anno5750/impacket | ed7082cd0bc0d951f6eefb0a98c4c1360fe1a8a2 | [
"Apache-1.1"
] | null | null | null | tests/ImpactPacket/test_TCP.py | anno5750/impacket | ed7082cd0bc0d951f6eefb0a98c4c1360fe1a8a2 | [
"Apache-1.1"
] | null | null | null | #!/usr/bin/env python
# Impacket - Collection of Python classes for working with network protocols.
#
# SECUREAUTH LABS. Copyright (C) 2021 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
import unittest
from impacket.ImpactPacket import TCP
class TestTCP(unittest.TestCase):
    """Unit tests for impacket's TCP header class.

    The fixture is a raw 40-byte TCP SYN header: 20 bytes of fixed
    header followed by 20 bytes of options (MSS, SACK-permitted,
    timestamps, window scale).
    """
    def setUp(self):
        # TCP - sport: 60655, dport: 80, sec: 0, HLen: 40, Flags: 0x02, win_size: 5840
        # cksum: 0x64cb, Options: 0x20
        self.frame = b'\xec\xef\x00\x50\xa8\xbd\xea\x4c\x00\x00\x00\x00\xa0\x02\x16\xd0' \
                     b'\x64\xcb\x00\x00\x02\x04\x05\xb4\x04\x02\x08\x0a\x00\xdc\xd6\x12' \
                     b'\x00\x00\x00\x00\x01\x03\x03\x06'
        self.tcp = TCP(self.frame)
    def test_01(self):
        'Test TCP get_packet'
        # Re-serializing the parsed header must reproduce the input bytes.
        self.assertEqual(self.tcp.get_packet(), self.frame)
    def test_02(self):
        'Test TCP getters'
        # Values correspond to the fixture documented in setUp.
        self.assertEqual(self.tcp.get_th_sport(), 60655)
        self.assertEqual(self.tcp.get_th_dport(), 80)
        self.assertEqual(self.tcp.get_th_off()*4, 40) # *4 because th_off is in 32-bit words
        self.assertEqual(self.tcp.get_th_flags(), 0x02)
        self.assertEqual(self.tcp.get_th_win(), 5840)
        self.assertEqual(self.tcp.get_th_sum(), 0x64cb)
        self.assertEqual(self.tcp.get_SYN(), 1)
        self.assertEqual(self.tcp.get_RST(), 0)
    def test_03(self):
        'Test TCP port setters'
        self.tcp.set_th_sport(54321)
        self.assertEqual(self.tcp.get_th_sport(), 54321)
        self.tcp.set_th_dport(81)
        self.assertEqual(self.tcp.get_th_dport(), 81)
    def test_04(self):
        'Test TCP offset setters'
        # test that set_th_off doesn't affect to flags
        # (offset and flags share a 16-bit field in the header)
        flags = int('10101010',2)
        self.tcp.set_th_flags( flags )
        self.assertEqual(self.tcp.get_th_flags(), flags)
        self.tcp.set_th_off(4)
        self.assertEqual(self.tcp.get_th_off(), 4)
        self.assertEqual(self.tcp.get_th_flags(), flags)
    def test_05(self):
        'Test TCP win setters'
        self.tcp.set_th_win(12345)
        self.assertEqual(self.tcp.get_th_win(), 12345)
    def test_06(self):
        'Test TCP checksum setters'
        self.tcp.set_th_sum(0xFEFE)
        self.assertEqual(self.tcp.get_th_sum(), 0xFEFE)
    def test_07(self):
        'Test TCP flags setters'
        # Flag bits: FIN=0x01, SYN=0x02, ACK=0x10.
        self.tcp.set_th_flags(0x03) # SYN+FIN
        self.assertEqual(self.tcp.get_th_flags(), 0x03)
        self.tcp.set_ACK()
        self.assertEqual(self.tcp.get_ACK(), 1)
        self.assertEqual(self.tcp.get_SYN(), 1)
        self.assertEqual(self.tcp.get_FIN(), 1)
        self.assertEqual(self.tcp.get_RST(), 0)
        self.assertEqual(self.tcp.get_th_flags(), 19)
    def test_08(self):
        'Test TCP reset_flags'
        # reset_flags clears the given bits and leaves the others set.
        # Test 1
        self.tcp.set_th_flags(19) # ACK+SYN+FIN
        self.assertEqual(self.tcp.get_th_flags(), 19)
        self.assertEqual(self.tcp.get_ACK(), 1)
        self.assertEqual(self.tcp.get_SYN(), 1)
        self.assertEqual(self.tcp.get_FIN(), 1)
        self.assertEqual(self.tcp.get_RST(), 0)
        self.tcp.reset_flags(0x02)
        self.assertEqual(self.tcp.get_th_flags(), 17)
        # Test 2
        flags = int('10011', 2)  # 19 = ACK+SYN+FIN
        self.tcp.set_th_flags(flags)
        self.assertEqual(self.tcp.get_th_flags(), 19)
        # 010011
        # 000010
        # ------
        # 010001 = 17
        self.tcp.reset_flags(int('000010',2))
        self.assertEqual(self.tcp.get_th_flags(), 17)
        # Test 3
        flags = int('10011', 2)  # 19 = ACK+SYN+FIN
        self.tcp.set_th_flags(flags)
        self.assertEqual(self.tcp.get_th_flags(), 19)
        # 010011
        # 010001
        # ------
        # 000010 = 2
        self.tcp.reset_flags(int('010001',2))
        self.assertEqual(self.tcp.get_th_flags(), 2)
    def test_09(self):
        'Test TCP set_flags'
        # 0xAA sets every other flag bit: SYN, PSH, URG, CWR.
        flags = int('10101010',2) # 0xAA
        self.tcp.set_flags(flags)
        self.assertEqual(self.tcp.get_FIN(), 0)
        self.assertEqual(self.tcp.get_SYN(), 1)
        self.assertEqual(self.tcp.get_RST(), 0)
        self.assertEqual(self.tcp.get_PSH(), 1)
        self.assertEqual(self.tcp.get_ACK(), 0)
        self.assertEqual(self.tcp.get_URG(), 1)
        self.assertEqual(self.tcp.get_ECE(), 0)
        self.assertEqual(self.tcp.get_CWR(), 1)
        self.assertEqual(self.tcp.get_th_flags(), 0xAA )
if __name__ == '__main__':
    unittest.main(verbosity=1)
| 32.871429 | 90 | 0.617992 |
import unittest
from impacket.ImpactPacket import TCP
class TestTCP(unittest.TestCase):
def setUp(self):
self.frame = b'\xec\xef\x00\x50\xa8\xbd\xea\x4c\x00\x00\x00\x00\xa0\x02\x16\xd0' \
b'\x64\xcb\x00\x00\x02\x04\x05\xb4\x04\x02\x08\x0a\x00\xdc\xd6\x12' \
b'\x00\x00\x00\x00\x01\x03\x03\x06'
self.tcp = TCP(self.frame)
def test_01(self):
self.assertEqual(self.tcp.get_packet(), self.frame)
def test_02(self):
self.assertEqual(self.tcp.get_th_sport(), 60655)
self.assertEqual(self.tcp.get_th_dport(), 80)
self.assertEqual(self.tcp.get_th_off()*4, 40)
self.assertEqual(self.tcp.get_th_flags(), 0x02)
self.assertEqual(self.tcp.get_th_win(), 5840)
self.assertEqual(self.tcp.get_th_sum(), 0x64cb)
self.assertEqual(self.tcp.get_SYN(), 1)
self.assertEqual(self.tcp.get_RST(), 0)
def test_03(self):
self.tcp.set_th_sport(54321)
self.assertEqual(self.tcp.get_th_sport(), 54321)
self.tcp.set_th_dport(81)
self.assertEqual(self.tcp.get_th_dport(), 81)
def test_04(self):
flags = int('10101010',2)
self.tcp.set_th_flags( flags )
self.assertEqual(self.tcp.get_th_flags(), flags)
self.tcp.set_th_off(4)
self.assertEqual(self.tcp.get_th_off(), 4)
self.assertEqual(self.tcp.get_th_flags(), flags)
def test_05(self):
self.tcp.set_th_win(12345)
self.assertEqual(self.tcp.get_th_win(), 12345)
def test_06(self):
self.tcp.set_th_sum(0xFEFE)
self.assertEqual(self.tcp.get_th_sum(), 0xFEFE)
def test_07(self):
self.tcp.set_th_flags(0x03) # SYN+FIN
self.assertEqual(self.tcp.get_th_flags(), 0x03)
self.tcp.set_ACK()
self.assertEqual(self.tcp.get_ACK(), 1)
self.assertEqual(self.tcp.get_SYN(), 1)
self.assertEqual(self.tcp.get_FIN(), 1)
self.assertEqual(self.tcp.get_RST(), 0)
self.assertEqual(self.tcp.get_th_flags(), 19)
def test_08(self):
# Test 1
self.tcp.set_th_flags(19) # ACK+SYN+FIN
self.assertEqual(self.tcp.get_th_flags(), 19)
self.assertEqual(self.tcp.get_ACK(), 1)
self.assertEqual(self.tcp.get_SYN(), 1)
self.assertEqual(self.tcp.get_FIN(), 1)
self.assertEqual(self.tcp.get_RST(), 0)
self.tcp.reset_flags(0x02)
self.assertEqual(self.tcp.get_th_flags(), 17)
# Test 2
flags = int('10011', 2) # 19 = ACK+SYN+FIN
self.tcp.set_th_flags(flags)
self.assertEqual(self.tcp.get_th_flags(), 19)
# 010011
# 000010
# ------
# 010001 = 17
self.tcp.reset_flags(int('000010',2))
self.assertEqual(self.tcp.get_th_flags(), 17)
# Test 3
flags = int('10011', 2) # 19 = ACK+SYN+FIN
self.tcp.set_th_flags(flags)
self.assertEqual(self.tcp.get_th_flags(), 19)
# 010011
# 010001
# ------
# 000010 = 2
self.tcp.reset_flags(int('010001',2))
self.assertEqual(self.tcp.get_th_flags(), 2)
def test_09(self):
flags = int('10101010',2) # 0xAA
self.tcp.set_flags(flags)
self.assertEqual(self.tcp.get_FIN(), 0)
self.assertEqual(self.tcp.get_SYN(), 1)
self.assertEqual(self.tcp.get_RST(), 0)
self.assertEqual(self.tcp.get_PSH(), 1)
self.assertEqual(self.tcp.get_ACK(), 0)
self.assertEqual(self.tcp.get_URG(), 1)
self.assertEqual(self.tcp.get_ECE(), 0)
self.assertEqual(self.tcp.get_CWR(), 1)
self.assertEqual(self.tcp.get_th_flags(), 0xAA )
if __name__ == '__main__':
unittest.main(verbosity=1)
| true | true |
f7348b73c872e7766dc0330b5f59a67197219de5 | 287 | py | Python | runtool/tests/perform_doctest.py | arangatang/gluon-ts-tools | 26509b853ddf1039993779f6049eafd4ec434ff7 | [
"Apache-2.0"
] | null | null | null | runtool/tests/perform_doctest.py | arangatang/gluon-ts-tools | 26509b853ddf1039993779f6049eafd4ec434ff7 | [
"Apache-2.0"
] | null | null | null | runtool/tests/perform_doctest.py | arangatang/gluon-ts-tools | 26509b853ddf1039993779f6049eafd4ec434ff7 | [
"Apache-2.0"
] | null | null | null | from doctest import testmod
from runtool import (
datatypes,
recurse_config,
runtool,
transformations,
transformer,
utils,
)
# Run the doctests bundled with each public runtool module.
_MODULES = (
    datatypes,
    recurse_config,
    runtool,
    transformations,
    transformer,
    utils,
)
for _module in _MODULES:
    testmod(_module)
| 13.666667 | 27 | 0.651568 | from doctest import testmod
from runtool import (
datatypes,
recurse_config,
runtool,
transformations,
transformer,
utils,
)
for module in (
datatypes,
recurse_config,
runtool,
transformations,
transformer,
utils,
):
testmod(module)
| true | true |
f7348b7b44390ac77bda8f14895ef3bb4225a196 | 2,553 | py | Python | monai/networks/nets/__init__.py | IntroAI-termproject/MONAI | b9e0cca17241318e570021b258b181a15d567603 | [
"Apache-2.0"
] | null | null | null | monai/networks/nets/__init__.py | IntroAI-termproject/MONAI | b9e0cca17241318e570021b258b181a15d567603 | [
"Apache-2.0"
] | null | null | null | monai/networks/nets/__init__.py | IntroAI-termproject/MONAI | b9e0cca17241318e570021b258b181a15d567603 | [
"Apache-2.0"
] | 1 | 2021-11-14T06:54:44.000Z | 2021-11-14T06:54:44.000Z | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .ahnet import AHnet, Ahnet, AHNet
from .autoencoder import AutoEncoder
from .basic_unet import BasicUNet, BasicUnet, Basicunet, basicunet
from .classifier import Classifier, Critic, Discriminator
from .densenet import (
DenseNet,
Densenet,
DenseNet121,
Densenet121,
DenseNet169,
Densenet169,
DenseNet201,
Densenet201,
DenseNet264,
Densenet264,
densenet121,
densenet169,
densenet201,
densenet264,
)
from .dynunet import DynUNet, DynUnet, Dynunet
from .efficientnet import (
BlockArgs,
EfficientNet,
EfficientNetBN,
EfficientNetBNFeatures,
drop_connect,
get_efficientnet_image_size,
)
from .fullyconnectednet import FullyConnectedNet, VarFullyConnectedNet
from .generator import Generator
from .highresnet import HighResBlock, HighResNet
from .milmodel import MILModel
from .netadapter import NetAdapter
from .regressor import Regressor
from .regunet import GlobalNet, LocalNet, RegUNet
from .resnet import ResNet, resnet10, resnet18, resnet34, resnet50, resnet101, resnet152, resnet200
from .segresnet import SegResNet, SegResNetVAE
from .senet import (
SENet,
SEnet,
Senet,
SENet154,
SEnet154,
Senet154,
SEResNet50,
SEresnet50,
Seresnet50,
SEResNet101,
SEresnet101,
Seresnet101,
SEResNet152,
SEresnet152,
Seresnet152,
SEResNext50,
SEResNeXt50,
SEresnext50,
Seresnext50,
SEResNext101,
SEResNeXt101,
SEresnext101,
Seresnext101,
senet154,
seresnet50,
seresnet101,
seresnet152,
seresnext50,
seresnext101,
)
from .torchvision_fc import TorchVisionFCModel, TorchVisionFullyConvModel
from .transchex import BertAttention, BertMixedLayer, BertOutput, BertPreTrainedModel, MultiModal, Pooler, Transchex
from .unet import UNet, Unet
from .unetr import UNETR
from .varautoencoder import VarAutoEncoder
from .vit import ViT
from .vitautoenc import ViTAutoEnc
from .vnet import VNet
| 28.685393 | 116 | 0.757148 |
from .ahnet import AHnet, Ahnet, AHNet
from .autoencoder import AutoEncoder
from .basic_unet import BasicUNet, BasicUnet, Basicunet, basicunet
from .classifier import Classifier, Critic, Discriminator
from .densenet import (
DenseNet,
Densenet,
DenseNet121,
Densenet121,
DenseNet169,
Densenet169,
DenseNet201,
Densenet201,
DenseNet264,
Densenet264,
densenet121,
densenet169,
densenet201,
densenet264,
)
from .dynunet import DynUNet, DynUnet, Dynunet
from .efficientnet import (
BlockArgs,
EfficientNet,
EfficientNetBN,
EfficientNetBNFeatures,
drop_connect,
get_efficientnet_image_size,
)
from .fullyconnectednet import FullyConnectedNet, VarFullyConnectedNet
from .generator import Generator
from .highresnet import HighResBlock, HighResNet
from .milmodel import MILModel
from .netadapter import NetAdapter
from .regressor import Regressor
from .regunet import GlobalNet, LocalNet, RegUNet
from .resnet import ResNet, resnet10, resnet18, resnet34, resnet50, resnet101, resnet152, resnet200
from .segresnet import SegResNet, SegResNetVAE
from .senet import (
SENet,
SEnet,
Senet,
SENet154,
SEnet154,
Senet154,
SEResNet50,
SEresnet50,
Seresnet50,
SEResNet101,
SEresnet101,
Seresnet101,
SEResNet152,
SEresnet152,
Seresnet152,
SEResNext50,
SEResNeXt50,
SEresnext50,
Seresnext50,
SEResNext101,
SEResNeXt101,
SEresnext101,
Seresnext101,
senet154,
seresnet50,
seresnet101,
seresnet152,
seresnext50,
seresnext101,
)
from .torchvision_fc import TorchVisionFCModel, TorchVisionFullyConvModel
from .transchex import BertAttention, BertMixedLayer, BertOutput, BertPreTrainedModel, MultiModal, Pooler, Transchex
from .unet import UNet, Unet
from .unetr import UNETR
from .varautoencoder import VarAutoEncoder
from .vit import ViT
from .vitautoenc import ViTAutoEnc
from .vnet import VNet
| true | true |
f7348be2dc59bceb8e5112a0f8fb8a4054ef9388 | 3,914 | py | Python | tests/system/action/motion_submitter/test_create.py | r-peschke/openslides-backend | 83d0dab68bb914f06a0f50cffe23fc10ca45376f | [
"MIT"
] | null | null | null | tests/system/action/motion_submitter/test_create.py | r-peschke/openslides-backend | 83d0dab68bb914f06a0f50cffe23fc10ca45376f | [
"MIT"
] | null | null | null | tests/system/action/motion_submitter/test_create.py | r-peschke/openslides-backend | 83d0dab68bb914f06a0f50cffe23fc10ca45376f | [
"MIT"
] | null | null | null | from tests.system.action.base import BaseActionTestCase
class MotionSubmitterCreateActionTest(BaseActionTestCase):
def test_create(self) -> None:
self.create_model("meeting/111", {"name": "name_m123etrd"})
self.create_model("motion/357", {"title": "title_YIDYXmKj", "meeting_id": 111})
self.create_model(
"user/78", {"username": "username_loetzbfg", "meeting_id": 111}
)
response = self.client.post(
"/",
json=[
{
"action": "motion_submitter.create",
"data": [{"motion_id": 357, "user_id": 78}],
}
],
)
self.assert_status_code(response, 200)
model = self.get_model("motion_submitter/1")
assert model.get("motion_id") == 357
assert model.get("user_id") == 78
assert model.get("weight") == 10000
def test_create_not_unique(self) -> None:
self.create_model("meeting/111", {"name": "name_m123etrd"})
self.create_model("motion/357", {"title": "title_YIDYXmKj", "meeting_id": 111})
self.create_model(
"user/78", {"username": "username_loetzbfg", "meeting_id": 111}
)
self.create_model(
"motion_submitter/12", {"motion_id": 357, "user_id": 78, "meeting_id": 111}
)
response = self.client.post(
"/",
json=[
{
"action": "motion_submitter.create",
"data": [{"motion_id": 357, "user_id": 78}],
}
],
)
self.assert_status_code(response, 400)
assert "(user_id, motion_id) must be unique." in str(response.data)
def test_create_empty_data(self) -> None:
response = self.client.post(
"/",
json=[{"action": "motion_submitter.create", "data": [{}]}],
)
self.assert_status_code(response, 400)
self.assertIn(
"data must contain [\\'motion_id\\', \\'user_id\\'] properties",
str(response.data),
)
def test_create_wrong_field(self) -> None:
self.create_model("meeting/111", {"name": "name_m123etrd"})
self.create_model("motion/357", {"title": "title_YIDYXmKj", "meeting_id": 111})
self.create_model(
"user/78", {"username": "username_lskeuebe", "meeting_id": 111}
)
response = self.client.post(
"/",
json=[
{
"action": "motion_submitter.create",
"data": [
{
"motion_id": 357,
"user_id": 78,
"wrong_field": "text_AefohteiF8",
}
],
}
],
)
self.assert_status_code(response, 400)
self.assertIn(
"data must not contain {\\'wrong_field\\'} properties",
str(response.data),
)
    def test_create_not_matching_meeting_ids(self) -> None:
        """Motion and user belonging to different meetings must be rejected."""
        self.create_model("meeting/111", {"name": "name_m123etrd"})
        self.create_model("meeting/112", {"name": "name_ewadetrd"})
        # The motion lives in meeting 111 ...
        self.create_model("motion/357", {"title": "title_YIDYXmKj", "meeting_id": 111})
        # ... while the user belongs to meeting 112.
        self.create_model(
            "user/78", {"username": "username_loetzbfg", "meeting_id": 112}
        )
        response = self.client.post(
            "/",
            json=[
                {
                    "action": "motion_submitter.create",
                    "data": [{"motion_id": 357, "user_id": 78}],
                }
            ],
        )
        self.assert_status_code(response, 400)
        self.assertIn(
            "Cannot create motion_submitter, meeting id of motion and (temporary) user don\\'t match.",
            str(response.data),
        )
| 36.924528 | 103 | 0.505621 | from tests.system.action.base import BaseActionTestCase
class MotionSubmitterCreateActionTest(BaseActionTestCase):
def test_create(self) -> None:
self.create_model("meeting/111", {"name": "name_m123etrd"})
self.create_model("motion/357", {"title": "title_YIDYXmKj", "meeting_id": 111})
self.create_model(
"user/78", {"username": "username_loetzbfg", "meeting_id": 111}
)
response = self.client.post(
"/",
json=[
{
"action": "motion_submitter.create",
"data": [{"motion_id": 357, "user_id": 78}],
}
],
)
self.assert_status_code(response, 200)
model = self.get_model("motion_submitter/1")
assert model.get("motion_id") == 357
assert model.get("user_id") == 78
assert model.get("weight") == 10000
def test_create_not_unique(self) -> None:
self.create_model("meeting/111", {"name": "name_m123etrd"})
self.create_model("motion/357", {"title": "title_YIDYXmKj", "meeting_id": 111})
self.create_model(
"user/78", {"username": "username_loetzbfg", "meeting_id": 111}
)
self.create_model(
"motion_submitter/12", {"motion_id": 357, "user_id": 78, "meeting_id": 111}
)
response = self.client.post(
"/",
json=[
{
"action": "motion_submitter.create",
"data": [{"motion_id": 357, "user_id": 78}],
}
],
)
self.assert_status_code(response, 400)
assert "(user_id, motion_id) must be unique." in str(response.data)
def test_create_empty_data(self) -> None:
response = self.client.post(
"/",
json=[{"action": "motion_submitter.create", "data": [{}]}],
)
self.assert_status_code(response, 400)
self.assertIn(
"data must contain [\\'motion_id\\', \\'user_id\\'] properties",
str(response.data),
)
def test_create_wrong_field(self) -> None:
self.create_model("meeting/111", {"name": "name_m123etrd"})
self.create_model("motion/357", {"title": "title_YIDYXmKj", "meeting_id": 111})
self.create_model(
"user/78", {"username": "username_lskeuebe", "meeting_id": 111}
)
response = self.client.post(
"/",
json=[
{
"action": "motion_submitter.create",
"data": [
{
"motion_id": 357,
"user_id": 78,
"wrong_field": "text_AefohteiF8",
}
],
}
],
)
self.assert_status_code(response, 400)
self.assertIn(
"data must not contain {\\'wrong_field\\'} properties",
str(response.data),
)
def test_create_not_matching_meeting_ids(self) -> None:
self.create_model("meeting/111", {"name": "name_m123etrd"})
self.create_model("meeting/112", {"name": "name_ewadetrd"})
self.create_model("motion/357", {"title": "title_YIDYXmKj", "meeting_id": 111})
self.create_model(
"user/78", {"username": "username_loetzbfg", "meeting_id": 112}
)
response = self.client.post(
"/",
json=[
{
"action": "motion_submitter.create",
"data": [{"motion_id": 357, "user_id": 78}],
}
],
)
self.assert_status_code(response, 400)
self.assertIn(
"Cannot create motion_submitter, meeting id of motion and (temporary) user don\\'t match.",
str(response.data),
)
| true | true |
f7348e3c875c7342813afe5b8cb4f7712c3f88d3 | 4,700 | py | Python | ci-wrappers/pythonpreview/wrapper/swagger_server/controllers/device_controller.py | gregga/iot-sdks-e2e-fx | 02d0ce0823f0190ef26ccfbf3fd3ba7ccde8d6cb | [
"MIT"
] | null | null | null | ci-wrappers/pythonpreview/wrapper/swagger_server/controllers/device_controller.py | gregga/iot-sdks-e2e-fx | 02d0ce0823f0190ef26ccfbf3fd3ba7ccde8d6cb | [
"MIT"
] | null | null | null | ci-wrappers/pythonpreview/wrapper/swagger_server/controllers/device_controller.py | gregga/iot-sdks-e2e-fx | 02d0ce0823f0190ef26ccfbf3fd3ba7ccde8d6cb | [
"MIT"
] | null | null | null | import connexion
import six
from swagger_server.models.certificate import Certificate # noqa: E501
from swagger_server.models.connect_response import ConnectResponse # noqa: E501
from swagger_server.models.roundtrip_method_call_body import RoundtripMethodCallBody # noqa: E501
from swagger_server import util
from device_glue import DeviceGlue
device_glue = DeviceGlue()
def device_connect(transportType, connectionString, caCertificate=None): # noqa: E501
"""Connect to the azure IoT Hub as a device
# noqa: E501
:param transportType: Transport to use
:type transportType: str
:param connectionString: connection string
:type connectionString: str
:param caCertificate:
:type caCertificate: dict | bytes
:rtype: ConnectResponse
"""
if connexion.request.is_json:
caCertificate = Certificate.from_dict(connexion.request.get_json()) # noqa: E501
return device_glue.connect(transportType, connectionString, caCertificate)
def device_disconnect(connectionId):
    """Disconnect the device.

    Closes all connections and cleans up all resources held by the
    active connection to the Azure IoT Hub service.

    Args:
        connectionId (str): Id for the connection.
    """
    device_glue.disconnect(connectionId)
def device_enable_c2d_messages(connectionId):
    """Enable cloud-to-device (c2d) messages for the connection.

    Args:
        connectionId (str): Id for the connection.
    """
    device_glue.enable_c2d(connectionId)
def device_enable_methods(connectionId):
    """Enable method handling for the connection.

    Args:
        connectionId (str): Id for the connection.
    """
    device_glue.enable_methods(connectionId)
def device_enable_twin(connectionId):
    """Enable device-twin support for the connection.

    Args:
        connectionId (str): Id for the connection.
    """
    device_glue.enable_twin(connectionId)
def device_get_twin(connectionId):
    """Return the device twin.

    Args:
        connectionId (str): Id for the connection.

    Returns:
        object: The twin document reported by the glue layer.
    """
    return device_glue.get_twin(connectionId)
def device_patch_twin(connectionId, props):
    """Update the device twin.

    Args:
        connectionId (str): Id for the connection.
        props: Twin properties patch to send.
    """
    device_glue.send_twin_patch(connectionId, props)
def device_roundtrip_method_call(connectionId, methodName, requestAndResponse):
    """Wait for a method call, verify the request, and return the response.

    Workaround for SDKs whose method-call handling is synchronous only:
    the incoming method is answered with the payload supplied here, and
    the received method parameters are handed back to the caller for
    verification.

    Args:
        connectionId (str): Id for the connection.
        methodName (str): Name of the method to handle.
        requestAndResponse (dict | bytes): Expected request plus the
            canned response to send back.
    """
    if connexion.request.is_json:
        # Deserialize the expected-request/response pair from the body.
        requestAndResponse = RoundtripMethodCallBody.from_dict(
            connexion.request.get_json()
        )
    return device_glue.roundtrip_method_call(
        connectionId, methodName, requestAndResponse
    )
def device_send_event(connectionId, eventBody):
    """Send an event.

    Args:
        connectionId (str): Id for the connection.
        eventBody (str): Event payload to send.
    """
    device_glue.send_event(connectionId, eventBody)
def device_wait_for_c2d_message(connectionId):
    """Wait for the next cloud-to-device message.

    Args:
        connectionId (str): Id for the connection.

    Returns:
        str: The received message.
    """
    return device_glue.wait_for_c2d_message(connectionId)
def device_wait_for_desired_properties_patch(connectionId):
    """Wait for the next desired-properties patch.

    Args:
        connectionId (str): Id for the connection.

    Returns:
        object: The received patch.
    """
    return device_glue.wait_for_desired_property_patch(connectionId)
| 27.325581 | 471 | 0.720213 | import connexion
import six
from swagger_server.models.certificate import Certificate
from swagger_server.models.connect_response import ConnectResponse
from swagger_server.models.roundtrip_method_call_body import RoundtripMethodCallBody
from swagger_server import util
from device_glue import DeviceGlue
device_glue = DeviceGlue()
def device_connect(transportType, connectionString, caCertificate=None):
if connexion.request.is_json:
caCertificate = Certificate.from_dict(connexion.request.get_json())
return device_glue.connect(transportType, connectionString, caCertificate)
def device_disconnect(connectionId):
device_glue.disconnect(connectionId)
def device_enable_c2d_messages(connectionId):
device_glue.enable_c2d(connectionId)
def device_enable_methods(connectionId):
device_glue.enable_methods(connectionId)
def device_enable_twin(connectionId):
device_glue.enable_twin(connectionId)
def device_get_twin(connectionId):
return device_glue.get_twin(connectionId)
def device_patch_twin(connectionId, props):
device_glue.send_twin_patch(connectionId, props)
def device_roundtrip_method_call(connectionId, methodName, requestAndResponse):
if connexion.request.is_json:
requestAndResponse = RoundtripMethodCallBody.from_dict(connexion.request.get_json())
return device_glue.roundtrip_method_call(
connectionId, methodName, requestAndResponse
)
def device_send_event(connectionId, eventBody):
device_glue.send_event(connectionId, eventBody)
def device_wait_for_c2d_message(connectionId):
return device_glue.wait_for_c2d_message(connectionId)
def device_wait_for_desired_properties_patch(connectionId):
return device_glue.wait_for_desired_property_patch(connectionId)
| true | true |
f7348e64372058a41b16653a7078251c30d98d27 | 113 | py | Python | mapapp/admin.py | jmartinm/nostro-web | 22c8c613b7b5930e3a8e0e9f42d5a68917bc1c96 | [
"Unlicense",
"MIT"
] | null | null | null | mapapp/admin.py | jmartinm/nostro-web | 22c8c613b7b5930e3a8e0e9f42d5a68917bc1c96 | [
"Unlicense",
"MIT"
] | null | null | null | mapapp/admin.py | jmartinm/nostro-web | 22c8c613b7b5930e3a8e0e9f42d5a68917bc1c96 | [
"Unlicense",
"MIT"
] | null | null | null | from django.contrib import admin
from mapapp.models import PointOfInterest
admin.site.register(PointOfInterest)
| 22.6 | 41 | 0.858407 | from django.contrib import admin
from mapapp.models import PointOfInterest
admin.site.register(PointOfInterest)
| true | true |
f7348e730198012787fccce820babc5dd0ebbb81 | 15,658 | py | Python | novadocker/tests/virt/docker/test_docker_client.py | praveen1664/Nova-Docker-Integration | 87e578102b0a8487571be1fc5c935b2648820651 | [
"Apache-1.1"
] | 4 | 2019-05-25T10:11:10.000Z | 2019-08-30T17:32:26.000Z | novadocker/tests/virt/docker/test_docker_client.py | praveen1664/Nova-Docker-Integration | 87e578102b0a8487571be1fc5c935b2648820651 | [
"Apache-1.1"
] | null | null | null | novadocker/tests/virt/docker/test_docker_client.py | praveen1664/Nova-Docker-Integration | 87e578102b0a8487571be1fc5c935b2648820651 | [
"Apache-1.1"
] | null | null | null | # Copyright (c) 2013 dotCloud, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mox
import urllib
import uuid
from nova.openstack.common import jsonutils
from nova import test
import novadocker.virt.docker.client as docker_client
class FakeResponse(object):
    """Minimal stand-in for an httplib response used by these tests.

    Only the pieces the Docker client touches are provided: the
    ``status`` attribute, ``read()`` and ``getheader()``.
    """

    def __init__(self, status, data='', headers=None):
        self.status = status
        self._data = data
        self._headers = {} if headers is None else headers

    def read(self, _size=None):
        """Return the canned body; *_size* is accepted and ignored."""
        return self._data

    def getheader(self, key):
        """Return the canned header value for *key*, or None if absent."""
        return self._headers.get(key)
class DockerHTTPClientTestCase(test.NoDBTestCase):
VERSION = "v1.13"
    def make_request(self, *args, **kwargs):
        """Build a mock connection expecting one versioned API request.

        ``args[0]`` is the HTTP method, ``args[1]`` the path, and any
        further args are (key, value) query-string tuples.  The
        Content-Type header is always forced to application/json,
        overwriting any ``headers`` keyword the caller supplied.  The
        returned mox mock records the expected ``request(...)`` call;
        callers still program ``getresponse()`` and call
        ``self.mox.ReplayAll()`` themselves.
        """
        mock_conn = self.mox.CreateMockAnything()
        headers = {'Content-Type': 'application/json'}
        kwargs['headers'] = headers
        # args[1] == path, args[2:] == query represented as tuples
        # NOTE(review): urllib.quote/urlencode are the Python 2 locations
        # of these helpers (urllib.parse in Python 3); this module targets
        # Python 2.
        url = "/%s/%s" % (self.VERSION, urllib.quote(args[1]))
        if len(args) > 2:
            url += "?" + urllib.urlencode(args[2:])
        encoded_args = args[0], url
        mock_conn.request(*encoded_args, **kwargs)
        return mock_conn
def test_list_containers(self):
mock_conn = self.make_request('GET',
'containers/ps', ('all', '1'))
response = FakeResponse(200, data='[]',
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
containers = client.list_containers()
self.assertEqual([], containers)
self.mox.VerifyAll()
def test_create_container(self):
mock_conn = self.mox.CreateMockAnything()
expected_uuid = uuid.uuid4()
expected_body = jsonutils.dumps({
'Hostname': '',
'User': '',
'Memory': 0,
'MemorySwap': 0,
'AttachStdin': False,
'AttachStdout': False,
'AttachStderr': False,
'PortSpecs': [],
'Tty': True,
'OpenStdin': True,
'StdinOnce': False,
'Env': None,
'Cmd': [],
'Dns': None,
'Image': None,
'Volumes': {},
'VolumesFrom': '',
})
mock_conn = self.make_request('POST',
'containers/create',
('name', expected_uuid),
body=expected_body)
response = FakeResponse(201, data='{"id": "XXX"}',
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
container_id = client.create_container({}, expected_uuid)
self.assertEqual('XXX', container_id)
self.mox.VerifyAll()
def test_create_container_with_args(self):
expected_uuid = uuid.uuid4()
expected_body = jsonutils.dumps({
'Hostname': 'marco',
'User': '',
'Memory': 512,
'MemorySwap': 0,
'AttachStdin': False,
'AttachStdout': False,
'AttachStderr': False,
'PortSpecs': [],
'Tty': True,
'OpenStdin': True,
'StdinOnce': False,
'Env': None,
'Cmd': [],
'Dns': None,
'Image': 'example',
'Volumes': {},
'VolumesFrom': '',
})
mock_conn = self.make_request('POST',
'containers/create',
('name', expected_uuid),
body=expected_body)
response = FakeResponse(201, data='{"id": "XXX"}',
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
args = {
'Hostname': 'marco',
'Memory': 512,
'Image': 'example',
}
container_id = client.create_container(args, expected_uuid)
self.assertEqual('XXX', container_id)
self.mox.VerifyAll()
def test_create_container_no_id_in_response(self):
expected_uuid = uuid.uuid4()
mock_conn = self.make_request('POST',
'containers/create',
('name', expected_uuid),
body=mox.IgnoreArg())
response = FakeResponse(201, data='{"ping": "pong"}',
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
container_id = client.create_container({}, expected_uuid)
self.assertIsNone(container_id)
self.mox.VerifyAll()
def test_create_container_bad_return_code(self):
expected_uuid = uuid.uuid4()
mock_conn = self.make_request('POST',
'containers/create',
('name', expected_uuid),
body=mox.IgnoreArg())
response = FakeResponse(400)
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
container_id = client.create_container({}, expected_uuid)
self.assertIsNone(container_id)
self.mox.VerifyAll()
def test_start_container(self):
mock_conn = self.make_request('POST', 'containers/XXX/start',
body='{}')
response = FakeResponse(200,
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
self.assertEqual(True, client.start_container('XXX'))
self.mox.VerifyAll()
def test_start_container_bad_return_code(self):
mock_conn = self.make_request('POST', 'containers/XXX/start',
body='{}')
response = FakeResponse(400)
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
self.assertEqual(False, client.start_container('XXX'))
self.mox.VerifyAll()
def test_inspect_image(self):
mock_conn = self.make_request('GET', 'images/XXX/json')
response = FakeResponse(200, data='{"name": "XXX"}',
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
image = client.inspect_image('XXX')
self.assertEqual({'name': 'XXX'}, image)
self.mox.VerifyAll()
def test_inspect_image_bad_return_code(self):
mock_conn = self.make_request('GET', 'images/XXX/json')
response = FakeResponse(404)
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
image = client.inspect_image('XXX')
self.assertIsNone(image)
self.mox.VerifyAll()
def test_inspect_container(self):
mock_conn = self.make_request('GET', 'containers/XXX/json')
response = FakeResponse(200, data='{"id": "XXX"}',
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
container = client.inspect_container('XXX')
self.assertEqual({'id': 'XXX'}, container)
self.mox.VerifyAll()
def test_inspect_container_bad_return_code(self):
mock_conn = self.make_request('GET', 'containers/XXX/json')
response = FakeResponse(404, data='inspect: No such container: XXX',
headers={'Content-Type': 'text/plain'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
container = client.inspect_container('XXX')
self.assertEqual({}, container)
self.mox.VerifyAll()
def test_stop_container(self):
mock_conn = self.make_request('POST', 'containers/XXX/stop',
('t', '5'))
response = FakeResponse(204,
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
self.assertEqual(True, client.stop_container('XXX'))
self.mox.VerifyAll()
def test_kill_container(self):
mock_conn = self.make_request('POST', 'containers/XXX/kill')
response = FakeResponse(204,
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
self.assertEqual(True, client.kill_container('XXX'))
self.mox.VerifyAll()
def test_stop_container_bad_return_code(self):
mock_conn = self.make_request('POST', 'containers/XXX/stop',
('t', '5'))
response = FakeResponse(400)
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
self.assertEqual(False, client.stop_container('XXX'))
self.mox.VerifyAll()
def test_kill_container_bad_return_code(self):
mock_conn = self.make_request('POST', 'containers/XXX/kill')
response = FakeResponse(400)
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
self.assertEqual(False, client.kill_container('XXX'))
self.mox.VerifyAll()
def test_destroy_container(self):
mock_conn = self.make_request('DELETE', 'containers/XXX')
response = FakeResponse(204,
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
self.assertEqual(True, client.destroy_container('XXX'))
self.mox.VerifyAll()
def test_destroy_container_bad_return_code(self):
mock_conn = self.make_request('DELETE', 'containers/XXX')
response = FakeResponse(400)
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
self.assertEqual(False, client.destroy_container('XXX'))
self.mox.VerifyAll()
    def test_commit_container(self):
        """A 201 from POST /commit reports success (True)."""
        mock_conn = self.make_request('POST',
                                      'commit',
                                      ('container', 'XXX'),
                                      ('repo', 'ping'))
        response = FakeResponse(201,
                                headers={'Content-Type': 'application/json'})
        mock_conn.getresponse().AndReturn(response)
        self.mox.ReplayAll()
        client = docker_client.DockerHTTPClient(mock_conn)
        self.assertEqual(True, client.commit_container('XXX', 'ping'))
        self.mox.VerifyAll()
def test_commit_container_bad_return_code(self):
mock_conn = self.make_request('POST',
'commit',
('container', 'XXX'),
('repo', 'ping'))
response = FakeResponse(400,
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
self.assertEqual(False, client.commit_container('XXX', 'ping'))
self.mox.VerifyAll()
def test_get_container_logs(self):
url = 'containers/XXX/attach'
mock_conn = self.make_request('POST', url,
('logs', '1'),
('stream', '0'),
('stdout', '1'),
('stderr', '1'))
response = FakeResponse(200, data='ping pong',
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
logs = client.get_container_logs('XXX')
self.assertEqual('ping pong', logs)
self.mox.VerifyAll()
def test_get_container_logs_bad_return_code(self):
url = 'containers/XXX/attach'
mock_conn = self.make_request('POST', url,
('logs', '1'),
('stream', '0'),
('stdout', '1'),
('stderr', '1'))
response = FakeResponse(404)
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
logs = client.get_container_logs('XXX')
self.assertIsNone(logs)
self.mox.VerifyAll()
    def test_get_image(self):
        """get_image returns a lazy generator; the GET fires on first next()."""
        image_id = 'XXX'
        data = ["hello world"]
        url = 'images/{0}/get'.format(image_id)
        headers = {'Content-Type': 'application/json'}
        mock_conn = self.make_request('GET', url, headers=headers)
        response = FakeResponse(201, data)
        mock_conn.getresponse().AndReturn(response)
        self.mox.ReplayAll()
        client = docker_client.DockerHTTPClient(mock_conn)
        image = client.get_image(image_id)
        # NOTE(review): collections.Iterable is the Python 2 location; the
        # alias was removed in Python 3.10 (collections.abc.Iterable).  Left
        # unchanged since this module targets Python 2 (see urllib.quote).
        self.assertIsInstance(image, collections.Iterable)
        # Only calling the generator will trigger the GET request.
        next(image)
        self.mox.VerifyAll()
    def test_load_repository(self):
        """load_repository POSTs the given chunks to images/load."""
        data = ["hello", "world"]
        url = 'images/load'
        headers = {'Content-Type': 'application/json'}
        # The data kwarg is recorded as part of the expected request call.
        mock_conn = self.make_request('POST', url, data=data, headers=headers)
        response = FakeResponse(200, data)
        mock_conn.getresponse().AndReturn(response)
        self.mox.ReplayAll()
        client = docker_client.DockerHTTPClient(mock_conn)
        client.load_repository('XXX', data)
        self.mox.VerifyAll()
| 34.413187 | 78 | 0.568719 |
import collections
import mox
import urllib
import uuid
from nova.openstack.common import jsonutils
from nova import test
import novadocker.virt.docker.client as docker_client
class FakeResponse(object):
def __init__(self, status, data='', headers=None):
self.status = status
self._data = data
self._headers = headers or {}
def read(self, _size=None):
return self._data
def getheader(self, key):
return self._headers.get(key)
class DockerHTTPClientTestCase(test.NoDBTestCase):
VERSION = "v1.13"
def make_request(self, *args, **kwargs):
mock_conn = self.mox.CreateMockAnything()
headers = {'Content-Type': 'application/json'}
kwargs['headers'] = headers
url = "/%s/%s" % (self.VERSION, urllib.quote(args[1]))
if len(args) > 2:
url += "?" + urllib.urlencode(args[2:])
encoded_args = args[0], url
mock_conn.request(*encoded_args, **kwargs)
return mock_conn
def test_list_containers(self):
mock_conn = self.make_request('GET',
'containers/ps', ('all', '1'))
response = FakeResponse(200, data='[]',
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
containers = client.list_containers()
self.assertEqual([], containers)
self.mox.VerifyAll()
def test_create_container(self):
mock_conn = self.mox.CreateMockAnything()
expected_uuid = uuid.uuid4()
expected_body = jsonutils.dumps({
'Hostname': '',
'User': '',
'Memory': 0,
'MemorySwap': 0,
'AttachStdin': False,
'AttachStdout': False,
'AttachStderr': False,
'PortSpecs': [],
'Tty': True,
'OpenStdin': True,
'StdinOnce': False,
'Env': None,
'Cmd': [],
'Dns': None,
'Image': None,
'Volumes': {},
'VolumesFrom': '',
})
mock_conn = self.make_request('POST',
'containers/create',
('name', expected_uuid),
body=expected_body)
response = FakeResponse(201, data='{"id": "XXX"}',
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
container_id = client.create_container({}, expected_uuid)
self.assertEqual('XXX', container_id)
self.mox.VerifyAll()
def test_create_container_with_args(self):
expected_uuid = uuid.uuid4()
expected_body = jsonutils.dumps({
'Hostname': 'marco',
'User': '',
'Memory': 512,
'MemorySwap': 0,
'AttachStdin': False,
'AttachStdout': False,
'AttachStderr': False,
'PortSpecs': [],
'Tty': True,
'OpenStdin': True,
'StdinOnce': False,
'Env': None,
'Cmd': [],
'Dns': None,
'Image': 'example',
'Volumes': {},
'VolumesFrom': '',
})
mock_conn = self.make_request('POST',
'containers/create',
('name', expected_uuid),
body=expected_body)
response = FakeResponse(201, data='{"id": "XXX"}',
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
args = {
'Hostname': 'marco',
'Memory': 512,
'Image': 'example',
}
container_id = client.create_container(args, expected_uuid)
self.assertEqual('XXX', container_id)
self.mox.VerifyAll()
def test_create_container_no_id_in_response(self):
expected_uuid = uuid.uuid4()
mock_conn = self.make_request('POST',
'containers/create',
('name', expected_uuid),
body=mox.IgnoreArg())
response = FakeResponse(201, data='{"ping": "pong"}',
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
container_id = client.create_container({}, expected_uuid)
self.assertIsNone(container_id)
self.mox.VerifyAll()
def test_create_container_bad_return_code(self):
expected_uuid = uuid.uuid4()
mock_conn = self.make_request('POST',
'containers/create',
('name', expected_uuid),
body=mox.IgnoreArg())
response = FakeResponse(400)
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
container_id = client.create_container({}, expected_uuid)
self.assertIsNone(container_id)
self.mox.VerifyAll()
def test_start_container(self):
mock_conn = self.make_request('POST', 'containers/XXX/start',
body='{}')
response = FakeResponse(200,
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
self.assertEqual(True, client.start_container('XXX'))
self.mox.VerifyAll()
def test_start_container_bad_return_code(self):
mock_conn = self.make_request('POST', 'containers/XXX/start',
body='{}')
response = FakeResponse(400)
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
self.assertEqual(False, client.start_container('XXX'))
self.mox.VerifyAll()
def test_inspect_image(self):
mock_conn = self.make_request('GET', 'images/XXX/json')
response = FakeResponse(200, data='{"name": "XXX"}',
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
image = client.inspect_image('XXX')
self.assertEqual({'name': 'XXX'}, image)
self.mox.VerifyAll()
def test_inspect_image_bad_return_code(self):
mock_conn = self.make_request('GET', 'images/XXX/json')
response = FakeResponse(404)
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
image = client.inspect_image('XXX')
self.assertIsNone(image)
self.mox.VerifyAll()
def test_inspect_container(self):
mock_conn = self.make_request('GET', 'containers/XXX/json')
response = FakeResponse(200, data='{"id": "XXX"}',
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
container = client.inspect_container('XXX')
self.assertEqual({'id': 'XXX'}, container)
self.mox.VerifyAll()
def test_inspect_container_bad_return_code(self):
mock_conn = self.make_request('GET', 'containers/XXX/json')
response = FakeResponse(404, data='inspect: No such container: XXX',
headers={'Content-Type': 'text/plain'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
container = client.inspect_container('XXX')
self.assertEqual({}, container)
self.mox.VerifyAll()
def test_stop_container(self):
mock_conn = self.make_request('POST', 'containers/XXX/stop',
('t', '5'))
response = FakeResponse(204,
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
self.assertEqual(True, client.stop_container('XXX'))
self.mox.VerifyAll()
def test_kill_container(self):
mock_conn = self.make_request('POST', 'containers/XXX/kill')
response = FakeResponse(204,
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
self.assertEqual(True, client.kill_container('XXX'))
self.mox.VerifyAll()
def test_stop_container_bad_return_code(self):
mock_conn = self.make_request('POST', 'containers/XXX/stop',
('t', '5'))
response = FakeResponse(400)
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
self.assertEqual(False, client.stop_container('XXX'))
self.mox.VerifyAll()
def test_kill_container_bad_return_code(self):
mock_conn = self.make_request('POST', 'containers/XXX/kill')
response = FakeResponse(400)
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
self.assertEqual(False, client.kill_container('XXX'))
self.mox.VerifyAll()
def test_destroy_container(self):
mock_conn = self.make_request('DELETE', 'containers/XXX')
response = FakeResponse(204,
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
self.assertEqual(True, client.destroy_container('XXX'))
self.mox.VerifyAll()
def test_destroy_container_bad_return_code(self):
mock_conn = self.make_request('DELETE', 'containers/XXX')
response = FakeResponse(400)
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
self.assertEqual(False, client.destroy_container('XXX'))
self.mox.VerifyAll()
def test_commit_container(self):
mock_conn = self.make_request('POST',
'commit',
('container', 'XXX'),
('repo', 'ping'))
response = FakeResponse(201,
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
self.assertEqual(True, client.commit_container('XXX', 'ping'))
self.mox.VerifyAll()
def test_commit_container_bad_return_code(self):
mock_conn = self.make_request('POST',
'commit',
('container', 'XXX'),
('repo', 'ping'))
response = FakeResponse(400,
headers={'Content-Type': 'application/json'})
mock_conn.getresponse().AndReturn(response)
self.mox.ReplayAll()
client = docker_client.DockerHTTPClient(mock_conn)
self.assertEqual(False, client.commit_container('XXX', 'ping'))
self.mox.VerifyAll()
    def test_get_container_logs(self):
        """Attaching with logs=1/stream=0 returns the collected output."""
        url = 'containers/XXX/attach'
        mock_conn = self.make_request('POST', url,
                                      ('logs', '1'),
                                      ('stream', '0'),
                                      ('stdout', '1'),
                                      ('stderr', '1'))
        response = FakeResponse(200, data='ping pong',
                                headers={'Content-Type': 'application/json'})
        mock_conn.getresponse().AndReturn(response)
        self.mox.ReplayAll()
        client = docker_client.DockerHTTPClient(mock_conn)
        logs = client.get_container_logs('XXX')
        self.assertEqual('ping pong', logs)
        self.mox.VerifyAll()
    def test_get_container_logs_bad_return_code(self):
        """A 404 from the attach endpoint yields None instead of logs."""
        url = 'containers/XXX/attach'
        mock_conn = self.make_request('POST', url,
                                      ('logs', '1'),
                                      ('stream', '0'),
                                      ('stdout', '1'),
                                      ('stderr', '1'))
        response = FakeResponse(404)
        mock_conn.getresponse().AndReturn(response)
        self.mox.ReplayAll()
        client = docker_client.DockerHTTPClient(mock_conn)
        logs = client.get_container_logs('XXX')
        self.assertIsNone(logs)
        self.mox.VerifyAll()
    def test_get_image(self):
        """GET images/<id>/get streams the image data back as an iterable."""
        image_id = 'XXX'
        data = ["hello world"]
        url = 'images/{0}/get'.format(image_id)
        headers = {'Content-Type': 'application/json'}
        mock_conn = self.make_request('GET', url, headers=headers)
        response = FakeResponse(201, data)
        mock_conn.getresponse().AndReturn(response)
        self.mox.ReplayAll()
        client = docker_client.DockerHTTPClient(mock_conn)
        image = client.get_image(image_id)
        # NOTE(review): collections.Iterable was removed in Python 3.10;
        # collections.abc.Iterable is the modern spelling -- confirm the
        # runtime this suite targets before changing it.
        self.assertIsInstance(image, collections.Iterable)
        next(image)
        self.mox.VerifyAll()
    def test_load_repository(self):
        """POST images/load uploads the repository data chunks."""
        data = ["hello", "world"]
        url = 'images/load'
        headers = {'Content-Type': 'application/json'}
        mock_conn = self.make_request('POST', url, data=data, headers=headers)
        response = FakeResponse(200, data)
        mock_conn.getresponse().AndReturn(response)
        self.mox.ReplayAll()
        client = docker_client.DockerHTTPClient(mock_conn)
        client.load_repository('XXX', data)
        self.mox.VerifyAll()
| true | true |
f7348eebd36027072943cbaa88c43983c1e90c2d | 3,666 | py | Python | data/coco_pose/ref.py | CenIII/pose-ae-train | 8780ba9f3d80ca3a724bbee7b815073adc3d3e6e | [
"BSD-3-Clause"
] | null | null | null | data/coco_pose/ref.py | CenIII/pose-ae-train | 8780ba9f3d80ca3a724bbee7b815073adc3d3e6e | [
"BSD-3-Clause"
] | null | null | null | data/coco_pose/ref.py | CenIII/pose-ae-train | 8780ba9f3d80ca3a724bbee7b815073adc3d3e6e | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pickle
import h5py
from scipy.misc import imread
import os
from pycocotools.coco import COCO
from pycocotools import mask
data_dir = '/home/chuancen/CVResearch/HumanPoseTracking/PJDATA/COCO/images'
ann_path = '/home/chuancen/CVResearch/HumanPoseTracking/PJDATA/COCO/annotations/person_keypoints_train2014.json'
ref_dir = os.path.dirname(__file__)
assert os.path.exists(data_dir)
assert os.path.exists(ann_path)
coco, img_ids, num_examples = None, None, None
with open(ref_dir + '/valid_id', 'r') as f:
valid_id = list(map(lambda x:int(x.strip()), f.readlines()))
valid_id_set = set(valid_id)
def init():
    """Load the COCO keypoint annotations and cache image ids globally.

    Populates the module-level ``coco``, ``img_ids`` and ``num_examples``;
    must be called before any of the lookup helpers below.
    """
    global coco, img_ids, num_examples
    # os.path.join() with a single argument is a no-op, so pass the
    # annotation path straight through instead of wrapping it.
    coco = COCO(ann_path)
    img_ids = coco.getImgIds()
    num_examples = len(img_ids)
# num_parts = 17
# part_mask = np.array([0,0,0,0,0,0,0,1,1,1,1,0,0,1,1,1,1])
# part_ref = {'ankle':[15,16],'knee':[13,14],'hip':[11,12],
# 'wrist':[9,10],'elbow':[7,8],'shoulder':[5,6],
# 'face':[0,1,2],'ears':[3,4]}
# part_labels = ['nose','eye_l','eye_r','ear_l','ear_r',
# 'sho_l','sho_r','elb_l','elb_r','wri_l','wri_r',
# 'hip_l','hip_r','kne_l','kne_r','ank_l','ank_r']
# basic_order = ['sho_l','sho_r', 'nose', 'eye_l','eye_r','ear_l',
# 'ear_r','elb_l','elb_r','wri_l','wri_r',
# 'hip_l','hip_r','kne_l','kne_r','ank_l','ank_r']
# pairRef = [
# [1,2],[2,3],[1,3],
# [6,8],[8,10],[12,14],[14,16],
# [7,9],[9,11],[13,15],[15,17],
# [6,7],[12,13],[6,12],[7,13]
# ]
# pairRef = np.array(pairRef) - 1
# Left/right joint swap order for horizontal-flip augmentation, expressed as
# 0-based indices into the 17 COCO keypoints (nose and mid-face stay put,
# paired left/right joints exchange places).
flipRef = [i-1 for i in [1,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16] ]
# part_idx = {b:a for a, b in enumerate(part_labels)}
# basic_order = [part_idx[i] for i in basic_order]
def initialize(opt):
    """No-op hook kept for interface compatibility; ``opt`` is ignored."""
    pass
def image_path(idx):
    """Return the absolute on-disk path of the idx-th COCO image."""
    info = coco.loadImgs(img_ids[idx])[0]
    # File names look like 'COCO_train2014_000000123456.jpg'; the middle
    # token names the split sub-directory under data_dir.
    subdir = info['file_name'].split('_')[1]
    return os.path.join(data_dir, subdir + '/' + info['file_name'])
def load_image(idx):
    # mode='RGB' forces a 3-channel array even for grayscale images.
    # NOTE(review): scipy.misc.imread is gone in scipy >= 1.2; switching to
    # imageio.imread would be the drop-in replacement -- confirm scipy pin.
    return imread(image_path(idx),mode='RGB')
def num_objects(idx, anns=None, should_greater_than_1 = False):
    """Count annotated people for image ``idx``.

    ``anns`` may be passed to avoid a re-fetch; ``should_greater_than_1``
    is accepted for interface compatibility but ignored.
    """
    people = get_anns(idx) if anns is None else anns
    return len(people)
def setup_val_split(opt = None):
    """Split annotated image indices into (train, validation) index arrays.

    Membership in the validation split is decided by the ids in the
    'valid_id' file; before init() has run (``coco`` is None) two empty
    lists are returned.
    """
    if coco is None:
        return [], []
    # Keep only images that contain at least one annotated person.
    annotated = np.array([i for i in range(num_examples)
                          if num_objects(i, None) > 0], dtype=int)
    valid = {}
    train = []
    for idx in annotated:
        if img_ids[idx] in valid_id_set:
            valid[img_ids[idx]] = idx
        else:
            train.append(idx)
    # Validation indices are emitted in the order of the 'valid_id' file.
    return np.array(train), np.array([valid[i] for i in valid_id if i in valid])
def get_anns(idx):
    """Return person annotations for image ``idx`` with >= 1 labeled keypoint."""
    ann_ids = coco.getAnnIds(imgIds=img_ids[idx])
    tmp_ann = coco.loadAnns(ann_ids)
    # Filter out people with no keypoints annotated; iterate the list
    # directly instead of indexing via range(len(...)).
    return [ann for ann in tmp_ann if ann['num_keypoints'] > 0]
def get_mask(idx):
    """Return a boolean mask that is False over crowd regions of image ``idx``.

    Crowd segmentations (iscrowd == 1) are rasterized with the pycocotools
    mask utilities and accumulated; any covered pixel ends up masked out.
    """
    ann_ids = coco.getAnnIds(imgIds=img_ids[idx])
    annotations = coco.loadAnns(ann_ids)
    img_info = coco.loadImgs(img_ids[idx])[0]
    height, width = img_info['height'], img_info['width']
    crowd = np.zeros((height, width))
    for ann in annotations:
        if ann['iscrowd']:
            rle = mask.frPyObjects(ann['segmentation'], height, width)
            crowd += mask.decode(rle)
    return crowd < 0.5
def get_keypoints(idx, anns=None):
    """Return a (num_people, 17, 3) array of [x, y, visibility] keypoints."""
    if anns is None:
        anns = get_anns(idx)
    kps = np.zeros((len(anns), 17, 3))
    for person, ann in enumerate(anns):
        # COCO stores keypoints flat as [x1, y1, v1, x2, y2, v2, ...].
        kps[person] = np.array(ann['keypoints']).reshape([-1, 3])
    return kps
| 32.157895 | 112 | 0.61784 | import numpy as np
import pickle
import h5py
from scipy.misc import imread
import os
from pycocotools.coco import COCO
from pycocotools import mask
data_dir = '/home/chuancen/CVResearch/HumanPoseTracking/PJDATA/COCO/images'
ann_path = '/home/chuancen/CVResearch/HumanPoseTracking/PJDATA/COCO/annotations/person_keypoints_train2014.json'
ref_dir = os.path.dirname(__file__)
assert os.path.exists(data_dir)
assert os.path.exists(ann_path)
coco, img_ids, num_examples = None, None, None
with open(ref_dir + '/valid_id', 'r') as f:
valid_id = list(map(lambda x:int(x.strip()), f.readlines()))
valid_id_set = set(valid_id)
def init():
global coco, img_ids, num_examples
ann_file = os.path.join(ann_path)
coco = COCO(ann_file)
img_ids = coco.getImgIds()
num_examples = len(img_ids)
flipRef = [i-1 for i in [1,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16] ]
def initialize(opt):
return
def image_path(idx):
img_info = coco.loadImgs(img_ids[idx])[0]
path = img_info['file_name'].split('_')[1] + '/' + img_info['file_name']
return os.path.join(data_dir, path)
def load_image(idx):
return imread(image_path(idx),mode='RGB')
def num_objects(idx, anns=None, should_greater_than_1 = False):
if anns is None: anns = get_anns(idx)
return len(anns)
def setup_val_split(opt = None):
if coco is None:
return [], []
tmp_idxs = []
for i in range(num_examples):
if num_objects(i, None) > 0:
tmp_idxs += [i]
ref_idxs = np.array(tmp_idxs,dtype=int) n valid_id_set:
valid[ img_ids[i] ]=i
else:
train.append(i)
return np.array(train), np.array([valid[i] for i in valid_id if i in valid])
def get_anns(idx):
ann_ids = coco.getAnnIds(imgIds=img_ids[idx])
tmp_ann = coco.loadAnns(ann_ids)
return [tmp_ann[i] for i in range(len(tmp_ann)) if tmp_ann[i]['num_keypoints'] > 0]
def get_mask(idx):
ann_ids = coco.getAnnIds(imgIds=img_ids[idx])
anns = coco.loadAnns(ann_ids)
img = coco.loadImgs(img_ids[idx])[0]
m = np.zeros((img['height'], img['width']))
for j in anns:
if j['iscrowd']:
rle = mask.frPyObjects(j['segmentation'], img['height'], img['width'])
m += mask.decode(rle)
return m < 0.5
def get_keypoints(idx, anns=None):
if anns is None: anns = get_anns(idx)
num_people = num_objects(idx, anns)
kps = np.zeros((num_people, 17, 3))
for i in range(num_people):
kps[i] = np.array(anns[i]['keypoints']).reshape([-1,3])
return kps
| true | true |
f7348f533855fcbaa041326c34a9d63cb3f767f6 | 1,296 | py | Python | setup.py | leenr/gzip-stream | 5161a19b67015d819109666f53fd2152f1e7029f | [
"CC0-1.0"
] | 17 | 2020-04-06T18:27:06.000Z | 2022-01-03T21:25:36.000Z | setup.py | leenr/gzip-stream | 5161a19b67015d819109666f53fd2152f1e7029f | [
"CC0-1.0"
] | 4 | 2021-02-24T12:56:41.000Z | 2021-12-08T17:00:17.000Z | setup.py | leenr/gzip-stream | 5161a19b67015d819109666f53fd2152f1e7029f | [
"CC0-1.0"
] | 5 | 2019-08-07T12:57:30.000Z | 2021-12-02T11:37:53.000Z | from setuptools import setup
# Packaging metadata for gzip-stream, kept declarative: the classifier list
# is named up front so the setup() call itself stays short.
_CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
    'Operating System :: POSIX',
    'Operating System :: MacOS :: MacOS X',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: 3.8',
    'Programming Language :: Python :: 3.9',
    'Topic :: Software Development :: Libraries',
]

setup(
    name='gzip-stream',
    version='1.2.0',
    py_modules=['gzip_stream'],
    provides=['gzip_stream'],
    description='Compress stream by GZIP on the fly.',
    # The README doubles as the PyPI long description.
    long_description=open('README.rst').read(),
    keywords=['gzip', 'compression'],
    url='https://github.com/leenr/gzip-stream',
    author='leenr',
    author_email='i@leenr.me',
    maintainer='leenr',
    maintainer_email='i@leenr.me',
    platforms=['posix'],
    classifiers=_CLASSIFIERS,
    python_requires='~=3.5',
    extras_require={
        'develop': ['pytest~=5.0', 'pytest-cov~=2.7', 'pylama~=7.7', 'faker~=2.0'],
    },
)
| 27.574468 | 74 | 0.56713 | from setuptools import setup
setup(
name='gzip-stream',
version='1.2.0',
py_modules=['gzip_stream'],
provides=['gzip_stream'],
description='Compress stream by GZIP on the fly.',
long_description=open('README.rst').read(),
keywords=['gzip', 'compression'],
url='https://github.com/leenr/gzip-stream',
author='leenr',
author_email='i@leenr.me',
maintainer='leenr',
maintainer_email='i@leenr.me',
platforms=['posix'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Software Development :: Libraries'
],
python_requires='~=3.5',
extras_require={
'develop': [
'pytest~=5.0',
'pytest-cov~=2.7',
'pylama~=7.7',
'faker~=2.0'
]
}
)
| true | true |
f7348f8931e9d950c2b7c97398de87e48d6618ce | 14,169 | py | Python | nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py | bopopescu/Trusted-Platform-Module-nova | 20d28ef29daf6fd7a67b37b87ec2561c34b4230b | [
"Apache-2.0"
] | 5 | 2016-04-28T16:20:38.000Z | 2021-04-25T11:19:03.000Z | nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py | bopopescu/Trusted-Platform-Module-nova | 20d28ef29daf6fd7a67b37b87ec2561c34b4230b | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py | bopopescu/Trusted-Platform-Module-nova | 20d28ef29daf6fd7a67b37b87ec2561c34b4230b | [
"Apache-2.0"
] | 5 | 2020-04-08T20:24:45.000Z | 2020-10-05T19:02:13.000Z | # Copyright 2011 University of Southern California
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import webob
from nova.api.openstack.compute import flavors_extraspecs \
as flavorextraspecs_v21
from nova.api.openstack.compute.legacy_v2.contrib import flavorextraspecs \
as flavorextraspecs_v2
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_flavor
def return_create_flavor_extra_specs(context, flavor_id, extra_specs,
                                     *args, **kwargs):
    """Stub DB create: pretend it succeeded and return the canned specs."""
    return stub_flavor_extra_specs()
def return_flavor_extra_specs(context, flavor_id):
    """Stub DB read: every flavor reports the canned extra specs."""
    return stub_flavor_extra_specs()
def return_flavor_extra_specs_item(context, flavor_id, key):
    """Stub DB read of a single spec key from the canned dict."""
    return {key: stub_flavor_extra_specs()[key]}
def return_empty_flavor_extra_specs(context, flavor_id):
    """Stub DB read that reports no extra specs for any flavor."""
    return dict()
def delete_flavor_extra_specs(context, flavor_id, key):
    """Stub DB delete that silently succeeds for any key."""
    return None
def stub_flavor_extra_specs():
    """Canned extra-specs dictionary shared by the stub helpers above."""
    return {'key%d' % n: 'value%d' % n for n in range(1, 6)}
class FlavorsExtraSpecsTestV21(test.TestCase):
    """CRUD tests for the v2.1 flavor extra-specs API controller.

    The legacy v2 subclass below reuses every case and only swaps the
    controller module and the exception type expected for malformed input.
    """
    bad_request = exception.ValidationError
    flavorextraspecs = flavorextraspecs_v21
    def _get_request(self, url, use_admin_context=False):
        """Build a fake request for /v2/fake/flavors/<url>."""
        req_url = '/v2/fake/flavors/' + url
        return fakes.HTTPRequest.blank(req_url,
                                       use_admin_context=use_admin_context)
    def setUp(self):
        super(FlavorsExtraSpecsTestV21, self).setUp()
        fakes.stub_out_key_pair_funcs(self.stubs)
        self.controller = self.flavorextraspecs.FlavorExtraSpecsController()
    # ---- index / show ----
    def test_index(self):
        flavor = dict(test_flavor.fake_flavor,
                      extra_specs={'key1': 'value1'})
        req = self._get_request('1/os-extra_specs')
        with mock.patch('nova.objects.Flavor._flavor_get_by_flavor_id_from_db'
                        ) as mock_get:
            mock_get.return_value = flavor
            res_dict = self.controller.index(req, 1)
        self.assertEqual('value1', res_dict['extra_specs']['key1'])
    @mock.patch('nova.objects.Flavor.get_by_flavor_id')
    def test_index_no_data(self, mock_get):
        flavor = objects.Flavor(flavorid='1', extra_specs={})
        mock_get.return_value = flavor
        req = self._get_request('1/os-extra_specs')
        res_dict = self.controller.index(req, 1)
        self.assertEqual(0, len(res_dict['extra_specs']))
    @mock.patch('nova.objects.Flavor.get_by_flavor_id')
    def test_index_flavor_not_found(self, mock_get):
        req = self._get_request('1/os-extra_specs',
                                use_admin_context=True)
        mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.index,
                          req, 1)
    def test_show(self):
        flavor = objects.Flavor(flavorid='1', extra_specs={'key5': 'value5'})
        req = self._get_request('1/os-extra_specs/key5')
        with mock.patch('nova.objects.Flavor.get_by_flavor_id') as mock_get:
            mock_get.return_value = flavor
            res_dict = self.controller.show(req, 1, 'key5')
        self.assertEqual('value5', res_dict['key5'])
    @mock.patch('nova.objects.Flavor.get_by_flavor_id')
    def test_show_spec_not_found(self, mock_get):
        mock_get.return_value = objects.Flavor(extra_specs={})
        req = self._get_request('1/os-extra_specs/key6')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
                          req, 1, 'key6')
    def test_not_found_because_flavor(self):
        # Every verb must 404 when the flavor itself does not exist.
        req = self._get_request('1/os-extra_specs/key5',
                                use_admin_context=True)
        with mock.patch('nova.objects.Flavor.get_by_flavor_id') as mock_get:
            mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
            self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
                              req, 1, 'key5')
            self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
                              req, 1, 'key5', body={'key5': 'value5'})
            self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                              req, 1, 'key5')
        req = self._get_request('1/os-extra_specs', use_admin_context=True)
        with mock.patch('nova.objects.Flavor.get_by_flavor_id') as mock_get:
            mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
            self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                              req, 1, body={'extra_specs': {'key5': 'value5'}})
    # ---- delete ----
    @mock.patch('nova.objects.Flavor._flavor_get_by_flavor_id_from_db')
    def test_delete(self, mock_get):
        flavor = dict(test_flavor.fake_flavor,
                      extra_specs={'key5': 'value5'})
        req = self._get_request('1/os-extra_specs/key5',
                                use_admin_context=True)
        mock_get.return_value = flavor
        with mock.patch('nova.objects.Flavor.save'):
            self.controller.delete(req, 1, 'key5')
    def test_delete_no_admin(self):
        self.stub_out('nova.objects.flavor._flavor_extra_specs_del',
                      delete_flavor_extra_specs)
        req = self._get_request('1/os-extra_specs/key5')
        self.assertRaises(exception.Forbidden, self.controller.delete,
                          req, 1, 'key 5')
    def test_delete_spec_not_found(self):
        req = self._get_request('1/os-extra_specs/key6',
                                use_admin_context=True)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          req, 1, 'key6')
    # ---- create ----
    def test_create(self):
        body = {"extra_specs": {"key1": "value1", "key2": 0.5, "key3": 5}}
        req = self._get_request('1/os-extra_specs', use_admin_context=True)
        res_dict = self.controller.create(req, 1, body=body)
        self.assertEqual('value1', res_dict['extra_specs']['key1'])
        self.assertEqual(0.5, res_dict['extra_specs']['key2'])
        self.assertEqual(5, res_dict['extra_specs']['key3'])
    def test_create_no_admin(self):
        body = {"extra_specs": {"key1": "value1"}}
        req = self._get_request('1/os-extra_specs')
        self.assertRaises(exception.Forbidden, self.controller.create,
                          req, 1, body=body)
    def test_create_flavor_not_found(self):
        body = {"extra_specs": {"key1": "value1"}}
        req = self._get_request('1/os-extra_specs', use_admin_context=True)
        with mock.patch('nova.objects.Flavor.save',
                        side_effect=exception.FlavorNotFound(flavor_id='')):
            self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                              req, 1, body=body)
    def test_create_flavor_db_duplicate(self):
        body = {"extra_specs": {"key1": "value1"}}
        req = self._get_request('1/os-extra_specs', use_admin_context=True)
        with mock.patch(
                'nova.objects.Flavor.save',
                side_effect=exception.FlavorExtraSpecUpdateCreateFailed(
                    id='', retries=10)):
            self.assertRaises(webob.exc.HTTPConflict, self.controller.create,
                              req, 1, body=body)
    def _test_create_bad_request(self, body):
        """Helper: POST ``body`` as admin and expect self.bad_request."""
        self.stub_out('nova.objects.flavor._flavor_extra_specs_add',
                      return_create_flavor_extra_specs)
        req = self._get_request('1/os-extra_specs', use_admin_context=True)
        self.assertRaises(self.bad_request, self.controller.create,
                          req, 1, body=body)
    def test_create_empty_body(self):
        self._test_create_bad_request('')
    def test_create_non_dict_extra_specs(self):
        self._test_create_bad_request({"extra_specs": "non_dict"})
    def test_create_non_string_key(self):
        self._test_create_bad_request({"extra_specs": {None: "value1"}})
    def test_create_non_string_value(self):
        self._test_create_bad_request({"extra_specs": {"key1": None}})
    def test_create_zero_length_key(self):
        self._test_create_bad_request({"extra_specs": {"": "value1"}})
    def test_create_long_key(self):
        # Keys and values are capped at 255 characters.
        key = "a" * 256
        self._test_create_bad_request({"extra_specs": {key: "value1"}})
    def test_create_long_value(self):
        value = "a" * 256
        self._test_create_bad_request({"extra_specs": {"key1": value}})
    def test_create_really_long_integer_value(self):
        value = 10 ** 1000
        req = self._get_request('1/os-extra_specs', use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, 1, body={"extra_specs": {"key1": value}})
    def test_create_invalid_specs_key(self):
        invalid_keys = ("key1/", "<key>", "$$akey$", "!akey", "")
        for key in invalid_keys:
            body = {"extra_specs": {key: "value1"}}
            req = self._get_request('1/os-extra_specs', use_admin_context=True)
            self.assertRaises(self.bad_request, self.controller.create,
                              req, 1, body=body)
    @mock.patch('nova.objects.flavor._flavor_extra_specs_add')
    def test_create_valid_specs_key(self, mock_flavor_extra_specs):
        valid_keys = ("key1", "month.price", "I_am-a Key", "finance:g2")
        # NOTE(review): 'side_effects' looks like a typo for 'side_effect';
        # as written it sets an unused attribute on the mock -- confirm.
        mock_flavor_extra_specs.side_effects = return_create_flavor_extra_specs
        for key in valid_keys:
            body = {"extra_specs": {key: "value1"}}
            req = self._get_request('1/os-extra_specs', use_admin_context=True)
            res_dict = self.controller.create(req, 1, body=body)
            self.assertEqual('value1', res_dict['extra_specs'][key])
    # ---- update ----
    @mock.patch('nova.objects.flavor._flavor_extra_specs_add')
    def test_update_item(self, mock_add):
        mock_add.side_effect = return_create_flavor_extra_specs
        body = {"key1": "value1"}
        req = self._get_request('1/os-extra_specs/key1',
                                use_admin_context=True)
        res_dict = self.controller.update(req, 1, 'key1', body=body)
        self.assertEqual('value1', res_dict['key1'])
    def test_update_item_no_admin(self):
        body = {"key1": "value1"}
        req = self._get_request('1/os-extra_specs/key1')
        self.assertRaises(exception.Forbidden, self.controller.update,
                          req, 1, 'key1', body=body)
    def _test_update_item_bad_request(self, body):
        """Helper: PUT ``body`` for key1 as admin and expect self.bad_request."""
        req = self._get_request('1/os-extra_specs/key1',
                                use_admin_context=True)
        self.assertRaises(self.bad_request, self.controller.update,
                          req, 1, 'key1', body=body)
    def test_update_item_empty_body(self):
        self._test_update_item_bad_request('')
    def test_update_item_too_many_keys(self):
        body = {"key1": "value1", "key2": "value2"}
        self._test_update_item_bad_request(body)
    def test_update_item_non_dict_extra_specs(self):
        self._test_update_item_bad_request("non_dict")
    def test_update_item_non_string_key(self):
        self._test_update_item_bad_request({None: "value1"})
    def test_update_item_non_string_value(self):
        self._test_update_item_bad_request({"key1": None})
    def test_update_item_zero_length_key(self):
        self._test_update_item_bad_request({"": "value1"})
    def test_update_item_long_key(self):
        key = "a" * 256
        self._test_update_item_bad_request({key: "value1"})
    def test_update_item_long_value(self):
        value = "a" * 256
        self._test_update_item_bad_request({"key1": value})
    def test_update_item_body_uri_mismatch(self):
        # The key in the URI must match the single key in the body.
        body = {"key1": "value1"}
        req = self._get_request('1/os-extra_specs/bad', use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 1, 'bad', body=body)
    def test_update_flavor_not_found(self):
        body = {"key1": "value1"}
        req = self._get_request('1/os-extra_specs/key1',
                                use_admin_context=True)
        with mock.patch('nova.objects.Flavor.save',
                        side_effect=exception.FlavorNotFound(flavor_id='')):
            self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
                              req, 1, 'key1', body=body)
    def test_update_flavor_db_duplicate(self):
        body = {"key1": "value1"}
        req = self._get_request('1/os-extra_specs/key1',
                                use_admin_context=True)
        with mock.patch(
                'nova.objects.Flavor.save',
                side_effect=exception.FlavorExtraSpecUpdateCreateFailed(
                    id=1, retries=5)):
            self.assertRaises(webob.exc.HTTPConflict, self.controller.update,
                              req, 1, 'key1', body=body)
    def test_update_really_long_integer_value(self):
        value = 10 ** 1000
        req = self._get_request('1/os-extra_specs/key1',
                                use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 1, 'key1', body={"key1": value})
class FlavorsExtraSpecsTestV2(FlavorsExtraSpecsTestV21):
    """Legacy v2 API variant: same cases, webob HTTPBadRequest for bad input."""
    bad_request = webob.exc.HTTPBadRequest
    flavorextraspecs = flavorextraspecs_v2
| 40.598854 | 79 | 0.638154 |
import mock
import webob
from nova.api.openstack.compute import flavors_extraspecs \
as flavorextraspecs_v21
from nova.api.openstack.compute.legacy_v2.contrib import flavorextraspecs \
as flavorextraspecs_v2
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_flavor
def return_create_flavor_extra_specs(context, flavor_id, extra_specs,
*args, **kwargs):
return stub_flavor_extra_specs()
def return_flavor_extra_specs(context, flavor_id):
return stub_flavor_extra_specs()
def return_flavor_extra_specs_item(context, flavor_id, key):
return {key: stub_flavor_extra_specs()[key]}
def return_empty_flavor_extra_specs(context, flavor_id):
return {}
def delete_flavor_extra_specs(context, flavor_id, key):
pass
def stub_flavor_extra_specs():
specs = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
return specs
class FlavorsExtraSpecsTestV21(test.TestCase):
bad_request = exception.ValidationError
flavorextraspecs = flavorextraspecs_v21
def _get_request(self, url, use_admin_context=False):
req_url = '/v2/fake/flavors/' + url
return fakes.HTTPRequest.blank(req_url,
use_admin_context=use_admin_context)
def setUp(self):
super(FlavorsExtraSpecsTestV21, self).setUp()
fakes.stub_out_key_pair_funcs(self.stubs)
self.controller = self.flavorextraspecs.FlavorExtraSpecsController()
def test_index(self):
flavor = dict(test_flavor.fake_flavor,
extra_specs={'key1': 'value1'})
req = self._get_request('1/os-extra_specs')
with mock.patch('nova.objects.Flavor._flavor_get_by_flavor_id_from_db'
) as mock_get:
mock_get.return_value = flavor
res_dict = self.controller.index(req, 1)
self.assertEqual('value1', res_dict['extra_specs']['key1'])
@mock.patch('nova.objects.Flavor.get_by_flavor_id')
def test_index_no_data(self, mock_get):
flavor = objects.Flavor(flavorid='1', extra_specs={})
mock_get.return_value = flavor
req = self._get_request('1/os-extra_specs')
res_dict = self.controller.index(req, 1)
self.assertEqual(0, len(res_dict['extra_specs']))
@mock.patch('nova.objects.Flavor.get_by_flavor_id')
def test_index_flavor_not_found(self, mock_get):
req = self._get_request('1/os-extra_specs',
use_admin_context=True)
mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.index,
req, 1)
def test_show(self):
flavor = objects.Flavor(flavorid='1', extra_specs={'key5': 'value5'})
req = self._get_request('1/os-extra_specs/key5')
with mock.patch('nova.objects.Flavor.get_by_flavor_id') as mock_get:
mock_get.return_value = flavor
res_dict = self.controller.show(req, 1, 'key5')
self.assertEqual('value5', res_dict['key5'])
@mock.patch('nova.objects.Flavor.get_by_flavor_id')
def test_show_spec_not_found(self, mock_get):
mock_get.return_value = objects.Flavor(extra_specs={})
req = self._get_request('1/os-extra_specs/key6')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, 1, 'key6')
def test_not_found_because_flavor(self):
req = self._get_request('1/os-extra_specs/key5',
use_admin_context=True)
with mock.patch('nova.objects.Flavor.get_by_flavor_id') as mock_get:
mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, 1, 'key5')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
req, 1, 'key5', body={'key5': 'value5'})
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, 1, 'key5')
req = self._get_request('1/os-extra_specs', use_admin_context=True)
with mock.patch('nova.objects.Flavor.get_by_flavor_id') as mock_get:
mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, 1, body={'extra_specs': {'key5': 'value5'}})
@mock.patch('nova.objects.Flavor._flavor_get_by_flavor_id_from_db')
def test_delete(self, mock_get):
flavor = dict(test_flavor.fake_flavor,
extra_specs={'key5': 'value5'})
req = self._get_request('1/os-extra_specs/key5',
use_admin_context=True)
mock_get.return_value = flavor
with mock.patch('nova.objects.Flavor.save'):
self.controller.delete(req, 1, 'key5')
def test_delete_no_admin(self):
self.stub_out('nova.objects.flavor._flavor_extra_specs_del',
delete_flavor_extra_specs)
req = self._get_request('1/os-extra_specs/key5')
self.assertRaises(exception.Forbidden, self.controller.delete,
req, 1, 'key 5')
def test_delete_spec_not_found(self):
req = self._get_request('1/os-extra_specs/key6',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, 1, 'key6')
def test_create(self):
body = {"extra_specs": {"key1": "value1", "key2": 0.5, "key3": 5}}
req = self._get_request('1/os-extra_specs', use_admin_context=True)
res_dict = self.controller.create(req, 1, body=body)
self.assertEqual('value1', res_dict['extra_specs']['key1'])
self.assertEqual(0.5, res_dict['extra_specs']['key2'])
self.assertEqual(5, res_dict['extra_specs']['key3'])
def test_create_no_admin(self):
body = {"extra_specs": {"key1": "value1"}}
req = self._get_request('1/os-extra_specs')
self.assertRaises(exception.Forbidden, self.controller.create,
req, 1, body=body)
def test_create_flavor_not_found(self):
body = {"extra_specs": {"key1": "value1"}}
req = self._get_request('1/os-extra_specs', use_admin_context=True)
with mock.patch('nova.objects.Flavor.save',
side_effect=exception.FlavorNotFound(flavor_id='')):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, 1, body=body)
def test_create_flavor_db_duplicate(self):
body = {"extra_specs": {"key1": "value1"}}
req = self._get_request('1/os-extra_specs', use_admin_context=True)
with mock.patch(
'nova.objects.Flavor.save',
side_effect=exception.FlavorExtraSpecUpdateCreateFailed(
id='', retries=10)):
self.assertRaises(webob.exc.HTTPConflict, self.controller.create,
req, 1, body=body)
def _test_create_bad_request(self, body):
self.stub_out('nova.objects.flavor._flavor_extra_specs_add',
return_create_flavor_extra_specs)
req = self._get_request('1/os-extra_specs', use_admin_context=True)
self.assertRaises(self.bad_request, self.controller.create,
req, 1, body=body)
def test_create_empty_body(self):
self._test_create_bad_request('')
def test_create_non_dict_extra_specs(self):
self._test_create_bad_request({"extra_specs": "non_dict"})
def test_create_non_string_key(self):
self._test_create_bad_request({"extra_specs": {None: "value1"}})
def test_create_non_string_value(self):
self._test_create_bad_request({"extra_specs": {"key1": None}})
def test_create_zero_length_key(self):
self._test_create_bad_request({"extra_specs": {"": "value1"}})
def test_create_long_key(self):
key = "a" * 256
self._test_create_bad_request({"extra_specs": {key: "value1"}})
def test_create_long_value(self):
value = "a" * 256
self._test_create_bad_request({"extra_specs": {"key1": value}})
def test_create_really_long_integer_value(self):
value = 10 ** 1000
req = self._get_request('1/os-extra_specs', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, 1, body={"extra_specs": {"key1": value}})
def test_create_invalid_specs_key(self):
invalid_keys = ("key1/", "<key>", "$$akey$", "!akey", "")
for key in invalid_keys:
body = {"extra_specs": {key: "value1"}}
req = self._get_request('1/os-extra_specs', use_admin_context=True)
self.assertRaises(self.bad_request, self.controller.create,
req, 1, body=body)
@mock.patch('nova.objects.flavor._flavor_extra_specs_add')
def test_create_valid_specs_key(self, mock_flavor_extra_specs):
valid_keys = ("key1", "month.price", "I_am-a Key", "finance:g2")
mock_flavor_extra_specs.side_effects = return_create_flavor_extra_specs
for key in valid_keys:
body = {"extra_specs": {key: "value1"}}
req = self._get_request('1/os-extra_specs', use_admin_context=True)
res_dict = self.controller.create(req, 1, body=body)
self.assertEqual('value1', res_dict['extra_specs'][key])
@mock.patch('nova.objects.flavor._flavor_extra_specs_add')
def test_update_item(self, mock_add):
mock_add.side_effect = return_create_flavor_extra_specs
body = {"key1": "value1"}
req = self._get_request('1/os-extra_specs/key1',
use_admin_context=True)
res_dict = self.controller.update(req, 1, 'key1', body=body)
self.assertEqual('value1', res_dict['key1'])
def test_update_item_no_admin(self):
body = {"key1": "value1"}
req = self._get_request('1/os-extra_specs/key1')
self.assertRaises(exception.Forbidden, self.controller.update,
req, 1, 'key1', body=body)
def _test_update_item_bad_request(self, body):
req = self._get_request('1/os-extra_specs/key1',
use_admin_context=True)
self.assertRaises(self.bad_request, self.controller.update,
req, 1, 'key1', body=body)
def test_update_item_empty_body(self):
self._test_update_item_bad_request('')
def test_update_item_too_many_keys(self):
body = {"key1": "value1", "key2": "value2"}
self._test_update_item_bad_request(body)
def test_update_item_non_dict_extra_specs(self):
self._test_update_item_bad_request("non_dict")
def test_update_item_non_string_key(self):
self._test_update_item_bad_request({None: "value1"})
def test_update_item_non_string_value(self):
self._test_update_item_bad_request({"key1": None})
def test_update_item_zero_length_key(self):
self._test_update_item_bad_request({"": "value1"})
def test_update_item_long_key(self):
key = "a" * 256
self._test_update_item_bad_request({key: "value1"})
def test_update_item_long_value(self):
value = "a" * 256
self._test_update_item_bad_request({"key1": value})
def test_update_item_body_uri_mismatch(self):
body = {"key1": "value1"}
req = self._get_request('1/os-extra_specs/bad', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 1, 'bad', body=body)
def test_update_flavor_not_found(self):
body = {"key1": "value1"}
req = self._get_request('1/os-extra_specs/key1',
use_admin_context=True)
with mock.patch('nova.objects.Flavor.save',
side_effect=exception.FlavorNotFound(flavor_id='')):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
req, 1, 'key1', body=body)
def test_update_flavor_db_duplicate(self):
body = {"key1": "value1"}
req = self._get_request('1/os-extra_specs/key1',
use_admin_context=True)
with mock.patch(
'nova.objects.Flavor.save',
side_effect=exception.FlavorExtraSpecUpdateCreateFailed(
id=1, retries=5)):
self.assertRaises(webob.exc.HTTPConflict, self.controller.update,
req, 1, 'key1', body=body)
def test_update_really_long_integer_value(self):
value = 10 ** 1000
req = self._get_request('1/os-extra_specs/key1',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 1, 'key1', body={"key1": value})
class FlavorsExtraSpecsTestV2(FlavorsExtraSpecsTestV21):
    """Re-run the v2.1 extra-specs tests against the legacy v2 API plugin."""

    flavorextraspecs = flavorextraspecs_v2
    bad_request = webob.exc.HTTPBadRequest
| true | true |
f7348fb9f03e1d0affa87e3c6d29a87b3da4654e | 866 | py | Python | packages/skills/__init__.py | mattmcd/aea-examples | 1c72faadf8feb9f4b8dda9f17995010c2fb3510e | [
"Apache-2.0"
] | 1 | 2021-07-25T18:50:18.000Z | 2021-07-25T18:50:18.000Z | packages/skills/__init__.py | mattmcd/aea-examples | 1c72faadf8feb9f4b8dda9f17995010c2fb3510e | [
"Apache-2.0"
] | 1 | 2020-02-21T14:28:13.000Z | 2020-03-05T14:53:53.000Z | packages/skills/__init__.py | mattmcd/aea-examples | 1c72faadf8feb9f4b8dda9f17995010c2fb3510e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the skill registry for the AEA framework."""
| 39.363636 | 80 | 0.58776 | true | true | |
f7349006ddb929b24d671bd4b9d7c2745ad72556 | 17,366 | py | Python | python/ccxt/bit2c.py | RusEu/ccxt | d6d2b3e2f54a59d102102ee2858eca4d6702fecc | [
"MIT"
] | 3 | 2021-06-29T16:27:19.000Z | 2021-07-18T08:36:07.000Z | python/ccxt/bit2c.py | RusEu/ccxt | d6d2b3e2f54a59d102102ee2858eca4d6702fecc | [
"MIT"
] | null | null | null | python/ccxt/bit2c.py | RusEu/ccxt | d6d2b3e2f54a59d102102ee2858eca4d6702fecc | [
"MIT"
] | 1 | 2022-01-11T07:39:19.000Z | 2022-01-11T07:39:19.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InvalidNonce
from ccxt.base.precise import Precise
class bit2c(Exchange):
    """ccxt adapter for Bit2C, an Israeli exchange quoting in NIS (shekel).

    Fixes over the previous revision:
    * ``parse_trade`` tested ``marketId in self.markets_by_id[marketId]``
      (indexing before the membership check), which raises ``KeyError`` for
      unknown market ids and never matched known ones correctly; it now
      tests membership in the ``markets_by_id`` dict itself.
    * ``fetch_my_trades`` assigned ``request['take'] = limit`` twice.
    * Removed the duplicated ``'Order/AddCoinFundsRequest'`` endpoint entry.
    """

    def describe(self):
        """Return the static exchange description (endpoints, markets, fees)."""
        return self.deep_extend(super(bit2c, self).describe(), {
            'id': 'bit2c',
            'name': 'Bit2C',
            'countries': ['IL'],  # Israel
            'rateLimit': 3000,
            'has': {
                'cancelOrder': True,
                'CORS': False,
                'createOrder': True,
                'fetchBalance': True,
                'fetchMyTrades': True,
                'fetchOpenOrders': True,
                'fetchOrderBook': True,
                'fetchTicker': True,
                'fetchTrades': True,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27766119-3593220e-5ece-11e7-8b3a-5a041f6bcc3f.jpg',
                'api': 'https://bit2c.co.il',
                'www': 'https://www.bit2c.co.il',
                'referral': 'https://bit2c.co.il/Aff/63bfed10-e359-420c-ab5a-ad368dab0baf',
                'doc': [
                    'https://www.bit2c.co.il/home/api',
                    'https://github.com/OferE/bit2c',
                ],
            },
            'api': {
                'public': {
                    'get': [
                        'Exchanges/{pair}/Ticker',
                        'Exchanges/{pair}/orderbook',
                        'Exchanges/{pair}/trades',
                        'Exchanges/{pair}/lasttrades',
                    ],
                },
                'private': {
                    'post': [
                        'Merchant/CreateCheckout',
                        'Order/AddCoinFundsRequest',
                        'Order/AddFund',
                        'Order/AddOrder',
                        'Order/AddOrderMarketPriceBuy',
                        'Order/AddOrderMarketPriceSell',
                        'Order/CancelOrder',
                        'Order/AddStopOrder',
                        'Payment/GetMyId',
                        'Payment/Send',
                        'Payment/Pay',
                    ],
                    'get': [
                        'Account/Balance',
                        'Account/Balance/v2',
                        'Order/MyOrders',
                        'Order/GetById',
                        'Order/AccountHistory',
                        'Order/OrderHistory',
                    ],
                },
            },
            'markets': {
                'BTC/NIS': {'id': 'BtcNis', 'symbol': 'BTC/NIS', 'base': 'BTC', 'quote': 'NIS', 'baseId': 'Btc', 'quoteId': 'Nis'},
                'ETH/NIS': {'id': 'EthNis', 'symbol': 'ETH/NIS', 'base': 'ETH', 'quote': 'NIS', 'baseId': 'Eth', 'quoteId': 'Nis'},
                'BCH/NIS': {'id': 'BchabcNis', 'symbol': 'BCH/NIS', 'base': 'BCH', 'quote': 'NIS', 'baseId': 'Bchabc', 'quoteId': 'Nis'},
                'LTC/NIS': {'id': 'LtcNis', 'symbol': 'LTC/NIS', 'base': 'LTC', 'quote': 'NIS', 'baseId': 'Ltc', 'quoteId': 'Nis'},
                'ETC/NIS': {'id': 'EtcNis', 'symbol': 'ETC/NIS', 'base': 'ETC', 'quote': 'NIS', 'baseId': 'Etc', 'quoteId': 'Nis'},
                'BTG/NIS': {'id': 'BtgNis', 'symbol': 'BTG/NIS', 'base': 'BTG', 'quote': 'NIS', 'baseId': 'Btg', 'quoteId': 'Nis'},
                'BSV/NIS': {'id': 'BchsvNis', 'symbol': 'BSV/NIS', 'base': 'BSV', 'quote': 'NIS', 'baseId': 'Bchsv', 'quoteId': 'Nis'},
                'GRIN/NIS': {'id': 'GrinNis', 'symbol': 'GRIN/NIS', 'base': 'GRIN', 'quote': 'NIS', 'baseId': 'Grin', 'quoteId': 'Nis'},
            },
            'fees': {
                'trading': {
                    'maker': 0.5 / 100,
                    'taker': 0.5 / 100,
                },
            },
            'options': {
                'fetchTradesMethod': 'public_get_exchanges_pair_trades',
            },
            'exceptions': {
                'exact': {
                    'Please provide valid APIkey': AuthenticationError,  # {"error" : "Please provide valid APIkey"}
                },
                'broad': {
                    # {"error": "Please provide valid nonce in Request Nonce(1598218490) is not bigger than last nonce(1598218490)."}
                    # {"error": "Please provide valid nonce in Request UInt64.TryParse failed for nonce :"}
                    'Please provide valid nonce': InvalidNonce,
                    'please approve new terms of use on site': PermissionDenied,  # {"error" : "please approve new terms of use on site."}
                },
            },
        })

    def fetch_balance(self, params={}):
        """Fetch account balances.

        The endpoint returns flat per-currency keys: ``AVAILABLE_<CUR>``
        (free), ``<CUR>`` (total) and ``LOCKED_<CUR>``, plus a per-market
        ``Fees`` structure which is passed through untouched in ``info``.
        """
        self.load_markets()
        balance = self.privateGetAccountBalanceV2(params)
        result = {'info': balance}
        codes = list(self.currencies.keys())
        for i in range(0, len(codes)):
            code = codes[i]
            account = self.account()
            currencyId = self.currency_id(code)
            uppercase = currencyId.upper()
            if uppercase in balance:
                account['free'] = self.safe_string(balance, 'AVAILABLE_' + uppercase)
                account['total'] = self.safe_string(balance, uppercase)
            result[code] = account
        return self.parse_balance(result, False)

    def fetch_order_book(self, symbol, limit=None, params={}):
        """Fetch the L2 order book for ``symbol`` (``limit`` is ignored by the API)."""
        self.load_markets()
        request = {
            'pair': self.market_id(symbol),
        }
        orderbook = self.publicGetExchangesPairOrderbook(self.extend(request, params))
        return self.parse_order_book(orderbook)

    def fetch_ticker(self, symbol, params={}):
        """Fetch the latest ticker for ``symbol``.

        NOTE(review): the exchange payload uses 'h' for the best bid and
        'l' for the best ask, 'll' for last price and 'av' for the average
        — preserved as-is from the original mapping.
        """
        self.load_markets()
        request = {
            'pair': self.market_id(symbol),
        }
        ticker = self.publicGetExchangesPairTicker(self.extend(request, params))
        timestamp = self.milliseconds()
        averagePrice = self.safe_number(ticker, 'av')
        baseVolume = self.safe_number(ticker, 'a')
        quoteVolume = None
        if baseVolume is not None and averagePrice is not None:
            quoteVolume = baseVolume * averagePrice
        last = self.safe_number(ticker, 'll')
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': None,
            'low': None,
            'bid': self.safe_number(ticker, 'h'),
            'bidVolume': None,
            'ask': self.safe_number(ticker, 'l'),
            'askVolume': None,
            'vwap': None,
            'open': None,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': None,
            'percentage': None,
            'average': averagePrice,
            'baseVolume': baseVolume,
            'quoteVolume': quoteVolume,
            'info': ticker,
        }

    def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch public trades; the endpoint is selectable via options['fetchTradesMethod']."""
        self.load_markets()
        market = self.market(symbol)
        method = self.options['fetchTradesMethod']
        request = {
            'pair': market['id'],
        }
        if since is not None:
            request['date'] = int(since)
        if limit is not None:
            request['limit'] = limit  # max 100000
        response = getattr(self, method)(self.extend(request, params))
        # the API returns a bare error string instead of JSON on failure
        if isinstance(response, basestring):
            raise ExchangeError(response)
        return self.parse_trades(response, market, since, limit)

    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place a limit or market order; market orders use dedicated endpoints."""
        self.load_markets()
        method = 'privatePostOrderAddOrder'
        request = {
            'Amount': amount,
            'Pair': self.market_id(symbol),
        }
        if type == 'market':
            method += 'MarketPrice' + self.capitalize(side)
        else:
            request['Price'] = price
            request['Total'] = amount * price
            request['IsBid'] = (side == 'buy')
        response = getattr(self, method)(self.extend(request, params))
        return {
            'info': response,
            'id': response['NewOrder']['id'],
        }

    def cancel_order(self, id, symbol=None, params={}):
        """Cancel an open order by exchange order id (``symbol`` is unused)."""
        request = {
            'id': id,
        }
        return self.privatePostOrderCancelOrder(self.extend(request, params))

    def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch the account's open orders for a required ``symbol``."""
        if symbol is None:
            raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires a symbol argument')
        self.load_markets()
        market = self.market(symbol)
        request = {
            'pair': market['id'],
        }
        response = self.privateGetOrderMyOrders(self.extend(request, params))
        # the response nests asks/bids under the market id
        orders = self.safe_value(response, market['id'], {})
        asks = self.safe_value(orders, 'ask', [])
        bids = self.safe_value(orders, 'bid', [])
        return self.parse_orders(self.array_concat(asks, bids), market, since, limit)

    def parse_order(self, order, market=None):
        """Normalize a raw open-order entry into the unified order structure."""
        timestamp = self.safe_integer(order, 'created')
        price = self.safe_number(order, 'price')
        amount = self.safe_number(order, 'amount')
        symbol = None
        if market is not None:
            symbol = market['symbol']
        # numeric side flag: 0 = buy, 1 = sell
        side = self.safe_value(order, 'type')
        if side == 0:
            side = 'buy'
        elif side == 1:
            side = 'sell'
        id = self.safe_string(order, 'id')
        status = self.safe_string(order, 'status')
        return self.safe_order({
            'id': id,
            'clientOrderId': None,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'status': status,
            'symbol': symbol,
            'type': None,
            'timeInForce': None,
            'postOnly': None,
            'side': side,
            'price': price,
            'stopPrice': None,
            'amount': amount,
            'filled': None,
            'remaining': None,
            'cost': None,
            'trades': None,
            'fee': None,
            'info': order,
            'average': None,
        })

    def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        """Fetch the authenticated account's trade history."""
        self.load_markets()
        market = None
        request = {}
        if limit is not None:
            request['take'] = limit  # was assigned twice in the original
        if since is not None:
            request['toTime'] = self.ymd(self.milliseconds(), '.')
            request['fromTime'] = self.ymd(since, '.')
        if symbol is not None:
            market = self.market(symbol)
            request['pair'] = market['id']
        response = self.privateGetOrderOrderHistory(self.extend(request, params))
        return self.parse_trades(response, market, since, limit)

    def parse_trade(self, trade, market=None):
        """Normalize a raw trade (private history or public feed) into the unified form.

        Private entries carry a ``reference`` of the form
        ``pair|orderId|tradeId``; public entries use ``tid``/``date``.
        """
        timestamp = None
        id = None
        priceString = None
        amountString = None
        orderId = None
        feeCost = None
        side = None
        reference = self.safe_string(trade, 'reference')
        if reference is not None:
            timestamp = self.safe_timestamp(trade, 'ticks')
            priceString = self.safe_string(trade, 'price')
            amountString = self.safe_string(trade, 'firstAmount')
            reference_parts = reference.split('|')  # 'pair|orderId|tradeId'
            if market is None:
                marketId = self.safe_string(trade, 'pair')
                # BUG FIX: originally tested membership in
                # ``self.markets_by_id[marketId]`` (indexing before the
                # check), which raised KeyError for unknown ids
                if marketId in self.markets_by_id:
                    market = self.markets_by_id[marketId]
                elif reference_parts[0] in self.markets_by_id:
                    market = self.markets_by_id[reference_parts[0]]
            orderId = reference_parts[1]
            id = reference_parts[2]
            # numeric action flag: 0 = buy, 1 = sell
            side = self.safe_integer(trade, 'action')
            if side == 0:
                side = 'buy'
            elif side == 1:
                side = 'sell'
            feeCost = self.safe_number(trade, 'feeAmount')
        else:
            timestamp = self.safe_timestamp(trade, 'date')
            id = self.safe_string(trade, 'tid')
            priceString = self.safe_string(trade, 'price')
            amountString = self.safe_string(trade, 'amount')
            side = self.safe_value(trade, 'isBid')
            if side is not None:
                if side:
                    side = 'buy'
                else:
                    side = 'sell'
        symbol = None
        if market is not None:
            symbol = market['symbol']
        price = self.parse_number(priceString)
        amount = self.parse_number(amountString)
        cost = self.parse_number(Precise.string_mul(priceString, amountString))
        return {
            'info': trade,
            'id': id,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'order': orderId,
            'type': None,
            'side': side,
            'takerOrMaker': None,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': {
                'cost': feeCost,
                'currency': 'NIS',  # all fees are charged in shekels
                'rate': None,
            },
        }

    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the signed request: HMAC-SHA512 over the urlencoded query, base64-encoded."""
        url = self.urls['api'] + '/' + self.implode_params(path, params)
        if api == 'public':
            url += '.json'
        else:
            self.check_required_credentials()
            nonce = self.nonce()
            query = self.extend({
                'nonce': nonce,
            }, params)
            auth = self.urlencode(query)
            if method == 'GET':
                if query:
                    url += '?' + auth
            else:
                body = auth
            signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha512, 'base64')
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded',
                'key': self.apiKey,
                'sign': signature,
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}

    def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Map the API's ``{"error": ...}`` payloads onto ccxt exception classes."""
        if response is None:
            return  # fallback to default error handler
        #
        #     {"error" : "please approve new terms of use on site."}
        #     {"error": "Please provide valid nonce in Request Nonce(1598218490) is not bigger than last nonce(1598218490)."}
        #
        error = self.safe_string(response, 'error')
        if error is not None:
            feedback = self.id + ' ' + body
            self.throw_exactly_matched_exception(self.exceptions['exact'], error, feedback)
            self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
            raise ExchangeError(feedback)  # unknown message
| 40.292343 | 138 | 0.488483 |
ge import Exchange
try:
basestring
except NameError:
basestring = str
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InvalidNonce
from ccxt.base.precise import Precise
class bit2c(Exchange):
def describe(self):
return self.deep_extend(super(bit2c, self).describe(), {
'id': 'bit2c',
'name': 'Bit2C',
'countries': ['IL'],
'rateLimit': 3000,
'has': {
'cancelOrder': True,
'CORS': False,
'createOrder': True,
'fetchBalance': True,
'fetchMyTrades': True,
'fetchOpenOrders': True,
'fetchOrderBook': True,
'fetchTicker': True,
'fetchTrades': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766119-3593220e-5ece-11e7-8b3a-5a041f6bcc3f.jpg',
'api': 'https://bit2c.co.il',
'www': 'https://www.bit2c.co.il',
'referral': 'https://bit2c.co.il/Aff/63bfed10-e359-420c-ab5a-ad368dab0baf',
'doc': [
'https://www.bit2c.co.il/home/api',
'https://github.com/OferE/bit2c',
],
},
'api': {
'public': {
'get': [
'Exchanges/{pair}/Ticker',
'Exchanges/{pair}/orderbook',
'Exchanges/{pair}/trades',
'Exchanges/{pair}/lasttrades',
],
},
'private': {
'post': [
'Merchant/CreateCheckout',
'Order/AddCoinFundsRequest',
'Order/AddFund',
'Order/AddOrder',
'Order/AddOrderMarketPriceBuy',
'Order/AddOrderMarketPriceSell',
'Order/CancelOrder',
'Order/AddCoinFundsRequest',
'Order/AddStopOrder',
'Payment/GetMyId',
'Payment/Send',
'Payment/Pay',
],
'get': [
'Account/Balance',
'Account/Balance/v2',
'Order/MyOrders',
'Order/GetById',
'Order/AccountHistory',
'Order/OrderHistory',
],
},
},
'markets': {
'BTC/NIS': {'id': 'BtcNis', 'symbol': 'BTC/NIS', 'base': 'BTC', 'quote': 'NIS', 'baseId': 'Btc', 'quoteId': 'Nis'},
'ETH/NIS': {'id': 'EthNis', 'symbol': 'ETH/NIS', 'base': 'ETH', 'quote': 'NIS', 'baseId': 'Eth', 'quoteId': 'Nis'},
'BCH/NIS': {'id': 'BchabcNis', 'symbol': 'BCH/NIS', 'base': 'BCH', 'quote': 'NIS', 'baseId': 'Bchabc', 'quoteId': 'Nis'},
'LTC/NIS': {'id': 'LtcNis', 'symbol': 'LTC/NIS', 'base': 'LTC', 'quote': 'NIS', 'baseId': 'Ltc', 'quoteId': 'Nis'},
'ETC/NIS': {'id': 'EtcNis', 'symbol': 'ETC/NIS', 'base': 'ETC', 'quote': 'NIS', 'baseId': 'Etc', 'quoteId': 'Nis'},
'BTG/NIS': {'id': 'BtgNis', 'symbol': 'BTG/NIS', 'base': 'BTG', 'quote': 'NIS', 'baseId': 'Btg', 'quoteId': 'Nis'},
'BSV/NIS': {'id': 'BchsvNis', 'symbol': 'BSV/NIS', 'base': 'BSV', 'quote': 'NIS', 'baseId': 'Bchsv', 'quoteId': 'Nis'},
'GRIN/NIS': {'id': 'GrinNis', 'symbol': 'GRIN/NIS', 'base': 'GRIN', 'quote': 'NIS', 'baseId': 'Grin', 'quoteId': 'Nis'},
},
'fees': {
'trading': {
'maker': 0.5 / 100,
'taker': 0.5 / 100,
},
},
'options': {
'fetchTradesMethod': 'public_get_exchanges_pair_trades',
},
'exceptions': {
'exact': {
'Please provide valid APIkey': AuthenticationError,
},
'broad': {
'Please provide valid nonce': InvalidNonce,
'please approve new terms of use on site': PermissionDenied,
},
},
})
def fetch_balance(self, params={}):
self.load_markets()
balance = self.privateGetAccountBalanceV2(params)
result = {'info': balance}
codes = list(self.currencies.keys())
for i in range(0, len(codes)):
code = codes[i]
account = self.account()
currencyId = self.currency_id(code)
uppercase = currencyId.upper()
if uppercase in balance:
account['free'] = self.safe_string(balance, 'AVAILABLE_' + uppercase)
account['total'] = self.safe_string(balance, uppercase)
result[code] = account
return self.parse_balance(result, False)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'pair': self.market_id(symbol),
}
orderbook = self.publicGetExchangesPairOrderbook(self.extend(request, params))
return self.parse_order_book(orderbook)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
request = {
'pair': self.market_id(symbol),
}
ticker = self.publicGetExchangesPairTicker(self.extend(request, params))
timestamp = self.milliseconds()
averagePrice = self.safe_number(ticker, 'av')
baseVolume = self.safe_number(ticker, 'a')
quoteVolume = None
if baseVolume is not None and averagePrice is not None:
quoteVolume = baseVolume * averagePrice
last = self.safe_number(ticker, 'll')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': self.safe_number(ticker, 'h'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'l'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': averagePrice,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
method = self.options['fetchTradesMethod']
request = {
'pair': market['id'],
}
if since is not None:
request['date'] = int(since)
if limit is not None:
request['limit'] = limit
response = getattr(self, method)(self.extend(request, params))
if isinstance(response, basestring):
raise ExchangeError(response)
return self.parse_trades(response, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
method = 'privatePostOrderAddOrder'
request = {
'Amount': amount,
'Pair': self.market_id(symbol),
}
if type == 'market':
method += 'MarketPrice' + self.capitalize(side)
else:
request['Price'] = price
request['Total'] = amount * price
request['IsBid'] = (side == 'buy')
response = getattr(self, method)(self.extend(request, params))
return {
'info': response,
'id': response['NewOrder']['id'],
}
def cancel_order(self, id, symbol=None, params={}):
request = {
'id': id,
}
return self.privatePostOrderCancelOrder(self.extend(request, params))
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = self.privateGetOrderMyOrders(self.extend(request, params))
orders = self.safe_value(response, market['id'], {})
asks = self.safe_value(orders, 'ask', [])
bids = self.safe_value(orders, 'bid', [])
return self.parse_orders(self.array_concat(asks, bids), market, since, limit)
def parse_order(self, order, market=None):
timestamp = self.safe_integer(order, 'created')
price = self.safe_number(order, 'price')
amount = self.safe_number(order, 'amount')
symbol = None
if market is not None:
symbol = market['symbol']
side = self.safe_value(order, 'type')
if side == 0:
side = 'buy'
elif side == 1:
side = 'sell'
id = self.safe_string(order, 'id')
status = self.safe_string(order, 'status')
return self.safe_order({
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': None,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'amount': amount,
'filled': None,
'remaining': None,
'cost': None,
'trades': None,
'fee': None,
'info': order,
'average': None,
})
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {}
if limit is not None:
request['take'] = limit
request['take'] = limit
if since is not None:
request['toTime'] = self.ymd(self.milliseconds(), '.')
request['fromTime'] = self.ymd(since, '.')
if symbol is not None:
market = self.market(symbol)
request['pair'] = market['id']
response = self.privateGetOrderOrderHistory(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
timestamp = None
id = None
priceString = None
amountString = None
orderId = None
feeCost = None
side = None
reference = self.safe_string(trade, 'reference')
if reference is not None:
timestamp = self.safe_timestamp(trade, 'ticks')
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'firstAmount')
reference_parts = reference.split('|')
if market is None:
marketId = self.safe_string(trade, 'pair')
if marketId in self.markets_by_id[marketId]:
market = self.markets_by_id[marketId]
elif reference_parts[0] in self.markets_by_id:
market = self.markets_by_id[reference_parts[0]]
orderId = reference_parts[1]
id = reference_parts[2]
side = self.safe_integer(trade, 'action')
if side == 0:
side = 'buy'
elif side == 1:
side = 'sell'
feeCost = self.safe_number(trade, 'feeAmount')
else:
timestamp = self.safe_timestamp(trade, 'date')
id = self.safe_string(trade, 'tid')
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
side = self.safe_value(trade, 'isBid')
if side is not None:
if side:
side = 'buy'
else:
side = 'sell'
symbol = None
if market is not None:
symbol = market['symbol']
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
return {
'info': trade,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': {
'cost': feeCost,
'currency': 'NIS',
'rate': None,
},
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.implode_params(path, params)
if api == 'public':
url += '.json'
else:
self.check_required_credentials()
nonce = self.nonce()
query = self.extend({
'nonce': nonce,
}, params)
auth = self.urlencode(query)
if method == 'GET':
if query:
url += '?' + auth
else:
body = auth
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha512, 'base64')
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'key': self.apiKey,
'sign': signature,
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
error = self.safe_string(response, 'error')
if error is not None:
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], error, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
raise ExchangeError(feedback)
| true | true |
f7349134fd6962aa9890bab9d791308f56e2fb36 | 1,135 | py | Python | views/users.py | dimzignas/webapi | bec3dbdfd3a26c34a4b4ef01020079918b8d8bec | [
"Apache-2.0"
] | null | null | null | views/users.py | dimzignas/webapi | bec3dbdfd3a26c34a4b4ef01020079918b8d8bec | [
"Apache-2.0"
] | 3 | 2021-03-31T19:29:45.000Z | 2021-12-13T20:31:07.000Z | views/users.py | dimzignas/webapi | bec3dbdfd3a26c34a4b4ef01020079918b8d8bec | [
"Apache-2.0"
] | null | null | null | from flask import Blueprint, request, session, render_template
from models.user import requires_login
# Blueprint grouping the user auth/profile views; registered by the main app.
user_blueprint = Blueprint('users', __name__)
@user_blueprint.route('/login')
def login_user():
    """Render the login page, flagging whether a user is already signed in."""
    # Truthiness of the stored e-mail doubles as the "logged in" flag
    # (replaces the redundant ``False if not ... else True`` ternary).
    is_logged_in = bool(session.get('email'))
    return render_template("users/login.html", is_logged_in=is_logged_in)
@user_blueprint.route('/register')
def register_user():
    """Render the registration page, flagging whether a user is signed in."""
    is_logged_in = bool(session.get('email'))
    return render_template("users/register.html", is_logged_in=is_logged_in)
@user_blueprint.route('/profile', methods=['GET', 'POST'])
@requires_login
def profile():
    """Render the profile page from a POSTed username/API key.

    NOTE(review): a plain GET renders the login template even though the
    user already passed ``requires_login`` — preserved as existing behavior;
    confirm whether GET should render the profile instead.
    """
    is_logged_in = bool(session.get('email'))
    if request.method == 'POST':
        uname = request.form['uname']
        api_key = request.form['key']
        return render_template("users/profile.html", uname=uname,
                               api_key=api_key, is_logged_in=is_logged_in)
    return render_template("users/login.html", is_logged_in=is_logged_in)
@user_blueprint.route('/logout')
@requires_login
def logout():
    """Clear the session e-mail and return to the (logged-out) home page."""
    # pop with a default avoids a KeyError if the session key disappears
    # between the ``requires_login`` check and this call
    session.pop('email', None)
    return render_template("home.html", is_logged_in=False)
| 31.527778 | 109 | 0.737445 | from flask import Blueprint, request, session, render_template
from models.user import requires_login
user_blueprint = Blueprint('users', __name__)
@user_blueprint.route('/login')
def login_user():
is_logged_in = False if not session.get('email') else True
return render_template("users/login.html", is_logged_in=is_logged_in)
@user_blueprint.route('/register')
def register_user():
is_logged_in = False if not session.get('email') else True
return render_template("users/register.html", is_logged_in=is_logged_in)
@user_blueprint.route('/profile', methods=['GET', 'POST'])
@requires_login
def profile():
is_logged_in = False if not session.get('email') else True
if request.method == 'POST':
uname = request.form['uname']
api_key = request.form['key']
return render_template("users/profile.html", uname=uname, api_key=api_key, is_logged_in=is_logged_in)
return render_template("users/login.html", is_logged_in=is_logged_in)
@user_blueprint.route('/logout')
@requires_login
def logout():
session.pop('email')
return render_template("home.html", is_logged_in=False)
| true | true |
f734914502141e33079acd5522ab88b4195c6a4e | 2,723 | py | Python | dynamicgem/test/test_dynRNN.py | Sujit-O/dyngem | a879bf362d1e9409faa4e1186c345337ad6d0189 | [
"MIT"
] | null | null | null | dynamicgem/test/test_dynRNN.py | Sujit-O/dyngem | a879bf362d1e9409faa4e1186c345337ad6d0189 | [
"MIT"
] | null | null | null | dynamicgem/test/test_dynRNN.py | Sujit-O/dyngem | a879bf362d1e9409faa4e1186c345337ad6d0189 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module is for testing dynRNN
"""
import os
import matplotlib.pyplot as plt
from dynamicgem.embedding.dynRNN import DynRNN
from dynamicgem.graph_generation import dynamic_SBM_graph as sbm
from dynamicgem.visualization import plot_dynamic_sbm_embedding
from time import time
def test_dynRNN():
    """Smoke-test DynRNN embeddings on a drifting stochastic block model.

    Builds a dynamic SBM graph series, trains a DynRNN embedding over a
    sliding window of snapshots, and plots the last few embeddings.
    """
    # Parameters for the stochastic block model graph.
    node_num = 100          # total number of nodes
    community_num = 2       # number of communities
    node_change_num = 2     # nodes migrating between communities per step
    length = 7              # number of dynamic snapshots

    # Output directories for results and intermediate model files.
    # makedirs(exist_ok=True) replaces the race-prone exists()+mkdir pair.
    outdir = './output'
    intr = './intermediate'
    os.makedirs(outdir, exist_ok=True)
    os.makedirs(intr, exist_ok=True)

    # Generate the dynamic graph series (community 1 diminishes over time).
    dynamic_sbm_series = list(sbm.get_community_diminish_series_v2(
        node_num,
        community_num,
        length,
        1,  # community ID to perturb
        node_change_num))
    graphs = [g[0] for g in dynamic_sbm_series]

    # Parameters for the dynamic embedding.
    dim_emb = 8    # dimension of the embedding
    lookback = 2   # number of previous snapshots fed to the RNN

    # dynRNN model
    embedding = DynRNN(d=dim_emb,
                       beta=5,
                       n_prev_graphs=lookback,
                       nu1=1e-6,
                       nu2=1e-6,
                       n_enc_units=[500, 300],
                       n_dec_units=[500, 300],
                       rho=0.3,
                       n_iter=2,
                       xeta=1e-3,
                       n_batch=100,
                       modelfile=['./intermediate/enc_model_dynRNN.json',
                                  './intermediate/dec_model_dynRNN.json'],
                       weightfile=['./intermediate/enc_weights_dynRNN.hdf5',
                                   './intermediate/dec_weights_dynRNN.hdf5'],
                       savefilesuffix="testing")

    # Train over an expanding window of snapshots and collect embeddings.
    embs = []
    t1 = time()
    for temp_var in range(lookback + 1, length + 1):
        emb, _ = embedding.learn_embeddings(graphs[:temp_var])
        embs.append(emb)
    print(embedding._method_name + ':\n\tTraining time: %f' % (time() - t1))

    # Visualize the most recent embeddings against the SBM series.
    plt.figure()
    plt.clf()
    plot_dynamic_sbm_embedding.plot_dynamic_sbm_embedding_v2(embs[-5:-1], dynamic_sbm_series[-5:])
    plt.show()
if __name__ == '__main__':
    # BUG FIX: the original called ``main()``, which is not defined anywhere
    # in this module and raised NameError when run as a script.
    test_dynRNN()
| 35.828947 | 98 | 0.539111 |
import os
import matplotlib.pyplot as plt
from dynamicgem.embedding.dynRNN import DynRNN
from dynamicgem.graph_generation import dynamic_SBM_graph as sbm
from dynamicgem.visualization import plot_dynamic_sbm_embedding
from time import time
def test_dynRNN():
node_num = 100
community_num = 2
node_change_num = 2
length = 7
outdir = './output'
intr = './intermediate'
if not os.path.exists(outdir):
os.mkdir(outdir)
if not os.path.exists(intr):
os.mkdir(intr)
testDataType = 'sbm_cd'
dynamic_sbm_series = list(sbm.get_community_diminish_series_v2(node_num,
community_num,
length,
1,
node_change_num))
graphs = [g[0] for g in dynamic_sbm_series]
dim_emb = 8
lookback = 2
embedding = DynRNN(d=dim_emb,
beta=5,
n_prev_graphs=lookback,
nu1=1e-6,
nu2=1e-6,
n_enc_units=[500, 300],
n_dec_units=[500, 300],
rho=0.3,
n_iter=2,
xeta=1e-3,
n_batch=100,
modelfile=['./intermediate/enc_model_dynRNN.json',
'./intermediate/dec_model_dynRNN.json'],
weightfile=['./intermediate/enc_weights_dynRNN.hdf5',
'./intermediate/dec_weights_dynRNN.hdf5'],
savefilesuffix="testing")
embs = []
t1 = time()
for temp_var in range(lookback + 1, length + 1):
emb, _ = embedding.learn_embeddings(graphs[:temp_var])
embs.append(emb)
print(embedding._method_name + ':\n\tTraining time: %f' % (time() - t1))
plt.figure()
plt.clf()
plot_dynamic_sbm_embedding.plot_dynamic_sbm_embedding_v2(embs[-5:-1], dynamic_sbm_series[-5:])
plt.show()
if __name__ == '__main__':
    # `main` is not defined anywhere in this module; run the test entry point.
    test_dynRNN()
| true | true |
f734952d147b46ad927cefd4be1d128cdb2b51af | 2,737 | py | Python | addons/website_event_track_quiz/controllers/event_track_quiz.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/website_event_track_quiz/controllers/event_track_quiz.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/website_event_track_quiz/controllers/event_track_quiz.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import http
from odoo.addons.website_event_track.controllers.event_track import EventTrackController
from odoo.http import request
class WebsiteEventTrackQuiz(EventTrackController):
    """HTTP controllers for in-page quizzes attached to event tracks."""
    # QUIZZES IN PAGE
    # ----------------------------------------------------------
    @http.route('/event_track/quiz/submit', type="json", auth="public", website=True)
    def event_track_quiz_submit(self, event_id, track_id, answer_ids):
        """Record a visitor's quiz submission for a track.

        Returns ``{'error': ...}`` when the quiz was already completed or the
        submission is incomplete; otherwise a payload with per-question
        correctness/comments, completion state and awarded points.
        """
        track = self._fetch_track(track_id)
        event_track_visitor = track._get_event_track_visitors(force_create=True)
        visitor_sudo = event_track_visitor.visitor_id
        # A quiz may only be submitted once per visitor.
        if event_track_visitor.quiz_completed:
            return {'error': 'track_quiz_done'}
        answers_details = self._get_quiz_answers_details(track, answer_ids)
        if answers_details.get('error'):
            return answers_details
        event_track_visitor.write({
            'quiz_completed': True,
            'quiz_points': answers_details['points'],
        })
        result = {
            'answers': {
                answer.question_id.id: {
                    'is_correct': answer.is_correct,
                    'comment': answer.comment
                } for answer in answers_details['user_answers']
            },
            'quiz_completed': event_track_visitor.quiz_completed,
            'quiz_points': answers_details['points']
        }
        # Hand the visitor token back when the client cookie is missing or stale.
        if visitor_sudo and request.httprequest.cookies.get('visitor_uuid', '') != visitor_sudo.access_token:
            result['visitor_uuid'] = visitor_sudo.access_token
        return result
    @http.route('/event_track/quiz/reset', type="json", auth="user", website=True)
    def quiz_reset(self, event_id, track_id):
        """Clear the current visitor's quiz completion state and points."""
        track = self._fetch_track(track_id)
        event_track_visitor = track._get_event_track_visitors(force_create=True)
        event_track_visitor.write({
            'quiz_completed': False,
            'quiz_points': 0,
        })
    def _get_quiz_answers_details(self, track, answer_ids):
        """Validate submitted answer ids against the track's quiz.

        Returns ``{'error': 'quiz_incomplete'}`` unless the answers cover every
        question exactly; otherwise the answer recordset plus the sum of points
        awarded for correct answers.
        """
        # TDE FIXME: lost sudo
        questions_count = request.env['event.quiz.question'].sudo().search_count([('quiz_id', '=', track.sudo().quiz_id.id)])
        user_answers = request.env['event.quiz.answer'].sudo().search([('id', 'in', answer_ids)])
        if len(user_answers.mapped('question_id')) != questions_count:
            return {'error': 'quiz_incomplete'}
        return {
            'user_answers': user_answers,
            'points': sum([
                answer.awarded_points
                for answer in user_answers.filtered(lambda answer: answer.is_correct)
            ])
        }
| 38.549296 | 125 | 0.624772 |
from odoo import http
from odoo.addons.website_event_track.controllers.event_track import EventTrackController
from odoo.http import request
class WebsiteEventTrackQuiz(EventTrackController):
@http.route('/event_track/quiz/submit', type="json", auth="public", website=True)
def event_track_quiz_submit(self, event_id, track_id, answer_ids):
track = self._fetch_track(track_id)
event_track_visitor = track._get_event_track_visitors(force_create=True)
visitor_sudo = event_track_visitor.visitor_id
if event_track_visitor.quiz_completed:
return {'error': 'track_quiz_done'}
answers_details = self._get_quiz_answers_details(track, answer_ids)
if answers_details.get('error'):
return answers_details
event_track_visitor.write({
'quiz_completed': True,
'quiz_points': answers_details['points'],
})
result = {
'answers': {
answer.question_id.id: {
'is_correct': answer.is_correct,
'comment': answer.comment
} for answer in answers_details['user_answers']
},
'quiz_completed': event_track_visitor.quiz_completed,
'quiz_points': answers_details['points']
}
if visitor_sudo and request.httprequest.cookies.get('visitor_uuid', '') != visitor_sudo.access_token:
result['visitor_uuid'] = visitor_sudo.access_token
return result
@http.route('/event_track/quiz/reset', type="json", auth="user", website=True)
def quiz_reset(self, event_id, track_id):
track = self._fetch_track(track_id)
event_track_visitor = track._get_event_track_visitors(force_create=True)
event_track_visitor.write({
'quiz_completed': False,
'quiz_points': 0,
})
def _get_quiz_answers_details(self, track, answer_ids):
questions_count = request.env['event.quiz.question'].sudo().search_count([('quiz_id', '=', track.sudo().quiz_id.id)])
user_answers = request.env['event.quiz.answer'].sudo().search([('id', 'in', answer_ids)])
if len(user_answers.mapped('question_id')) != questions_count:
return {'error': 'quiz_incomplete'}
return {
'user_answers': user_answers,
'points': sum([
answer.awarded_points
for answer in user_answers.filtered(lambda answer: answer.is_correct)
])
}
| true | true |
f73495a37d6406b3857940888f4e06efccc9a7ee | 1,576 | py | Python | pykbart/writer.py | pybrarian/pykbart | f63318d21171bd116bd2b65dd8b792721fca237f | [
"MIT"
] | 4 | 2018-03-01T16:04:11.000Z | 2020-12-18T15:02:27.000Z | pykbart/writer.py | pybrarian/pykbart | f63318d21171bd116bd2b65dd8b792721fca237f | [
"MIT"
] | null | null | null | pykbart/writer.py | pybrarian/pykbart | f63318d21171bd116bd2b65dd8b792721fca237f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Class and context manager for writing KbartRecord class to csv file."""
# coding: utf-8
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import contextlib
import six
import unicodecsv as csv
# TODO: make a better way to write the header when working from a reader object
class Writer(object):
    """Write a KbartRecord class to a csv file."""
    def __init__(self, file_handle, delimiter='\t'):
        """
        Set variables and open the csv writer using utf-8 encoding per
        KBART spec.
        """
        self.file_handle = file_handle
        self.delimiter = delimiter
        self.writer = csv.writer(file_handle,
                                 delimiter=self.delimiter,
                                 encoding='utf-8')
    def writerow(self, kbart_record):
        """Write csv row from a KbartRecord record."""
        self.writer.writerow(list(kbart_record.values()))
    def writeheader(self, kbart_record):
        """Write the KBART header row from a record's field names."""
        self.writer.writerow(kbart_record.fields)
@contextlib.contextmanager
def KbartWriter(file_path, delimiter='\t'):
    """
    Yield a :class:`Writer` bound to *file_path*, closing the file on exit.

    Camel-cased for symmetry with PyMARC's API.

    Args:
        file_path: Destination path for the KBART file.
        delimiter: Field separator; the KBART spec calls for tabs, but it is
            left configurable for now.
    """
    with open(file_path, 'wb') as output_file:
        yield Writer(output_file, delimiter=delimiter)
| 29.735849 | 80 | 0.641497 |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import contextlib
import six
import unicodecsv as csv
class Writer(object):
def __init__(self, file_handle, delimiter='\t'):
self.file_handle = file_handle
self.delimiter = delimiter
self.writer = csv.writer(file_handle,
delimiter=self.delimiter,
encoding='utf-8')
def writerow(self, kbart_record):
self.writer.writerow(list(kbart_record.values()))
def writeheader(self, kbart_record):
self.writer.writerow(kbart_record.fields)
@contextlib.contextmanager
def KbartWriter(file_path, delimiter='\t'):
f = open(file_path, 'wb')
try:
yield Writer(f, delimiter=delimiter)
finally:
f.close()
| true | true |
f73495b9d6fabeed2313bbb04c6dae3670bd398d | 2,169 | py | Python | fft_prototype.py | kobeeraveendran/hackfsu5 | 5614d832423f56913bd35d96e2472068a106b376 | [
"MIT"
] | null | null | null | fft_prototype.py | kobeeraveendran/hackfsu5 | 5614d832423f56913bd35d96e2472068a106b376 | [
"MIT"
] | null | null | null | fft_prototype.py | kobeeraveendran/hackfsu5 | 5614d832423f56913bd35d96e2472068a106b376 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from scipy.io import wavfile # get the api
from scipy.fftpack import fft
from pylab import *
def f(filename):
    """Return the full FFT of the first audio channel of a WAV file.

    :param filename: Path to a WAV file. Assumes multi-channel (2-D) sample
        data so that ``data.T[0]`` selects channel 0 — a mono file would make
        it select the first sample instead; TODO confirm inputs are stereo.
    :return: Array of complex FFT coefficients for channel 0.
    """
    fs, data = wavfile.read(filename)
    channel = data.T[0]  # first channel only
    # Dead commented-out normalization/plotting code and the unused half-length
    # variable were removed; the return value is unchanged.
    return fft(channel)
guitar = f("auldlangguitar.wav")
violin = f("auldlangviolin.wav")
harmon = f("auldlangharmonica.wav")
combine= f("combined.wav")
cut = combine[:-14]
combined2 = guitar + violin
plt.plot(np.abs(guitar), 'r')
#plt.show()
savefig('guitarplot.png',bbox_inches='tight')
gc = np.dot(guitar, combined2)
vc = np.dot(violin, combined2)
hc = np.dot(harmon, combined2)
ng = guitar #/ np.linalg.norm(guitar)
nv = violin #/ np.linalg.norm(violin)
nh = harmon #/ np.linalg.norm(harmon)
nc = combined2 #/ np.linalg.norm(cut)
a = np.column_stack((ng, nv, nh))
x, res, rank, s = np.linalg.lstsq(a, nc)
plt.plot(np.abs(ng * x[0]), 'r')
#plt.show()
savefig('decompguitarplot.png',bbox_inches='tight')
decompGuitar = np.fft.ifft(ng * 1 + nv *1)
print("X\n")
print(x)
print("decomp real")
print(np.real(decompGuitar))
test = np.fft.ifft(guitar)
decompreal = (decompGuitar)
decompreal = decompreal #/ np.min(np.abs(decompreal[np.nonzero(decompreal)]))
origfs, origdata = wavfile.read("auldlangguitar.wav")
b = np.column_stack((decompGuitar.astype(origdata.dtype), decompGuitar.astype(origdata.dtype)))
wavfile.write("decompguitar.wav", origfs, b)
np.savetxt("guitar.csv", test.astype(uint8) , delimiter= ",")
np.savetxt("combined.csv", combine, delimiter= ",")
np.savetxt("channel2.csv", decompreal.astype(uint8), delimiter= ",")
print("decomp orig")
print(np.min(decompreal[np.nonzero(decompreal)]))
| 28.92 | 96 | 0.656985 | import matplotlib.pyplot as plt
from scipy.io import wavfile
from scipy.fftpack import fft
from pylab import *
def f(filename):
fs, data = wavfile.read(filename)
a = data.T[0]
c = fft(a)
d = len(c)//2
return c
guitar = f("auldlangguitar.wav")
violin = f("auldlangviolin.wav")
harmon = f("auldlangharmonica.wav")
combine= f("combined.wav")
cut = combine[:-14]
combined2 = guitar + violin
plt.plot(np.abs(guitar), 'r')
savefig('guitarplot.png',bbox_inches='tight')
gc = np.dot(guitar, combined2)
vc = np.dot(violin, combined2)
hc = np.dot(harmon, combined2)
ng = guitar
nv = violin
nh = harmon
nc = combined2
a = np.column_stack((ng, nv, nh))
x, res, rank, s = np.linalg.lstsq(a, nc)
plt.plot(np.abs(ng * x[0]), 'r')
savefig('decompguitarplot.png',bbox_inches='tight')
decompGuitar = np.fft.ifft(ng * 1 + nv *1)
print("X\n")
print(x)
print("decomp real")
print(np.real(decompGuitar))
test = np.fft.ifft(guitar)
decompreal = (decompGuitar)
decompreal = decompreal
origfs, origdata = wavfile.read("auldlangguitar.wav")
b = np.column_stack((decompGuitar.astype(origdata.dtype), decompGuitar.astype(origdata.dtype)))
wavfile.write("decompguitar.wav", origfs, b)
np.savetxt("guitar.csv", test.astype(uint8) , delimiter= ",")
np.savetxt("combined.csv", combine, delimiter= ",")
np.savetxt("channel2.csv", decompreal.astype(uint8), delimiter= ",")
print("decomp orig")
print(np.min(decompreal[np.nonzero(decompreal)]))
| true | true |
f734970f81bbc8b2ca03df98f26a41b1af58a139 | 1,311 | py | Python | prompty.py | tnhung2011/WinPrompty | 8894588a15c5381f18e35420d6030ea6758b2a83 | [
"MIT"
] | null | null | null | prompty.py | tnhung2011/WinPrompty | 8894588a15c5381f18e35420d6030ea6758b2a83 | [
"MIT"
] | null | null | null | prompty.py | tnhung2011/WinPrompty | 8894588a15c5381f18e35420d6030ea6758b2a83 | [
"MIT"
] | null | null | null | import platform
from sys import exit
from os import system
from sys.stdin import isatty
from os.path import dirname, realpath
from colorama import init, Fore, Back
from tkinter.messagebox import showerror
if platform.system() is 'Windows':
__vista = platform.release == 'Vista'
__supported = platform.release >= '6'
if __vista or __supported:
init()
if not isatty:
system("title WinPrompty")
system("cmd /Q /D /C @echo off^&set ResetFore={Fore.RESET}^&set RedFore={Fore.RED}^&set GreenFore={Fore.GREEN}^&set YellowFore={Fore.YELLOW}^&set BlueFore={Fore.BLUE}^&set MagentaFore={Fore.MAGENTA}^&set CyanFore={Fore.CYAN}^&set WhiteFore={Fore.WHITE}^&^&set ResetBack={Back.RESET}^&set RedBack={Back.RED}^&set GreenBack={Back.GREEN}^&set YellowBack={Back.YELLOW}^&set BlueBack={Back.BLUE}^&set MagentaBack={Back.MAGENTA}^&set CyanBack={Back.CYAN}^&set WhiteBack={Back.WHITE}^&doskey clear=cls^&doskey true=prompt %GreenFore%$$:$F$S%ResetFore%^&doskey false=prompt %RedFore%$$:$C$S%ResetFore%^&doskey true/false=prompt $$:S$S^&doskey false/true=prompt $$:S$S^&prompt $$:$F$S^&@echo on")
else:
exit(showerror("WinPrompty", "This version of Windows is not supported."))
else:
exit(showerror("WinPrompty", "This operating system is not supported."))
| 57 | 695 | 0.704805 | import platform
from sys import exit
from os import system
from sys.stdin import isatty
from os.path import dirname, realpath
from colorama import init, Fore, Back
from tkinter.messagebox import showerror
if platform.system() is 'Windows':
__vista = platform.release == 'Vista'
__supported = platform.release >= '6'
if __vista or __supported:
init()
if not isatty:
system("title WinPrompty")
system("cmd /Q /D /C @echo off^&set ResetFore={Fore.RESET}^&set RedFore={Fore.RED}^&set GreenFore={Fore.GREEN}^&set YellowFore={Fore.YELLOW}^&set BlueFore={Fore.BLUE}^&set MagentaFore={Fore.MAGENTA}^&set CyanFore={Fore.CYAN}^&set WhiteFore={Fore.WHITE}^&^&set ResetBack={Back.RESET}^&set RedBack={Back.RED}^&set GreenBack={Back.GREEN}^&set YellowBack={Back.YELLOW}^&set BlueBack={Back.BLUE}^&set MagentaBack={Back.MAGENTA}^&set CyanBack={Back.CYAN}^&set WhiteBack={Back.WHITE}^&doskey clear=cls^&doskey true=prompt %GreenFore%$$:$F$S%ResetFore%^&doskey false=prompt %RedFore%$$:$C$S%ResetFore%^&doskey true/false=prompt $$:S$S^&doskey false/true=prompt $$:S$S^&prompt $$:$F$S^&@echo on")
else:
exit(showerror("WinPrompty", "This version of Windows is not supported."))
else:
exit(showerror("WinPrompty", "This operating system is not supported."))
| true | true |
f734988757cfc1649b03e5c17fecc7fc4eb23ada | 1,631 | py | Python | setup.py | team-mayes/common-python | e9173514bc19d1759ea3d85c632e0b07c8b1fff4 | [
"MIT"
] | null | null | null | setup.py | team-mayes/common-python | e9173514bc19d1759ea3d85c632e0b07c8b1fff4 | [
"MIT"
] | null | null | null | setup.py | team-mayes/common-python | e9173514bc19d1759ea3d85c632e0b07c8b1fff4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'scipy', 'matplotlib', 'numpy', 'six',
]
setup_requirements = [
# TODO(hmayes): put setup requirements (distutils extensions, etc.) here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='tmlab_common',
version='0.1.0',
description="Common code used by Team Mayes and Blue Projects",
long_description=readme + '\n\n' + history,
author="Heather Mayes",
author_email='hbmayes@umich.edu',
url='https://github.com/hmayes/tmlab_common',
packages=find_packages(include=['tmlab_common']),
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='tmlab_common',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
)
| 28.614035 | 76 | 0.646842 |
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'scipy', 'matplotlib', 'numpy', 'six',
]
setup_requirements = [
]
test_requirements = [
]
setup(
name='tmlab_common',
version='0.1.0',
description="Common code used by Team Mayes and Blue Projects",
long_description=readme + '\n\n' + history,
author="Heather Mayes",
author_email='hbmayes@umich.edu',
url='https://github.com/hmayes/tmlab_common',
packages=find_packages(include=['tmlab_common']),
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='tmlab_common',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
)
| true | true |
f73498f87da532f12917e9b45a0ec0d62a7c9eb8 | 2,822 | py | Python | src/googlexfr.py | rathbird/Birds_of_Prey_CNN_Classifier | 13ceb78db2408709804263395175482cff6c6973 | [
"MIT"
] | null | null | null | src/googlexfr.py | rathbird/Birds_of_Prey_CNN_Classifier | 13ceb78db2408709804263395175482cff6c6973 | [
"MIT"
] | null | null | null | src/googlexfr.py | rathbird/Birds_of_Prey_CNN_Classifier | 13ceb78db2408709804263395175482cff6c6973 | [
"MIT"
] | null | null | null | # import the necessary packages
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import concatenate
from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import classification_report, confusion_matrix
from minigooglenet import minigooglenet_functional
#set param values
#classes (eagles, vultures)
n_categories = 2
dir_train = '.'
train_size = 2183
test_size = 501
batch_size = 16
EPOCHS = 6
#train data - 2 classes, 1000 per class
datagen_train = ImageDataGenerator(preprocessing_function=preprocess_input,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
#test data, no transformation
datagen_validation = ImageDataGenerator(preprocessing_function=preprocess_input)
#load images while model is running
train_generator = datagen_train.flow_from_directory(
directory='./data/train/',
target_size=(100,100),
color_mode='rgb',
batch_size=32,
class_mode='categorical',
shuffle=True,
seed=42)
valid_generator = datagen_validation.flow_from_directory(
directory="./data/test/",
target_size=(100, 100),
color_mode="rgb",
batch_size=1,
class_mode="categorical",
shuffle=False,
seed=42)
#create model
google = minigooglenet_functional(100, 100, 3, n_categories)
#compile model with very slow learning rate
google.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])
history = google.fit(train_generator, steps_per_epoch=train_size//batch_size, epochs=1, validation_data=valid_generator, validation_steps= test_size//batch_size)
#save model
google.save('models/googlexfr')
#analyze results
#Confution Matrix and Classification Report
Y_pred = google.predict(valid_generator, test_size // batch_size+1)
y_pred = np.argmax(Y_pred, axis=1)
print('Confusion Matrix')
print(confusion_matrix(valid_generator.classes, y_pred))
print('Classification Report')
target_names = ['eagle', 'vulture']
print(classification_report(valid_generator.classes, y_pred, target_names=target_names))
| 32.068182 | 161 | 0.791637 |
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import concatenate
from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import classification_report, confusion_matrix
from minigooglenet import minigooglenet_functional
n_categories = 2
dir_train = '.'
train_size = 2183
test_size = 501
batch_size = 16
EPOCHS = 6
datagen_train = ImageDataGenerator(preprocessing_function=preprocess_input,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
datagen_validation = ImageDataGenerator(preprocessing_function=preprocess_input)
train_generator = datagen_train.flow_from_directory(
directory='./data/train/',
target_size=(100,100),
color_mode='rgb',
batch_size=32,
class_mode='categorical',
shuffle=True,
seed=42)
valid_generator = datagen_validation.flow_from_directory(
directory="./data/test/",
target_size=(100, 100),
color_mode="rgb",
batch_size=1,
class_mode="categorical",
shuffle=False,
seed=42)
google = minigooglenet_functional(100, 100, 3, n_categories)
google.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])
history = google.fit(train_generator, steps_per_epoch=train_size//batch_size, epochs=1, validation_data=valid_generator, validation_steps= test_size//batch_size)
google.save('models/googlexfr')
Y_pred = google.predict(valid_generator, test_size // batch_size+1)
y_pred = np.argmax(Y_pred, axis=1)
print('Confusion Matrix')
print(confusion_matrix(valid_generator.classes, y_pred))
print('Classification Report')
target_names = ['eagle', 'vulture']
print(classification_report(valid_generator.classes, y_pred, target_names=target_names))
| true | true |
f734999283b7c5d515772f9dd319e7240cd387ae | 22,417 | py | Python | noisemaker/cli.py | BumpierZulu9930/py-noisemaker | d67f03c9f42642dd7eb5b9fc0f79572dc0d3b1a9 | [
"Apache-2.0"
] | null | null | null | noisemaker/cli.py | BumpierZulu9930/py-noisemaker | d67f03c9f42642dd7eb5b9fc0f79572dc0d3b1a9 | [
"Apache-2.0"
] | null | null | null | noisemaker/cli.py | BumpierZulu9930/py-noisemaker | d67f03c9f42642dd7eb5b9fc0f79572dc0d3b1a9 | [
"Apache-2.0"
] | null | null | null | """Common CLI boilerplate for Noisemaker"""
import click
from noisemaker.constants import DistanceFunction, InterpolationType, PointDistribution, ValueDistribution, ValueMask, VoronoiDiagramType, WormBehavior
import noisemaker.masks as masks
# os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
CLICK_CONTEXT_SETTINGS = {"help_option_names": ["-h", "--help"], "max_content_width": 160}
# Boilerplate help strings
ALPHA_BLENDING_HINT = "alpha blending amount (0.0 = 0%, 1.0 = 100%)"
DISTANCE_HINT = "(1=Euclidean, 2=Manhattan, 3=Chebyshev)"
ENTIRE_IMAGE_HINT = "(1.0 = height/width of entire image)"
FREQ_HINT = "(must be >= 2)"
INTERPOLATION_HINT = "(0=constant, 1=linear, 2=cosine, 3=bicubic)"
NEAREST_NEIGHBOR_HINT = "(1.0 = as far as nearest neighbor)"
Y_FROM_OFFSET_HINT = "Use offset X values for Y (instead of sin/cos)"
def validate_more_than_one(allow_none=False):
    """Build a click callback requiring an option value greater than 1.

    The returned docstring was previously empty.

    :param bool allow_none: Accept ``None`` (option omitted) as valid.
    :return: A callback usable as click's ``callback=`` parameter.
    """
    def validate(ctx, param, value):
        """Return value if valid, else raise click.BadParameter."""
        is_valid = False
        if value is None:
            is_valid = allow_none
        elif value > 1:
            is_valid = True
        if not is_valid:
            raise click.BadParameter("invalid choice: {0}. (choose a value greater than 1)".format(value))
        return value
    return validate
def validate_enum(cls):
    """Build a click callback requiring a member value of enum *cls*.

    The returned docstring was previously empty.

    :param cls: An enum class whose member ``.value``s are the legal inputs;
        ``None`` input values always pass (option omitted).
    :return: A callback usable as click's ``callback=`` parameter.
    """
    def validate(ctx, param, value):
        """Return value if it is None or a member value of cls, else raise."""
        if value is not None and value not in [m.value for m in cls]:
            raise click.BadParameter("invalid choice: {0}. (choose from {1})".format(value, ", ".join(["{0} ({1})".format(m.value, m.name) for m in cls])))
        return value
    return validate
def bool_option(attr, **attrs):
    """Declare a boolean flag option (default False unless overridden)."""
    attrs.setdefault("is_flag", True)
    # `setdefault("default", attrs.get("default", False))` was redundant:
    # setdefault already leaves a caller-supplied default untouched.
    attrs.setdefault("default", False)
    return option(attr, **attrs)
def float_option(attr, **attrs):
    """Declare a float-typed option (default 0.0 unless overridden)."""
    attrs.setdefault("type", float)
    attrs.setdefault("default", 0.0)
    return option(attr, **attrs)
def int_option(attr, **attrs):
    """Declare an int-typed option (default 0 unless overridden)."""
    attrs.setdefault("type", int)
    attrs.setdefault("default", 0)
    return option(attr, **attrs)
def str_option(attr, **attrs):
    """Declare a string-typed option (default None unless overridden)."""
    attrs.setdefault("type", str)
    attrs.setdefault("default", None)
    return option(attr, **attrs)
def multi_str_option(attr, **attrs):
    """Declare a repeatable string option (click ``multiple=True``)."""
    return str_option(attr, multiple=True, **attrs)
def option(*param_decls, **attrs):
    """ Add a Click option. """
    def decorator(f):
        # Append a human-readable range to the help text for IntRange options.
        if isinstance(attrs.get("type"), click.IntRange):
            r = attrs["type"]
            # NOTE(review): assumes "help" is already set — raises KeyError
            # otherwise; confirm every IntRange caller supplies help text.
            attrs["help"] += " [range: {0}-{1}]".format(r.min, r.max)
        # Surface non-trivial defaults in the help text (skips None/False/0).
        if attrs.get("default") not in (None, False, 0):
            attrs["help"] += " [default: {0}]".format(attrs["default"])
        return click.option(*param_decls, **attrs)(f)
    return decorator
def freq_option(**attrs):
    """Declare ``--freq``: minimum noise frequency (validated > 1)."""
    attrs.setdefault("help", "Minimum noise frequency {0}".format(FREQ_HINT))
    return int_option("--freq", default=3, callback=validate_more_than_one(), **attrs)
def width_option(**attrs):
    """Declare ``--width``: output width in pixels."""
    attrs.setdefault("help", "Output width, in pixels")
    return int_option("--width", default=1024, **attrs)
def height_option(**attrs):
    """Declare ``--height``: output height in pixels."""
    attrs.setdefault("help", "Output height, in pixels")
    return int_option("--height", default=1024, **attrs)
def channels_option(**attrs):
    """Declare ``--channels``: color channel count, constrained to 1-4."""
    attrs.setdefault("help", "Color channel count (1=gray, 2=gray+alpha, 3=HSV/RGB, 4=RGB+alpha)")
    return int_option("--channels", type=click.IntRange(1, 4), default=3, **attrs)
def time_option(**attrs):
    """Declare ``--time``: Z-axis time value (simplex noise only)."""
    attrs.setdefault("help", "Time value for Z axis (simplex only)")
    return float_option("--time", default=0.0, **attrs)
def octaves_option(**attrs):
    """Declare ``--octaves``: number of multi-res layers, constrained to 1-10."""
    attrs.setdefault("help", "Octave count: Number of multi-res layers")
    return int_option("--octaves", type=click.IntRange(1, 10), default=1, **attrs)
def ridges_option(**attrs):
    """Declare the ``--ridges`` flag: per-octave crease at midpoint values."""
    attrs.setdefault("help", "Per-octave \"crease\" at midpoint values: abs(noise * 2 - 1)")
    return bool_option("--ridges", **attrs)
def post_ridges_option(**attrs):
    """Declare the ``--post-ridges`` flag: post-reduce crease at midpoints."""
    attrs.setdefault("help", "Post-reduce \"crease\" at midpoint values: abs(noise * 2 - 1)")
    return bool_option("--post-ridges", **attrs)
def distrib_option(**attrs):
    """Declare ``--distrib``: value distribution, one of ValueDistribution names."""
    attrs.setdefault("help", "Value distribution")
    return str_option("--distrib", type=click.Choice([m.name for m in ValueDistribution]), default="normal", **attrs)
def corners_option(**attrs):
    """Declare the ``--corners`` flag: pin pixels to corners, not image center."""
    attrs.setdefault("help", "Value distribution: Pin pixels to corners, instead of image center.")
    return bool_option("--corners", **attrs)
def mask_option(**attrs):
    """Declare ``--mask``: hot pixel mask, one of ValueMask names."""
    attrs.setdefault("help", "Value distribution: Hot pixel mask")
    return str_option("--mask", type=click.Choice([m.name for m in ValueMask]), **attrs)
def mask_inverse_option(**attrs):
    """Declare the ``--mask-inverse`` flag: invert hot pixels."""
    attrs.setdefault("help", "Mask: Invert hot pixels")
    return bool_option("--mask-inverse", **attrs)
def glyph_map_option(**attrs):
    """Declare ``--glyph-map``: brightness atlas mask for glyph mapping.

    Choices are sorted so generated ``--help`` output is deterministic;
    iterating a ``set`` produced an arbitrary ordering.
    """
    attrs.setdefault("help", "Mask: Glyph map brightness atlas mask")
    choices = sorted(m.name for m in masks.square_masks())
    return str_option("--glyph-map", type=click.Choice(choices), **attrs)
def glyph_map_colorize_option(**attrs):
    """Declare the ``--glyph-map-colorize`` flag: colorize exploded pixels."""
    attrs.setdefault("help", "Glyph map: Colorize exploded pixels")
    return bool_option("--glyph-map-colorize", **attrs)
def glyph_map_zoom_option(**attrs):
    """Declare ``--glyph-map-zoom``: exploded pixel zoom factor."""
    attrs.setdefault("help", "Glyph map: Exploded pixel zoom factor")
    return float_option("--glyph-map-zoom", default=4.0, **attrs)
def glyph_map_alpha_option(**attrs):
    """Declare ``--glyph-map-alpha``: glyph map output alpha blending amount."""
    attrs.setdefault("help", "Glyph map: Output {0}".format(ALPHA_BLENDING_HINT))
    return float_option("--glyph-map-alpha", default=1.0, **attrs)
def composite_option(**attrs):
    """Declare ``--composite``: composite video effect mask (RGB masks only)."""
    attrs.setdefault("help", "Mask: Composite video effect mask")
    return str_option("--composite", type=click.Choice([m.name for m in ValueMask.rgb_members()]), **attrs)
def composite_zoom_option(**attrs):
    """Declare ``--composite-zoom``: exploded pixel zoom for composite effect."""
    attrs.setdefault("help", "Composite video effect: Exploded pixel zoom factor")
    return float_option("--composite-zoom", default=2.0, **attrs)
def interp_option(**attrs):
    """Declare ``--interp``: interpolation type, validated against InterpolationType."""
    attrs.setdefault("help", "Interpolation type {0}".format(INTERPOLATION_HINT))
    return int_option("--interp", callback=validate_enum(InterpolationType), default=3, **attrs)
def sin_option(**attrs):
    """Declare ``--sin``: amount of sin applied to the noise basis."""
    attrs.setdefault("help", "Apply sin function to noise basis")
    return float_option("--sin", **attrs)
def wavelet_option(**attrs):
    """Declare the ``--wavelet`` flag."""
    attrs.setdefault("help", "Wavelets: What are they even?")
    return bool_option("--wavelet", **attrs)
def lattice_drift_option(**attrs):
    """Declare ``--lattice-drift``: lattice deform range for domain warping."""
    attrs.setdefault("help", "Domain warping: Lattice deform range {0}".format(NEAREST_NEIGHBOR_HINT))
    return float_option("--lattice-drift", **attrs)
def vortex_option(**attrs):
    """Declare ``--vortex``: vortex tiling amount."""
    attrs.setdefault("help", "Vortex tiling amount")
    return float_option("--vortex", **attrs)
def warp_option(**attrs):
    """Declare ``--warp``: octave warp orthogonal displacement range."""
    attrs.setdefault("help", "Octave Warp: Orthogonal displacement range {0}".format(ENTIRE_IMAGE_HINT))
    return float_option("--warp", **attrs)
def warp_octaves_option(**attrs):
    """Declare ``--warp-octaves``: octave count for --warp, constrained to 1-10."""
    attrs.setdefault("help", "Octave Warp: Octave count for --warp")
    return int_option("--warp-octaves", type=click.IntRange(1, 10), default=3, **attrs)
def warp_interp_option(**attrs):
    """Declare ``--warp-interp``: warp interpolation type, validated; default None."""
    attrs.setdefault("help", "Octave Warp: Interpolation type {0}".format(INTERPOLATION_HINT))
    return int_option("--warp-interp", default=None, callback=validate_enum(InterpolationType), **attrs)
def warp_freq_option(**attrs):
    """Declare ``--warp-freq``: --freq override for warp (validated > 1 or None)."""
    attrs.setdefault("help", "Octave Warp: Override --freq for warp frequency {0}".format(FREQ_HINT))
    return int_option("--warp-freq", callback=validate_more_than_one(allow_none=True), default=None, **attrs)
def warp_map_option(**attrs):
    """Declare ``--warp-map``: path to an existing image file of brightness values."""
    attrs.setdefault("help", "Octave Warp: Filename of image with brightness values")
    return str_option("--warp-map", type=click.Path(exists=True, dir_okay=False, resolve_path=True), **attrs)
def post_reindex_option(**attrs):
    """Declare ``--post-reindex``: post-reduce color re-indexing range."""
    attrs.setdefault("help", "Post-reduce color re-indexing range {0}".format(ENTIRE_IMAGE_HINT))
    return float_option("--post-reindex", **attrs)
def post_reflect_option(**attrs):
attrs.setdefault("help", "Domain warping: Post-reduce derivative-based displacement range {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--post-reflect", **attrs)
def post_refract_option(**attrs):
attrs.setdefault("help", "Domain warping: Post-reduce self-displacement range {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--post-refract", **attrs)
def post_refract_y_from_offset_option(**attrs):
attrs.setdefault("help", "Domain warping: Post-reduce refract: {0}".format(Y_FROM_OFFSET_HINT))
return bool_option("--post-refract-y-from-offset/--no-post-refract-y-from-offset", default=True, **attrs)
def reflect_option(**attrs):
attrs.setdefault("help", "Domain warping: Per-octave derivative-based displacement range {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--reflect", **attrs)
def refract_option(**attrs):
attrs.setdefault("help", "Domain warping: Per-octave self-displacement range {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--refract", **attrs)
def refract_y_from_offset_option(**attrs):
attrs.setdefault("help", "Domain warping: Per-octave refract: {0}".format(Y_FROM_OFFSET_HINT))
return bool_option("--refract-y-from-offset/--no-refract-y-from-offset", **attrs)
def ripple_option(**attrs):
attrs.setdefault("help", "Ripple effect: Displacement range {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--ripple", default=None, **attrs)
def ripple_freq_option(**attrs):
attrs.setdefault("help", "Ripple effect: Override --freq for ripple frequency {0}".format(FREQ_HINT))
return int_option("--ripple-freq", default=3, **attrs)
def ripple_kink_option(**attrs):
attrs.setdefault("help", "Ripple effect: Ripple amplitude")
return float_option("--ripple-kink", default=1.0, **attrs)
def reindex_option(**attrs):
attrs.setdefault("help", "Color re-indexing range {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--reindex", **attrs)
def reverb_option(**attrs):
attrs.setdefault("help", "Post-reduce tiled octave count")
return int_option("--reverb", type=click.IntRange(1, 10), default=None, **attrs)
def reverb_iterations_option(**attrs):
attrs.setdefault("help", "Reverb: Re-reverberate N times")
return int_option("--reverb-iterations", type=click.IntRange(1, 4), default=1, **attrs)
def clut_option(**attrs):
attrs.setdefault("help", "Color lookup table (path to PNG or JPEG image)")
return str_option("--clut", type=click.Path(exists=True, dir_okay=False, resolve_path=True), **attrs)
def clut_range_option(**attrs):
attrs.setdefault("help", "CLUT: Maximum pixel gather distance {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--clut-range", default=0.5, **attrs)
def clut_horizontal_option(**attrs):
attrs.setdefault("help", "CLUT: Preserve vertical axis")
return bool_option("--clut-horizontal", **attrs)
def worms_option(**attrs):
attrs.setdefault("help", "Iterative \"worm\" field flow (1=Obedient, 2=Crosshatch, 3=Unruly, 4=Chaotic)")
return int_option("--worms", callback=validate_enum(WormBehavior), **attrs)
def worms_density_option(**attrs):
attrs.setdefault("help", "Worms: Density multiplier (larger is more costly)")
return float_option("--worms-density", default=4.0, **attrs)
def worms_duration_option(**attrs):
attrs.setdefault("help", "Worms: Iteration multiplier (larger is more costly)")
return float_option("--worms-duration", default=4.0, **attrs)
def worms_stride_option(**attrs):
attrs.setdefault("help", "Worms: Mean pixel displacement per iteration")
return float_option("--worms-stride", default=1.0, **attrs)
def worms_stride_deviation_option(**attrs):
attrs.setdefault("help", "Worms: Per-worm random stride variance")
return float_option("--worms-stride-deviation", **attrs)
def worms_alpha_option(**attrs):
attrs.setdefault("help", "Worms: Output {0}".format(ALPHA_BLENDING_HINT))
return float_option("--worms-alpha", default=.875, **attrs)
def worms_kink_option(**attrs):
attrs.setdefault("help", "Worms: Rotation range (1.0 = 360 degrees)")
return float_option("--worms-kink", default=1.0, **attrs)
def wormhole_option(**attrs):
attrs.setdefault("help", "Non-iterative per-pixel field flow")
return bool_option("--wormhole", **attrs)
def wormhole_stride_option(**attrs):
attrs.setdefault("help", "Wormhole: Max per-pixel displacement range {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--wormhole-stride", default=0.1, **attrs)
def wormhole_kink_option(**attrs):
attrs.setdefault("help", "Wormhole: Per-pixel rotation range (1.0 = 360 degrees)")
return float_option("--wormhole-kink", default=1.0, **attrs)
def erosion_worms_option(**attrs):
attrs.setdefault("help", "Experimental erosion worms (Does not use worms settings)")
return bool_option("--erosion-worms", **attrs)
def dla_option(**attrs):
attrs.setdefault("help", "Diffusion-limited aggregation (DLA) {0}".format(ALPHA_BLENDING_HINT))
return float_option("--dla", **attrs)
def dla_padding_option(**attrs):
attrs.setdefault("help", "DLA: Pixel padding (smaller is slower)")
return int_option("--dla-padding", default=2, **attrs)
def voronoi_option(**attrs):
attrs.setdefault("help", "Generate a Voronoi diagram (0=Off, 1=Range, 2=Color Range, 3=Indexed, 4=Color Map, 5=Blended, 6=Flow, 7=Collage)")
return int_option("--voronoi", callback=validate_enum(VoronoiDiagramType), **attrs)
def voronoi_func_option(**attrs):
attrs.setdefault("help", "Voronoi: Distance function {0}".format(DISTANCE_HINT))
return int_option("--voronoi-func", callback=validate_enum(DistanceFunction), default=1, **attrs)
def voronoi_nth_option(**attrs):
attrs.setdefault("help", "Voronoi: Plot Nth nearest, or -Nth farthest")
return int_option("--voronoi-nth", **attrs)
def voronoi_alpha_option(**attrs):
attrs.setdefault("help", "Voronoi: Basis {0}".format(ALPHA_BLENDING_HINT))
return float_option("--voronoi-alpha", default=1.0, **attrs)
def voronoi_refract_option(**attrs):
attrs.setdefault("help", "Voronoi: Domain warp input tensor {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--voronoi-refract", **attrs)
def voronoi_refract_y_from_offset_option(**attrs):
attrs.setdefault("help", "Domain warping: Voronoi refract: {0}".format(Y_FROM_OFFSET_HINT))
return bool_option("--voronoi-refract-y-from-offset/--no-voronoi-refract-y-from-offset", default=True, **attrs)
def voronoi_inverse_option(**attrs):
attrs.setdefault("help", "Voronoi: Inverse range")
return bool_option("--voronoi-inverse", **attrs)
def point_freq_option(default=3, **attrs):
    """Voronoi/DLA point cloud frequency option (int, range 1-10).

    Bug fix: the signature default was the float ``3.0`` on an option whose
    click type is ``int``/``IntRange`` — the default is now the int ``3``
    (same numeric value, correct type).
    """
    attrs.setdefault("help", "Voronoi/DLA: Approximate lengthwise point cloud frequency (freq * freq = count)")
    return int_option("--point-freq", type=click.IntRange(1, 10), default=default, **attrs)
def point_distrib_option(**attrs):
attrs.setdefault("help", "Voronoi/DLA: Point cloud distribution")
return str_option("--point-distrib",
type=click.Choice(
[m.name for m in PointDistribution]
+ [m.name for m in ValueMask.nonprocedural_members()]
), default="random", **attrs)
def point_corners_option(**attrs):
attrs.setdefault("help", "Voronoi/DLA: Pin diagram to corners, instead of image center.")
return bool_option("--point-corners", **attrs)
def point_generations_option(**attrs):
attrs.setdefault("help", "Voronoi/DLA: Penrose-ish generations. When using, keep --point-freq below ~3 to avoid OOM")
return int_option("--point-generations", type=click.IntRange(1, 3), default=1, **attrs)
def point_drift_option(**attrs):
attrs.setdefault("help", "Voronoi/DLA: Point drift range {0}".format(NEAREST_NEIGHBOR_HINT))
return float_option("--point-drift", **attrs)
def sobel_option(**attrs):
attrs.setdefault("help", "Post-processing: Apply Sobel operator {0}".format(DISTANCE_HINT))
return int_option("--sobel", callback=validate_enum(DistanceFunction), **attrs)
def outline_option(**attrs):
attrs.setdefault("help", "Post-processing: Apply Sobel operator, and multiply {0}".format(DISTANCE_HINT))
return int_option("--outline", callback=validate_enum(DistanceFunction), **attrs)
def normals_option(**attrs):
attrs.setdefault("help", "Post-processing: Generate a tangent-space normal map")
return bool_option("--normals", **attrs)
def post_deriv_option(**attrs):
attrs.setdefault("help", "Derivatives: Extract post-reduce rate of change {0}".format(DISTANCE_HINT))
return int_option("--post-deriv", callback=validate_enum(DistanceFunction), **attrs)
def deriv_option(**attrs):
attrs.setdefault("help", "Derivatives: Extract per-octave rate of change {0}".format(DISTANCE_HINT))
return int_option("--deriv", callback=validate_enum(DistanceFunction), **attrs)
def deriv_alpha_option(**attrs):
attrs.setdefault("help", "Derivatives: Per-octave {0}".format(ALPHA_BLENDING_HINT))
return float_option("--deriv-alpha", default=1.0, **attrs)
def posterize_option(**attrs):
attrs.setdefault("help", "Post-processing: Posterize levels (per channel)")
return int_option("--posterize", **attrs)
def bloom_option(**attrs):
attrs.setdefault("help", "Post-processing: Bloom {0}".format(ALPHA_BLENDING_HINT))
return float_option("--bloom", **attrs)
def glitch_option(**attrs):
attrs.setdefault("help", "Glitch effects: Bit-shit")
return bool_option("--glitch/--no-glitch", **attrs)
def vhs_option(**attrs):
attrs.setdefault("help", "Glitch effects: VHS tracking")
return bool_option("--vhs/--no-vhs", **attrs)
def crt_option(**attrs):
attrs.setdefault("help", "Glitch effects: CRT scanline")
return bool_option("--crt/--no-crt", **attrs)
def scan_error_option(**attrs):
attrs.setdefault("help", "Glitch effects: Analog scanline error")
return bool_option("--scan-error/--no-scan-error", **attrs)
def snow_option(**attrs):
attrs.setdefault("help", "Glitch effects: Analog broadcast snow (0.0=off, 1.0=saturated)")
return float_option("--snow", **attrs)
def dither_option(**attrs):
attrs.setdefault("help", "Glitch effects: Per-pixel brightness jitter")
return float_option("--dither", **attrs)
def aberration_option(**attrs):
attrs.setdefault("help", "Glitch effects: Chromatic aberration distance (e.g. .0075)")
return float_option("--aberration", **attrs)
def light_leak_option(**attrs):
    """Art-effects light leak option (alpha blending amount).

    Bug fix: the help format string was missing its ``{0}`` placeholder, so
    ``.format(ALPHA_BLENDING_HINT)`` silently dropped the hint. Now matches
    the pattern used by e.g. vignette_option.
    """
    attrs.setdefault("help", "Art effects: Light leak {0}".format(ALPHA_BLENDING_HINT))
    return float_option("--light-leak", **attrs)
def vignette_option(**attrs):
attrs.setdefault("help", "Art effects: Vignette {0}".format(ALPHA_BLENDING_HINT))
return float_option("--vignette", **attrs)
def vignette_brightness_option(**attrs):
attrs.setdefault("help", "Art effects: Vignette edge brightness (0-1)")
return float_option("--vignette-brightness", **attrs)
def pop_option(**attrs):
attrs.setdefault("help", "Art effects: Pop art")
return bool_option("--pop", **attrs)
def convolve_option(**attrs):
attrs.setdefault("help", "Convolution kernel: May be specified multiple times")
return multi_str_option("--convolve", type=click.Choice([m.name.replace('conv2d_', '') for m in ValueMask.conv2d_members()]), **attrs)
def shadow_option(**attrs):
attrs.setdefault("help", "Shadow {0}".format(ALPHA_BLENDING_HINT))
return float_option("--shadow", **attrs)
def rgb_option(**attrs):
attrs.setdefault("help", "Use RGB noise basis instead of HSV")
return bool_option("--rgb", **attrs)
def hue_range_option(**attrs):
attrs.setdefault("help", "HSV: Hue range (0..1+)")
return float_option("--hue-range", default=0.25, **attrs)
def hue_rotation_option(**attrs):
attrs.setdefault("help", "HSV: Hue rotation (0..1)")
return float_option("--hue-rotation", default=None, **attrs)
def saturation_option(**attrs):
attrs.setdefault("help", "HSV: Saturation (0..1+)")
return float_option("--saturation", default=1.0, **attrs)
def hue_distrib_option(**attrs):
attrs.setdefault("help", "HSV: Override value distribution for hue")
return str_option("--hue-distrib", type=click.Choice([m.name for m in ValueDistribution]), default=None, **attrs)
def saturation_distrib_option(**attrs):
attrs.setdefault("help", "HSV: Override value distribution for saturation")
return str_option("--saturation-distrib", type=click.Choice([m.name for m in ValueDistribution]), default=None, **attrs)
def brightness_distrib_option(**attrs):
attrs.setdefault("help", "HSV: Override value distribution for brightness")
return str_option("--brightness-distrib", type=click.Choice([m.name for m in ValueDistribution]), default=None, **attrs)
def post_hue_rotation_option(**attrs):
attrs.setdefault("help", "HSV: Post-reduce hue rotation (-0.5 .. 0.5)")
return float_option("--post-hue-rotation", default=None, **attrs)
def post_saturation_option(**attrs):
attrs.setdefault("help", "HSV: Post-reduce saturation")
return float_option("--post-saturation", default=None, **attrs)
def post_contrast_option(**attrs):
attrs.setdefault("help", "HSV: Post-reduce contrast adjustment")
return float_option("--post-contrast", default=None, **attrs)
def density_map_option(**attrs):
attrs.setdefault("help", "Map values to color density histogram")
return bool_option("--density", default=False, **attrs)
def input_dir_option(**attrs):
attrs.setdefault("help", "Input directory containing .jpg and/or .png images, for collage functions")
return str_option("--input-dir", type=click.Path(exists=True, file_okay=False, resolve_path=True), **attrs)
def seed_option(**attrs):
attrs.setdefault("help", "Random seed. Might not affect all things.")
return int_option("--seed", default=None, **attrs)
def name_option(default=None, **attrs):
attrs.setdefault("help", "Filename for image output (should end with .png or .jpg)")
return str_option("--name", type=click.Path(dir_okay=False), default=default or "noise.png", **attrs)
| 30.170929 | 155 | 0.698354 |
import click
from noisemaker.constants import DistanceFunction, InterpolationType, PointDistribution, ValueDistribution, ValueMask, VoronoiDiagramType, WormBehavior
import noisemaker.masks as masks
CLICK_CONTEXT_SETTINGS = {"help_option_names": ["-h", "--help"], "max_content_width": 160}
ALPHA_BLENDING_HINT = "alpha blending amount (0.0 = 0%, 1.0 = 100%)"
DISTANCE_HINT = "(1=Euclidean, 2=Manhattan, 3=Chebyshev)"
ENTIRE_IMAGE_HINT = "(1.0 = height/width of entire image)"
FREQ_HINT = "(must be >= 2)"
INTERPOLATION_HINT = "(0=constant, 1=linear, 2=cosine, 3=bicubic)"
NEAREST_NEIGHBOR_HINT = "(1.0 = as far as nearest neighbor)"
Y_FROM_OFFSET_HINT = "Use offset X values for Y (instead of sin/cos)"
def validate_more_than_one(allow_none=False):
    """Build a click callback that rejects values less than or equal to 1.

    Args:
        allow_none: When True, a value of None passes through unchanged.

    Returns:
        A callback usable as ``click.option(callback=...)``.
    """

    def validate(ctx, param, value):
        # None is acceptable only when the caller opted in; any actual
        # number must be strictly greater than one.
        acceptable = (value is None and allow_none) or (value is not None and value > 1)

        if not acceptable:
            raise click.BadParameter("invalid choice: {0}. (choose a value greater than 1)".format(value))

        return value

    return validate
def validate_enum(cls):
    """Build a click callback that accepts only member values of enum *cls*.

    Args:
        cls: An Enum class whose member *values* are the legal choices.

    Returns:
        A callback usable as ``click.option(callback=...)``; None passes through.
    """

    def validate(ctx, param, value):
        legal_values = [m.value for m in cls]

        if value is None or value in legal_values:
            return value

        raise click.BadParameter("invalid choice: {0}. (choose from {1})".format(value, ", ".join(["{0} ({1})".format(m.value, m.name) for m in cls])))

    return validate
def bool_option(attr, **attrs):
    """Declare a boolean flag click option, defaulting to False.

    Idiom fix: the original ``attrs.setdefault("default", attrs.get("default", False))``
    performed a redundant double lookup — ``setdefault`` alone already keeps a
    caller-supplied default.
    """
    attrs.setdefault("is_flag", True)
    attrs.setdefault("default", False)
    return option(attr, **attrs)
def float_option(attr, **attrs):
    """Declare a float-typed click option, defaulting to 0.0."""
    for key, fallback in (("type", float), ("default", 0.0)):
        attrs.setdefault(key, fallback)
    return option(attr, **attrs)
def int_option(attr, **attrs):
    """Declare an int-typed click option, defaulting to 0."""
    for key, fallback in (("type", int), ("default", 0)):
        attrs.setdefault(key, fallback)
    return option(attr, **attrs)
def str_option(attr, **attrs):
    """Declare a str-typed click option, defaulting to None."""
    for key, fallback in (("type", str), ("default", None)):
        attrs.setdefault(key, fallback)
    return option(attr, **attrs)
def multi_str_option(attr, **attrs):
    """Declare a string option that may be given multiple times (click ``multiple=True``)."""
    return str_option(attr, multiple=True, **attrs)
def option(*param_decls, **attrs):
    """Wrap ``click.option``, appending range/default hints to the help text.

    Bug fix: the help-text suffixes are computed once here, at factory time,
    instead of inside the returned decorator. The original mutated
    ``attrs["help"]`` on every decoration, so applying one returned decorator
    to more than one command duplicated the " [range: ...]"/" [default: ...]"
    suffixes.

    NOTE(review): assumes every caller has set attrs["help"] (all factories in
    this module do, via setdefault) — a missing key raises KeyError, as before.
    """
    if isinstance(attrs.get("type"), click.IntRange):
        r = attrs["type"]
        attrs["help"] += " [range: {0}-{1}]".format(r.min, r.max)

    # None/False/0 are the "unset" defaults of the typed factories; don't
    # advertise those in the help text.
    if attrs.get("default") not in (None, False, 0):
        attrs["help"] += " [default: {0}]".format(attrs["default"])

    def decorator(f):
        return click.option(*param_decls, **attrs)(f)

    return decorator
def freq_option(**attrs):
attrs.setdefault("help", "Minimum noise frequency {0}".format(FREQ_HINT))
return int_option("--freq", default=3, callback=validate_more_than_one(), **attrs)
def width_option(**attrs):
attrs.setdefault("help", "Output width, in pixels")
return int_option("--width", default=1024, **attrs)
def height_option(**attrs):
attrs.setdefault("help", "Output height, in pixels")
return int_option("--height", default=1024, **attrs)
def channels_option(**attrs):
attrs.setdefault("help", "Color channel count (1=gray, 2=gray+alpha, 3=HSV/RGB, 4=RGB+alpha)")
return int_option("--channels", type=click.IntRange(1, 4), default=3, **attrs)
def time_option(**attrs):
attrs.setdefault("help", "Time value for Z axis (simplex only)")
return float_option("--time", default=0.0, **attrs)
def octaves_option(**attrs):
attrs.setdefault("help", "Octave count: Number of multi-res layers")
return int_option("--octaves", type=click.IntRange(1, 10), default=1, **attrs)
def ridges_option(**attrs):
attrs.setdefault("help", "Per-octave \"crease\" at midpoint values: abs(noise * 2 - 1)")
return bool_option("--ridges", **attrs)
def post_ridges_option(**attrs):
attrs.setdefault("help", "Post-reduce \"crease\" at midpoint values: abs(noise * 2 - 1)")
return bool_option("--post-ridges", **attrs)
def distrib_option(**attrs):
attrs.setdefault("help", "Value distribution")
return str_option("--distrib", type=click.Choice([m.name for m in ValueDistribution]), default="normal", **attrs)
def corners_option(**attrs):
attrs.setdefault("help", "Value distribution: Pin pixels to corners, instead of image center.")
return bool_option("--corners", **attrs)
def mask_option(**attrs):
attrs.setdefault("help", "Value distribution: Hot pixel mask")
return str_option("--mask", type=click.Choice([m.name for m in ValueMask]), **attrs)
def mask_inverse_option(**attrs):
attrs.setdefault("help", "Mask: Invert hot pixels")
return bool_option("--mask-inverse", **attrs)
def glyph_map_option(**attrs):
    """Glyph map brightness-atlas mask option.

    Fix: ``click.Choice`` was built from a ``set``, which made the choice
    ordering in ``--help`` output nondeterministic between runs; sorting
    gives stable output while still de-duplicating.
    """
    attrs.setdefault("help", "Mask: Glyph map brightness atlas mask")
    choices = sorted({m.name for m in masks.square_masks()})
    return str_option("--glyph-map", type=click.Choice(choices), **attrs)
def glyph_map_colorize_option(**attrs):
attrs.setdefault("help", "Glyph map: Colorize exploded pixels")
return bool_option("--glyph-map-colorize", **attrs)
def glyph_map_zoom_option(**attrs):
attrs.setdefault("help", "Glyph map: Exploded pixel zoom factor")
return float_option("--glyph-map-zoom", default=4.0, **attrs)
def glyph_map_alpha_option(**attrs):
attrs.setdefault("help", "Glyph map: Output {0}".format(ALPHA_BLENDING_HINT))
return float_option("--glyph-map-alpha", default=1.0, **attrs)
def composite_option(**attrs):
attrs.setdefault("help", "Mask: Composite video effect mask")
return str_option("--composite", type=click.Choice([m.name for m in ValueMask.rgb_members()]), **attrs)
def composite_zoom_option(**attrs):
attrs.setdefault("help", "Composite video effect: Exploded pixel zoom factor")
return float_option("--composite-zoom", default=2.0, **attrs)
def interp_option(**attrs):
attrs.setdefault("help", "Interpolation type {0}".format(INTERPOLATION_HINT))
return int_option("--interp", callback=validate_enum(InterpolationType), default=3, **attrs)
def sin_option(**attrs):
attrs.setdefault("help", "Apply sin function to noise basis")
return float_option("--sin", **attrs)
def wavelet_option(**attrs):
attrs.setdefault("help", "Wavelets: What are they even?")
return bool_option("--wavelet", **attrs)
def lattice_drift_option(**attrs):
attrs.setdefault("help", "Domain warping: Lattice deform range {0}".format(NEAREST_NEIGHBOR_HINT))
return float_option("--lattice-drift", **attrs)
def vortex_option(**attrs):
attrs.setdefault("help", "Vortex tiling amount")
return float_option("--vortex", **attrs)
def warp_option(**attrs):
attrs.setdefault("help", "Octave Warp: Orthogonal displacement range {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--warp", **attrs)
def warp_octaves_option(**attrs):
attrs.setdefault("help", "Octave Warp: Octave count for --warp")
return int_option("--warp-octaves", type=click.IntRange(1, 10), default=3, **attrs)
def warp_interp_option(**attrs):
attrs.setdefault("help", "Octave Warp: Interpolation type {0}".format(INTERPOLATION_HINT))
return int_option("--warp-interp", default=None, callback=validate_enum(InterpolationType), **attrs)
def warp_freq_option(**attrs):
attrs.setdefault("help", "Octave Warp: Override --freq for warp frequency {0}".format(FREQ_HINT))
return int_option("--warp-freq", callback=validate_more_than_one(allow_none=True), default=None, **attrs)
def warp_map_option(**attrs):
attrs.setdefault("help", "Octave Warp: Filename of image with brightness values")
return str_option("--warp-map", type=click.Path(exists=True, dir_okay=False, resolve_path=True), **attrs)
def post_reindex_option(**attrs):
attrs.setdefault("help", "Post-reduce color re-indexing range {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--post-reindex", **attrs)
def post_reflect_option(**attrs):
attrs.setdefault("help", "Domain warping: Post-reduce derivative-based displacement range {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--post-reflect", **attrs)
def post_refract_option(**attrs):
attrs.setdefault("help", "Domain warping: Post-reduce self-displacement range {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--post-refract", **attrs)
def post_refract_y_from_offset_option(**attrs):
attrs.setdefault("help", "Domain warping: Post-reduce refract: {0}".format(Y_FROM_OFFSET_HINT))
return bool_option("--post-refract-y-from-offset/--no-post-refract-y-from-offset", default=True, **attrs)
def reflect_option(**attrs):
attrs.setdefault("help", "Domain warping: Per-octave derivative-based displacement range {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--reflect", **attrs)
def refract_option(**attrs):
attrs.setdefault("help", "Domain warping: Per-octave self-displacement range {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--refract", **attrs)
def refract_y_from_offset_option(**attrs):
attrs.setdefault("help", "Domain warping: Per-octave refract: {0}".format(Y_FROM_OFFSET_HINT))
return bool_option("--refract-y-from-offset/--no-refract-y-from-offset", **attrs)
def ripple_option(**attrs):
attrs.setdefault("help", "Ripple effect: Displacement range {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--ripple", default=None, **attrs)
def ripple_freq_option(**attrs):
attrs.setdefault("help", "Ripple effect: Override --freq for ripple frequency {0}".format(FREQ_HINT))
return int_option("--ripple-freq", default=3, **attrs)
def ripple_kink_option(**attrs):
attrs.setdefault("help", "Ripple effect: Ripple amplitude")
return float_option("--ripple-kink", default=1.0, **attrs)
def reindex_option(**attrs):
attrs.setdefault("help", "Color re-indexing range {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--reindex", **attrs)
def reverb_option(**attrs):
attrs.setdefault("help", "Post-reduce tiled octave count")
return int_option("--reverb", type=click.IntRange(1, 10), default=None, **attrs)
def reverb_iterations_option(**attrs):
attrs.setdefault("help", "Reverb: Re-reverberate N times")
return int_option("--reverb-iterations", type=click.IntRange(1, 4), default=1, **attrs)
def clut_option(**attrs):
attrs.setdefault("help", "Color lookup table (path to PNG or JPEG image)")
return str_option("--clut", type=click.Path(exists=True, dir_okay=False, resolve_path=True), **attrs)
def clut_range_option(**attrs):
attrs.setdefault("help", "CLUT: Maximum pixel gather distance {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--clut-range", default=0.5, **attrs)
def clut_horizontal_option(**attrs):
attrs.setdefault("help", "CLUT: Preserve vertical axis")
return bool_option("--clut-horizontal", **attrs)
def worms_option(**attrs):
attrs.setdefault("help", "Iterative \"worm\" field flow (1=Obedient, 2=Crosshatch, 3=Unruly, 4=Chaotic)")
return int_option("--worms", callback=validate_enum(WormBehavior), **attrs)
def worms_density_option(**attrs):
attrs.setdefault("help", "Worms: Density multiplier (larger is more costly)")
return float_option("--worms-density", default=4.0, **attrs)
def worms_duration_option(**attrs):
attrs.setdefault("help", "Worms: Iteration multiplier (larger is more costly)")
return float_option("--worms-duration", default=4.0, **attrs)
def worms_stride_option(**attrs):
attrs.setdefault("help", "Worms: Mean pixel displacement per iteration")
return float_option("--worms-stride", default=1.0, **attrs)
def worms_stride_deviation_option(**attrs):
attrs.setdefault("help", "Worms: Per-worm random stride variance")
return float_option("--worms-stride-deviation", **attrs)
def worms_alpha_option(**attrs):
attrs.setdefault("help", "Worms: Output {0}".format(ALPHA_BLENDING_HINT))
return float_option("--worms-alpha", default=.875, **attrs)
def worms_kink_option(**attrs):
attrs.setdefault("help", "Worms: Rotation range (1.0 = 360 degrees)")
return float_option("--worms-kink", default=1.0, **attrs)
def wormhole_option(**attrs):
attrs.setdefault("help", "Non-iterative per-pixel field flow")
return bool_option("--wormhole", **attrs)
def wormhole_stride_option(**attrs):
attrs.setdefault("help", "Wormhole: Max per-pixel displacement range {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--wormhole-stride", default=0.1, **attrs)
def wormhole_kink_option(**attrs):
attrs.setdefault("help", "Wormhole: Per-pixel rotation range (1.0 = 360 degrees)")
return float_option("--wormhole-kink", default=1.0, **attrs)
def erosion_worms_option(**attrs):
attrs.setdefault("help", "Experimental erosion worms (Does not use worms settings)")
return bool_option("--erosion-worms", **attrs)
def dla_option(**attrs):
attrs.setdefault("help", "Diffusion-limited aggregation (DLA) {0}".format(ALPHA_BLENDING_HINT))
return float_option("--dla", **attrs)
def dla_padding_option(**attrs):
attrs.setdefault("help", "DLA: Pixel padding (smaller is slower)")
return int_option("--dla-padding", default=2, **attrs)
def voronoi_option(**attrs):
attrs.setdefault("help", "Generate a Voronoi diagram (0=Off, 1=Range, 2=Color Range, 3=Indexed, 4=Color Map, 5=Blended, 6=Flow, 7=Collage)")
return int_option("--voronoi", callback=validate_enum(VoronoiDiagramType), **attrs)
def voronoi_func_option(**attrs):
attrs.setdefault("help", "Voronoi: Distance function {0}".format(DISTANCE_HINT))
return int_option("--voronoi-func", callback=validate_enum(DistanceFunction), default=1, **attrs)
def voronoi_nth_option(**attrs):
attrs.setdefault("help", "Voronoi: Plot Nth nearest, or -Nth farthest")
return int_option("--voronoi-nth", **attrs)
def voronoi_alpha_option(**attrs):
attrs.setdefault("help", "Voronoi: Basis {0}".format(ALPHA_BLENDING_HINT))
return float_option("--voronoi-alpha", default=1.0, **attrs)
def voronoi_refract_option(**attrs):
attrs.setdefault("help", "Voronoi: Domain warp input tensor {0}".format(ENTIRE_IMAGE_HINT))
return float_option("--voronoi-refract", **attrs)
def voronoi_refract_y_from_offset_option(**attrs):
attrs.setdefault("help", "Domain warping: Voronoi refract: {0}".format(Y_FROM_OFFSET_HINT))
return bool_option("--voronoi-refract-y-from-offset/--no-voronoi-refract-y-from-offset", default=True, **attrs)
def voronoi_inverse_option(**attrs):
attrs.setdefault("help", "Voronoi: Inverse range")
return bool_option("--voronoi-inverse", **attrs)
def point_freq_option(default=3, **attrs):
    """Voronoi/DLA point cloud frequency option (int, range 1-10).

    Bug fix: the signature default was the float ``3.0`` on an option whose
    click type is ``int``/``IntRange`` — the default is now the int ``3``
    (same numeric value, correct type).
    """
    attrs.setdefault("help", "Voronoi/DLA: Approximate lengthwise point cloud frequency (freq * freq = count)")
    return int_option("--point-freq", type=click.IntRange(1, 10), default=default, **attrs)
def point_distrib_option(**attrs):
attrs.setdefault("help", "Voronoi/DLA: Point cloud distribution")
return str_option("--point-distrib",
type=click.Choice(
[m.name for m in PointDistribution]
+ [m.name for m in ValueMask.nonprocedural_members()]
), default="random", **attrs)
def point_corners_option(**attrs):
attrs.setdefault("help", "Voronoi/DLA: Pin diagram to corners, instead of image center.")
return bool_option("--point-corners", **attrs)
def point_generations_option(**attrs):
attrs.setdefault("help", "Voronoi/DLA: Penrose-ish generations. When using, keep --point-freq below ~3 to avoid OOM")
return int_option("--point-generations", type=click.IntRange(1, 3), default=1, **attrs)
def point_drift_option(**attrs):
attrs.setdefault("help", "Voronoi/DLA: Point drift range {0}".format(NEAREST_NEIGHBOR_HINT))
return float_option("--point-drift", **attrs)
def sobel_option(**attrs):
attrs.setdefault("help", "Post-processing: Apply Sobel operator {0}".format(DISTANCE_HINT))
return int_option("--sobel", callback=validate_enum(DistanceFunction), **attrs)
def outline_option(**attrs):
attrs.setdefault("help", "Post-processing: Apply Sobel operator, and multiply {0}".format(DISTANCE_HINT))
return int_option("--outline", callback=validate_enum(DistanceFunction), **attrs)
def normals_option(**attrs):
attrs.setdefault("help", "Post-processing: Generate a tangent-space normal map")
return bool_option("--normals", **attrs)
def post_deriv_option(**attrs):
attrs.setdefault("help", "Derivatives: Extract post-reduce rate of change {0}".format(DISTANCE_HINT))
return int_option("--post-deriv", callback=validate_enum(DistanceFunction), **attrs)
def deriv_option(**attrs):
attrs.setdefault("help", "Derivatives: Extract per-octave rate of change {0}".format(DISTANCE_HINT))
return int_option("--deriv", callback=validate_enum(DistanceFunction), **attrs)
def deriv_alpha_option(**attrs):
attrs.setdefault("help", "Derivatives: Per-octave {0}".format(ALPHA_BLENDING_HINT))
return float_option("--deriv-alpha", default=1.0, **attrs)
def posterize_option(**attrs):
attrs.setdefault("help", "Post-processing: Posterize levels (per channel)")
return int_option("--posterize", **attrs)
def bloom_option(**attrs):
attrs.setdefault("help", "Post-processing: Bloom {0}".format(ALPHA_BLENDING_HINT))
return float_option("--bloom", **attrs)
def glitch_option(**attrs):
attrs.setdefault("help", "Glitch effects: Bit-shit")
return bool_option("--glitch/--no-glitch", **attrs)
def vhs_option(**attrs):
attrs.setdefault("help", "Glitch effects: VHS tracking")
return bool_option("--vhs/--no-vhs", **attrs)
def crt_option(**attrs):
attrs.setdefault("help", "Glitch effects: CRT scanline")
return bool_option("--crt/--no-crt", **attrs)
def scan_error_option(**attrs):
attrs.setdefault("help", "Glitch effects: Analog scanline error")
return bool_option("--scan-error/--no-scan-error", **attrs)
def snow_option(**attrs):
attrs.setdefault("help", "Glitch effects: Analog broadcast snow (0.0=off, 1.0=saturated)")
return float_option("--snow", **attrs)
def dither_option(**attrs):
attrs.setdefault("help", "Glitch effects: Per-pixel brightness jitter")
return float_option("--dither", **attrs)
def aberration_option(**attrs):
attrs.setdefault("help", "Glitch effects: Chromatic aberration distance (e.g. .0075)")
return float_option("--aberration", **attrs)
def light_leak_option(**attrs):
    """Art-effects light leak option (alpha blending amount).

    Bug fix: the help format string was missing its ``{0}`` placeholder, so
    ``.format(ALPHA_BLENDING_HINT)`` silently dropped the hint. Now matches
    the pattern used by e.g. vignette_option.
    """
    attrs.setdefault("help", "Art effects: Light leak {0}".format(ALPHA_BLENDING_HINT))
    return float_option("--light-leak", **attrs)
def vignette_option(**attrs):
attrs.setdefault("help", "Art effects: Vignette {0}".format(ALPHA_BLENDING_HINT))
return float_option("--vignette", **attrs)
def vignette_brightness_option(**attrs):
attrs.setdefault("help", "Art effects: Vignette edge brightness (0-1)")
return float_option("--vignette-brightness", **attrs)
def pop_option(**attrs):
attrs.setdefault("help", "Art effects: Pop art")
return bool_option("--pop", **attrs)
def convolve_option(**attrs):
attrs.setdefault("help", "Convolution kernel: May be specified multiple times")
return multi_str_option("--convolve", type=click.Choice([m.name.replace('conv2d_', '') for m in ValueMask.conv2d_members()]), **attrs)
def shadow_option(**attrs):
attrs.setdefault("help", "Shadow {0}".format(ALPHA_BLENDING_HINT))
return float_option("--shadow", **attrs)
def rgb_option(**attrs):
attrs.setdefault("help", "Use RGB noise basis instead of HSV")
return bool_option("--rgb", **attrs)
def hue_range_option(**attrs):
attrs.setdefault("help", "HSV: Hue range (0..1+)")
return float_option("--hue-range", default=0.25, **attrs)
def hue_rotation_option(**attrs):
attrs.setdefault("help", "HSV: Hue rotation (0..1)")
return float_option("--hue-rotation", default=None, **attrs)
def saturation_option(**attrs):
    """Float CLI option for HSV saturation; defaults to 1.0."""
    merged = {"help": "HSV: Saturation (0..1+)", **attrs}
    return float_option("--saturation", default=1.0, **merged)
def hue_distrib_option(**attrs):
    """Choice CLI option overriding the value distribution used for hue."""
    choices = click.Choice([m.name for m in ValueDistribution])
    attrs.setdefault("help", "HSV: Override value distribution for hue")
    return str_option("--hue-distrib", type=choices, default=None, **attrs)
def saturation_distrib_option(**attrs):
    """Choice CLI option overriding the value distribution used for saturation."""
    choices = click.Choice([m.name for m in ValueDistribution])
    attrs.setdefault("help", "HSV: Override value distribution for saturation")
    return str_option("--saturation-distrib", type=choices, default=None, **attrs)
def brightness_distrib_option(**attrs):
    """Choice CLI option overriding the value distribution used for brightness."""
    choices = click.Choice([m.name for m in ValueDistribution])
    attrs.setdefault("help", "HSV: Override value distribution for brightness")
    return str_option("--brightness-distrib", type=choices, default=None, **attrs)
def post_hue_rotation_option(**attrs):
    """Float CLI option for post-reduce hue rotation (-0.5 .. 0.5)."""
    if "help" not in attrs:
        attrs["help"] = "HSV: Post-reduce hue rotation (-0.5 .. 0.5)"
    return float_option("--post-hue-rotation", default=None, **attrs)
def post_saturation_option(**attrs):
    """Float CLI option for post-reduce saturation."""
    merged = {"help": "HSV: Post-reduce saturation", **attrs}
    return float_option("--post-saturation", default=None, **merged)
def post_contrast_option(**attrs):
    """Float CLI option for post-reduce contrast adjustment."""
    if "help" not in attrs:
        attrs["help"] = "HSV: Post-reduce contrast adjustment"
    return float_option("--post-contrast", default=None, **attrs)
def density_map_option(**attrs):
    """Boolean CLI toggle mapping values to a color density histogram."""
    merged = {"help": "Map values to color density histogram", **attrs}
    return bool_option("--density", default=False, **merged)
def input_dir_option(**attrs):
    """Path CLI option pointing at a directory of .jpg/.png collage inputs."""
    path_type = click.Path(exists=True, file_okay=False, resolve_path=True)
    attrs.setdefault("help", "Input directory containing .jpg and/or .png images, for collage functions")
    return str_option("--input-dir", type=path_type, **attrs)
def seed_option(**attrs):
    """Integer CLI option for the random seed."""
    if "help" not in attrs:
        attrs["help"] = "Random seed. Might not affect all things."
    return int_option("--seed", default=None, **attrs)
def name_option(default=None, **attrs):
    """Path CLI option for the output image filename; falls back to noise.png."""
    if "help" not in attrs:
        attrs["help"] = "Filename for image output (should end with .png or .jpg)"
    out_name = default if default else "noise.png"
    return str_option("--name", type=click.Path(dir_okay=False), default=out_name, **attrs)
| true | true |
f73499beff1247bfebd0abc2a4dc89bbe5f9fdb4 | 26,521 | py | Python | chia/farmer/farmer_api.py | grayfallstown/chia-rosechain | fa09caba364aa1639b2a65020b4c312ff2d5b058 | [
"Apache-2.0"
] | null | null | null | chia/farmer/farmer_api.py | grayfallstown/chia-rosechain | fa09caba364aa1639b2a65020b4c312ff2d5b058 | [
"Apache-2.0"
] | null | null | null | chia/farmer/farmer_api.py | grayfallstown/chia-rosechain | fa09caba364aa1639b2a65020b4c312ff2d5b058 | [
"Apache-2.0"
] | null | null | null | import json
import time
from typing import Callable, Optional, List, Any, Dict
import aiohttp
from blspy import AugSchemeMPL, G2Element, PrivateKey
import chia.server.ws_connection as ws
from chia.consensus.pot_iterations import calculate_iterations_quality, calculate_sp_interval_iters
from chia.farmer.farmer import Farmer
from chia.protocols import farmer_protocol, harvester_protocol
from chia.protocols.harvester_protocol import PoolDifficulty
from chia.protocols.pool_protocol import (
get_current_authentication_token,
PoolErrorCode,
PostPartialRequest,
PostPartialPayload,
)
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.server.outbound_message import NodeType, make_msg
from chia.server.server import ssl_context_for_root
from chia.ssl.create_ssl import get_mozilla_ca_crt
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.util.api_decorators import api_request, peer_required
from chia.util.ints import uint32, uint64
class FarmerAPI:
farmer: Farmer
    def __init__(self, farmer: Farmer) -> None:
        # All message handlers below delegate to this Farmer instance for state
        # (sps, proofs_of_space, pool_state, keys) and for outbound messaging.
        self.farmer = farmer
    def _set_state_changed_callback(self, callback: Callable) -> None:
        # Register the callback the Farmer invokes on state_changed notifications
        # (consumed by the RPC/UI layer).
        self.farmer.state_changed_callback = callback
@api_request
@peer_required
async def new_proof_of_space(
self, new_proof_of_space: harvester_protocol.NewProofOfSpace, peer: ws.WSChiaConnection
):
"""
This is a response from the harvester, for a NewChallenge. Here we check if the proof
of space is sufficiently good, and if so, we ask for the whole proof.
"""
if new_proof_of_space.sp_hash not in self.farmer.number_of_responses:
self.farmer.number_of_responses[new_proof_of_space.sp_hash] = 0
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(
int(time.time()))
max_pos_per_sp = 5
if self.farmer.number_of_responses[new_proof_of_space.sp_hash] > max_pos_per_sp:
# This will likely never happen for any farmer with less than 10% of global space
# It's meant to make testnets more stable
self.farmer.log.info(
f"Surpassed {max_pos_per_sp} PoSpace for one SP, no longer submitting PoSpace for signage point "
f"{new_proof_of_space.sp_hash}"
)
return None
if new_proof_of_space.sp_hash not in self.farmer.sps:
self.farmer.log.warning(
f"Received response for a signage point that we do not have {new_proof_of_space.sp_hash}"
)
return None
sps = self.farmer.sps[new_proof_of_space.sp_hash]
for sp in sps:
computed_quality_string = new_proof_of_space.proof.verify_and_get_quality_string(
self.farmer.constants,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
)
if computed_quality_string is None:
self.farmer.log.error(
f"Invalid proof of space {new_proof_of_space.proof}")
return None
self.farmer.number_of_responses[new_proof_of_space.sp_hash] += 1
required_iters: uint64 = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
sp.difficulty,
new_proof_of_space.sp_hash,
)
# If the iters are good enough to make a block, proceed with the block making flow
if required_iters < calculate_sp_interval_iters(self.farmer.constants, sp.sub_slot_iters):
# Proceed at getting the signatures for this PoSpace
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[sp.challenge_chain_sp, sp.reward_chain_sp],
)
if new_proof_of_space.sp_hash not in self.farmer.proofs_of_space:
self.farmer.proofs_of_space[new_proof_of_space.sp_hash] = [
]
self.farmer.proofs_of_space[new_proof_of_space.sp_hash].append(
(
new_proof_of_space.plot_identifier,
new_proof_of_space.proof,
)
)
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(
int(time.time()))
self.farmer.quality_str_to_identifiers[computed_quality_string] = (
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
peer.peer_node_id,
)
self.farmer.cache_add_time[computed_quality_string] = uint64(
int(time.time()))
await peer.send_message(make_msg(ProtocolMessageTypes.request_signatures, request))
p2_singleton_puzzle_hash = new_proof_of_space.proof.pool_contract_puzzle_hash
if p2_singleton_puzzle_hash is not None:
# Otherwise, send the proof of space to the pool
# When we win a block, we also send the partial to the pool
if p2_singleton_puzzle_hash not in self.farmer.pool_state:
self.farmer.log.info(
f"Did not find pool info for {p2_singleton_puzzle_hash}")
return
pool_state_dict: Dict = self.farmer.pool_state[p2_singleton_puzzle_hash]
pool_url = pool_state_dict["pool_config"].pool_url
if pool_url == "":
return
if pool_state_dict["current_difficulty"] is None:
self.farmer.log.warning(
f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
f"check communication with the pool, skipping this partial to {pool_url}."
)
return
required_iters = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
pool_state_dict["current_difficulty"],
new_proof_of_space.sp_hash,
)
if required_iters >= calculate_sp_interval_iters(
self.farmer.constants, self.farmer.constants.POOL_SUB_SLOT_ITERS
):
self.farmer.log.info(
f"Proof of space not good enough for pool {pool_url}: {pool_state_dict['current_difficulty']}"
)
return
authentication_token_timeout = pool_state_dict["authentication_token_timeout"]
if authentication_token_timeout is None:
self.farmer.log.warning(
f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
f", check communication with the pool."
)
return
# Submit partial to pool
is_eos = new_proof_of_space.signage_point_index == 0
payload = PostPartialPayload(
pool_state_dict["pool_config"].launcher_id,
get_current_authentication_token(
authentication_token_timeout),
new_proof_of_space.proof,
new_proof_of_space.sp_hash,
is_eos,
peer.peer_node_id,
)
# The plot key is 2/2 so we need the harvester's half of the signature
m_to_sign = payload.get_hash()
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[m_to_sign],
)
response: Any = await peer.request_signatures(request)
if not isinstance(response, harvester_protocol.RespondSignatures):
self.farmer.log.error(
f"Invalid response from harvester: {response}")
return
assert len(response.message_signatures) == 1
plot_signature: Optional[G2Element] = None
for sk in self.farmer.get_private_keys():
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(
response.local_pk, pk, True)
assert agg_pk == new_proof_of_space.proof.plot_public_key
sig_farmer = AugSchemeMPL.sign(sk, m_to_sign, agg_pk)
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(
response.local_pk, pk)
taproot_sig: G2Element = AugSchemeMPL.sign(
taproot_sk, m_to_sign, agg_pk)
plot_signature = AugSchemeMPL.aggregate(
[sig_farmer, response.message_signatures[0][1], taproot_sig]
)
assert AugSchemeMPL.verify(
agg_pk, m_to_sign, plot_signature)
authentication_pk = pool_state_dict["pool_config"].authentication_public_key
if bytes(authentication_pk) is None:
self.farmer.log.error(
f"No authentication sk for {authentication_pk}")
return
authentication_sk: PrivateKey = self.farmer.authentication_keys[bytes(
authentication_pk)]
authentication_signature = AugSchemeMPL.sign(
authentication_sk, m_to_sign)
assert plot_signature is not None
agg_sig: G2Element = AugSchemeMPL.aggregate(
[plot_signature, authentication_signature])
post_partial_request: PostPartialRequest = PostPartialRequest(
payload, agg_sig)
post_partial_body = json.dumps(
post_partial_request.to_json_dict())
self.farmer.log.info(
f"Submitting partial for {post_partial_request.payload.launcher_id.hex()} to {pool_url}"
)
pool_state_dict["points_found_since_start"] += pool_state_dict["current_difficulty"]
pool_state_dict["points_found_24h"].append(
(time.time(), pool_state_dict["current_difficulty"]))
headers = {
"content-type": "application/json;",
}
try:
async with aiohttp.ClientSession() as session:
async with session.post(
f"{pool_url}/partial",
data=post_partial_body,
headers=headers,
ssl=ssl_context_for_root(get_mozilla_ca_crt()),
) as resp:
if resp.ok:
pool_response: Dict = json.loads(await resp.text())
self.farmer.log.info(
f"Pool response: {pool_response}")
if "error_code" in pool_response:
self.farmer.log.error(
f"Error in pooling: "
f"{pool_response['error_code'], pool_response['error_message']}"
)
pool_state_dict["pool_errors_24h"].append(
pool_response)
if pool_response["error_code"] == PoolErrorCode.PROOF_NOT_GOOD_ENOUGH.value:
self.farmer.log.error(
"Partial not good enough, forcing pool farmer update to "
"get our current difficulty."
)
pool_state_dict["next_farmer_update"] = 0
await self.farmer.update_pool_state()
else:
new_difficulty = pool_response["new_difficulty"]
pool_state_dict["points_acknowledged_since_start"] += new_difficulty
pool_state_dict["points_acknowledged_24h"].append(
(time.time(), new_difficulty))
pool_state_dict["current_difficulty"] = new_difficulty
else:
self.farmer.log.error(
f"Error sending partial to {pool_url}, {resp.status}")
except Exception as e:
self.farmer.log.error(f"Error connecting to pool: {e}")
return
return
    @api_request
    async def respond_signatures(self, response: harvester_protocol.RespondSignatures):
        """
        There are two cases: receiving signatures for sps, or receiving signatures for the block.

        Signage-point signatures are aggregated (harvester + farmer + optional taproot
        shares) and broadcast to full nodes as a DeclareProofOfSpace; block (foliage)
        signatures are aggregated the same way and broadcast as SignedValues.
        """
        if response.sp_hash not in self.farmer.sps:
            self.farmer.log.warning(
                f"Do not have challenge hash {response.challenge_hash}")
            return None
        is_sp_signatures: bool = False
        sps = self.farmer.sps[response.sp_hash]
        signage_point_index = sps[0].signage_point_index
        found_sp_hash_debug = False
        # NOTE(review): the first condition does not depend on sp_candidate, so it is
        # loop-invariant; only the inner reward_chain_sp comparison varies per candidate.
        for sp_candidate in sps:
            if response.sp_hash == response.message_signatures[0][0]:
                found_sp_hash_debug = True
                if sp_candidate.reward_chain_sp == response.message_signatures[1][0]:
                    is_sp_signatures = True
        if found_sp_hash_debug:
            assert is_sp_signatures
        # Find the PoSpace previously cached for this plot (last match wins).
        pospace = None
        for plot_identifier, candidate_pospace in self.farmer.proofs_of_space[response.sp_hash]:
            if plot_identifier == response.plot_identifier:
                pospace = candidate_pospace
        assert pospace is not None
        # Pool plots (pool contract puzzle hash set) need the taproot signature share.
        include_taproot: bool = pospace.pool_contract_puzzle_hash is not None
        computed_quality_string = pospace.verify_and_get_quality_string(
            self.farmer.constants, response.challenge_hash, response.sp_hash
        )
        if computed_quality_string is None:
            self.farmer.log.warning(f"Have invalid PoSpace {pospace}")
            return None
        if is_sp_signatures:
            # Case 1: signatures over the challenge-chain and reward-chain signage points.
            (
                challenge_chain_sp,
                challenge_chain_sp_harv_sig,
            ) = response.message_signatures[0]
            reward_chain_sp, reward_chain_sp_harv_sig = response.message_signatures[1]
            for sk in self.farmer.get_private_keys():
                pk = sk.get_g1()
                if pk == response.farmer_pk:
                    agg_pk = ProofOfSpace.generate_plot_public_key(
                        response.local_pk, pk, include_taproot)
                    assert agg_pk == pospace.plot_public_key
                    if include_taproot:
                        taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(
                            response.local_pk, pk)
                        taproot_share_cc_sp: G2Element = AugSchemeMPL.sign(
                            taproot_sk, challenge_chain_sp, agg_pk)
                        taproot_share_rc_sp: G2Element = AugSchemeMPL.sign(
                            taproot_sk, reward_chain_sp, agg_pk)
                    else:
                        # Non-pool plot: taproot shares are the identity element.
                        taproot_share_cc_sp = G2Element()
                        taproot_share_rc_sp = G2Element()
                    farmer_share_cc_sp = AugSchemeMPL.sign(
                        sk, challenge_chain_sp, agg_pk)
                    agg_sig_cc_sp = AugSchemeMPL.aggregate(
                        [challenge_chain_sp_harv_sig,
                         farmer_share_cc_sp, taproot_share_cc_sp]
                    )
                    assert AugSchemeMPL.verify(
                        agg_pk, challenge_chain_sp, agg_sig_cc_sp)
                    # This means it passes the sp filter
                    farmer_share_rc_sp = AugSchemeMPL.sign(
                        sk, reward_chain_sp, agg_pk)
                    agg_sig_rc_sp = AugSchemeMPL.aggregate(
                        [reward_chain_sp_harv_sig,
                         farmer_share_rc_sp, taproot_share_rc_sp]
                    )
                    assert AugSchemeMPL.verify(
                        agg_pk, reward_chain_sp, agg_sig_rc_sp)
                    if pospace.pool_public_key is not None:
                        # OG (non-contract) plot: sign the pool target ourselves.
                        assert pospace.pool_contract_puzzle_hash is None
                        pool_pk = bytes(pospace.pool_public_key)
                        if pool_pk not in self.farmer.pool_sks_map:
                            self.farmer.log.error(
                                f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}"
                            )
                            return None
                        pool_target: Optional[PoolTarget] = PoolTarget(
                            self.farmer.pool_target, uint32(0))
                        assert pool_target is not None
                        pool_target_signature: Optional[G2Element] = AugSchemeMPL.sign(
                            self.farmer.pool_sks_map[pool_pk], bytes(
                                pool_target)
                        )
                    else:
                        # Contract plot: pool target comes from the singleton, not from us.
                        assert pospace.pool_contract_puzzle_hash is not None
                        pool_target = None
                        pool_target_signature = None
                    request = farmer_protocol.DeclareProofOfSpace(
                        response.challenge_hash,
                        challenge_chain_sp,
                        signage_point_index,
                        reward_chain_sp,
                        pospace,
                        agg_sig_cc_sp,
                        agg_sig_rc_sp,
                        self.farmer.farmer_target,
                        pool_target,
                        pool_target_signature,
                    )
                    self.farmer.state_changed(
                        "proof", {"proof": request, "passed_filter": True})
                    msg = make_msg(
                        ProtocolMessageTypes.declare_proof_of_space, request)
                    await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
                    return None
        else:
            # This is a response with block signatures
            for sk in self.farmer.get_private_keys():
                (
                    foliage_block_data_hash,
                    foliage_sig_harvester,
                ) = response.message_signatures[0]
                (
                    foliage_transaction_block_hash,
                    foliage_transaction_block_sig_harvester,
                ) = response.message_signatures[1]
                pk = sk.get_g1()
                if pk == response.farmer_pk:
                    agg_pk = ProofOfSpace.generate_plot_public_key(
                        response.local_pk, pk, include_taproot)
                    assert agg_pk == pospace.plot_public_key
                    if include_taproot:
                        taproot_sk = ProofOfSpace.generate_taproot_sk(
                            response.local_pk, pk)
                        foliage_sig_taproot: G2Element = AugSchemeMPL.sign(
                            taproot_sk, foliage_block_data_hash, agg_pk)
                        foliage_transaction_block_sig_taproot: G2Element = AugSchemeMPL.sign(
                            taproot_sk, foliage_transaction_block_hash, agg_pk
                        )
                    else:
                        foliage_sig_taproot = G2Element()
                        foliage_transaction_block_sig_taproot = G2Element()
                    foliage_sig_farmer = AugSchemeMPL.sign(
                        sk, foliage_block_data_hash, agg_pk)
                    foliage_transaction_block_sig_farmer = AugSchemeMPL.sign(
                        sk, foliage_transaction_block_hash, agg_pk)
                    foliage_agg_sig = AugSchemeMPL.aggregate(
                        [foliage_sig_harvester, foliage_sig_farmer,
                         foliage_sig_taproot]
                    )
                    foliage_block_agg_sig = AugSchemeMPL.aggregate(
                        [
                            foliage_transaction_block_sig_harvester,
                            foliage_transaction_block_sig_farmer,
                            foliage_transaction_block_sig_taproot,
                        ]
                    )
                    assert AugSchemeMPL.verify(
                        agg_pk, foliage_block_data_hash, foliage_agg_sig)
                    assert AugSchemeMPL.verify(
                        agg_pk, foliage_transaction_block_hash, foliage_block_agg_sig)
                    request_to_nodes = farmer_protocol.SignedValues(
                        computed_quality_string,
                        foliage_agg_sig,
                        foliage_block_agg_sig,
                    )
                    msg = make_msg(
                        ProtocolMessageTypes.signed_values, request_to_nodes)
                    await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
"""
FARMER PROTOCOL (FARMER <-> FULL NODE)
"""
    @api_request
    async def new_signage_point(self, new_signage_point: farmer_protocol.NewSignagePoint):
        """Forward a full node's new signage point to all harvesters, then cache it.

        Each pool with a known difficulty is attached as a PoolDifficulty so the
        harvester can also pass plots through the (easier) pool filter.
        """
        pool_difficulties: List[PoolDifficulty] = []
        for p2_singleton_puzzle_hash, pool_dict in self.farmer.pool_state.items():
            if pool_dict["pool_config"].pool_url == "":
                # Self pooling
                continue
            if pool_dict["current_difficulty"] is None:
                self.farmer.log.warning(
                    f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
                    f"check communication with the pool, skipping this signage point, pool: "
                    f"{pool_dict['pool_config'].pool_url} "
                )
                continue
            pool_difficulties.append(
                PoolDifficulty(
                    pool_dict["current_difficulty"],
                    self.farmer.constants.POOL_SUB_SLOT_ITERS,
                    p2_singleton_puzzle_hash,
                )
            )
        message = harvester_protocol.NewSignagePointHarvester(
            new_signage_point.challenge_hash,
            new_signage_point.difficulty,
            new_signage_point.sub_slot_iters,
            new_signage_point.signage_point_index,
            new_signage_point.challenge_chain_sp,
            pool_difficulties,
        )
        msg = make_msg(
            ProtocolMessageTypes.new_signage_point_harvester, message)
        # Broadcast first, then record; duplicates are detected against the cache below.
        await self.farmer.server.send_to_all([msg], NodeType.HARVESTER)
        if new_signage_point.challenge_chain_sp not in self.farmer.sps:
            self.farmer.sps[new_signage_point.challenge_chain_sp] = []
        if new_signage_point in self.farmer.sps[new_signage_point.challenge_chain_sp]:
            self.farmer.log.debug(
                f"Duplicate signage point {new_signage_point.signage_point_index}")
            return
        self.farmer.sps[new_signage_point.challenge_chain_sp].append(
            new_signage_point)
        self.farmer.cache_add_time[new_signage_point.challenge_chain_sp] = uint64(
            int(time.time()))
        # Millisecond timestamp read back by farming_info to report lookup latency.
        # NOTE(review): "Channage" looks like a typo (presumably "Challenge"), but the
        # attribute name is shared with farming_info, so renaming must be coordinated.
        tStart = time.time()
        self.farmer.lastChannageTime = int(round(tStart * 1000))
        self.farmer.state_changed("new_signage_point", {
            "sp_hash": new_signage_point.challenge_chain_sp})
@api_request
async def request_signed_values(self, full_node_request: farmer_protocol.RequestSignedValues):
if full_node_request.quality_string not in self.farmer.quality_str_to_identifiers:
self.farmer.log.error(
f"Do not have quality string {full_node_request.quality_string}")
return None
(plot_identifier, challenge_hash, sp_hash, node_id) = self.farmer.quality_str_to_identifiers[
full_node_request.quality_string
]
request = harvester_protocol.RequestSignatures(
plot_identifier,
challenge_hash,
sp_hash,
[full_node_request.foliage_block_data_hash,
full_node_request.foliage_transaction_block_hash],
)
msg = make_msg(ProtocolMessageTypes.request_signatures, request)
await self.farmer.server.send_to_specific([msg], node_id)
@api_request
async def farming_info(self, request: farmer_protocol.FarmingInfo):
timeConsuming = 999
tEnd = time.time()
timeConsuming = int(round(tEnd * 1000)) - self.farmer.lastChannageTime
self.farmer.state_changed(
"new_farming_info",
{
"farming_info": {
"challenge_hash": request.challenge_hash,
"signage_point": request.sp_hash,
"passed_filter": request.passed,
"proofs": request.proofs,
"total_plots": request.total_plots,
"timestamp": request.timestamp,
"timeconsuming": timeConsuming,
}
},
)
    @api_request
    async def respond_plots(self, _: harvester_protocol.RespondPlots):
        # Plot listings arriving through this generic handler are unsolicited or late
        # (presumably the requester already gave up waiting); only log them.
        self.farmer.log.warning("Respond plots came too late")
| 47.785586 | 118 | 0.558199 | import json
import time
from typing import Callable, Optional, List, Any, Dict
import aiohttp
from blspy import AugSchemeMPL, G2Element, PrivateKey
import chia.server.ws_connection as ws
from chia.consensus.pot_iterations import calculate_iterations_quality, calculate_sp_interval_iters
from chia.farmer.farmer import Farmer
from chia.protocols import farmer_protocol, harvester_protocol
from chia.protocols.harvester_protocol import PoolDifficulty
from chia.protocols.pool_protocol import (
get_current_authentication_token,
PoolErrorCode,
PostPartialRequest,
PostPartialPayload,
)
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.server.outbound_message import NodeType, make_msg
from chia.server.server import ssl_context_for_root
from chia.ssl.create_ssl import get_mozilla_ca_crt
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.util.api_decorators import api_request, peer_required
from chia.util.ints import uint32, uint64
class FarmerAPI:
farmer: Farmer
def __init__(self, farmer) -> None:
self.farmer = farmer
def _set_state_changed_callback(self, callback: Callable):
self.farmer.state_changed_callback = callback
@api_request
@peer_required
async def new_proof_of_space(
self, new_proof_of_space: harvester_protocol.NewProofOfSpace, peer: ws.WSChiaConnection
):
if new_proof_of_space.sp_hash not in self.farmer.number_of_responses:
self.farmer.number_of_responses[new_proof_of_space.sp_hash] = 0
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(
int(time.time()))
max_pos_per_sp = 5
if self.farmer.number_of_responses[new_proof_of_space.sp_hash] > max_pos_per_sp:
self.farmer.log.info(
f"Surpassed {max_pos_per_sp} PoSpace for one SP, no longer submitting PoSpace for signage point "
f"{new_proof_of_space.sp_hash}"
)
return None
if new_proof_of_space.sp_hash not in self.farmer.sps:
self.farmer.log.warning(
f"Received response for a signage point that we do not have {new_proof_of_space.sp_hash}"
)
return None
sps = self.farmer.sps[new_proof_of_space.sp_hash]
for sp in sps:
computed_quality_string = new_proof_of_space.proof.verify_and_get_quality_string(
self.farmer.constants,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
)
if computed_quality_string is None:
self.farmer.log.error(
f"Invalid proof of space {new_proof_of_space.proof}")
return None
self.farmer.number_of_responses[new_proof_of_space.sp_hash] += 1
required_iters: uint64 = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
sp.difficulty,
new_proof_of_space.sp_hash,
)
# If the iters are good enough to make a block, proceed with the block making flow
if required_iters < calculate_sp_interval_iters(self.farmer.constants, sp.sub_slot_iters):
# Proceed at getting the signatures for this PoSpace
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[sp.challenge_chain_sp, sp.reward_chain_sp],
)
if new_proof_of_space.sp_hash not in self.farmer.proofs_of_space:
self.farmer.proofs_of_space[new_proof_of_space.sp_hash] = [
]
self.farmer.proofs_of_space[new_proof_of_space.sp_hash].append(
(
new_proof_of_space.plot_identifier,
new_proof_of_space.proof,
)
)
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(
int(time.time()))
self.farmer.quality_str_to_identifiers[computed_quality_string] = (
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
peer.peer_node_id,
)
self.farmer.cache_add_time[computed_quality_string] = uint64(
int(time.time()))
await peer.send_message(make_msg(ProtocolMessageTypes.request_signatures, request))
p2_singleton_puzzle_hash = new_proof_of_space.proof.pool_contract_puzzle_hash
if p2_singleton_puzzle_hash is not None:
# Otherwise, send the proof of space to the pool
# When we win a block, we also send the partial to the pool
if p2_singleton_puzzle_hash not in self.farmer.pool_state:
self.farmer.log.info(
f"Did not find pool info for {p2_singleton_puzzle_hash}")
return
pool_state_dict: Dict = self.farmer.pool_state[p2_singleton_puzzle_hash]
pool_url = pool_state_dict["pool_config"].pool_url
if pool_url == "":
return
if pool_state_dict["current_difficulty"] is None:
self.farmer.log.warning(
f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
f"check communication with the pool, skipping this partial to {pool_url}."
)
return
required_iters = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
pool_state_dict["current_difficulty"],
new_proof_of_space.sp_hash,
)
if required_iters >= calculate_sp_interval_iters(
self.farmer.constants, self.farmer.constants.POOL_SUB_SLOT_ITERS
):
self.farmer.log.info(
f"Proof of space not good enough for pool {pool_url}: {pool_state_dict['current_difficulty']}"
)
return
authentication_token_timeout = pool_state_dict["authentication_token_timeout"]
if authentication_token_timeout is None:
self.farmer.log.warning(
f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
f", check communication with the pool."
)
return
# Submit partial to pool
is_eos = new_proof_of_space.signage_point_index == 0
payload = PostPartialPayload(
pool_state_dict["pool_config"].launcher_id,
get_current_authentication_token(
authentication_token_timeout),
new_proof_of_space.proof,
new_proof_of_space.sp_hash,
is_eos,
peer.peer_node_id,
)
# The plot key is 2/2 so we need the harvester's half of the signature
m_to_sign = payload.get_hash()
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[m_to_sign],
)
response: Any = await peer.request_signatures(request)
if not isinstance(response, harvester_protocol.RespondSignatures):
self.farmer.log.error(
f"Invalid response from harvester: {response}")
return
assert len(response.message_signatures) == 1
plot_signature: Optional[G2Element] = None
for sk in self.farmer.get_private_keys():
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(
response.local_pk, pk, True)
assert agg_pk == new_proof_of_space.proof.plot_public_key
sig_farmer = AugSchemeMPL.sign(sk, m_to_sign, agg_pk)
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(
response.local_pk, pk)
taproot_sig: G2Element = AugSchemeMPL.sign(
taproot_sk, m_to_sign, agg_pk)
plot_signature = AugSchemeMPL.aggregate(
[sig_farmer, response.message_signatures[0][1], taproot_sig]
)
assert AugSchemeMPL.verify(
agg_pk, m_to_sign, plot_signature)
authentication_pk = pool_state_dict["pool_config"].authentication_public_key
if bytes(authentication_pk) is None:
self.farmer.log.error(
f"No authentication sk for {authentication_pk}")
return
authentication_sk: PrivateKey = self.farmer.authentication_keys[bytes(
authentication_pk)]
authentication_signature = AugSchemeMPL.sign(
authentication_sk, m_to_sign)
assert plot_signature is not None
agg_sig: G2Element = AugSchemeMPL.aggregate(
[plot_signature, authentication_signature])
post_partial_request: PostPartialRequest = PostPartialRequest(
payload, agg_sig)
post_partial_body = json.dumps(
post_partial_request.to_json_dict())
self.farmer.log.info(
f"Submitting partial for {post_partial_request.payload.launcher_id.hex()} to {pool_url}"
)
pool_state_dict["points_found_since_start"] += pool_state_dict["current_difficulty"]
pool_state_dict["points_found_24h"].append(
(time.time(), pool_state_dict["current_difficulty"]))
headers = {
"content-type": "application/json;",
}
try:
async with aiohttp.ClientSession() as session:
async with session.post(
f"{pool_url}/partial",
data=post_partial_body,
headers=headers,
ssl=ssl_context_for_root(get_mozilla_ca_crt()),
) as resp:
if resp.ok:
pool_response: Dict = json.loads(await resp.text())
self.farmer.log.info(
f"Pool response: {pool_response}")
if "error_code" in pool_response:
self.farmer.log.error(
f"Error in pooling: "
f"{pool_response['error_code'], pool_response['error_message']}"
)
pool_state_dict["pool_errors_24h"].append(
pool_response)
if pool_response["error_code"] == PoolErrorCode.PROOF_NOT_GOOD_ENOUGH.value:
self.farmer.log.error(
"Partial not good enough, forcing pool farmer update to "
"get our current difficulty."
)
pool_state_dict["next_farmer_update"] = 0
await self.farmer.update_pool_state()
else:
new_difficulty = pool_response["new_difficulty"]
pool_state_dict["points_acknowledged_since_start"] += new_difficulty
pool_state_dict["points_acknowledged_24h"].append(
(time.time(), new_difficulty))
pool_state_dict["current_difficulty"] = new_difficulty
else:
self.farmer.log.error(
f"Error sending partial to {pool_url}, {resp.status}")
except Exception as e:
self.farmer.log.error(f"Error connecting to pool: {e}")
return
return
    @api_request
    async def respond_signatures(self, response: harvester_protocol.RespondSignatures):
        """Handle BLS signature shares returned by a harvester.

        Combines the harvester's shares with the farmer's own key share (and,
        for pool-contract plots, a taproot share) into aggregate signatures.
        Depending on what was signed, either declares a proof of space to the
        full nodes (signage-point signatures) or sends back signed foliage
        values for a block (block signatures).
        """
        # Unknown signage point: it may already have been evicted from the cache.
        if response.sp_hash not in self.farmer.sps:
            self.farmer.log.warning(
                f"Do not have challenge hash {response.challenge_hash}")
            return None
        is_sp_signatures: bool = False
        sps = self.farmer.sps[response.sp_hash]
        signage_point_index = sps[0].signage_point_index
        found_sp_hash_debug = False
        # Decide whether these are signage-point signatures (first signed message
        # is the sp hash) or block signatures.
        # NOTE(review): the outer condition does not reference sp_candidate, so it
        # is loop-invariant — it looks like it was meant to test a field of
        # sp_candidate; confirm against upstream before changing.
        for sp_candidate in sps:
            if response.sp_hash == response.message_signatures[0][0]:
                found_sp_hash_debug = True
                if sp_candidate.reward_chain_sp == response.message_signatures[1][0]:
                    is_sp_signatures = True
        if found_sp_hash_debug:
            assert is_sp_signatures
        # Recover the proof of space this response refers to by plot identifier.
        pospace = None
        for plot_identifier, candidate_pospace in self.farmer.proofs_of_space[response.sp_hash]:
            if plot_identifier == response.plot_identifier:
                pospace = candidate_pospace
        assert pospace is not None
        # Pool-contract plots contribute an extra taproot key share to the aggregate.
        include_taproot: bool = pospace.pool_contract_puzzle_hash is not None
        computed_quality_string = pospace.verify_and_get_quality_string(
            self.farmer.constants, response.challenge_hash, response.sp_hash
        )
        if computed_quality_string is None:
            self.farmer.log.warning(f"Have invalid PoSpace {pospace}")
            return None
        if is_sp_signatures:
            (
                challenge_chain_sp,
                challenge_chain_sp_harv_sig,
            ) = response.message_signatures[0]
            reward_chain_sp, reward_chain_sp_harv_sig = response.message_signatures[1]
            # Find the farmer key matching the harvester's farmer_pk, then build
            # and verify the aggregate signatures before declaring the proof.
            for sk in self.farmer.get_private_keys():
                pk = sk.get_g1()
                if pk == response.farmer_pk:
                    agg_pk = ProofOfSpace.generate_plot_public_key(
                        response.local_pk, pk, include_taproot)
                    assert agg_pk == pospace.plot_public_key
                    if include_taproot:
                        taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(
                            response.local_pk, pk)
                        taproot_share_cc_sp: G2Element = AugSchemeMPL.sign(
                            taproot_sk, challenge_chain_sp, agg_pk)
                        taproot_share_rc_sp: G2Element = AugSchemeMPL.sign(
                            taproot_sk, reward_chain_sp, agg_pk)
                    else:
                        # Identity element: contributes nothing to the aggregate.
                        taproot_share_cc_sp = G2Element()
                        taproot_share_rc_sp = G2Element()
                    farmer_share_cc_sp = AugSchemeMPL.sign(
                        sk, challenge_chain_sp, agg_pk)
                    agg_sig_cc_sp = AugSchemeMPL.aggregate(
                        [challenge_chain_sp_harv_sig,
                            farmer_share_cc_sp, taproot_share_cc_sp]
                    )
                    assert AugSchemeMPL.verify(
                        agg_pk, challenge_chain_sp, agg_sig_cc_sp)
                    farmer_share_rc_sp = AugSchemeMPL.sign(
                        sk, reward_chain_sp, agg_pk)
                    agg_sig_rc_sp = AugSchemeMPL.aggregate(
                        [reward_chain_sp_harv_sig,
                            farmer_share_rc_sp, taproot_share_rc_sp]
                    )
                    assert AugSchemeMPL.verify(
                        agg_pk, reward_chain_sp, agg_sig_rc_sp)
                    if pospace.pool_public_key is not None:
                        # Legacy (pre-pool-protocol) plot: the farmer signs the
                        # pool target itself with the matching pool secret key.
                        assert pospace.pool_contract_puzzle_hash is None
                        pool_pk = bytes(pospace.pool_public_key)
                        if pool_pk not in self.farmer.pool_sks_map:
                            self.farmer.log.error(
                                f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}"
                            )
                            return None
                        pool_target: Optional[PoolTarget] = PoolTarget(
                            self.farmer.pool_target, uint32(0))
                        assert pool_target is not None
                        pool_target_signature: Optional[G2Element] = AugSchemeMPL.sign(
                            self.farmer.pool_sks_map[pool_pk], bytes(
                                pool_target)
                        )
                    else:
                        # Pool-contract plot: payout is enforced by the singleton
                        # contract, so no pool target is declared here.
                        assert pospace.pool_contract_puzzle_hash is not None
                        pool_target = None
                        pool_target_signature = None
                    request = farmer_protocol.DeclareProofOfSpace(
                        response.challenge_hash,
                        challenge_chain_sp,
                        signage_point_index,
                        reward_chain_sp,
                        pospace,
                        agg_sig_cc_sp,
                        agg_sig_rc_sp,
                        self.farmer.farmer_target,
                        pool_target,
                        pool_target_signature,
                    )
                    self.farmer.state_changed(
                        "proof", {"proof": request, "passed_filter": True})
                    msg = make_msg(
                        ProtocolMessageTypes.declare_proof_of_space, request)
                    await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
                    return None
        else:
            # This is a response with block signatures
            for sk in self.farmer.get_private_keys():
                (
                    foliage_block_data_hash,
                    foliage_sig_harvester,
                ) = response.message_signatures[0]
                (
                    foliage_transaction_block_hash,
                    foliage_transaction_block_sig_harvester,
                ) = response.message_signatures[1]
                pk = sk.get_g1()
                if pk == response.farmer_pk:
                    agg_pk = ProofOfSpace.generate_plot_public_key(
                        response.local_pk, pk, include_taproot)
                    assert agg_pk == pospace.plot_public_key
                    if include_taproot:
                        taproot_sk = ProofOfSpace.generate_taproot_sk(
                            response.local_pk, pk)
                        foliage_sig_taproot: G2Element = AugSchemeMPL.sign(
                            taproot_sk, foliage_block_data_hash, agg_pk)
                        foliage_transaction_block_sig_taproot: G2Element = AugSchemeMPL.sign(
                            taproot_sk, foliage_transaction_block_hash, agg_pk
                        )
                    else:
                        # Identity element: contributes nothing to the aggregate.
                        foliage_sig_taproot = G2Element()
                        foliage_transaction_block_sig_taproot = G2Element()
                    foliage_sig_farmer = AugSchemeMPL.sign(
                        sk, foliage_block_data_hash, agg_pk)
                    foliage_transaction_block_sig_farmer = AugSchemeMPL.sign(
                        sk, foliage_transaction_block_hash, agg_pk)
                    foliage_agg_sig = AugSchemeMPL.aggregate(
                        [foliage_sig_harvester, foliage_sig_farmer,
                            foliage_sig_taproot]
                    )
                    foliage_block_agg_sig = AugSchemeMPL.aggregate(
                        [
                            foliage_transaction_block_sig_harvester,
                            foliage_transaction_block_sig_farmer,
                            foliage_transaction_block_sig_taproot,
                        ]
                    )
                    assert AugSchemeMPL.verify(
                        agg_pk, foliage_block_data_hash, foliage_agg_sig)
                    assert AugSchemeMPL.verify(
                        agg_pk, foliage_transaction_block_hash, foliage_block_agg_sig)
                    request_to_nodes = farmer_protocol.SignedValues(
                        computed_quality_string,
                        foliage_agg_sig,
                        foliage_block_agg_sig,
                    )
                    msg = make_msg(
                        ProtocolMessageTypes.signed_values, request_to_nodes)
                    await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
    @api_request
    async def new_signage_point(self, new_signage_point: farmer_protocol.NewSignagePoint):
        """Fan a new signage point out to harvesters and cache it.

        Attaches per-pool difficulty overrides so harvesters can filter plots
        against pool-specific thresholds, then records the signage point for
        use when signatures come back.
        """
        pool_difficulties: List[PoolDifficulty] = []
        for p2_singleton_puzzle_hash, pool_dict in self.farmer.pool_state.items():
            if pool_dict["pool_config"].pool_url == "":
                # Self pooling
                continue
            if pool_dict["current_difficulty"] is None:
                self.farmer.log.warning(
                    f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
                    f"check communication with the pool, skipping this signage point, pool: "
                    f"{pool_dict['pool_config'].pool_url} "
                )
                continue
            pool_difficulties.append(
                PoolDifficulty(
                    pool_dict["current_difficulty"],
                    self.farmer.constants.POOL_SUB_SLOT_ITERS,
                    p2_singleton_puzzle_hash,
                )
            )
        message = harvester_protocol.NewSignagePointHarvester(
            new_signage_point.challenge_hash,
            new_signage_point.difficulty,
            new_signage_point.sub_slot_iters,
            new_signage_point.signage_point_index,
            new_signage_point.challenge_chain_sp,
            pool_difficulties,
        )
        msg = make_msg(
            ProtocolMessageTypes.new_signage_point_harvester, message)
        await self.farmer.server.send_to_all([msg], NodeType.HARVESTER)
        if new_signage_point.challenge_chain_sp not in self.farmer.sps:
            self.farmer.sps[new_signage_point.challenge_chain_sp] = []
        # Drop duplicates so the same signage point is not processed twice.
        if new_signage_point in self.farmer.sps[new_signage_point.challenge_chain_sp]:
            self.farmer.log.debug(
                f"Duplicate signage point {new_signage_point.signage_point_index}")
            return
        self.farmer.sps[new_signage_point.challenge_chain_sp].append(
            new_signage_point)
        self.farmer.cache_add_time[new_signage_point.challenge_chain_sp] = uint64(
            int(time.time()))
        # Millisecond timestamp read later by farming_info to report latency.
        # NOTE(review): "Channage" looks like a typo for "Challenge", but the
        # attribute is read elsewhere (farming_info), so the name is kept.
        tStart = time.time()
        self.farmer.lastChannageTime = int(round(tStart * 1000))
        self.farmer.state_changed("new_signage_point", {
            "sp_hash": new_signage_point.challenge_chain_sp})
    @api_request
    async def request_signed_values(self, full_node_request: farmer_protocol.RequestSignedValues):
        """Relay a full node's request for foliage signatures to the harvester
        that produced the winning proof of space."""
        if full_node_request.quality_string not in self.farmer.quality_str_to_identifiers:
            self.farmer.log.error(
                f"Do not have quality string {full_node_request.quality_string}")
            return None
        # Look up which plot (and which harvester node) produced this quality string.
        (plot_identifier, challenge_hash, sp_hash, node_id) = self.farmer.quality_str_to_identifiers[
            full_node_request.quality_string
        ]
        request = harvester_protocol.RequestSignatures(
            plot_identifier,
            challenge_hash,
            sp_hash,
            [full_node_request.foliage_block_data_hash,
                full_node_request.foliage_transaction_block_hash],
        )
        msg = make_msg(ProtocolMessageTypes.request_signatures, request)
        await self.farmer.server.send_to_specific([msg], node_id)
@api_request
async def farming_info(self, request: farmer_protocol.FarmingInfo):
timeConsuming = 999
tEnd = time.time()
timeConsuming = int(round(tEnd * 1000)) - self.farmer.lastChannageTime
self.farmer.state_changed(
"new_farming_info",
{
"farming_info": {
"challenge_hash": request.challenge_hash,
"signage_point": request.sp_hash,
"passed_filter": request.passed,
"proofs": request.proofs,
"total_plots": request.total_plots,
"timestamp": request.timestamp,
"timeconsuming": timeConsuming,
}
},
)
    @api_request
    async def respond_plots(self, _: harvester_protocol.RespondPlots):
        # Plot listings are requested synchronously elsewhere; an unsolicited
        # async response arriving here means the original request timed out.
        self.farmer.log.warning("Respond plots came too late")
| true | true |
f73499e3d2fd9b89f3e965e012e39e09413e4f22 | 10,268 | py | Python | objectModel/Python/tests/utilities/projection_test_utils.py | QPC-database/CDM | ef9ffb5fe4b692c76eb03f57b222bf21ee976692 | [
"CC-BY-4.0",
"MIT"
] | 1 | 2021-07-06T17:08:15.000Z | 2021-07-06T17:08:15.000Z | objectModel/Python/tests/utilities/projection_test_utils.py | QPC-database/CDM | ef9ffb5fe4b692c76eb03f57b222bf21ee976692 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | objectModel/Python/tests/utilities/projection_test_utils.py | QPC-database/CDM | ef9ffb5fe4b692c76eb03f57b222bf21ee976692 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import os
from typing import List, Optional
from cdm.enums import CdmLogCode, CdmObjectType, CdmStatusLevel
from cdm.utilities import AttributeResolutionDirectiveSet, ResolveOptions
from tests.cdm.projection.attribute_context_util import AttributeContextUtil
from tests.common import TestHelper
# Maps resolution directive names to the short tokens used when building the
# expected-output file name suffix (see get_resolution_option_name_suffix).
shortened_directives = {
    'normalized': 'norm',
    'referenceOnly': 'refOnly',
    'structured': 'struc',
    'virtual': 'virt',
}
class ProjectionTestUtils:
    """
    Common utility methods for projection tests

    If you want to update the expected output txt files for all the tests that are ran,
    please set the parameter update_expected_output true in the method validate_attribute_context()
    """

    # Path to foundations
    foundation_json_path = 'cdm:/foundations.cdm.json'

    # The log codes that are allowed to be logged without failing the test
    allowed_logs = set([
        CdmLogCode.WARN_DEPRECATED_RESOLUTION_GUIDANCE.name
    ])

    @staticmethod
    async def get_resolved_entity(corpus: 'CdmCorpusDefinition', input_entity: 'CdmEntityDefinition', directives: List[str]) -> 'CdmEntityDefinition':
        """Resolves an entity under the given resolution directives."""
        ro_hash_set = set(directives)

        resolved_entity_name = 'Resolved_{}'.format(input_entity.entity_name)

        res_opt = ResolveOptions(input_entity.in_document, directives=AttributeResolutionDirectiveSet(ro_hash_set))

        resolved_folder = corpus.storage.fetch_root_folder('output')
        resolved_entity = await input_entity.create_resolved_entity_async(resolved_entity_name, res_opt, resolved_folder)  # type: CdmEntityDefinition

        return resolved_entity

    @staticmethod
    def get_resolution_option_name_suffix(directives: List[str], expected_output_path = None, entity_name = None) -> str:
        """Returns a suffix that contains the file name and resolution option used."""
        file_name_prefix = ''

        for directive in directives:
            shortened_directive = shortened_directives.get(directive)
            if not shortened_directive:
                raise Exception('Using unsupported directive')
            file_name_prefix = '{}_{}'.format(file_name_prefix, shortened_directive)

        # Fall back to the default expected file when no directives were given,
        # or when a directive-specific expected file does not exist on disk.
        file_exists = os.path.exists(os.path.join(expected_output_path, 'AttrCtx_{}{}.txt'.format(entity_name, file_name_prefix))) \
            if expected_output_path and entity_name else True

        if not file_name_prefix or not file_exists:
            file_name_prefix = '_default'

        return file_name_prefix

    @staticmethod
    async def load_entity_for_resolution_option_and_save(test: 'TestCase', corpus: 'CdmCorpusDefinition', test_name: str, tests_subpath: str, entity_name: str, \
                                                         directives: List[str], update_expected_output: bool = False) -> 'CdmEntityDefinition':
        """Loads an entity, resolves it, and then validates the generated attribute contexts"""
        expected_output_path = TestHelper.get_expected_output_folder_path(tests_subpath, test_name)

        entity = await corpus.fetch_object_async('local:/{0}.cdm.json/{0}'.format(entity_name))
        test.assertIsNotNone(entity)
        resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, directives)
        test.assertIsNotNone(resolved_entity)

        await ProjectionTestUtils.validate_attribute_context(test, directives, expected_output_path, entity_name, resolved_entity, update_expected_output)

        return resolved_entity

    @staticmethod
    def get_local_corpus(tests_subpath: str, test_name: str) -> 'CdmCorpusDefinition':
        """Creates a corpus whose event callback fails the test on any
        unexpected warning or error (only codes in allowed_logs pass)."""
        corpus = TestHelper.get_local_corpus(tests_subpath, test_name)

        def callback(level: CdmStatusLevel, message: str):
            last_event = corpus.ctx.events[-1]
            if not last_event.get('code') or last_event['code'] not in ProjectionTestUtils.allowed_logs:
                raise Exception(message)
        corpus.set_event_callback(callback, CdmStatusLevel.WARNING)

        return corpus

    @staticmethod
    def create_entity(corpus: 'CdmCorpusDefinition', local_root: 'CdmFolderDefinition') -> 'CdmEntityDefinition':
        """Creates an empty entity in its own document under local_root."""
        entity_name = 'TestEntity'
        entity = corpus.make_object(CdmObjectType.ENTITY_DEF, entity_name)

        entity_doc = corpus.make_object(CdmObjectType.DOCUMENT_DEF, '{}.cdm.json'.format(entity_name), False)
        entity_doc.imports.append(ProjectionTestUtils.foundation_json_path)
        entity_doc.definitions.append(entity)

        local_root.documents.append(entity_doc, entity_doc.name)

        return entity

    @staticmethod
    def create_source_entity(corpus: 'CdmCorpusDefinition', local_root: 'CdmFolderDefinition') -> 'CdmEntityDefinition':
        """Creates a source entity for a projection with four typed attributes."""
        entity_name = 'SourceEntity'
        entity = corpus.make_object(CdmObjectType.ENTITY_DEF, entity_name)

        # BUGFIX: these assignments previously wrote to `date_type`, a typo for
        # the CDM property `data_type`, silently leaving every attribute
        # without a data type. The four near-identical blocks were also
        # collapsed into one loop.
        for attribute_name, data_type_name in (('id', 'string'),
                                               ('name', 'string'),
                                               ('value', 'integer'),
                                               ('date', 'date')):
            attribute = corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF, attribute_name)
            attribute.data_type = corpus.make_ref(CdmObjectType.DATA_TYPE_REF, data_type_name, True)
            entity.attributes.append(attribute)

        entity_doc = corpus.make_object(CdmObjectType.DOCUMENT_DEF, '{}.cdm.json'.format(entity_name), False)
        entity_doc.imports.append(ProjectionTestUtils.foundation_json_path)
        entity_doc.definitions.append(entity)

        local_root.documents.append(entity_doc, entity_doc.name)

        return entity

    @staticmethod
    def create_projection(corpus: 'CdmCorpusDefinition', local_root: 'CdmFolderDefinition') -> 'CdmProjection':
        """Creates a projection"""
        # Create an entity reference to use as the source of the projection
        projection_source = corpus.make_object(CdmObjectType.ENTITY_REF, None)
        projection_source.explicit_reference = ProjectionTestUtils.create_source_entity(corpus, local_root)

        # Create the projection
        projection = corpus.make_object(CdmObjectType.PROJECTION_DEF)
        projection.source = projection_source

        return projection

    @staticmethod
    async def validate_attribute_context(test: 'TestCase', directives: List[str], expected_output_path: str, entity_name: str, resolved_entity: 'CdmEntityDefinition', update_expected_output: Optional[bool] = False):
        """
        Validates if the attribute context of the resolved entity matches the expected output.

        If update_expected_output is true, will update the expected output txt files for all the tests that are ran.
        """
        if not resolved_entity.attribute_context:
            raise Exception('ValidateAttributeContext called with not resolved entity.')

        file_name_prefix = 'AttrCtx_' + entity_name
        file_name_suffix = ProjectionTestUtils.get_resolution_option_name_suffix(directives)

        # Get actual text
        attr_ctx_util = AttributeContextUtil()
        actual_text = attr_ctx_util.get_attribute_context_strings(resolved_entity)

        if update_expected_output:
            expected_string_file_path = os.path.join(expected_output_path, file_name_prefix + file_name_suffix + '.txt')

            if len(directives) > 0:
                default_file_name_suffix = ProjectionTestUtils.get_resolution_option_name_suffix([])
                default_string_file_path = os.path.join(expected_output_path, file_name_prefix + default_file_name_suffix + '.txt')
                if os.path.exists(default_string_file_path):
                    with open(default_string_file_path) as default_file:
                        default_text = default_file.read().replace('\r\n', '\n')
                else:
                    default_text = None

                # Directive-specific output identical to the default is not
                # stored separately; remove any stale copy.
                if actual_text == default_text:
                    if os.path.exists(expected_string_file_path):
                        os.remove(expected_string_file_path)
                else:
                    with open(expected_string_file_path, 'w') as expected_file:
                        expected_file.write(actual_text)
            else:
                with open(expected_string_file_path, 'w') as expected_file:
                    expected_file.write(actual_text)
        else:
            # Actual
            actual_string_file_path = os.path.join(expected_output_path, '..', 'ActualOutput', file_name_prefix + file_name_suffix + '.txt')

            # Save Actual AttrCtx_*.txt and Resolved_*.cdm.json
            with open(actual_string_file_path, 'w') as expected_file:
                expected_file.write(actual_text)
            await resolved_entity.in_document.save_as_async(resolved_entity.entity_name + file_name_suffix + '.cdm.json', save_referenced=False)

            # Expected
            expected_file_name_suffix = ProjectionTestUtils.get_resolution_option_name_suffix(directives, expected_output_path, entity_name)
            expected_string_file_path = os.path.join(expected_output_path, file_name_prefix + expected_file_name_suffix + '.txt')
            with open(expected_string_file_path) as expected_file:
                expected_text = expected_file.read()

            # Test if Actual is Equal to Expected (normalize line endings first)
            test.assertEqual(expected_text.replace('\r\n', '\n'), actual_text.replace('\r\n', '\n'))
| 48.895238 | 215 | 0.708609 |
import os
from typing import List, Optional
from cdm.enums import CdmLogCode, CdmObjectType, CdmStatusLevel
from cdm.utilities import AttributeResolutionDirectiveSet, ResolveOptions
from tests.cdm.projection.attribute_context_util import AttributeContextUtil
from tests.common import TestHelper
shortened_directives = {
'normalized': 'norm',
'referenceOnly': 'refOnly',
'structured': 'struc',
'virtual': 'virt',
}
class ProjectionTestUtils:
foundation_json_path = 'cdm:/foundations.cdm.json'
allowed_logs = set([
CdmLogCode.WARN_DEPRECATED_RESOLUTION_GUIDANCE.name
])
@staticmethod
async def get_resolved_entity(corpus: 'CdmCorpusDefinition', input_entity: 'CdmEntityDefinition', directives: List[str]) -> 'CdmEntityDefinition':
ro_hash_set = set(directives)
resolved_entity_name = 'Resolved_{}'.format(input_entity.entity_name)
res_opt = ResolveOptions(input_entity.in_document, directives=AttributeResolutionDirectiveSet(ro_hash_set))
resolved_folder = corpus.storage.fetch_root_folder('output')
resolved_entity = await input_entity.create_resolved_entity_async(resolved_entity_name, res_opt, resolved_folder)
return resolved_entity
@staticmethod
def get_resolution_option_name_suffix(directives: List[str], expected_output_path = None, entity_name = None) -> str:
file_name_prefix = ''
for directive in directives:
shortened_directive = shortened_directives.get(directive)
if not shortened_directive:
raise Exception('Using unsupported directive')
file_name_prefix = '{}_{}'.format(file_name_prefix, shortened_directive)
file_exists = os.path.exists(os.path.join(expected_output_path, 'AttrCtx_{}{}.txt'.format(entity_name, file_name_prefix))) \
if expected_output_path and entity_name else True
if not file_name_prefix or not file_exists:
file_name_prefix = '_default'
return file_name_prefix
@staticmethod
async def load_entity_for_resolution_option_and_save(test: 'TestCase', corpus: 'CdmCorpusDefinition', test_name: str, tests_subpath: str, entity_name: str, \
directives: List[str], update_expected_output: bool = False) -> 'CdmEntityDefinition':
expected_output_path = TestHelper.get_expected_output_folder_path(tests_subpath, test_name)
entity = await corpus.fetch_object_async('local:/{0}.cdm.json/{0}'.format(entity_name))
test.assertIsNotNone(entity)
resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, directives)
test.assertIsNotNone(resolved_entity)
await ProjectionTestUtils.validate_attribute_context(test, directives, expected_output_path, entity_name, resolved_entity, update_expected_output)
return resolved_entity
@staticmethod
def get_local_corpus(tests_subpath: str, test_name: str) -> 'CdmCorpusDefinition':
corpus = TestHelper.get_local_corpus(tests_subpath, test_name)
def callback(level: CdmStatusLevel, message: str):
last_event = corpus.ctx.events[-1]
if not last_event.get('code') or last_event['code'] not in ProjectionTestUtils.allowed_logs:
raise Exception(message)
corpus.set_event_callback(callback, CdmStatusLevel.WARNING)
return corpus
@staticmethod
def create_entity(corpus: 'CdmCorpusDefinition', local_root: 'CdmFolderDefinition') -> 'CdmEntityDefinition':
entity_name = 'TestEntity'
entity = corpus.make_object(CdmObjectType.ENTITY_DEF, entity_name)
entity_doc = corpus.make_object(CdmObjectType.DOCUMENT_DEF, '{}.cdm.json'.format(entity_name), False)
entity_doc.imports.append(ProjectionTestUtils.foundation_json_path)
entity_doc.definitions.append(entity)
local_root.documents.append(entity_doc, entity_doc.name)
return entity
@staticmethod
def create_source_entity(corpus: 'CdmCorpusDefinition', local_root: 'CdmFolderDefinition') -> 'CdmEntityDefinition':
entity_name = 'SourceEntity'
entity = corpus.make_object(CdmObjectType.ENTITY_DEF, entity_name)
attribute_name1 = 'id'
attribute1 = corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF, attribute_name1)
attribute1.date_type = corpus.make_ref(CdmObjectType.DATA_TYPE_REF, 'string', True)
entity.attributes.append(attribute1)
attributeName2 = 'name'
attribute2 = corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF, attributeName2)
attribute2.date_type = corpus.make_ref(CdmObjectType.DATA_TYPE_REF, 'string', True)
entity.attributes.append(attribute2)
attributeName3 = 'value'
attribute3 = corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF, attributeName3)
attribute3.date_type = corpus.make_ref(CdmObjectType.DATA_TYPE_REF, 'integer', True)
entity.attributes.append(attribute3)
attributeName4 = 'date'
attribute4 = corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF, attributeName4)
attribute4.date_type = corpus.make_ref(CdmObjectType.DATA_TYPE_REF, 'date', True)
entity.attributes.append(attribute4)
entity_doc = corpus.make_object(CdmObjectType.DOCUMENT_DEF, '{}.cdm.json'.format(entity_name), False)
entity_doc.imports.append(ProjectionTestUtils.foundation_json_path)
entity_doc.definitions.append(entity)
local_root.documents.append(entity_doc, entity_doc.name)
return entity
@staticmethod
def create_projection(corpus: 'CdmCorpusDefinition', local_root: 'CdmFolderDefinition') -> 'CdmProjection':
projection_source = corpus.make_object(CdmObjectType.ENTITY_REF, None)
projection_source.explicit_reference = ProjectionTestUtils.create_source_entity(corpus, local_root)
projection = corpus.make_object(CdmObjectType.PROJECTION_DEF)
projection.source = projection_source
return projection
@staticmethod
async def validate_attribute_context(test: 'TestCase', directives: List[str], expected_output_path: str, entity_name: str, resolved_entity: 'CdmEntityDefinition', update_expected_output: Optional[bool] = False):
if not resolved_entity.attribute_context:
raise Exception('ValidateAttributeContext called with not resolved entity.')
file_name_prefix = 'AttrCtx_' + entity_name
file_name_suffix = ProjectionTestUtils.get_resolution_option_name_suffix(directives)
attr_ctx_util = AttributeContextUtil()
actual_text = attr_ctx_util.get_attribute_context_strings(resolved_entity)
if update_expected_output:
expected_string_file_path = os.path.join(expected_output_path, file_name_prefix + file_name_suffix + '.txt')
if len(directives) > 0:
default_file_name_suffix = ProjectionTestUtils.get_resolution_option_name_suffix([])
default_string_file_path = os.path.join(expected_output_path, file_name_prefix + default_file_name_suffix + '.txt')
if os.path.exists(default_string_file_path):
with open(default_string_file_path) as default_file:
default_text = default_file.read().replace('\r\n', '\n')
else:
default_text = None
if actual_text == default_text:
if os.path.exists(expected_string_file_path):
os.remove(expected_string_file_path)
else:
with open(expected_string_file_path, 'w') as expected_file:
expected_file.write(actual_text)
else:
with open(expected_string_file_path, 'w') as expected_file:
expected_file.write(actual_text)
else:
actual_string_file_path = os.path.join(expected_output_path, '..', 'ActualOutput', file_name_prefix + file_name_suffix + '.txt')
with open(actual_string_file_path, 'w') as expected_file:
expected_file.write(actual_text)
await resolved_entity.in_document.save_as_async(resolved_entity.entity_name + file_name_suffix + '.cdm.json', save_referenced=False)
expected_file_name_suffix = ProjectionTestUtils.get_resolution_option_name_suffix(directives, expected_output_path, entity_name)
expected_string_file_path = os.path.join(expected_output_path, file_name_prefix + expected_file_name_suffix + '.txt')
with open(expected_string_file_path) as expected_file:
expected_text = expected_file.read()
test.assertEqual(expected_text.replace('\r\n', '\n'), actual_text.replace('\r\n', '\n'))
| true | true |
f7349ac7860f0c32e54eef76280f6a9d94e1c7a2 | 181 | py | Python | amocrm_api_client/token_provider/core/TokensBundle.py | iqtek/amocrm_api_client | 910ea42482698f5eb47d6b6e12d52ec09af77a3e | [
"MIT"
] | null | null | null | amocrm_api_client/token_provider/core/TokensBundle.py | iqtek/amocrm_api_client | 910ea42482698f5eb47d6b6e12d52ec09af77a3e | [
"MIT"
] | null | null | null | amocrm_api_client/token_provider/core/TokensBundle.py | iqtek/amocrm_api_client | 910ea42482698f5eb47d6b6e12d52ec09af77a3e | [
"MIT"
] | null | null | null | from dataclasses import dataclass
__all__ = [
"TokensBundle",
]
@dataclass(frozen=True)
class TokensBundle:
    """Immutable bundle of OAuth2-style token credentials."""

    # Short-lived bearer token attached to API requests.
    access_token: str
    # Long-lived token used to obtain a fresh access token.
    refresh_token: str
    # Access-token lifetime, in seconds from issuance.
    expires_in: int
| 12.928571 | 33 | 0.712707 | from dataclasses import dataclass
__all__ = [
"TokensBundle",
]
@dataclass(frozen=True)
class TokensBundle:
access_token: str
refresh_token: str
expires_in: int
| true | true |
f7349cbb230ae123ae90e4772c877e8f5dc3d700 | 6,878 | py | Python | pytition/petition/helpers.py | bbmt-bbmt/Pytition | ccfa68d517a8a8b8adb9971e1948f2ab6ece2641 | [
"BSD-3-Clause"
] | 64 | 2019-03-07T15:32:37.000Z | 2022-03-13T19:05:44.000Z | pytition/petition/helpers.py | bbmt-bbmt/Pytition | ccfa68d517a8a8b8adb9971e1948f2ab6ece2641 | [
"BSD-3-Clause"
] | 157 | 2019-02-28T07:32:03.000Z | 2021-09-24T07:07:09.000Z | pytition/petition/helpers.py | bbmt-bbmt/Pytition | ccfa68d517a8a8b8adb9971e1948f2ab6ece2641 | [
"BSD-3-Clause"
] | 33 | 2019-04-14T23:40:33.000Z | 2022-03-25T11:11:05.000Z | import requests
import lxml
from lxml.html.clean import Cleaner
from django.http import Http404, HttpResponseForbidden
from django.conf import settings
from django.urls import reverse
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from django.core.mail import get_connection, EmailMultiAlternatives, EmailMessage
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
# Remove all moderated instances of Petition
def remove_user_moderated(petitions):
    """Return a list with every moderated petition filtered out."""
    return list(filter(lambda petition: not petition.is_moderated, petitions))
# Remove all javascripts from HTML code
def sanitize_html(unsecure_html_content):
    """Strip scripts/javascript and other unsafe content from user HTML.

    Returns the cleaned HTML as a str, or the empty string when the input
    cannot be parsed as HTML at all (e.g. empty/garbage input).
    """
    cleaner = Cleaner(inline_style=False, scripts=True, javascript=True,
                      safe_attrs=lxml.html.defs.safe_attrs | set(['style']),
                      frames=False, embedded=False,
                      meta=True, links=True, page_structure=True, remove_tags=['body'])
    try:
        secure_html_content = lxml.html.tostring(cleaner.clean_html(lxml.html.fromstring(unsecure_html_content)), method="html")
    # BUGFIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt. Parsing failures are ordinary exceptions, so catch
    # Exception only; fall back to empty content as before.
    except Exception:
        secure_html_content = b''
    return secure_html_content.decode()
# Get the client IP address, considering proxies and RP
def get_client_ip(request):
    """Best-effort client IP: first hop of X-Forwarded-For when behind a
    proxy/reverse-proxy, otherwise the direct REMOTE_ADDR."""
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        return forwarded.split(',')[0]
    return request.META.get('REMOTE_ADDR')
# Get the user of the current session
def get_session_user(request):
    """Return the PytitionUser bound to the session's Django user, or raise 404."""
    from .models import PytitionUser
    try:
        pytitionuser = PytitionUser.objects.get(user__username=request.user.username)
    # BUGFIX: the query raises PytitionUser.DoesNotExist, but the handler
    # caught User.DoesNotExist. Each Django model defines its own DoesNotExist
    # subclass, so the old except clause never matched and a missing profile
    # produced a 500 instead of the intended 404.
    except PytitionUser.DoesNotExist:
        raise Http404(_("not found"))
    return pytitionuser
# Check if an user is in an organization
# FIXME : move this as an org method ?
def check_user_in_orga(user, orga):
    """Return None when *user* belongs to *orga*, else a 403 response."""
    if orga in user.organizations.all():
        return None
    return HttpResponseForbidden(_("You are not part of this organization"))
# Return a 404 if a petition does not exist
def petition_from_id(id):
    """Fetch a Petition by primary key; raise Http404 when none exists."""
    from .models import Petition
    petition = Petition.by_id(id)
    if petition is not None:
        return petition
    raise Http404(_("Petition does not exist"))
# Check if a petition is publicly accessible
def check_petition_is_accessible(request, petition):
    """Raise Http404 unless *petition* may be viewed in this request.

    Public (published, unmoderated) petitions are visible to everyone; an
    authenticated owner — or a member of the owning organization — may also
    preview unpublished/moderated ones. Returns True on the allowed paths;
    every other path raises, so the implicit None fall-through at the end
    appears unreachable (NOTE(review): confirm before relying on the return
    value rather than the exception).
    """
    if petition.published and not petition.moderated:
        return True
    if request.user.is_authenticated:
        user = get_session_user(request)
        # Owner (or owning-organization member) preview of non-public petitions.
        if petition.owner_type == "user" and user == petition.owner:
            return True
        if petition.owner_type == "orga" and user in petition.owner.members:
            return True
    if petition.moderated:
        raise Http404(_("This Petition has been moderated!"))
    if not petition.published:
        raise Http404(_("This Petition is not published yet!"))
# Get settings
def settings_context_processor(request):
    """Template context processor exposing Django settings as ``settings``."""
    return dict(settings=settings)
# Get footer content
def footer_content_processor(request):
    """Template context processor exposing the rendered footer HTML
    (``None`` when no footer template is configured)."""
    template_name = settings.FOOTER_TEMPLATE
    rendered = render_to_string(template_name) if template_name else None
    return {'footer_content': rendered}
# Send Confirmation email
def send_confirmation_email(request, signature):
    """Email the signatory a link to confirm their petition signature."""
    petition = signature.petition
    # Absolute URL so the link works from the recipient's mail client.
    url = request.build_absolute_uri("/petition/{}/confirm/{}".format(petition.id, signature.confirmation_hash))
    html_message = render_to_string("petition/confirmation_email.html", {'firstname': signature.first_name, 'url': url})
    # Plain-text alternative for mail clients that do not render HTML.
    message = strip_tags(html_message)
    with get_connection() as connection:
        msg = EmailMultiAlternatives(_("Confirm your signature to our petition"),
                message, to=[signature.email], connection=connection,
                reply_to=[petition.confirmation_email_reply])
        msg.attach_alternative(html_message, "text/html")
        # fail_silently=False: surface SMTP errors to the caller.
        msg.send(fail_silently=False)
# Send welcome mail on account creation
def send_welcome_mail(user_infos):
    """Send the account-creation welcome email.

    ``user_infos`` is the dict passed to the confirmation template; it must at
    least contain an ``email`` key used as the recipient address.
    """
    html_message = render_to_string("registration/confirmation_email.html", user_infos)
    # Plain-text alternative for mail clients that do not render HTML.
    message = strip_tags(html_message)
    with get_connection() as connection:
        msg = EmailMultiAlternatives(_("Account created !"),
                message, to=[user_infos["email"]], connection=connection,
                reply_to=[settings.DEFAULT_NOREPLY_MAIL])
        msg.attach_alternative(html_message, "text/html")
        # fail_silently=False: surface SMTP errors to the caller.
        msg.send(fail_silently=False)
# Generate a meta url for the HTML meta property
def petition_detail_meta(request, petition_id):
    """Build host/URL metadata for the meta tags of the petition detail page."""
    host = request.get_host()
    petition_url = f"{request.scheme}://{host}{reverse('detail', args=[petition_id])}"
    return {'site_url': host, 'petition_url': petition_url}
def subscribe_to_newsletter(petition, email):
    """Subscribe *email* to the petition's newsletter.

    Depending on the petition's configured method this either issues an HTTP
    GET/POST to a subscription endpoint or emails the list manager through a
    dedicated SMTP relay. The MAIL path is best-effort (fail_silently=True);
    HTTP errors from `requests` propagate to the caller.
    """
    if petition.newsletter_subscribe_method in ["POST", "GET"]:
        if petition.newsletter_subscribe_http_url == '':
            return
        data = petition.newsletter_subscribe_http_data
        if data == '' or data is None:
            data = {}
        else:
            import json
            # NOTE(review): naive quote swap assumes the stored payload uses
            # single quotes and contains no quotes in its values — fragile;
            # confirm the admin-side format before touching this.
            data = data.replace("'", "\"")
            data = json.loads(data)
        # Inject the subscriber address into the configured form field.
        if petition.newsletter_subscribe_http_mailfield != '':
            data[petition.newsletter_subscribe_http_mailfield] = email
        if petition.newsletter_subscribe_method == "POST":
            requests.post(petition.newsletter_subscribe_http_url, data)
        elif petition.newsletter_subscribe_method == "GET":
            requests.get(petition.newsletter_subscribe_http_url, data)
    elif petition.newsletter_subscribe_method == "MAIL":
        # Subscribe by emailing the list manager via the petition's own SMTP settings.
        with get_connection(host=petition.newsletter_subscribe_mail_smtp_host,
                            port=petition.newsletter_subscribe_mail_smtp_port,
                            username=petition.newsletter_subscribe_mail_smtp_user,
                            password=petition.newsletter_subscribe_mail_smtp_password,
                            use_ssl=petition.newsletter_subscribe_mail_smtp_tls,
                            use_tls=petition.newsletter_subscribe_mail_smtp_starttls) as connection:
            EmailMessage(petition.newsletter_subscribe_mail_subject.format(email), "",
                         petition.newsletter_subscribe_mail_from, [petition.newsletter_subscribe_mail_to],
                         connection=connection).send(fail_silently=True)
def get_update_form(user, data=None):
    """Build an UpdateInfoForm for *user*.

    When *data* is falsy the form is seeded with the user's current first
    name, last name and e-mail; otherwise *data* is used as-is.
    """
    from .forms import UpdateInfoForm
    form_data = data if data else {
        'first_name': user.first_name,
        'last_name': user.last_name,
        'email': user.email,
    }
    return UpdateInfoForm(user, form_data)
| 41.185629 | 128 | 0.692207 | import requests
import lxml
from lxml.html.clean import Cleaner
from django.http import Http404, HttpResponseForbidden
from django.conf import settings
from django.urls import reverse
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from django.core.mail import get_connection, EmailMultiAlternatives, EmailMessage
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
def remove_user_moderated(petitions):
    """Return only the petitions that have not been moderated away."""
    return [petition for petition in petitions if not petition.is_moderated]
def sanitize_html(unsecure_html_content):
    """Strip scripts, frames and other dangerous markup from untrusted HTML.

    Returns the cleaned HTML as a str. On any parsing/cleaning failure the
    empty string is returned instead of propagating the error.
    """
    cleaner = Cleaner(inline_style=False, scripts=True, javascript=True,
            safe_attrs=lxml.html.defs.safe_attrs | set(['style']),
            frames=False, embedded=False,
            meta=True, links=True, page_structure=True, remove_tags=['body'])
    try:
        secure_html_content = lxml.html.tostring(
            cleaner.clean_html(lxml.html.fromstring(unsecure_html_content)),
            method="html")
    except Exception:
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
        # Exception is broad enough for lxml parser/cleaner failures.
        secure_html_content = b''
    return secure_html_content.decode()
def get_client_ip(request):
    """Best-effort client IP: first hop of X-Forwarded-For, else REMOTE_ADDR."""
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        return forwarded.split(',')[0]
    return request.META.get('REMOTE_ADDR')
def get_session_user(request):
    """Return the PytitionUser matching the logged-in Django user.

    Raises Http404 when no such PytitionUser exists.
    """
    from .models import PytitionUser
    try:
        pytitionuser = PytitionUser.objects.get(user__username=request.user.username)
    except PytitionUser.DoesNotExist:
        # The query is on PytitionUser, so a miss raises
        # PytitionUser.DoesNotExist; the previous code caught
        # User.DoesNotExist, letting the error escape as a 500.
        raise Http404(_("not found"))
    return pytitionuser
def check_user_in_orga(user, orga):
    """Return None when *user* belongs to *orga*, else a 403 response."""
    if orga in user.organizations.all():
        return None
    return HttpResponseForbidden(_("You are not part of this organization"))
def petition_from_id(id):
    """Fetch a Petition by id, raising Http404 when it does not exist."""
    from .models import Petition
    petition = Petition.by_id(id)
    if petition is None:
        raise Http404(_("Petition does not exist"))
    return petition
def check_petition_is_accessible(request, petition):
    """Return True when the petition may be shown, else raise Http404.

    Published, non-moderated petitions are public; owners (the owning user
    or members of the owning organization) may always view their petition.
    """
    # Public case: published and not moderated.
    if petition.published and not petition.moderated:
        return True
    # Owner access: the owning user, or any member of the owning organization.
    if request.user.is_authenticated:
        user = get_session_user(request)
        if petition.owner_type == "user" and user == petition.owner:
            return True
        if petition.owner_type == "orga" and user in petition.owner.members:
            return True
    # Everyone else: explain why access is denied. Moderation takes
    # precedence over the unpublished message.
    if petition.moderated:
        raise Http404(_("This Petition has been moderated!"))
    if not petition.published:
        raise Http404(_("This Petition is not published yet!"))
def settings_context_processor(request):
    """Template context processor exposing the Django settings object."""
    return dict(settings=settings)
def footer_content_processor(request):
    """Template context processor: rendered footer template, if configured."""
    template_name = settings.FOOTER_TEMPLATE
    content = render_to_string(template_name) if template_name else None
    return {'footer_content': content}
def send_confirmation_email(request, signature):
    """E-mail the signatory a link to confirm their petition signature."""
    petition = signature.petition
    # Absolute confirmation URL embedding the per-signature secret hash.
    url = request.build_absolute_uri("/petition/{}/confirm/{}".format(petition.id, signature.confirmation_hash))
    html_message = render_to_string("petition/confirmation_email.html", {'firstname': signature.first_name, 'url': url})
    # Plain-text alternative for clients that do not render HTML.
    message = strip_tags(html_message)
    with get_connection() as connection:
        msg = EmailMultiAlternatives(_("Confirm your signature to our petition"),
                message, to=[signature.email], connection=connection,
                reply_to=[petition.confirmation_email_reply])
        msg.attach_alternative(html_message, "text/html")
        msg.send(fail_silently=False)
def send_welcome_mail(user_infos):
    """Send the account-creation welcome e-mail.

    *user_infos* is used as the template context and must contain an
    "email" key.
    """
    html_message = render_to_string("registration/confirmation_email.html", user_infos)
    # Plain-text alternative for clients that do not render HTML.
    message = strip_tags(html_message)
    with get_connection() as connection:
        msg = EmailMultiAlternatives(_("Account created !"),
                message, to=[user_infos["email"]], connection=connection,
                reply_to=[settings.DEFAULT_NOREPLY_MAIL])
        msg.attach_alternative(html_message, "text/html")
        msg.send(fail_silently=False)
def petition_detail_meta(request, petition_id):
    """Return context with the host and the absolute petition detail URL,
    for use in HTML meta properties."""
    url = "{scheme}://{host}{petition_path}".format(
        scheme=request.scheme,
        host=request.get_host(),
        petition_path=reverse('detail', args=[petition_id]))
    return {'site_url': request.get_host(), 'petition_url': url}
def subscribe_to_newsletter(petition, email):
    """Subscribe *email* to the petition's configured newsletter, either via
    an HTTP POST/GET endpoint or via a subscription e-mail over SMTP."""
    if petition.newsletter_subscribe_method in ["POST", "GET"]:
        # No endpoint configured: nothing to do.
        if petition.newsletter_subscribe_http_url == '':
            return
        data = petition.newsletter_subscribe_http_data
        if data == '' or data is None:
            data = {}
        else:
            import json
            # Stored payload apparently uses single quotes; swap them so
            # json.loads accepts it. NOTE(review): breaks on apostrophes
            # inside values — confirm the admin-side payload format.
            data = data.replace("'", "\"")
            data = json.loads(data)
        # Inject the subscriber's address under the configured field name.
        if petition.newsletter_subscribe_http_mailfield != '':
            data[petition.newsletter_subscribe_http_mailfield] = email
        if petition.newsletter_subscribe_method == "POST":
            requests.post(petition.newsletter_subscribe_http_url, data)
        elif petition.newsletter_subscribe_method == "GET":
            # requests.get's second positional argument is `params`, so the
            # payload is sent as the query string here.
            requests.get(petition.newsletter_subscribe_http_url, data)
    elif petition.newsletter_subscribe_method == "MAIL":
        with get_connection(host=petition.newsletter_subscribe_mail_smtp_host,
                port=petition.newsletter_subscribe_mail_smtp_port,
                username=petition.newsletter_subscribe_mail_smtp_user,
                password=petition.newsletter_subscribe_mail_smtp_password,
                use_ssl=petition.newsletter_subscribe_mail_smtp_tls,
                use_tls=petition.newsletter_subscribe_mail_smtp_starttls) as connection:
            # Sending failures are swallowed so a broken SMTP configuration
            # cannot break the caller's flow.
            EmailMessage(petition.newsletter_subscribe_mail_subject.format(email), "",
                    petition.newsletter_subscribe_mail_from, [petition.newsletter_subscribe_mail_to],
                    connection=connection).send(fail_silently=True)
def get_update_form(user, data=None):
    """Build an UpdateInfoForm; falsy *data* falls back to the user's
    current first name, last name and e-mail."""
    from .forms import UpdateInfoForm
    if not data:
        _data = {
            'first_name': user.first_name,
            'last_name': user.last_name,
            'email': user.email
        }
    else:
        _data = data
    return UpdateInfoForm(user, _data)
| true | true |
f7349dc08557faf1ca4d990092a7aaa988dd1752 | 3,342 | py | Python | naslib/search_spaces/nasbench1shot1/search_spaces/search_space_1.py | az2104nas/sztnb302alsr2bs21on | 6084c82c59a4a89498a191d96c231f47df10317d | [
"Apache-2.0"
] | null | null | null | naslib/search_spaces/nasbench1shot1/search_spaces/search_space_1.py | az2104nas/sztnb302alsr2bs21on | 6084c82c59a4a89498a191d96c231f47df10317d | [
"Apache-2.0"
] | 4 | 2021-06-08T21:32:32.000Z | 2022-03-12T00:29:33.000Z | naslib/search_spaces/nasbench1shot1/search_spaces/search_space_1.py | az2104nas/sztnb302alsr2bs21on | 6084c82c59a4a89498a191d96c231f47df10317d | [
"Apache-2.0"
] | null | null | null | import itertools
import matplotlib.pyplot as plt
import numpy as np
from nasbench import api
from naslib.search_spaces.nasbench1shot1.search_space import SearchSpace
from naslib.search_spaces.nasbench1shot1.utils import upscale_to_nasbench_format, OUTPUT_NODE, INPUT, CONV1X1, OUTPUT
from naslib.search_spaces.nasbench1shot1.wrappers import Model, Architecture, NasbenchWrapper
class SearchSpace1(SearchSpace):
    """NASBench-1shot1 "search space 1": 4 intermediate nodes with fixed
    per-node parent counts, at most 9 edges per cell."""

    def __init__(self):
        super(SearchSpace1, self).__init__(search_space_number=1, num_intermediate_nodes=4)
        # SEARCH SPACE 1: number of parents for each node of the cell DAG
        # (node '0' is the input, node '5' the output).
        self.num_parents_per_node = {
            '0': 0,
            '1': 1,
            '2': 2,
            '3': 2,
            '4': 2,
            '5': 2
        }
        if sum(self.num_parents_per_node.values()) > 9:
            raise ValueError('Each nasbench cell has at most 9 edges.')
        # Best errors reachable in this search space (from tabular data).
        self.test_min_error = 0.05448716878890991
        self.valid_min_error = 0.049278855323791504

    def create_nasbench_adjacency_matrix(self, parents, **kwargs):
        """Build the cell adjacency matrix (no loose ends) and upscale it to
        the NASBench-compatible format."""
        adjacency_matrix = self._create_adjacency_matrix(parents, adjacency_matrix=np.zeros([6, 6]),
                                                         node=OUTPUT_NODE - 1)
        return upscale_to_nasbench_format(adjacency_matrix)

    def create_nasbench_adjacency_matrix_with_loose_ends(self, parents):
        """Same as above but keeping loose ends (unconnected nodes)."""
        return upscale_to_nasbench_format(self._create_adjacency_matrix_with_loose_ends(parents))

    def generate_adjacency_matrix_without_loose_ends(self):
        """Yield every valid adjacency matrix of this space in NASBench format."""
        for adjacency_matrix in self._generate_adjacency_matrix(adjacency_matrix=np.zeros([6, 6]),
                                                                node=OUTPUT_NODE - 1):
            yield upscale_to_nasbench_format(adjacency_matrix)

    def objective_function(self, nasbench, config, budget=108):
        """Query NASBench for *config* at the given epoch *budget*.

        Records the queried architecture in self.run_history and returns
        (validation_accuracy, training_time).
        """
        adjacency_matrix, node_list = super(SearchSpace1, self).convert_config_to_nasbench_format(config)
        node_list = [INPUT, *node_list, CONV1X1, OUTPUT]
        # np.int was removed in NumPy 1.24; the builtin int is the documented
        # replacement and yields the same adjacency list.
        adjacency_list = adjacency_matrix.astype(int).tolist()
        model_spec = api.ModelSpec(matrix=adjacency_list, ops=node_list)
        nasbench_data = nasbench.query(model_spec, epochs=budget)
        # Record the queried architecture in the run history.
        architecture = Model()
        arch = Architecture(adjacency_matrix=adjacency_matrix,
                            node_list=node_list)
        architecture.update_data(arch, nasbench_data, budget)
        self.run_history.append(architecture)
        return nasbench_data['validation_accuracy'], nasbench_data['training_time']

    def generate_with_loose_ends(self):
        """Yield adjacency matrices for all parent combinations of nodes 3, 4
        and the output node, allowing loose ends."""
        for _, parent_node_3, parent_node_4, output_parents in itertools.product(
                *[itertools.combinations(list(range(int(node))), num_parents) for node, num_parents in
                  self.num_parents_per_node.items()][2:]):
            parents = {
                '0': [],
                '1': [0],
                '2': [0, 1],
                '3': parent_node_3,
                '4': parent_node_4,
                '5': output_parents
            }
            adjacency_matrix = self.create_nasbench_adjacency_matrix_with_loose_ends(parents)
            yield adjacency_matrix
| 42.846154 | 117 | 0.652902 | import itertools
import matplotlib.pyplot as plt
import numpy as np
from nasbench import api
from naslib.search_spaces.nasbench1shot1.search_space import SearchSpace
from naslib.search_spaces.nasbench1shot1.utils import upscale_to_nasbench_format, OUTPUT_NODE, INPUT, CONV1X1, OUTPUT
from naslib.search_spaces.nasbench1shot1.wrappers import Model, Architecture, NasbenchWrapper
class SearchSpace1(SearchSpace):
    """NASBench-1shot1 "search space 1": 4 intermediate nodes with fixed
    per-node parent counts, at most 9 edges per cell."""

    def __init__(self):
        super(SearchSpace1, self).__init__(search_space_number=1, num_intermediate_nodes=4)
        # Number of parents for each node of the cell DAG
        # (node '0' is the input, node '5' the output).
        self.num_parents_per_node = {
            '0': 0,
            '1': 1,
            '2': 2,
            '3': 2,
            '4': 2,
            '5': 2
        }
        if sum(self.num_parents_per_node.values()) > 9:
            raise ValueError('Each nasbench cell has at most 9 edges.')
        # Best errors reachable in this search space (from tabular data).
        self.test_min_error = 0.05448716878890991
        self.valid_min_error = 0.049278855323791504

    def create_nasbench_adjacency_matrix(self, parents, **kwargs):
        """Build the cell adjacency matrix (no loose ends) and upscale it to
        the NASBench-compatible format."""
        adjacency_matrix = self._create_adjacency_matrix(parents, adjacency_matrix=np.zeros([6, 6]),
                                                         node=OUTPUT_NODE - 1)
        return upscale_to_nasbench_format(adjacency_matrix)

    def create_nasbench_adjacency_matrix_with_loose_ends(self, parents):
        """Same as above but keeping loose ends (unconnected nodes)."""
        return upscale_to_nasbench_format(self._create_adjacency_matrix_with_loose_ends(parents))

    def generate_adjacency_matrix_without_loose_ends(self):
        """Yield every valid adjacency matrix of this space in NASBench format."""
        for adjacency_matrix in self._generate_adjacency_matrix(adjacency_matrix=np.zeros([6, 6]),
                                                                node=OUTPUT_NODE - 1):
            yield upscale_to_nasbench_format(adjacency_matrix)

    def objective_function(self, nasbench, config, budget=108):
        """Query NASBench for *config* at the given epoch *budget*.

        Records the queried architecture in self.run_history and returns
        (validation_accuracy, training_time).
        """
        adjacency_matrix, node_list = super(SearchSpace1, self).convert_config_to_nasbench_format(config)
        node_list = [INPUT, *node_list, CONV1X1, OUTPUT]
        # np.int was removed in NumPy 1.24; the builtin int is the documented
        # replacement and yields the same adjacency list.
        adjacency_list = adjacency_matrix.astype(int).tolist()
        model_spec = api.ModelSpec(matrix=adjacency_list, ops=node_list)
        nasbench_data = nasbench.query(model_spec, epochs=budget)
        # Record the queried architecture in the run history.
        architecture = Model()
        arch = Architecture(adjacency_matrix=adjacency_matrix,
                            node_list=node_list)
        architecture.update_data(arch, nasbench_data, budget)
        self.run_history.append(architecture)
        return nasbench_data['validation_accuracy'], nasbench_data['training_time']

    def generate_with_loose_ends(self):
        """Yield adjacency matrices for all parent combinations of nodes 3, 4
        and the output node, allowing loose ends."""
        for _, parent_node_3, parent_node_4, output_parents in itertools.product(
                *[itertools.combinations(list(range(int(node))), num_parents) for node, num_parents in
                  self.num_parents_per_node.items()][2:]):
            parents = {
                '0': [],
                '1': [0],
                '2': [0, 1],
                '3': parent_node_3,
                '4': parent_node_4,
                '5': output_parents
            }
            adjacency_matrix = self.create_nasbench_adjacency_matrix_with_loose_ends(parents)
            yield adjacency_matrix
| true | true |
f7349e0eff85b16a75f6ae2e63116d3777b41f8f | 40,035 | py | Python | intersight/api/ntp_api.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 5 | 2021-12-16T15:13:32.000Z | 2022-03-29T16:09:54.000Z | intersight/api/ntp_api.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 4 | 2022-01-25T19:05:51.000Z | 2022-03-29T20:18:37.000Z | intersight/api/ntp_api.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 2 | 2020-07-07T15:01:08.000Z | 2022-01-31T04:27:35.000Z | """
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.api_client import ApiClient, Endpoint as _Endpoint
from intersight.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from intersight.model.error import Error
from intersight.model.ntp_policy import NtpPolicy
from intersight.model.ntp_policy_response import NtpPolicyResponse
class NtpApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __create_ntp_policy(
self,
ntp_policy,
**kwargs
):
"""Create a 'ntp.Policy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_ntp_policy(ntp_policy, async_req=True)
>>> result = thread.get()
Args:
ntp_policy (NtpPolicy): The 'ntp.Policy' resource to create.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
if_none_match (str): For methods that apply server-side changes, If-None-Match used with the * value can be used to create a resource not known to exist, guaranteeing that another resource creation didn't happen before, losing the data of the previous put. The request will be processed only if the eventually existing resource's ETag doesn't match any of the values listed. Otherwise, the status code 412 (Precondition Failed) is used. The asterisk is a special value representing any resource. It is only useful when creating a resource, usually with PUT, to check if another resource with the identity has already been created before. The comparison with the stored ETag uses the weak comparison algorithm, meaning two resources are considered identical if the content is equivalent - they don't have to be identical byte for byte.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
NtpPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['ntp_policy'] = \
ntp_policy
return self.call_with_http_info(**kwargs)
self.create_ntp_policy = _Endpoint(
settings={
'response_type': (NtpPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/ntp/Policies',
'operation_id': 'create_ntp_policy',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'ntp_policy',
'if_match',
'if_none_match',
],
'required': [
'ntp_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'ntp_policy':
(NtpPolicy,),
'if_match':
(str,),
'if_none_match':
(str,),
},
'attribute_map': {
'if_match': 'If-Match',
'if_none_match': 'If-None-Match',
},
'location_map': {
'ntp_policy': 'body',
'if_match': 'header',
'if_none_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__create_ntp_policy
)
def __delete_ntp_policy(
self,
moid,
**kwargs
):
"""Delete a 'ntp.Policy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_ntp_policy(moid, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
return self.call_with_http_info(**kwargs)
self.delete_ntp_policy = _Endpoint(
settings={
'response_type': None,
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/ntp/Policies/{Moid}',
'operation_id': 'delete_ntp_policy',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'moid',
],
'required': [
'moid',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
},
'attribute_map': {
'moid': 'Moid',
},
'location_map': {
'moid': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__delete_ntp_policy
)
def __get_ntp_policy_by_moid(
self,
moid,
**kwargs
):
"""Read a 'ntp.Policy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_ntp_policy_by_moid(moid, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
NtpPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
return self.call_with_http_info(**kwargs)
self.get_ntp_policy_by_moid = _Endpoint(
settings={
'response_type': (NtpPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/ntp/Policies/{Moid}',
'operation_id': 'get_ntp_policy_by_moid',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'moid',
],
'required': [
'moid',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
},
'attribute_map': {
'moid': 'Moid',
},
'location_map': {
'moid': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'text/csv',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
],
'content_type': [],
},
api_client=api_client,
callable=__get_ntp_policy_by_moid
)
def __get_ntp_policy_list(
self,
**kwargs
):
"""Read a 'ntp.Policy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_ntp_policy_list(async_req=True)
>>> result = thread.get()
Keyword Args:
filter (str): Filter criteria for the resources to return. A URI with a $filter query option identifies a subset of the entries from the Collection of Entries. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the $filter option. The expression language that is used in $filter queries supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false).. [optional] if omitted the server will use the default value of ""
orderby (str): Determines what properties are used to sort the collection of resources.. [optional]
top (int): Specifies the maximum number of resources to return in the response.. [optional] if omitted the server will use the default value of 100
skip (int): Specifies the number of resources to skip in the response.. [optional] if omitted the server will use the default value of 0
select (str): Specifies a subset of properties to return.. [optional] if omitted the server will use the default value of ""
expand (str): Specify additional attributes or related resources to return in addition to the primary resources.. [optional]
apply (str): Specify one or more transformation operations to perform aggregation on the resources. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. The \"$apply\" query takes a sequence of set transformations, separated by forward slashes to express that they are consecutively applied, i.e. the result of each transformation is the input to the next transformation. Supported aggregation methods are \"aggregate\" and \"groupby\". The **aggregate** transformation takes a comma-separated list of one or more aggregate expressions as parameters and returns a result set with a single instance, representing the aggregated value for all instances in the input set. The **groupby** transformation takes one or two parameters and 1. Splits the initial set into subsets where all instances in a subset have the same values for the grouping properties specified in the first parameter, 2. Applies set transformations to each subset according to the second parameter, resulting in a new set of potentially different structure and cardinality, 3. Ensures that the instances in the result set contain all grouping properties with the correct values for the group, 4. Concatenates the intermediate result sets into one result set. A groupby transformation affects the structure of the result set.. [optional]
count (bool): The $count query specifies the service should return the count of the matching resources, instead of returning the resources.. [optional]
inlinecount (str): The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response.. [optional] if omitted the server will use the default value of "allpages"
at (str): Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for resources to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section.. [optional]
tags (str): The 'tags' parameter is used to request a summary of the Tag utilization for this resource. When the 'tags' parameter is specified, the response provides a list of tag keys, the number of times the key has been used across all documents, and the tag values that have been assigned to the tag key.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
NtpPolicyResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.get_ntp_policy_list = _Endpoint(
settings={
'response_type': (NtpPolicyResponse,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/ntp/Policies',
'operation_id': 'get_ntp_policy_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'filter',
'orderby',
'top',
'skip',
'select',
'expand',
'apply',
'count',
'inlinecount',
'at',
'tags',
],
'required': [],
'nullable': [
],
'enum': [
'inlinecount',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('inlinecount',): {
"ALLPAGES": "allpages",
"NONE": "none"
},
},
'openapi_types': {
'filter':
(str,),
'orderby':
(str,),
'top':
(int,),
'skip':
(int,),
'select':
(str,),
'expand':
(str,),
'apply':
(str,),
'count':
(bool,),
'inlinecount':
(str,),
'at':
(str,),
'tags':
(str,),
},
'attribute_map': {
'filter': '$filter',
'orderby': '$orderby',
'top': '$top',
'skip': '$skip',
'select': '$select',
'expand': '$expand',
'apply': '$apply',
'count': '$count',
'inlinecount': '$inlinecount',
'at': 'at',
'tags': 'tags',
},
'location_map': {
'filter': 'query',
'orderby': 'query',
'top': 'query',
'skip': 'query',
'select': 'query',
'expand': 'query',
'apply': 'query',
'count': 'query',
'inlinecount': 'query',
'at': 'query',
'tags': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'text/csv',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
],
'content_type': [],
},
api_client=api_client,
callable=__get_ntp_policy_list
)
def __patch_ntp_policy(
self,
moid,
ntp_policy,
**kwargs
):
"""Update a 'ntp.Policy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_ntp_policy(moid, ntp_policy, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
ntp_policy (NtpPolicy): The 'ntp.Policy' resource to update.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
NtpPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
kwargs['ntp_policy'] = \
ntp_policy
return self.call_with_http_info(**kwargs)
self.patch_ntp_policy = _Endpoint(
settings={
'response_type': (NtpPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/ntp/Policies/{Moid}',
'operation_id': 'patch_ntp_policy',
'http_method': 'PATCH',
'servers': None,
},
params_map={
'all': [
'moid',
'ntp_policy',
'if_match',
],
'required': [
'moid',
'ntp_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
'ntp_policy':
(NtpPolicy,),
'if_match':
(str,),
},
'attribute_map': {
'moid': 'Moid',
'if_match': 'If-Match',
},
'location_map': {
'moid': 'path',
'ntp_policy': 'body',
'if_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'application/json-patch+json'
]
},
api_client=api_client,
callable=__patch_ntp_policy
)
def __update_ntp_policy(
self,
moid,
ntp_policy,
**kwargs
):
"""Update a 'ntp.Policy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_ntp_policy(moid, ntp_policy, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
ntp_policy (NtpPolicy): The 'ntp.Policy' resource to update.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
NtpPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
kwargs['ntp_policy'] = \
ntp_policy
return self.call_with_http_info(**kwargs)
self.update_ntp_policy = _Endpoint(
settings={
'response_type': (NtpPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/ntp/Policies/{Moid}',
'operation_id': 'update_ntp_policy',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'moid',
'ntp_policy',
'if_match',
],
'required': [
'moid',
'ntp_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
'ntp_policy':
(NtpPolicy,),
'if_match':
(str,),
},
'attribute_map': {
'moid': 'Moid',
'if_match': 'If-Match',
},
'location_map': {
'moid': 'path',
'ntp_policy': 'body',
'if_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'application/json-patch+json'
]
},
api_client=api_client,
callable=__update_ntp_policy
)
| 45.494318 | 1,678 | 0.515524 |
import re
import sys
from intersight.api_client import ApiClient, Endpoint as _Endpoint
from intersight.model_utils import (
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from intersight.model.error import Error
from intersight.model.ntp_policy import NtpPolicy
from intersight.model.ntp_policy_response import NtpPolicyResponse
class NtpApi(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __create_ntp_policy(
self,
ntp_policy,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['ntp_policy'] = \
ntp_policy
return self.call_with_http_info(**kwargs)
self.create_ntp_policy = _Endpoint(
settings={
'response_type': (NtpPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/ntp/Policies',
'operation_id': 'create_ntp_policy',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'ntp_policy',
'if_match',
'if_none_match',
],
'required': [
'ntp_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'ntp_policy':
(NtpPolicy,),
'if_match':
(str,),
'if_none_match':
(str,),
},
'attribute_map': {
'if_match': 'If-Match',
'if_none_match': 'If-None-Match',
},
'location_map': {
'ntp_policy': 'body',
'if_match': 'header',
'if_none_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__create_ntp_policy
)
def __delete_ntp_policy(
self,
moid,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
return self.call_with_http_info(**kwargs)
self.delete_ntp_policy = _Endpoint(
settings={
'response_type': None,
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/ntp/Policies/{Moid}',
'operation_id': 'delete_ntp_policy',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'moid',
],
'required': [
'moid',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
},
'attribute_map': {
'moid': 'Moid',
},
'location_map': {
'moid': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__delete_ntp_policy
)
def __get_ntp_policy_by_moid(
self,
moid,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
return self.call_with_http_info(**kwargs)
self.get_ntp_policy_by_moid = _Endpoint(
settings={
'response_type': (NtpPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/ntp/Policies/{Moid}',
'operation_id': 'get_ntp_policy_by_moid',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'moid',
],
'required': [
'moid',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
},
'attribute_map': {
'moid': 'Moid',
},
'location_map': {
'moid': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'text/csv',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
],
'content_type': [],
},
api_client=api_client,
callable=__get_ntp_policy_by_moid
)
def __get_ntp_policy_list(
self,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.get_ntp_policy_list = _Endpoint(
settings={
'response_type': (NtpPolicyResponse,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/ntp/Policies',
'operation_id': 'get_ntp_policy_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'filter',
'orderby',
'top',
'skip',
'select',
'expand',
'apply',
'count',
'inlinecount',
'at',
'tags',
],
'required': [],
'nullable': [
],
'enum': [
'inlinecount',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('inlinecount',): {
"ALLPAGES": "allpages",
"NONE": "none"
},
},
'openapi_types': {
'filter':
(str,),
'orderby':
(str,),
'top':
(int,),
'skip':
(int,),
'select':
(str,),
'expand':
(str,),
'apply':
(str,),
'count':
(bool,),
'inlinecount':
(str,),
'at':
(str,),
'tags':
(str,),
},
'attribute_map': {
'filter': '$filter',
'orderby': '$orderby',
'top': '$top',
'skip': '$skip',
'select': '$select',
'expand': '$expand',
'apply': '$apply',
'count': '$count',
'inlinecount': '$inlinecount',
'at': 'at',
'tags': 'tags',
},
'location_map': {
'filter': 'query',
'orderby': 'query',
'top': 'query',
'skip': 'query',
'select': 'query',
'expand': 'query',
'apply': 'query',
'count': 'query',
'inlinecount': 'query',
'at': 'query',
'tags': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'text/csv',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
],
'content_type': [],
},
api_client=api_client,
callable=__get_ntp_policy_list
)
def __patch_ntp_policy(
self,
moid,
ntp_policy,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
kwargs['ntp_policy'] = \
ntp_policy
return self.call_with_http_info(**kwargs)
self.patch_ntp_policy = _Endpoint(
settings={
'response_type': (NtpPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/ntp/Policies/{Moid}',
'operation_id': 'patch_ntp_policy',
'http_method': 'PATCH',
'servers': None,
},
params_map={
'all': [
'moid',
'ntp_policy',
'if_match',
],
'required': [
'moid',
'ntp_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
'ntp_policy':
(NtpPolicy,),
'if_match':
(str,),
},
'attribute_map': {
'moid': 'Moid',
'if_match': 'If-Match',
},
'location_map': {
'moid': 'path',
'ntp_policy': 'body',
'if_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'application/json-patch+json'
]
},
api_client=api_client,
callable=__patch_ntp_policy
)
def __update_ntp_policy(
self,
moid,
ntp_policy,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
kwargs['ntp_policy'] = \
ntp_policy
return self.call_with_http_info(**kwargs)
self.update_ntp_policy = _Endpoint(
settings={
'response_type': (NtpPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/ntp/Policies/{Moid}',
'operation_id': 'update_ntp_policy',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'moid',
'ntp_policy',
'if_match',
],
'required': [
'moid',
'ntp_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
'ntp_policy':
(NtpPolicy,),
'if_match':
(str,),
},
'attribute_map': {
'moid': 'Moid',
'if_match': 'If-Match',
},
'location_map': {
'moid': 'path',
'ntp_policy': 'body',
'if_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'application/json-patch+json'
]
},
api_client=api_client,
callable=__update_ntp_policy
)
| true | true |
f7349e1f31924a93f2969323365e9e84bd75b01f | 215 | py | Python | serverquest/__main__.py | paulomaateus/quest-backend | 96c5cdfd1e967e0c1702dcd0ec97a1dfbe8d7ba9 | [
"MIT"
] | null | null | null | serverquest/__main__.py | paulomaateus/quest-backend | 96c5cdfd1e967e0c1702dcd0ec97a1dfbe8d7ba9 | [
"MIT"
] | null | null | null | serverquest/__main__.py | paulomaateus/quest-backend | 96c5cdfd1e967e0c1702dcd0ec97a1dfbe8d7ba9 | [
"MIT"
] | null | null | null | from urllib.parse import urlparse
from . app import app
from . sqlite import setup_bd
url = urlparse('http://0.0.0.0:8000')
host, port = url.hostname, url.port
setup_bd()
app.run(host=host, port=port, debug=True)
| 21.5 | 41 | 0.730233 | from urllib.parse import urlparse
from . app import app
from . sqlite import setup_bd
url = urlparse('http://0.0.0.0:8000')
host, port = url.hostname, url.port
setup_bd()
app.run(host=host, port=port, debug=True)
| true | true |
f7349ece7e2828de32c74fa1e7bd0cd3f51274ad | 2,227 | py | Python | apispec/compat.py | theirix/apispec-py35-compat | 5aead501eb19997f8483192b0e4e4ac0f6ad9398 | [
"MIT"
] | null | null | null | apispec/compat.py | theirix/apispec-py35-compat | 5aead501eb19997f8483192b0e4e4ac0f6ad9398 | [
"MIT"
] | null | null | null | apispec/compat.py | theirix/apispec-py35-compat | 5aead501eb19997f8483192b0e4e4ac0f6ad9398 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
import itertools
import functools
import inspect
PY2 = int(sys.version_info[0]) == 2
PY26 = PY2 and int(sys.version_info[1]) < 7
if PY2:
import urlparse
urlparse = urlparse
text_type = unicode
binary_type = str
string_types = (str, unicode)
unicode = unicode
basestring = basestring
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
zip_longest = itertools.izip_longest
if PY26:
from .ordereddict import OrderedDict
else:
from collections import OrderedDict
OrderedDict = OrderedDict
def get_func_args(func):
if isinstance(func, functools.partial):
return list(inspect.getargspec(func.func).args)
if inspect.isfunction(func) or inspect.ismethod(func):
return list(inspect.getargspec(func).args)
if callable(func):
return list(inspect.getargspec(func.__call__).args)
else:
import urllib.parse
urlparse = urllib.parse
text_type = str
binary_type = bytes
string_types = (str,)
unicode = str
basestring = (str, bytes)
iterkeys = lambda d: d.keys()
itervalues = lambda d: d.values()
iteritems = lambda d: d.items()
zip_longest = itertools.zip_longest
from collections import OrderedDict
OrderedDict = OrderedDict
def get_func_args(func):
if isinstance(func, functools.partial):
return list(inspect.signature(func.func).parameters)
if inspect.isfunction(func):
return list(inspect.signature(func).parameters)
if callable(func) or inspect.ismethod(func):
return ['self'] + list(inspect.signature(func.__call__).parameters)
# From six
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta): # noqa
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
| 32.275362 | 79 | 0.667265 |
import sys
import itertools
import functools
import inspect
PY2 = int(sys.version_info[0]) == 2
PY26 = PY2 and int(sys.version_info[1]) < 7
if PY2:
import urlparse
urlparse = urlparse
text_type = unicode
binary_type = str
string_types = (str, unicode)
unicode = unicode
basestring = basestring
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
zip_longest = itertools.izip_longest
if PY26:
from .ordereddict import OrderedDict
else:
from collections import OrderedDict
OrderedDict = OrderedDict
def get_func_args(func):
if isinstance(func, functools.partial):
return list(inspect.getargspec(func.func).args)
if inspect.isfunction(func) or inspect.ismethod(func):
return list(inspect.getargspec(func).args)
if callable(func):
return list(inspect.getargspec(func.__call__).args)
else:
import urllib.parse
urlparse = urllib.parse
text_type = str
binary_type = bytes
string_types = (str,)
unicode = str
basestring = (str, bytes)
iterkeys = lambda d: d.keys()
itervalues = lambda d: d.values()
iteritems = lambda d: d.items()
zip_longest = itertools.zip_longest
from collections import OrderedDict
OrderedDict = OrderedDict
def get_func_args(func):
if isinstance(func, functools.partial):
return list(inspect.signature(func.func).parameters)
if inspect.isfunction(func):
return list(inspect.signature(func).parameters)
if callable(func) or inspect.ismethod(func):
return ['self'] + list(inspect.signature(func.__call__).parameters)
def with_metaclass(meta, *bases):
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
| true | true |
f734a025203a7f4ca05ab44024b66da56788db5b | 1,357 | py | Python | algorithms/leetcode/contest/83/829.py | bigfoolliu/liu_aistuff | aa661d37c05c257ee293285dd0868fb7e8227628 | [
"MIT"
] | 1 | 2019-11-25T07:23:42.000Z | 2019-11-25T07:23:42.000Z | algorithms/leetcode/contest/83/829.py | bigfoolliu/liu_aistuff | aa661d37c05c257ee293285dd0868fb7e8227628 | [
"MIT"
] | 13 | 2020-01-07T16:09:47.000Z | 2022-03-02T12:51:44.000Z | algorithms/leetcode/contest/83/829.py | bigfoolliu/liu_aistuff | aa661d37c05c257ee293285dd0868fb7e8227628 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
"""
829. 连续整数求和
给定一个正整数 N,试求有多少组连续正整数满足所有数字之和为 N?
示例 1:
输入: 5
输出: 2
解释: 5 = 5 = 2 + 3,共有两组连续整数([5],[2,3])求和后为 5。
示例 2:
输入: 9
输出: 3
解释: 9 = 9 = 4 + 5 = 2 + 3 + 4
示例 3:
输入: 15
输出: 4
解释: 15 = 15 = 8 + 7 = 4 + 5 + 6 = 1 + 2 + 3 + 4 + 5
middle_ret肯定为有理数,否则不存在
N middle(数字个数) middle_ret(差不多为中间数字或者大1或者小1)
15 1 15 必须有
15 3 5 整除,中间数字为5, 5 >= (3-1)/2, 可以
15 5 3 整除,中间数字为3, 3 >= (5-1)/2, 可以
15 7 2.1xx 非整除,肯定不可以
15 2 7.5 可以 7 + 8
15 4 3.75, 3或者4,
15 6 2.5,2或者3
说明: 1 <= N <= 10 ^ 9
"""
import unittest
class Solution:
def consecutiveNumbersSum(self, N):
"""
:param N: int
:return:
"""
ret = 1
# 1.从1开始整除,如果可以有连续的结果,那么肯定为奇数或者偶数个整数相加得到
# 2.如果最终结果为奇数个数相加,则中间的整数middle能被整除,且整除结果middle_ret肯定不小于 (middle-1)/2
# 3.如果最终结果为偶数个数相加,则
for i in range(0, ):
pass
return ret
class TestDemo(unittest.TestCase):
def setUp(self):
self.solution = Solution().consecutiveNumbersSum
def test1(self):
self.assertEqual(2, self.solution(5))
def test2(self):
self.assertEqual(3, self.solution(9))
def test3(self):
self.assertEqual(4, self.solution(15))
if __name__ == '__main__':
unittest.main()
| 17.177215 | 76 | 0.553427 |
import unittest
class Solution:
def consecutiveNumbersSum(self, N):
ret = 1
for i in range(0, ):
pass
return ret
class TestDemo(unittest.TestCase):
def setUp(self):
self.solution = Solution().consecutiveNumbersSum
def test1(self):
self.assertEqual(2, self.solution(5))
def test2(self):
self.assertEqual(3, self.solution(9))
def test3(self):
self.assertEqual(4, self.solution(15))
if __name__ == '__main__':
unittest.main()
| true | true |
f734a0f335eab1452a47e6cc480befd545d92da6 | 1,921 | py | Python | doc/source/conf.py | Sushil-Thapa/rectif.ai | b308f613402097dca9734806a8c27ba3eef6a358 | [
"Apache-2.0"
] | null | null | null | doc/source/conf.py | Sushil-Thapa/rectif.ai | b308f613402097dca9734806a8c27ba3eef6a358 | [
"Apache-2.0"
] | null | null | null | doc/source/conf.py | Sushil-Thapa/rectif.ai | b308f613402097dca9734806a8c27ba3eef6a358 | [
"Apache-2.0"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
sys.setrecursionlimit(1500)
# -- Project information -----------------------------------------------------
project = 'Rectif.ai'
copyright = '2019, Sushil Thapa'
author = 'Sushil Thapa'
# The full version, including alpha/beta/rc tags
release = '0.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] | 33.701754 | 79 | 0.664758 |
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
sys.setrecursionlimit(1500)
project = 'Rectif.ai'
copyright = '2019, Sushil Thapa'
author = 'Sushil Thapa'
release = '0.1'
extensions = [
]
templates_path = ['_templates']
exclude_patterns = []
html_theme = 'alabaster'
html_static_path = ['_static'] | true | true |
f734a1ebae840c7bfd5231e3e270bc9608b6839a | 3,454 | py | Python | mvdcim/mvdcim.py | rstms/mvdcim | 52b5a6849e393eeb749e82af2f7d42a08e8607f1 | [
"MIT"
] | null | null | null | mvdcim/mvdcim.py | rstms/mvdcim | 52b5a6849e393eeb749e82af2f7d42a08e8607f1 | [
"MIT"
] | null | null | null | mvdcim/mvdcim.py | rstms/mvdcim | 52b5a6849e393eeb749e82af2f7d42a08e8607f1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from arrow import Arrow
from pathlib import Path
from os import environ
from subprocess import run
from tempfile import mkdtemp
from shutil import rmtree
from pprint import pprint
class DCIM():
def __init__(self, device=None, source=None, target=None, confirm=False, date_path=True):
self.device=device or environ['DCIM_DEVICE']
self.source=source or environ['DCIM_SOURCE']
self.target = str(self.make_target_path(target or environ['DCIM_TARGET'], date_path))
self.confirm=confirm
def __enter__(self):
self.temp_dir = Path(mkdtemp())
return self
def __exit__(self, etype, value, tb):
rmtree(str(self.temp_dir))
return False
def output(self, argv):
p = run(argv, capture_output=True, universal_newlines=True)
lines = [l for l in p.stdout.split('\n') if l]
return lines
def call(self, args):
return run(args)
def rsync(self, args=[], target=True):
argv = ['rsync']
argv.extend(args)
argv.append(self.device+':'+self.source)
if target:
argv.append(self.target)
return argv
def ssh(self, cmd='uname', source=True):
if source:
_cmd = f"cd {self.source};{cmd}"
else:
_cmd = cmd
return ['ssh', self.device, _cmd]
def get_source_files(self):
source_files = self.output(self.rsync(['-az', '--list-only'], target=False))
self.write_output_file(source_files, 'source_files')
return source_files
def copy_files(self):
return self.call(self.rsync(['-az','--info=progress2']))
def get_file_list(self):
files = self.output(self.ssh('find . -type f'))
self.list_file = self.write_output_file(files, 'file_list')
return files
def write_output_file(self, lines, filename):
outpath = self.temp_dir / filename
outpath.write_text('\n'.join(lines))
return str(outpath)
def get_source_checksums(self):
file_list = self.get_file_list()
checksums = []
for pathname in file_list:
checksums.append(self.get_md5_checksum(pathname))
self.checksum_file = self.write_output_file(checksums, 'checksums')
return checksums
def get_md5_checksum(self, pathname):
return self.output(self.ssh(f'md5sum {pathname}'))[0]
def verify_target_checksums(self):
ret=[]
self.get_source_checksums()
p = run(['md5sum', '-c', self.checksum_file], cwd=self.target, universal_newlines=True, capture_output=True)
if p.returncode==0:
for line in str(p.stdout).split('\n'):
if not line:
continue
if not line.endswith(': OK'):
raise RuntimeError(f'Target Checksum Verify failed: {line}')
else:
ret.append(line[:-4])
return ret
else:
print(p.stderr)
return False
def make_target_path(self, target, date_path):
target = Path(target)
if not target.exists():
target.mkdir()
if date_path:
target = target / Arrow.now().date().strftime('%Y-%m-%d-DCIM')
if not target.exists():
target.mkdir()
return target
def delete_source(self, pathname):
return self.call(self.ssh(f"rm {pathname}"))
| 31.981481 | 116 | 0.601332 |
from arrow import Arrow
from pathlib import Path
from os import environ
from subprocess import run
from tempfile import mkdtemp
from shutil import rmtree
from pprint import pprint
class DCIM():
    """Back up a camera/phone DCIM directory to a local target via ssh/rsync.

    The remote host (*device*), remote directory (*source*) and local
    directory (*target*) default to the ``DCIM_DEVICE``, ``DCIM_SOURCE``
    and ``DCIM_TARGET`` environment variables.  Use as a context manager
    so the temporary scratch directory (file lists, checksum files) is
    created on entry and removed on exit.
    """

    def __init__(self, device=None, source=None, target=None, confirm=False, date_path=True):
        self.device = device or environ['DCIM_DEVICE']
        self.source = source or environ['DCIM_SOURCE']
        # make_target_path creates the directory (and optional dated subdir).
        self.target = str(self.make_target_path(target or environ['DCIM_TARGET'], date_path))
        self.confirm = confirm

    def __enter__(self):
        # Scratch directory for intermediate file lists / checksum files.
        self.temp_dir = Path(mkdtemp())
        return self

    def __exit__(self, etype, value, tb):
        rmtree(str(self.temp_dir))
        return False  # never swallow exceptions

    def output(self, argv):
        """Run *argv* and return its non-empty stdout lines."""
        p = run(argv, capture_output=True, universal_newlines=True)
        lines = [l for l in p.stdout.split('\n') if l]
        return lines

    def call(self, args):
        """Run *args* without capturing output; return the CompletedProcess."""
        return run(args)

    def rsync(self, args=None, target=True):
        """Build an rsync argv for device:source, optionally ending in target.

        *args* defaults to ``None`` instead of a shared mutable list
        (mutable-default pitfall); behaviour for callers is unchanged.
        """
        argv = ['rsync']
        argv.extend(args or [])
        argv.append(self.device + ':' + self.source)
        if target:
            argv.append(self.target)
        return argv

    def ssh(self, cmd='uname', source=True):
        """Build an ssh argv running *cmd*, by default inside self.source."""
        if source:
            _cmd = f"cd {self.source};{cmd}"
        else:
            _cmd = cmd
        return ['ssh', self.device, _cmd]

    def get_source_files(self):
        """List files rsync would transfer and record the list to disk."""
        source_files = self.output(self.rsync(['-az', '--list-only'], target=False))
        self.write_output_file(source_files, 'source_files')
        return source_files

    def copy_files(self):
        """Copy everything from the device with overall progress reporting."""
        return self.call(self.rsync(['-az', '--info=progress2']))

    def get_file_list(self):
        """Return relative paths of all regular files in the source dir."""
        files = self.output(self.ssh('find . -type f'))
        self.list_file = self.write_output_file(files, 'file_list')
        return files

    def write_output_file(self, lines, filename):
        """Write *lines* to *filename* inside the temp dir; return its path."""
        outpath = self.temp_dir / filename
        outpath.write_text('\n'.join(lines))
        return str(outpath)

    def get_source_checksums(self):
        """Collect an md5 checksum line for every source file; record them."""
        file_list = self.get_file_list()
        checksums = [self.get_md5_checksum(pathname) for pathname in file_list]
        self.checksum_file = self.write_output_file(checksums, 'checksums')
        return checksums

    def get_md5_checksum(self, pathname):
        """Return the remote ``md5sum`` output line for *pathname*."""
        return self.output(self.ssh(f'md5sum {pathname}'))[0]

    def verify_target_checksums(self):
        """Verify copied files in the target dir against source checksums.

        Returns the list of verified paths on success, ``False`` when
        ``md5sum -c`` exits non-zero (stderr is printed), and raises
        RuntimeError on a per-file mismatch line.
        """
        ret = []
        self.get_source_checksums()
        p = run(['md5sum', '-c', self.checksum_file], cwd=self.target,
                universal_newlines=True, capture_output=True)
        if p.returncode == 0:
            for line in str(p.stdout).split('\n'):
                if not line:
                    continue
                if not line.endswith(': OK'):
                    raise RuntimeError(f'Target Checksum Verify failed: {line}')
                ret.append(line[:-4])  # strip trailing ': OK'
            return ret
        print(p.stderr)
        return False

    def make_target_path(self, target, date_path):
        """Ensure *target* exists; optionally add/return a dated subdirectory."""
        target = Path(target)
        if not target.exists():
            target.mkdir()
        if date_path:
            target = target / Arrow.now().date().strftime('%Y-%m-%d-DCIM')
            if not target.exists():
                target.mkdir()
        return target

    def delete_source(self, pathname):
        """Delete *pathname* on the device.

        NOTE(review): the path is interpolated unquoted into a remote shell
        command; callers must pass the plain names from get_file_list.
        """
        return self.call(self.ssh(f"rm {pathname}"))
| true | true |
f734a2b46083402b4cce7977cf19d6a06b6757d4 | 431 | py | Python | src/helpers/diagnostic_helpers.py | Freudenberger/NeuralNetworks | 5390cdb7b0fe99487b12a95e32f0aa5954239b99 | [
"MIT"
] | null | null | null | src/helpers/diagnostic_helpers.py | Freudenberger/NeuralNetworks | 5390cdb7b0fe99487b12a95e32f0aa5954239b99 | [
"MIT"
] | null | null | null | src/helpers/diagnostic_helpers.py | Freudenberger/NeuralNetworks | 5390cdb7b0fe99487b12a95e32f0aa5954239b99 | [
"MIT"
] | null | null | null | import time
import helper
def time_measure(func):
"""
Decorator function to measure time
"""
def inner(*args_, **kwargs_):
"""
args_ contains: [team_id, ...]
"""
t0_ = time.time()
output = func(*args_, **kwargs_)
print("[{0}] Execution time of '{1}': {2} [s]".format(helper.get_date_time(), func.__name__, time.time() - t0_))
return output
return inner
| 23.944444 | 120 | 0.554524 | import time
import helper
def time_measure(func):
def inner(*args_, **kwargs_):
t0_ = time.time()
output = func(*args_, **kwargs_)
print("[{0}] Execution time of '{1}': {2} [s]".format(helper.get_date_time(), func.__name__, time.time() - t0_))
return output
return inner
| true | true |
f734a3c3651d6e1ef3e606899db9702c59c70e07 | 13,974 | py | Python | google/cloud/vision_v1p2beta1/services/image_annotator/transports/grpc.py | danielcressman/python-vision | ca56a71bda813ae6cfa386322e80a3e36852c3ef | [
"Apache-2.0"
] | null | null | null | google/cloud/vision_v1p2beta1/services/image_annotator/transports/grpc.py | danielcressman/python-vision | ca56a71bda813ae6cfa386322e80a3e36852c3ef | [
"Apache-2.0"
] | null | null | null | google/cloud/vision_v1p2beta1/services/image_annotator/transports/grpc.py | danielcressman/python-vision | ca56a71bda813ae6cfa386322e80a3e36852c3ef | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.vision_v1p2beta1.types import image_annotator
from google.longrunning import operations_pb2 # type: ignore
from .base import ImageAnnotatorTransport, DEFAULT_CLIENT_INFO
class ImageAnnotatorGrpcTransport(ImageAnnotatorTransport):
    """gRPC backend transport for ImageAnnotator.
    Service that performs Google Cloud Vision API detection tasks
    over client images, such as face, landmark, logo, label, and
    text detection. The ImageAnnotator service returns detected
    entities from the images.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # Cache of gRPC stub callables, keyed by RPC method name.
    _stubs: Dict[str, Callable]
    def __init__(
        self,
        *,
        host: str = "vision.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: Optional[grpc.Channel] = None,
        api_mtls_endpoint: Optional[str] = None,
        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        self._operations_client = None
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @classmethod
    def create_channel(
        cls,
        host: str = "vision.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.
        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel
    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Create the client designed to process long-running operations.
        This property caches on the instance; repeated calls return the same
        client.
        """
        # Sanity check: Only create a new client if we do not already have one.
        if self._operations_client is None:
            self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
        # Return the client from cache.
        return self._operations_client
    @property
    def batch_annotate_images(
        self,
    ) -> Callable[
        [image_annotator.BatchAnnotateImagesRequest],
        image_annotator.BatchAnnotateImagesResponse,
    ]:
        r"""Return a callable for the batch annotate images method over gRPC.
        Run image detection and annotation for a batch of
        images.
        Returns:
            Callable[[~.BatchAnnotateImagesRequest],
                    ~.BatchAnnotateImagesResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "batch_annotate_images" not in self._stubs:
            self._stubs["batch_annotate_images"] = self.grpc_channel.unary_unary(
                "/google.cloud.vision.v1p2beta1.ImageAnnotator/BatchAnnotateImages",
                request_serializer=image_annotator.BatchAnnotateImagesRequest.serialize,
                response_deserializer=image_annotator.BatchAnnotateImagesResponse.deserialize,
            )
        return self._stubs["batch_annotate_images"]
    @property
    def async_batch_annotate_files(
        self,
    ) -> Callable[
        [image_annotator.AsyncBatchAnnotateFilesRequest], operations_pb2.Operation
    ]:
        r"""Return a callable for the async batch annotate files method over gRPC.
        Run async image detection and annotation for a list of generic
        files (e.g. PDF) which may contain multiple pages and multiple
        images per page. Progress and results can be retrieved through
        the ``google.longrunning.Operations`` interface.
        ``Operation.metadata`` contains ``OperationMetadata``
        (metadata). ``Operation.response`` contains
        ``AsyncBatchAnnotateFilesResponse`` (results).
        Returns:
            Callable[[~.AsyncBatchAnnotateFilesRequest],
                    ~.Operation]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "async_batch_annotate_files" not in self._stubs:
            self._stubs["async_batch_annotate_files"] = self.grpc_channel.unary_unary(
                "/google.cloud.vision.v1p2beta1.ImageAnnotator/AsyncBatchAnnotateFiles",
                request_serializer=image_annotator.AsyncBatchAnnotateFilesRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["async_batch_annotate_files"]
__all__ = ("ImageAnnotatorGrpcTransport",)
| 44.503185 | 94 | 0.64527 |
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth
from google.auth import credentials as ga_credentials
from google.auth.transport.grpc import SslCredentials
import grpc
from google.cloud.vision_v1p2beta1.types import image_annotator
from google.longrunning import operations_pb2
from .base import ImageAnnotatorTransport, DEFAULT_CLIENT_INFO
class ImageAnnotatorGrpcTransport(ImageAnnotatorTransport):
    """gRPC backend transport for the ImageAnnotator service.

    Sends protocol buffers over a gRPC channel (HTTP/2); defines the same
    methods as the primary client so the client can delegate calls to
    this transport implementation.
    """
    # Cache of gRPC stub callables, keyed by RPC method name.
    _stubs: Dict[str, Callable]
    def __init__(
        self,
        *,
        host: str = "vision.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: Optional[grpc.Channel] = None,
        api_mtls_endpoint: Optional[str] = None,
        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.

        If *channel* is provided it is used as-is and all credential
        arguments are ignored; otherwise a channel is created from the
        credential / mTLS arguments.  ``api_mtls_endpoint`` and
        ``client_cert_source`` are deprecated in favor of
        ``client_cert_source_for_mtls`` + ``ssl_channel_credentials``.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        self._operations_client = None
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # An explicit channel wins: ignore credentials entirely.
            credentials = False
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                # Deprecated mTLS path: endpoint overrides host.
                host = api_mtls_endpoint
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    # Fall back to application default SSL credentials.
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport resolves host, credentials and scopes.
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    # Unlimited message sizes in both directions.
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Must run after self._grpc_channel exists.
        self._prep_wrapped_messages(client_info)
    @classmethod
    def create_channel(
        cls,
        host: str = "vision.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel for *host*.

        ``credentials`` and ``credentials_file`` are mutually exclusive
        (the helper raises DuplicateCredentialArgs for both).
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel used to connect to this service."""
        return self._grpc_channel
    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Return the long-running-operations client (cached per instance)."""
        if self._operations_client is None:
            self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
        return self._operations_client
    @property
    def batch_annotate_images(
        self,
    ) -> Callable[
        [image_annotator.BatchAnnotateImagesRequest],
        image_annotator.BatchAnnotateImagesResponse,
    ]:
        """Return the (cached) gRPC stub for BatchAnnotateImages."""
        # Stubs are created lazily; gRPC handles (de)serialization.
        if "batch_annotate_images" not in self._stubs:
            self._stubs["batch_annotate_images"] = self.grpc_channel.unary_unary(
                "/google.cloud.vision.v1p2beta1.ImageAnnotator/BatchAnnotateImages",
                request_serializer=image_annotator.BatchAnnotateImagesRequest.serialize,
                response_deserializer=image_annotator.BatchAnnotateImagesResponse.deserialize,
            )
        return self._stubs["batch_annotate_images"]
    @property
    def async_batch_annotate_files(
        self,
    ) -> Callable[
        [image_annotator.AsyncBatchAnnotateFilesRequest], operations_pb2.Operation
    ]:
        """Return the (cached) gRPC stub for AsyncBatchAnnotateFiles.

        This RPC returns a long-running Operation; results are retrieved
        through the ``google.longrunning.Operations`` interface.
        """
        if "async_batch_annotate_files" not in self._stubs:
            self._stubs["async_batch_annotate_files"] = self.grpc_channel.unary_unary(
                "/google.cloud.vision.v1p2beta1.ImageAnnotator/AsyncBatchAnnotateFiles",
                request_serializer=image_annotator.AsyncBatchAnnotateFilesRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["async_batch_annotate_files"]
__all__ = ("ImageAnnotatorGrpcTransport",)
| true | true |
f734a3c8c618e126ea427c6d178a8945d777e923 | 2,707 | py | Python | .test-infra/metrics/sync/github/queries.py | charithe/beam | f085cb500730cf0c67c467ac55f92b3c59f52b39 | [
"Apache-2.0"
] | 5,279 | 2016-12-29T04:00:44.000Z | 2022-03-31T22:56:45.000Z | .test-infra/metrics/sync/github/queries.py | charithe/beam | f085cb500730cf0c67c467ac55f92b3c59f52b39 | [
"Apache-2.0"
] | 14,149 | 2016-12-28T00:43:50.000Z | 2022-03-31T23:50:22.000Z | .test-infra/metrics/sync/github/queries.py | charithe/beam | f085cb500730cf0c67c467ac55f92b3c59f52b39 | [
"Apache-2.0"
] | 3,763 | 2016-12-29T04:06:10.000Z | 2022-03-31T22:25:49.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
'''
This query is used to fetch PR data from github via Github API v4 (GraphQL).
Returns PRs on apache/beam repo that are older than provided timestamp.
Time format "2017-10-26T20:00:00Z
'''
MAIN_PR_QUERY = '''
query {
search(query: "type:pr repo:apache/beam updated:><TemstampSubstitueLocation> sort:updated-asc", type: ISSUE, first: 100) {
issueCount
pageInfo {
endCursor
startCursor
hasNextPage
hasPreviousPage
}
edges {
cursor
node {
... on PullRequest {
number
createdAt
updatedAt
closedAt
comments(first: 100) {
pageInfo {
endCursor
startCursor
hasNextPage
hasPreviousPage
}
edges {
node {
author {
login
}
body
createdAt
}
}
}
reviewRequests(first: 50) {
pageInfo {
startCursor
endCursor
hasNextPage
hasPreviousPage
}
edges {
node {
requestedReviewer {
... on User {
login
}
}
}
}
}
assignees(first: 50) {
pageInfo {
startCursor
endCursor
hasNextPage
hasPreviousPage
}
edges {
node {
login
}
}
}
reviews (first:50) {
pageInfo {
startCursor
endCursor
hasNextPage
hasPreviousPage
}
edges {
node {
author {
login
}
body
createdAt
state
}
}
}
author {
login
}
url
body
merged
mergedAt
mergedBy {
login
}
}
}
}
}
}
'''
| 23.136752 | 124 | 0.437754 |
MAIN_PR_QUERY = '''
query {
search(query: "type:pr repo:apache/beam updated:><TemstampSubstitueLocation> sort:updated-asc", type: ISSUE, first: 100) {
issueCount
pageInfo {
endCursor
startCursor
hasNextPage
hasPreviousPage
}
edges {
cursor
node {
... on PullRequest {
number
createdAt
updatedAt
closedAt
comments(first: 100) {
pageInfo {
endCursor
startCursor
hasNextPage
hasPreviousPage
}
edges {
node {
author {
login
}
body
createdAt
}
}
}
reviewRequests(first: 50) {
pageInfo {
startCursor
endCursor
hasNextPage
hasPreviousPage
}
edges {
node {
requestedReviewer {
... on User {
login
}
}
}
}
}
assignees(first: 50) {
pageInfo {
startCursor
endCursor
hasNextPage
hasPreviousPage
}
edges {
node {
login
}
}
}
reviews (first:50) {
pageInfo {
startCursor
endCursor
hasNextPage
hasPreviousPage
}
edges {
node {
author {
login
}
body
createdAt
state
}
}
}
author {
login
}
url
body
merged
mergedAt
mergedBy {
login
}
}
}
}
}
}
'''
| true | true |
f734a6e1fdf6fe69c8627fb3e248786bf735f7b7 | 4,892 | py | Python | tests/handlers/influxdb_test.py | slaclab/gafaelfawr | 7a64b0f159003d4745531c89d5b0f7d9777f7bce | [
"MIT"
] | null | null | null | tests/handlers/influxdb_test.py | slaclab/gafaelfawr | 7a64b0f159003d4745531c89d5b0f7d9777f7bce | [
"MIT"
] | null | null | null | tests/handlers/influxdb_test.py | slaclab/gafaelfawr | 7a64b0f159003d4745531c89d5b0f7d9777f7bce | [
"MIT"
] | null | null | null | """Tests for the ``/auth/tokens/influxdb`` route."""
from __future__ import annotations
from pathlib import Path
from unittest.mock import ANY
import jwt
import pytest
from _pytest.logging import LogCaptureFixture
from httpx import AsyncClient
from gafaelfawr.config import Config
from gafaelfawr.factory import ComponentFactory
from ..support.constants import TEST_HOSTNAME
from ..support.headers import assert_unauthorized_is_correct
from ..support.logging import parse_log
from ..support.settings import configure
from ..support.tokens import create_session_token
@pytest.mark.asyncio
async def test_influxdb(
    client: AsyncClient,
    config: Config,
    factory: ComponentFactory,
    caplog: LogCaptureFixture,
) -> None:
    """A valid session token mints an HS256 InfluxDB JWT and logs the event.

    Verifies the JWT header/claims (username, exp matching the session
    token's expiry) and the structured log entry.
    """
    token_data = await create_session_token(factory)
    assert token_data.expires
    influxdb_secret = config.issuer.influxdb_secret
    assert influxdb_secret
    caplog.clear()
    r = await client.get(
        "/auth/tokens/influxdb/new",
        headers={"Authorization": f"bearer {token_data.token}"},
    )
    assert r.status_code == 200
    data = r.json()
    assert data == {"token": ANY}
    influxdb_token = data["token"]
    # The issued token must be an HS256 JWT verifiable with the shared secret.
    header = jwt.get_unverified_header(influxdb_token)
    assert header == {"alg": "HS256", "typ": "JWT"}
    claims = jwt.decode(influxdb_token, influxdb_secret, algorithms=["HS256"])
    assert claims == {
        "username": token_data.username,
        "exp": int(token_data.expires.timestamp()),
        "iat": ANY,
    }
    assert parse_log(caplog) == [
        {
            "event": "Issued InfluxDB token",
            "influxdb_username": token_data.username,
            "httpRequest": {
                "requestMethod": "GET",
                "requestUrl": (
                    f"https://{TEST_HOSTNAME}/auth/tokens/influxdb/new"
                ),
                "remoteIp": "127.0.0.1",
            },
            "scope": "user:token",
            "severity": "info",
            "token": token_data.token.key,
            "token_source": "bearer",
            "user": token_data.username,
        }
    ]
@pytest.mark.asyncio
async def test_no_auth(client: AsyncClient, config: Config) -> None:
    """Requests without credentials get the standard unauthorized response."""
    r = await client.get("/auth/tokens/influxdb/new")
    assert_unauthorized_is_correct(r, config)
@pytest.mark.asyncio
async def test_not_configured(
    tmp_path: Path,
    client: AsyncClient,
    factory: ComponentFactory,
    caplog: LogCaptureFixture,
) -> None:
    """Without an InfluxDB issuer configured, the route returns 404.

    Uses the "oidc" settings (no InfluxDB secret) and checks both the
    error payload type and the warning log entry.
    """
    config = await configure(tmp_path, "oidc")
    factory.reconfigure(config)
    token_data = await create_session_token(factory)
    caplog.clear()
    r = await client.get(
        "/auth/tokens/influxdb/new",
        headers={"Authorization": f"bearer {token_data.token}"},
    )
    assert r.status_code == 404
    assert r.json()["detail"]["type"] == "not_supported"
    assert parse_log(caplog) == [
        {
            "error": "No InfluxDB issuer configuration",
            "event": "Not configured",
            "httpRequest": {
                "requestMethod": "GET",
                "requestUrl": (
                    f"https://{TEST_HOSTNAME}/auth/tokens/influxdb/new"
                ),
                "remoteIp": "127.0.0.1",
            },
            "scope": "user:token",
            "severity": "warning",
            "token": token_data.token.key,
            "token_source": "bearer",
            "user": token_data.username,
        }
    ]
@pytest.mark.asyncio
async def test_influxdb_force_username(
    tmp_path: Path,
    client: AsyncClient,
    factory: ComponentFactory,
    caplog: LogCaptureFixture,
) -> None:
    """With a forced username configured, every issued JWT uses it.

    Uses the "influxdb-username" settings; the JWT and log entry carry
    "influxdb-user" rather than the authenticated user's name.
    """
    config = await configure(tmp_path, "influxdb-username")
    factory.reconfigure(config)
    token_data = await create_session_token(factory)
    assert token_data.expires
    influxdb_secret = config.issuer.influxdb_secret
    assert influxdb_secret
    caplog.clear()
    r = await client.get(
        "/auth/tokens/influxdb/new",
        headers={"Authorization": f"bearer {token_data.token}"},
    )
    assert r.status_code == 200
    data = r.json()
    claims = jwt.decode(data["token"], influxdb_secret, algorithms=["HS256"])
    assert claims == {
        "username": "influxdb-user",
        "exp": int(token_data.expires.timestamp()),
        "iat": ANY,
    }
    assert parse_log(caplog) == [
        {
            "event": "Issued InfluxDB token",
            "influxdb_username": "influxdb-user",
            "httpRequest": {
                "requestMethod": "GET",
                "requestUrl": (
                    f"https://{TEST_HOSTNAME}/auth/tokens/influxdb/new"
                ),
                "remoteIp": "127.0.0.1",
            },
            "scope": "user:token",
            "severity": "info",
            "token": token_data.token.key,
            "token_source": "bearer",
            "user": token_data.username,
        }
    ]
| 29.119048 | 78 | 0.600164 |
from __future__ import annotations
from pathlib import Path
from unittest.mock import ANY
import jwt
import pytest
from _pytest.logging import LogCaptureFixture
from httpx import AsyncClient
from gafaelfawr.config import Config
from gafaelfawr.factory import ComponentFactory
from ..support.constants import TEST_HOSTNAME
from ..support.headers import assert_unauthorized_is_correct
from ..support.logging import parse_log
from ..support.settings import configure
from ..support.tokens import create_session_token
@pytest.mark.asyncio
async def test_influxdb(
    client: AsyncClient,
    config: Config,
    factory: ComponentFactory,
    caplog: LogCaptureFixture,
) -> None:
    """A valid session token mints an HS256 InfluxDB JWT and logs the event.

    Verifies the JWT header/claims (username, exp matching the session
    token's expiry) and the structured log entry.
    """
    token_data = await create_session_token(factory)
    assert token_data.expires
    influxdb_secret = config.issuer.influxdb_secret
    assert influxdb_secret
    caplog.clear()
    r = await client.get(
        "/auth/tokens/influxdb/new",
        headers={"Authorization": f"bearer {token_data.token}"},
    )
    assert r.status_code == 200
    data = r.json()
    assert data == {"token": ANY}
    influxdb_token = data["token"]
    # The issued token must be an HS256 JWT verifiable with the shared secret.
    header = jwt.get_unverified_header(influxdb_token)
    assert header == {"alg": "HS256", "typ": "JWT"}
    claims = jwt.decode(influxdb_token, influxdb_secret, algorithms=["HS256"])
    assert claims == {
        "username": token_data.username,
        "exp": int(token_data.expires.timestamp()),
        "iat": ANY,
    }
    assert parse_log(caplog) == [
        {
            "event": "Issued InfluxDB token",
            "influxdb_username": token_data.username,
            "httpRequest": {
                "requestMethod": "GET",
                "requestUrl": (
                    f"https://{TEST_HOSTNAME}/auth/tokens/influxdb/new"
                ),
                "remoteIp": "127.0.0.1",
            },
            "scope": "user:token",
            "severity": "info",
            "token": token_data.token.key,
            "token_source": "bearer",
            "user": token_data.username,
        }
    ]
@pytest.mark.asyncio
async def test_no_auth(client: AsyncClient, config: Config) -> None:
    """Requests without credentials get the standard unauthorized response."""
    r = await client.get("/auth/tokens/influxdb/new")
    assert_unauthorized_is_correct(r, config)
@pytest.mark.asyncio
async def test_not_configured(
    tmp_path: Path,
    client: AsyncClient,
    factory: ComponentFactory,
    caplog: LogCaptureFixture,
) -> None:
    """Without an InfluxDB issuer configured, the route returns 404.

    Uses the "oidc" settings (no InfluxDB secret) and checks both the
    error payload type and the warning log entry.
    """
    config = await configure(tmp_path, "oidc")
    factory.reconfigure(config)
    token_data = await create_session_token(factory)
    caplog.clear()
    r = await client.get(
        "/auth/tokens/influxdb/new",
        headers={"Authorization": f"bearer {token_data.token}"},
    )
    assert r.status_code == 404
    assert r.json()["detail"]["type"] == "not_supported"
    assert parse_log(caplog) == [
        {
            "error": "No InfluxDB issuer configuration",
            "event": "Not configured",
            "httpRequest": {
                "requestMethod": "GET",
                "requestUrl": (
                    f"https://{TEST_HOSTNAME}/auth/tokens/influxdb/new"
                ),
                "remoteIp": "127.0.0.1",
            },
            "scope": "user:token",
            "severity": "warning",
            "token": token_data.token.key,
            "token_source": "bearer",
            "user": token_data.username,
        }
    ]
@pytest.mark.asyncio
async def test_influxdb_force_username(
    tmp_path: Path,
    client: AsyncClient,
    factory: ComponentFactory,
    caplog: LogCaptureFixture,
) -> None:
    """With a forced username configured, every issued JWT uses it.

    Uses the "influxdb-username" settings; the JWT and log entry carry
    "influxdb-user" rather than the authenticated user's name.
    """
    config = await configure(tmp_path, "influxdb-username")
    factory.reconfigure(config)
    token_data = await create_session_token(factory)
    assert token_data.expires
    influxdb_secret = config.issuer.influxdb_secret
    assert influxdb_secret
    caplog.clear()
    r = await client.get(
        "/auth/tokens/influxdb/new",
        headers={"Authorization": f"bearer {token_data.token}"},
    )
    assert r.status_code == 200
    data = r.json()
    claims = jwt.decode(data["token"], influxdb_secret, algorithms=["HS256"])
    assert claims == {
        "username": "influxdb-user",
        "exp": int(token_data.expires.timestamp()),
        "iat": ANY,
    }
    assert parse_log(caplog) == [
        {
            "event": "Issued InfluxDB token",
            "influxdb_username": "influxdb-user",
            "httpRequest": {
                "requestMethod": "GET",
                "requestUrl": (
                    f"https://{TEST_HOSTNAME}/auth/tokens/influxdb/new"
                ),
                "remoteIp": "127.0.0.1",
            },
            "scope": "user:token",
            "severity": "info",
            "token": token_data.token.key,
            "token_source": "bearer",
            "user": token_data.username,
        }
    ]
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.