language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | django__django | tests/auth_tests/test_views.py | {
"start": 41054,
"end": 41752
} | class ____(AuthViewsTestCase):
"""Tests for the redirect_to_login view"""
@override_settings(LOGIN_URL=reverse_lazy("login"))
def test_redirect_to_login_with_lazy(self):
login_redirect_response = redirect_to_login(next="/else/where/")
expected = "/login/?next=/else/where/"
self.assertEqual(expected, login_redirect_response.url)
@override_settings(LOGIN_URL=reverse_lazy("login"))
def test_redirect_to_login_with_lazy_and_unicode(self):
login_redirect_response = redirect_to_login(next="/else/where/झ/")
expected = "/login/?next=/else/where/%E0%A4%9D/"
self.assertEqual(expected, login_redirect_response.url)
| RedirectToLoginTests |
python | astropy__astropy | astropy/modeling/optimizers.py | {
"start": 595,
"end": 2195
} | class ____(ABC):
"""
Base class for optimizers.
Parameters
----------
opt_method : callable
Implements optimization method
Notes
-----
The base Optimizer does not support any constraints by default; individual
optimizers should explicitly set this list to the specific constraints
it supports.
"""
supported_constraints = []
@abstractmethod
def __init__(self) -> None: ...
def _init_opt_method(self, opt_method):
self._opt_method = opt_method
self._maxiter = DEFAULT_MAXITER
self._eps = DEFAULT_EPS
self._acc = DEFAULT_ACC
@property
def maxiter(self):
"""Maximum number of iterations."""
return self._maxiter
@maxiter.setter
def maxiter(self, val):
"""Set maxiter."""
self._maxiter = val
@property
def eps(self):
"""Step for the forward difference approximation of the Jacobian."""
return self._eps
@eps.setter
def eps(self, val):
"""Set eps value."""
self._eps = val
@property
def acc(self):
"""Requested accuracy."""
return self._acc
@acc.setter
def acc(self, val):
"""Set accuracy."""
self._acc = val
def __repr__(self):
fmt = f"{self.__class__.__name__}()"
return fmt
@property
def opt_method(self):
"""Return the optimization method."""
return self._opt_method
@abstractmethod
def __call__(self):
raise NotImplementedError("Subclasses should implement this method")
| Optimization |
python | django__django | tests/auth_tests/test_forms.py | {
"start": 2862,
"end": 12606
} | class ____(TestDataMixin, TestCase):
form_class = BaseUserCreationForm
def test_form_fields(self):
form = self.form_class()
self.assertEqual(
list(form.fields.keys()), ["username", "password1", "password2"]
)
def test_user_already_exists(self):
data = {
"username": "testclient",
"password1": "test123",
"password2": "test123",
}
form = self.form_class(data)
self.assertFalse(form.is_valid())
self.assertEqual(
form["username"].errors,
[str(User._meta.get_field("username").error_messages["unique"])],
)
def test_invalid_data(self):
data = {
"username": "jsmith!",
"password1": "test123",
"password2": "test123",
}
form = self.form_class(data)
self.assertFalse(form.is_valid())
validator = next(
v
for v in User._meta.get_field("username").validators
if v.code == "invalid"
)
self.assertEqual(form["username"].errors, [str(validator.message)])
def test_password_verification(self):
# The verification password is incorrect.
data = {
"username": "jsmith",
"password1": "test123",
"password2": "test",
}
form = self.form_class(data)
self.assertFalse(form.is_valid())
self.assertEqual(
form["password2"].errors, [str(form.error_messages["password_mismatch"])]
)
def test_both_passwords(self):
# One (or both) passwords weren't given
data = {"username": "jsmith"}
form = self.form_class(data)
required_error = [str(Field.default_error_messages["required"])]
self.assertFalse(form.is_valid())
self.assertEqual(form["password1"].errors, required_error)
self.assertEqual(form["password2"].errors, required_error)
data["password2"] = "test123"
form = self.form_class(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["password1"].errors, required_error)
self.assertEqual(form["password2"].errors, [])
@mock.patch("django.contrib.auth.password_validation.password_changed")
def test_success(self, password_changed):
# The success case.
data = {
"username": "jsmith@example.com",
"password1": "test123",
"password2": "test123",
}
form = self.form_class(data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
u = form.save()
self.assertEqual(password_changed.call_count, 1)
self.assertEqual(repr(u), "<User: jsmith@example.com>")
def test_unicode_username(self):
data = {
"username": "宝",
"password1": "test123",
"password2": "test123",
}
form = self.form_class(data)
self.assertTrue(form.is_valid())
u = form.save()
self.assertEqual(u.username, "宝")
def test_normalize_username(self):
# The normalization happens in AbstractBaseUser.clean() and ModelForm
# validation calls Model.clean().
ohm_username = "testΩ" # U+2126 OHM SIGN
data = {
"username": ohm_username,
"password1": "pwd2",
"password2": "pwd2",
}
form = self.form_class(data)
self.assertTrue(form.is_valid())
user = form.save()
self.assertNotEqual(user.username, ohm_username)
self.assertEqual(user.username, "testΩ") # U+03A9 GREEK CAPITAL LETTER OMEGA
def test_invalid_username_no_normalize(self):
field = UsernameField(max_length=254)
# Usernames are not normalized if they are too long.
self.assertEqual(field.to_python("½" * 255), "½" * 255)
self.assertEqual(field.to_python("ff" * 254), "ff" * 254)
def test_duplicate_normalized_unicode(self):
"""
To prevent almost identical usernames, visually identical but differing
by their unicode code points only, Unicode NFKC normalization should
make appear them equal to Django.
"""
omega_username = "iamtheΩ" # U+03A9 GREEK CAPITAL LETTER OMEGA
ohm_username = "iamtheΩ" # U+2126 OHM SIGN
self.assertNotEqual(omega_username, ohm_username)
User.objects.create_user(username=omega_username, password="pwd")
data = {
"username": ohm_username,
"password1": "pwd2",
"password2": "pwd2",
}
form = self.form_class(data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors["username"], ["A user with that username already exists."]
)
@override_settings(
AUTH_PASSWORD_VALIDATORS=[
{
"NAME": (
"django.contrib.auth.password_validation."
"UserAttributeSimilarityValidator"
)
},
{
"NAME": (
"django.contrib.auth.password_validation.MinimumLengthValidator"
),
"OPTIONS": {
"min_length": 12,
},
},
]
)
def test_validates_password(self):
data = {
"username": "otherclient",
"password1": "otherclient",
"password2": "otherclient",
}
form = self.form_class(data)
self.assertFalse(form.is_valid())
self.assertEqual(len(form["password2"].errors), 2)
self.assertIn(
"The password is too similar to the username.", form["password2"].errors
)
self.assertIn(
"This password is too short. It must contain at least 12 characters.",
form["password2"].errors,
)
def test_password_whitespace_not_stripped(self):
data = {
"username": "testuser",
"password1": " testpassword ",
"password2": " testpassword ",
}
form = self.form_class(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data["password1"], data["password1"])
self.assertEqual(form.cleaned_data["password2"], data["password2"])
@override_settings(
AUTH_PASSWORD_VALIDATORS=[
{
"NAME": (
"django.contrib.auth.password_validation."
"UserAttributeSimilarityValidator"
)
},
]
)
def test_password_help_text(self):
form = self.form_class()
self.assertEqual(
form.fields["password1"].help_text,
"<ul><li>"
"Your password can’t be too similar to your other personal information."
"</li></ul>",
)
def test_password_extra_validations(self):
class ExtraValidationForm(ExtraValidationFormMixin, self.form_class):
def clean_password1(self):
return self.failing_helper("password1")
def clean_password2(self):
return self.failing_helper("password2")
data = {"username": "extra", "password1": "abc", "password2": "abc"}
for fields in (["password1"], ["password2"], ["password1", "password2"]):
with self.subTest(fields=fields):
errors = {field: [f"Extra validation for {field}."] for field in fields}
form = ExtraValidationForm(data, failing_fields=errors)
self.assertIs(form.is_valid(), False)
self.assertDictEqual(form.errors, errors)
@override_settings(
AUTH_PASSWORD_VALIDATORS=[
{
"NAME": (
"django.contrib.auth.password_validation."
"UserAttributeSimilarityValidator"
)
},
]
)
def test_user_create_form_validates_password_with_all_data(self):
"""
BaseUserCreationForm password validation uses all of the form's data.
"""
class CustomUserCreationForm(self.form_class):
class Meta(self.form_class.Meta):
model = User
fields = ("username", "email", "first_name", "last_name")
form = CustomUserCreationForm(
{
"username": "testuser",
"password1": "testpassword",
"password2": "testpassword",
"first_name": "testpassword",
"last_name": "lastname",
}
)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors["password2"],
["The password is too similar to the first name."],
)
def test_username_field_autocapitalize_none(self):
form = self.form_class()
self.assertEqual(
form.fields["username"].widget.attrs.get("autocapitalize"), "none"
)
def test_html_autocomplete_attributes(self):
form = self.form_class()
tests = (
("username", "username"),
("password1", "new-password"),
("password2", "new-password"),
)
for field_name, autocomplete in tests:
with self.subTest(field_name=field_name, autocomplete=autocomplete):
self.assertEqual(
form.fields[field_name].widget.attrs["autocomplete"], autocomplete
)
def test_user_creation_form_class_getitem(self):
self.assertIs(BaseUserCreationForm["MyCustomUser"], BaseUserCreationForm)
| BaseUserCreationFormTest |
python | astropy__astropy | astropy/time/tests/test_guess.py | {
"start": 112,
"end": 1075
} | class ____:
"""Test guessing the input value format"""
def test_guess1(self):
times = ["1999-01-01 00:00:00.123456789", "2010-01-01 00:00:00"]
t = Time(times, scale="utc")
assert (
repr(t) == "<Time object: scale='utc' format='iso' "
"value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>"
)
def test_guess2(self):
times = ["1999-01-01 00:00:00.123456789", "2010-01 00:00:00"]
with pytest.raises(ValueError):
Time(times, scale="utc")
def test_guess3(self):
times = ["1999:001:00:00:00.123456789", "2010:001"]
t = Time(times, scale="utc")
assert (
repr(t) == "<Time object: scale='utc' format='yday' "
"value=['1999:001:00:00:00.123' '2010:001:00:00:00.000']>"
)
def test_guess4(self):
times = [10, 20]
with pytest.raises(ValueError):
Time(times, scale="utc")
| TestGuess |
python | ipython__ipython | tests/test_pretty.py | {
"start": 1422,
"end": 1506
} | class ____(object):
def __repr__(self):
return "Breaking(\n)"
| BreakingRepr |
python | doocs__leetcode | solution/0800-0899/0816.Ambiguous Coordinates/Solution.py | {
"start": 0,
"end": 537
} | class ____:
def ambiguousCoordinates(self, s: str) -> List[str]:
def f(i, j):
res = []
for k in range(1, j - i + 1):
l, r = s[i : i + k], s[i + k : j]
ok = (l == '0' or not l.startswith('0')) and not r.endswith('0')
if ok:
res.append(l + ('.' if k < j - i else '') + r)
return res
n = len(s)
return [
f'({x}, {y})' for i in range(2, n - 1) for x in f(1, i) for y in f(i, n - 1)
]
| Solution |
python | viewflow__viewflow | tests/components/test_base_page_components.py | {
"start": 374,
"end": 3940
} | class ____(LiveTestCase):
fixtures = ["users.json"]
def setUp(self):
self.client.login(username="admin", password="admin")
cookie = self.client.cookies["sessionid"]
self.browser.get(self.live_server_url)
self.browser.add_cookie(
{"name": "sessionid", "value": cookie.value, "secure": False, "path": "/"}
)
self.browser.refresh()
def test_page_navigation(self):
self.browser.get(f"{self.live_server_url}/application/test/test/")
self.assertNoJsErrors()
app_nav_link = self.browser.find_element(
By.XPATH, '//aside//a[@href="/application/test/"]'
)
self.assertIn(
"mdc-list-item--selected", app_nav_link.get_attribute("class").split(" ")
)
site_nav_link = self.browser.find_element(
By.XPATH, '//aside//a[@href="/application/"]'
)
self.assertIn(
"mdc-list-item--selected", site_nav_link.get_attribute("class").split(" ")
)
# navigation click
site_nav_link = self.browser.find_element(
By.XPATH, '//aside//a[@href="/application2/"]'
)
site_nav_link.click()
site_nav_link = self.browser.find_element(
By.XPATH, '//aside//a[@href="/application2/"]'
)
self.assertIn(
"mdc-list-item--selected", site_nav_link.get_attribute("class").split(" ")
)
self.assertNoJsErrors()
def test_drawer_resize(self):
self.browser.set_window_size(1280, 947)
self.browser.get(f"{self.live_server_url}/application/test/")
drawer = self.browser.find_element(By.CSS_SELECTOR, ".vf-page__menu")
drawer_classes = drawer.get_attribute("class").split(" ")
self.assertIn("mdc-drawer--dismissible", drawer_classes)
self.assertIn("mdc-drawer--open", drawer_classes)
self.assertNotIn("mdc-drawer--modal", drawer_classes)
self.browser.set_window_size(640, 480)
self.browser.execute_script("return;") # allow js resize hooks to e executed
drawer_classes = drawer.get_attribute("class").split(" ")
self.assertNotIn("mdc-drawer--dismissible", drawer_classes)
self.assertNotIn("mdc-drawer--open", drawer_classes)
self.assertIn("mdc-drawer--modal", drawer_classes)
self.browser.set_window_size(1280, 947)
self.browser.execute_script("return;") # allow js resize hooks to e executed
drawer_classes = drawer.get_attribute("class").split(" ")
self.assertIn("mdc-drawer--dismissible", drawer_classes)
self.assertIn("mdc-drawer--open", drawer_classes)
self.assertNotIn("mdc-drawer--modal", drawer_classes)
self.assertNoJsErrors()
def test_header_menu(self):
self.assertTrue(self.login(username="admin", password="admin"))
self.browser.get(f"{self.live_server_url}/application/test/")
primary_menu = self.browser.find_element(
By.CSS_SELECTOR, ".vf-page__menu-primary"
)
secondary_menu = self.browser.find_element(
By.CSS_SELECTOR, ".vf-page__menu-secondary"
)
button = self.browser.find_element(
By.CSS_SELECTOR, ".vf-page__menu-toggle-button"
)
self.assertTrue(primary_menu.is_displayed())
self.assertFalse(secondary_menu.is_displayed())
button.click()
self.assertFalse(primary_menu.is_displayed())
self.assertTrue(secondary_menu.is_displayed())
self.assertNoJsErrors()
| Test |
python | astropy__astropy | astropy/coordinates/tests/test_sky_coord_velocities.py | {
"start": 8074,
"end": 9229
} | class ____:
"""Test that going in between spherical and unit-spherical, we do not
change differential type (since both can handle the same types).
"""
def test_sc_unit_spherical_with_pm_or_rv_only(self, diff_info, diff_cls):
sc = SkyCoord(ra=[10, 20] * u.deg, dec=[-10, 10] * u.deg, **diff_info)
assert isinstance(sc.data, UnitSphericalRepresentation)
assert isinstance(sc.data.differentials["s"], diff_cls)
sr = sc.represent_as("spherical")
assert isinstance(sr, SphericalRepresentation)
assert isinstance(sr.differentials["s"], diff_cls)
def test_sc_spherical_with_pm_or_rv_only(self, diff_info, diff_cls):
sc = SkyCoord(
ra=[10, 20] * u.deg,
dec=[-10, 10] * u.deg,
distance=1.0 * u.kpc,
**diff_info,
)
assert isinstance(sc.data, SphericalRepresentation)
assert isinstance(sc.data.differentials["s"], diff_cls)
sr = sc.represent_as("unitspherical")
assert isinstance(sr, UnitSphericalRepresentation)
assert isinstance(sr.differentials["s"], diff_cls)
| TestDifferentialClassPropagation |
python | facebook__pyre-check | stubs/integration_test/fixture_source/integration_test/constructor_tito.py | {
"start": 246,
"end": 310
} | class ____:
def __init__(self, arg): ...
| ParentWithConstructor |
python | anthropics__anthropic-sdk-python | src/anthropic/types/citation_search_result_location_param.py | {
"start": 261,
"end": 588
} | class ____(TypedDict, total=False):
cited_text: Required[str]
end_block_index: Required[int]
search_result_index: Required[int]
source: Required[str]
start_block_index: Required[int]
title: Required[Optional[str]]
type: Required[Literal["search_result_location"]]
| CitationSearchResultLocationParam |
python | facebookresearch__faiss | tests/test_fast_scan_ivf.py | {
"start": 17184,
"end": 20752
} | class ____(unittest.TestCase):
""" test reconstruct and sa_encode / sa_decode
(also for a few additive quantizer variants) """
def do_test(self, by_residual=False):
d = 32
metric = faiss.METRIC_L2
ds = datasets.SyntheticDataset(d, 250, 200, 10)
index = faiss.IndexIVFPQFastScan(
faiss.IndexFlatL2(d), d, 50, d // 2, 4, metric)
index.by_residual = by_residual
index.make_direct_map(True)
index.train(ds.get_train())
index.add(ds.get_database())
# Test reconstruction
v123 = index.reconstruct(123) # single id
v120_10 = index.reconstruct_n(120, 10)
np.testing.assert_array_equal(v120_10[3], v123)
v120_10 = index.reconstruct_batch(np.arange(120, 130))
np.testing.assert_array_equal(v120_10[3], v123)
# Test original list reconstruction
index.orig_invlists = faiss.ArrayInvertedLists(
index.nlist, index.code_size)
index.reconstruct_orig_invlists()
assert index.orig_invlists.compute_ntotal() == index.ntotal
# compare with non fast-scan index
index2 = faiss.IndexIVFPQ(
index.quantizer, d, 50, d // 2, 4, metric)
index2.by_residual = by_residual
index2.pq = index.pq
index2.is_trained = True
index2.replace_invlists(index.orig_invlists, False)
index2.ntotal = index.ntotal
index2.make_direct_map(True)
assert np.all(index.reconstruct(123) == index2.reconstruct(123))
def test_no_residual(self):
self.do_test(by_residual=False)
def test_by_residual(self):
self.do_test(by_residual=True)
def do_test_generic(self, factory_string,
by_residual=False, metric=faiss.METRIC_L2):
d = 32
ds = datasets.SyntheticDataset(d, 250, 200, 10)
index = faiss.index_factory(ds.d, factory_string, metric)
if "IVF" in factory_string:
index.by_residual = by_residual
index.make_direct_map(True)
index.train(ds.get_train())
index.add(ds.get_database())
# Test reconstruction
v123 = index.reconstruct(123) # single id
v120_10 = index.reconstruct_n(120, 10)
np.testing.assert_array_equal(v120_10[3], v123)
v120_10 = index.reconstruct_batch(np.arange(120, 130))
np.testing.assert_array_equal(v120_10[3], v123)
codes = index.sa_encode(ds.get_database()[120:130])
np.testing.assert_array_equal(index.sa_decode(codes), v120_10)
# make sure pointers are correct after serialization
index2 = faiss.deserialize_index(faiss.serialize_index(index))
codes2 = index2.sa_encode(ds.get_database()[120:130])
np.testing.assert_array_equal(codes, codes2)
def test_ivfpq_residual(self):
self.do_test_generic("IVF20,PQ16x4fs", by_residual=True)
def test_ivfpq_no_residual(self):
self.do_test_generic("IVF20,PQ16x4fs", by_residual=False)
def test_pq(self):
self.do_test_generic("PQ16x4fs")
def test_rq(self):
self.do_test_generic("RQ4x4fs", metric=faiss.METRIC_INNER_PRODUCT)
def test_ivfprq(self):
self.do_test_generic("IVF20,PRQ8x2x4fs", by_residual=True, metric=faiss.METRIC_INNER_PRODUCT)
def test_ivfprq_no_residual(self):
self.do_test_generic("IVF20,PRQ8x2x4fs", by_residual=False, metric=faiss.METRIC_INNER_PRODUCT)
def test_prq(self):
self.do_test_generic("PRQ8x2x4fs", metric=faiss.METRIC_INNER_PRODUCT)
| TestReconstruct |
python | crytic__slither | slither/solc_parsing/declarations/contract.py | {
"start": 1681,
"end": 37188
} | class ____(CallerContextExpression):
def __init__(
self, slither_parser: "SlitherCompilationUnitSolc", contract: Contract, data: Dict[str, Any]
) -> None:
# assert slitherSolc.solc_version.startswith('0.4')
self._contract = contract
self._slither_parser = slither_parser
self._data = data
self._functionsNotParsed: List[Dict] = []
self._modifiersNotParsed: List[Dict] = []
self._functions_no_params: List[FunctionSolc] = []
self._modifiers_no_params: List[ModifierSolc] = []
self._eventsNotParsed: List[Dict] = []
self._variablesNotParsed: List[Dict] = []
self._enumsNotParsed: List[Dict] = []
self._structuresNotParsed: List[Dict] = []
self._usingForNotParsed: List[Dict] = []
self._customErrorsNotParsed: List[Dict] = []
self._functions_parser: List[FunctionSolc] = []
self._modifiers_parser: List[ModifierSolc] = []
self._structures_parser: List[StructureContractSolc] = []
self._custom_errors_parser: List[CustomErrorSolc] = []
self._is_analyzed: bool = False
# use to remap inheritance id
self._remapping: Dict[str, str] = {}
# (referencedDeclaration, offset)
self.baseContracts: List[Tuple[int, str]] = []
self.baseConstructorContractsCalled: List[str] = []
self._linearized_base_contracts: List[int]
self._variables_parser: List[StateVariableSolc] = []
# Export info
if self.is_compact_ast:
self._contract.name = self._data["name"]
self._handle_comment(self._data)
else:
self._contract.name = self._data["attributes"][self.get_key()]
self._handle_comment(self._data["attributes"])
self._contract.id = self._data["id"]
self._parse_contract_info()
self._parse_contract_items()
###################################################################################
###################################################################################
# region General Properties
###################################################################################
###################################################################################
@property
def is_analyzed(self) -> bool:
return self._is_analyzed
def set_is_analyzed(self, is_analyzed: bool) -> None:
self._is_analyzed = is_analyzed
@property
def underlying_contract(self) -> Contract:
return self._contract
@property
def linearized_base_contracts(self) -> List[int]:
return self._linearized_base_contracts
@property
def compilation_unit(self) -> "SlitherCompilationUnit":
return self._contract.compilation_unit
@property
def slither_parser(self) -> "SlitherCompilationUnitSolc":
return self._slither_parser
@property
def functions_parser(self) -> List["FunctionSolc"]:
return self._functions_parser
@property
def modifiers_parser(self) -> List["ModifierSolc"]:
return self._modifiers_parser
@property
def structures_not_parsed(self) -> List[Dict]:
return self._structuresNotParsed
@property
def enums_not_parsed(self) -> List[Dict]:
return self._enumsNotParsed
###################################################################################
###################################################################################
# region AST
###################################################################################
###################################################################################
def get_key(self) -> str:
return self._slither_parser.get_key()
def get_children(self, key: str = "nodes") -> str:
if self.is_compact_ast:
return key
return "children"
@property
def remapping(self) -> Dict[str, str]:
return self._remapping
@property
def is_compact_ast(self) -> bool:
return self._slither_parser.is_compact_ast
# endregion
###################################################################################
###################################################################################
# region SlithIR
###################################################################################
###################################################################################
def _parse_contract_info(self) -> None:
if self.is_compact_ast:
attributes = self._data
else:
attributes = self._data["attributes"]
self._contract.is_interface = False
if "contractKind" in attributes:
if attributes["contractKind"] == "interface":
self._contract.is_interface = True
elif attributes["contractKind"] == "library":
self._contract.is_library = True
self._contract.contract_kind = attributes["contractKind"]
self._contract.is_fully_implemented = attributes["fullyImplemented"]
self._linearized_base_contracts = attributes["linearizedBaseContracts"]
if "abstract" in attributes:
self._contract.is_abstract = attributes["abstract"]
# Parse base contract information
self._parse_base_contract_info()
# trufle does some re-mapping of id
if "baseContracts" in self._data:
for elem in self._data["baseContracts"]:
if elem["nodeType"] == "InheritanceSpecifier":
self._remapping[elem["baseName"]["referencedDeclaration"]] = elem["baseName"][
"name"
]
def _parse_base_contract_info(self) -> None: # pylint: disable=too-many-branches
# Parse base contracts (immediate, non-linearized)
if self.is_compact_ast:
# Parse base contracts + constructors in compact-ast
if "baseContracts" in self._data:
for base_contract in self._data["baseContracts"]:
if base_contract["nodeType"] != "InheritanceSpecifier":
continue
if (
"baseName" not in base_contract
or "referencedDeclaration" not in base_contract["baseName"]
):
continue
# Obtain our contract reference and add it to our base contract list
referencedDeclaration = base_contract["baseName"]["referencedDeclaration"]
self.baseContracts.append(
(referencedDeclaration, base_contract["baseName"]["src"])
)
# If we have defined arguments in our arguments object, this is a constructor invocation.
# (note: 'arguments' can be [], which is not the same as None. [] implies a constructor was
# called with no arguments, while None implies no constructor was called).
if "arguments" in base_contract and base_contract["arguments"] is not None:
self.baseConstructorContractsCalled.append(referencedDeclaration)
else:
# Parse base contracts + constructors in legacy-ast
if "children" in self._data:
for base_contract in self._data["children"]:
if base_contract["name"] != "InheritanceSpecifier":
continue
if "children" not in base_contract or len(base_contract["children"]) == 0:
continue
# Obtain all items for this base contract specification (base contract, followed by arguments)
base_contract_items = base_contract["children"]
if (
"name" not in base_contract_items[0]
or base_contract_items[0]["name"] != "UserDefinedTypeName"
):
continue
if (
"attributes" not in base_contract_items[0]
or "referencedDeclaration" not in base_contract_items[0]["attributes"]
):
continue
# Obtain our contract reference and add it to our base contract list
referencedDeclaration = base_contract_items[0]["attributes"][
"referencedDeclaration"
]
self.baseContracts.append(
(referencedDeclaration, base_contract_items[0]["src"])
)
# If we have an 'attributes'->'arguments' which is None, this is not a constructor call.
if (
"attributes" not in base_contract
or "arguments" not in base_contract["attributes"]
or base_contract["attributes"]["arguments"] is not None
):
self.baseConstructorContractsCalled.append(referencedDeclaration)
def _parse_contract_items(self) -> None:
# pylint: disable=too-many-branches
if not self.get_children() in self._data: # empty contract
return
for item in self._data[self.get_children()]:
if item[self.get_key()] == "FunctionDefinition":
self._functionsNotParsed.append(item)
elif item[self.get_key()] == "EventDefinition":
self._eventsNotParsed.append(item)
elif item[self.get_key()] == "InheritanceSpecifier":
# we dont need to parse it as it is redundant
# with self.linearizedBaseContracts
continue
elif item[self.get_key()] == "VariableDeclaration":
self._variablesNotParsed.append(item)
elif item[self.get_key()] == "EnumDefinition":
self._enumsNotParsed.append(item)
elif item[self.get_key()] == "ModifierDefinition":
self._modifiersNotParsed.append(item)
elif item[self.get_key()] == "StructDefinition":
self._structuresNotParsed.append(item)
elif item[self.get_key()] == "UsingForDirective":
self._usingForNotParsed.append(item)
elif item[self.get_key()] == "ErrorDefinition":
self._customErrorsNotParsed.append(item)
elif item[self.get_key()] == "UserDefinedValueTypeDefinition":
self._parse_type_alias(item)
else:
raise ParsingError("Unknown contract item: " + item[self.get_key()])
return
def parse_type_alias(self) -> None:
# We keep parse_ in the name just to keep the naming convention even if we already parsed them initially.
# Here we only update the current contract type_aliases_as_dict with the fathers' values
# It's useful to keep using the same pattern anyway as we know all the fathers have been analyzed
for father in self._contract.inheritance_reverse:
self._contract.type_aliases_as_dict.update(father.type_aliases_as_dict)
def _parse_type_alias(self, item: Dict) -> None:
assert "name" in item
assert "underlyingType" in item
underlying_type = item["underlyingType"]
assert "nodeType" in underlying_type and underlying_type["nodeType"] == "ElementaryTypeName"
assert "name" in underlying_type
original_type = ElementaryType(underlying_type["name"])
# For user defined types defined at the contract level the lookup can be done
# Using the name or the canonical name
# For example during the type parsing the canonical name
# Note that Solidity allows shadowing of user defined types
# Between top level and contract definitions
alias = item["name"]
alias_canonical = self._contract.name + "." + item["name"]
type_alias = TypeAliasContract(original_type, alias, self.underlying_contract)
type_alias.set_offset(item["src"], self.compilation_unit)
self._contract.type_aliases_as_dict[alias] = type_alias
self._contract.file_scope.type_aliases[alias_canonical] = type_alias
def _parse_struct(self, struct: Dict) -> None:
st = StructureContract(self._contract.compilation_unit)
st.set_contract(self._contract)
st.set_offset(struct["src"], self._contract.compilation_unit)
st_parser = StructureContractSolc(st, struct, self) # type: ignore
self._contract.structures_as_dict[st.name] = st
self._structures_parser.append(st_parser)
def parse_structs(self) -> None:
for father in self._contract.inheritance_reverse:
self._contract.structures_as_dict.update(father.structures_as_dict)
for struct in self._structuresNotParsed:
self._parse_struct(struct)
self._structuresNotParsed = []
def _parse_custom_error(self, custom_error: Dict) -> None:
ce = CustomErrorContract(self.compilation_unit)
ce.set_contract(self._contract)
ce.set_offset(custom_error["src"], self.compilation_unit)
ce_parser = CustomErrorSolc(ce, custom_error, self, self._slither_parser)
self._contract.custom_errors_as_dict[ce.name] = ce
self._custom_errors_parser.append(ce_parser)
def parse_custom_errors(self) -> None:
for father in self._contract.inheritance_reverse:
self._contract.custom_errors_as_dict.update(father.custom_errors_as_dict)
for custom_error in self._customErrorsNotParsed:
self._parse_custom_error(custom_error)
self._customErrorsNotParsed = []
def parse_state_variables(self) -> None:
for father in self._contract.inheritance_reverse:
self._contract.variables_as_dict.update(
{
name: v
for name, v in father.variables_as_dict.items()
if v.visibility != "private"
}
)
self._contract.add_state_variables_ordered(
[
var
for var in father.state_variables_ordered
if var not in self._contract.state_variables_ordered
]
)
for varNotParsed in self._variablesNotParsed:
var = StateVariable()
var.set_offset(varNotParsed["src"], self._contract.compilation_unit)
var.set_contract(self._contract)
var_parser = StateVariableSolc(var, varNotParsed)
self._variables_parser.append(var_parser)
assert var.name
if var_parser.reference_id is not None:
self._contract.state_variables_by_ref_id[var_parser.reference_id] = var
self._contract.variables_as_dict[var.name] = var
self._contract.add_state_variables_ordered([var])
def _parse_modifier(self, modifier_data: Dict) -> None:
modif = Modifier(self._contract.compilation_unit)
modif.set_offset(modifier_data["src"], self._contract.compilation_unit)
modif.set_contract(self._contract)
modif.set_contract_declarer(self._contract)
modif_parser = ModifierSolc(modif, modifier_data, self, self.slither_parser) # type: ignore
self._contract.compilation_unit.add_modifier(modif)
self._modifiers_no_params.append(modif_parser)
self._modifiers_parser.append(modif_parser)
self._slither_parser.add_function_or_modifier_parser(modif_parser)
def parse_modifiers(self) -> None:
for modifier in self._modifiersNotParsed:
self._parse_modifier(modifier)
self._modifiersNotParsed = []
def _parse_function(self, function_data: Dict) -> None:
    """Build a FunctionContract and its parser from raw AST data and register both."""
    function = FunctionContract(self._contract.compilation_unit)
    function.set_offset(function_data["src"], self._contract.compilation_unit)
    function.set_contract(self._contract)
    function.set_contract_declarer(self._contract)
    parser = FunctionSolc(function, function_data, self, self._slither_parser)  # type: ignore
    self._contract.compilation_unit.add_function(function)
    # Parameters are resolved later; keep the parser queued for that pass.
    self._functions_no_params.append(parser)
    self._functions_parser.append(parser)
    self._slither_parser.add_function_or_modifier_parser(parser)
def parse_functions(self) -> None:
    """Parse every function declaration queued for this contract."""
    for raw_function in self._functionsNotParsed:
        self._parse_function(raw_function)
    self._functionsNotParsed = []
# endregion
###################################################################################
###################################################################################
# region Analyze
###################################################################################
###################################################################################
def log_incorrect_parsing(self, error: str) -> None:
    """Report a parsing problem: raise in strict mode, otherwise log and mark
    the contract as incorrectly constructed."""
    strict = self._contract.compilation_unit.core.disallow_partial
    if strict:
        raise ParsingError(error)
    LOGGER.error(error)
    self._contract.is_incorrectly_constructed = True
def analyze_content_modifiers(self) -> None:
    """Run body/content analysis for each modifier, tolerating partial ASTs."""
    try:
        for parser in self._modifiers_parser:
            parser.analyze_content()
    except (VariableNotFound, KeyError) as e:
        self.log_incorrect_parsing(f"Missing modifier {e}")
def analyze_content_functions(self) -> None:
    """Run body/content analysis for each function, tolerating partial ASTs."""
    try:
        for parser in self._functions_parser:
            parser.analyze_content()
    except (VariableNotFound, KeyError, ParsingError) as e:
        self.log_incorrect_parsing(f"Missing function {e}")
def analyze_params_modifiers(self) -> None:
    """Resolve modifier parameters, merging inherited modifiers from ancestors."""
    try:
        modifiers = self._analyze_params_elements(
            self._modifiers_no_params,
            lambda c: c.modifiers_parser,
            lambda c: c.modifiers_declared,
            Modifier,
            ModifierSolc,
            self._modifiers_parser,
        )
        # Only Modifier instances are produced here, so the type check is relaxed.
        self._contract.set_modifiers(modifiers)  # type: ignore
    except (VariableNotFound, KeyError) as e:
        self.log_incorrect_parsing(f"Missing params {e}")
    # Reset unconditionally, even after a handled failure.
    self._modifiers_no_params = []
def analyze_params_functions(self) -> None:
    """Resolve function parameters, merging inherited functions from ancestors."""
    try:
        functions = self._analyze_params_elements(
            self._functions_no_params,
            lambda c: c.functions_parser,
            lambda c: c.functions_declared,
            FunctionContract,
            FunctionSolc,
            self._functions_parser,
        )
        # Only FunctionContract instances are produced here; type check relaxed.
        self._contract.set_functions(functions)  # type: ignore
    except (VariableNotFound, KeyError) as e:
        self.log_incorrect_parsing(f"Missing params {e}")
    # Reset unconditionally, even after a handled failure.
    self._functions_no_params = []
def _analyze_params_element(  # pylint: disable=too-many-arguments
    self,
    Cls: Callable,
    Cls_parser: Callable,
    element_parser: FunctionSolc,
    explored_reference_id: Set[str],
    parser: Union[List[FunctionSolc], List[ModifierSolc]],
    all_elements: Dict[str, Function],
) -> None:
    """Clone one inherited element (Function or Modifier) into this contract,
    analyze its parameters, and register it in `all_elements` and `parser`.

    Elements whose AST id was already seen (tracked in `explored_reference_id`)
    are skipped to avoid duplicates from diamond inheritance.
    """
    # NOTE(review): elem/elem_parser are constructed *before* the duplicate
    # check below, so duplicates still allocate a throwaway pair — presumably
    # harmless, but wasted work; verify before reordering.
    elem = Cls(self._contract.compilation_unit)
    elem.set_contract(self._contract)
    underlying_function = element_parser.underlying_function
    # TopLevel function are not analyzed here
    assert isinstance(underlying_function, FunctionContract)
    elem.set_contract_declarer(underlying_function.contract_declarer)
    elem.set_offset(
        element_parser.function_not_parsed["src"],
        self._contract.compilation_unit,
    )
    elem_parser = Cls_parser(
        elem, element_parser.function_not_parsed, self, self.slither_parser
    )
    if (
        element_parser.underlying_function.id
        and element_parser.underlying_function.id in explored_reference_id
    ):
        # Already added from other fathers
        return
    if element_parser.underlying_function.id:
        explored_reference_id.add(element_parser.underlying_function.id)
    elem_parser.analyze_params()
    if isinstance(elem, Modifier):
        self._contract.compilation_unit.add_modifier(elem)
    else:
        self._contract.compilation_unit.add_function(elem)
    self._slither_parser.add_function_or_modifier_parser(elem_parser)
    all_elements[elem.canonical_name] = elem
    parser.append(elem_parser)
def _analyze_params_elements(  # pylint: disable=too-many-arguments,too-many-locals
    self,
    elements_no_params: Sequence[FunctionSolc],
    getter: Callable[["ContractSolc"], List[FunctionSolc]],
    getter_available: Callable[[Contract], List[FunctionContract]],
    Cls: Callable,
    Cls_parser: Callable,
    parser: Union[List[FunctionSolc], List[ModifierSolc]],
) -> Dict[str, Function]:
    """
    Analyze the parameters of the given elements (Function or Modifier).
    The function iterates over the inheritance to create an instance or inherited elements (Function or Modifier)
    If the element is shadowed, set is_shadowed to True
    :param elements_no_params: list of elements to analyzer
    :param getter: fun x
    :param getter_available: fun x
    :param Cls: Class to create for collision
    :return: mapping of canonical name -> element for this contract
    """
    all_elements: Dict[str, Function] = {}
    explored_reference_id: Set[str] = set()
    try:
        # First pull in every element from the inherited contracts.
        for father in self._contract.inheritance:
            father_parser = self._slither_parser.underlying_contract_to_parser[father]
            for element_parser in getter(father_parser):  # type: ignore
                self._analyze_params_element(
                    Cls, Cls_parser, element_parser, explored_reference_id, parser, all_elements
                )
        accessible_elements = self._contract.available_elements_from_inheritances(
            all_elements, getter_available
        )
        # If there is a constructor in the functions
        # We remove the previous constructor
        # As only one constructor is present per contracts
        #
        # Note: contract.all_functions_called returns the constructors of the base contracts
        has_constructor = False
        for element_parser in elements_no_params:
            element_parser.analyze_params()
            if element_parser.underlying_function.is_constructor:
                has_constructor = True
        if has_constructor:
            # NOTE(review): this filtered dict is never used — it looks like it
            # was meant to replace `accessible_elements`; confirm intent before
            # removing or assigning it.
            _accessible_functions = {
                k: v for (k, v) in accessible_elements.items() if not v.is_constructor
            }
        # Locally declared elements override the inherited ones by full name.
        for element_parser in elements_no_params:
            accessible_elements[
                element_parser.underlying_function.full_name
            ] = element_parser.underlying_function
            all_elements[
                element_parser.underlying_function.canonical_name
            ] = element_parser.underlying_function
        # An inherited element whose full name now resolves elsewhere is shadowed.
        for element in all_elements.values():
            if accessible_elements[element.full_name] != all_elements[element.canonical_name]:
                element.is_shadowed = True
                accessible_elements[element.full_name].shadows = True
    except (VariableNotFound, KeyError) as e:
        self.log_incorrect_parsing(
            f"Missing params {e} {self._contract.source_mapping.to_detailed_str()}"
        )
    return all_elements
def analyze_constant_state_variables(self) -> None:
    """Analyze constant state variables, logging (not failing) on unresolved refs."""
    constant_parsers = (
        p for p in self._variables_parser if p.underlying_variable.is_constant
    )
    for variable_parser in constant_parsers:
        # Constant expressions may reference calls we cannot evaluate here.
        try:
            variable_parser.analyze(self)
        except (VariableNotFound, KeyError) as e:
            LOGGER.error(e)
def analyze_state_variables(self) -> None:
    """Analyze every parsed state variable, tolerating partial ASTs."""
    try:
        for variable_parser in self._variables_parser:
            variable_parser.analyze(self)
    except (VariableNotFound, KeyError) as e:
        self.log_incorrect_parsing(f"Missing state variable {e}")
def analyze_using_for(self) -> None:  # pylint: disable=too-many-branches
    """Resolve every `using ... for ...` directive, merging the inherited ones.

    Handles both the compact AST (typeName/libraryName/functionList keys)
    and the legacy AST (positional children) layouts.
    """
    try:
        for father in self._contract.inheritance:
            self._contract.using_for.update(father.using_for)
        if self.is_compact_ast:
            for using_for in self._usingForNotParsed:
                if "typeName" in using_for and using_for["typeName"]:
                    type_name: USING_FOR_KEY = parse_type(using_for["typeName"], self)
                else:
                    # `using L for *;` — applies to every type.
                    type_name = "*"
                if type_name not in self._contract.using_for:
                    self._contract.using_for[type_name] = []
                if "libraryName" in using_for:
                    self._contract.using_for[type_name].append(
                        parse_type(using_for["libraryName"], self)
                    )
                else:
                    # We have a list of functions. A function can be topLevel or a library function
                    self._analyze_function_list(using_for["functionList"], type_name)
        else:
            # Legacy AST: one child = `using X for *`, two = `using X for Y`.
            for using_for in self._usingForNotParsed:
                children = using_for[self.get_children()]
                assert children and len(children) <= 2
                if len(children) == 2:
                    new = parse_type(children[0], self)
                    old: USING_FOR_KEY = parse_type(children[1], self)
                else:
                    new = parse_type(children[0], self)
                    old = "*"
                if old not in self._contract.using_for:
                    self._contract.using_for[old] = []
                self._contract.using_for[old].append(new)
        self._usingForNotParsed = []
    except (VariableNotFound, KeyError) as e:
        self.log_incorrect_parsing(f"Missing using for {e}")
def _analyze_function_list(self, function_list: List, type_name: USING_FOR_KEY) -> None:
    """Attach each `using {f, ...} for T` entry to `type_name`, resolving
    top-level functions, library functions, and aliased imports."""
    for entry in function_list:
        parts = entry["function"]["name"].split(".")
        if len(parts) == 1:
            # Bare name: a file-scope (top level) function.
            self._analyze_top_level_function(parts[0], type_name)
        elif len(parts) == 2:
            # Either AliasedImport.function or Library.function.
            self._check_aliased_import(parts[0], parts[1], type_name)
        else:
            # Alias.Library.function — the import alias is irrelevant.
            self._analyze_library_function(parts[1], parts[2], type_name)
def _check_aliased_import(
    self, first_part: str, function_name: str, type_name: USING_FOR_KEY
) -> None:
    """Disambiguate `Name.function`: if Name is an import alias the target is a
    top-level function, otherwise it is a library function."""
    is_import_alias = any(
        imported.alias == first_part for imported in self._contract.file_scope.imports
    )
    if is_import_alias:
        self._analyze_top_level_function(function_name, type_name)
    else:
        self._analyze_library_function(first_part, function_name, type_name)
def _analyze_top_level_function(self, function_name: str, type_name: USING_FOR_KEY) -> None:
    """Bind every top-level function named `function_name` to `type_name`."""
    matches = [
        f for f in self.compilation_unit.functions_top_level if f.name == function_name
    ]
    self._contract.using_for[type_name].extend(matches)
def _analyze_library_function(
    self, library_name: str, function_name: str, type_name: USING_FOR_KEY
) -> None:
    """Bind `library_name.function_name` to `type_name`; log if unresolved."""
    target = None
    for candidate in self.compilation_unit.contracts:
        if candidate.name != library_name:
            continue
        # First function with the matching name wins.
        target = next(
            (f for f in candidate.functions if f.name == function_name), None
        )
        if target is not None:
            break
    if target is not None:
        self._contract.using_for[type_name].append(target)
    else:
        self.log_incorrect_parsing(
            f"Contract level using for: Library {library_name} - function {function_name} not found"
        )
def analyze_enums(self) -> None:
    """Inherit ancestor enums, then parse+analyze the ones declared here."""
    try:
        for ancestor in self._contract.inheritance:
            self._contract.enums_as_dict.update(ancestor.enums_as_dict)
        # Enums are simple enough to be parsed and analyzed in one pass.
        for raw_enum in self._enumsNotParsed:
            self._analyze_enum(raw_enum)
        self._enumsNotParsed = []
    except (VariableNotFound, KeyError) as e:
        self.log_incorrect_parsing(f"Missing enum {e}")
def _analyze_enum(
    self,
    enum: Dict,
) -> None:
    """Parse and register one enum declaration (compact or legacy AST)."""
    # Enum can be parsed in one pass
    if self.is_compact_ast:
        name = enum["name"]
        canonicalName = enum["canonicalName"]
    else:
        # Legacy AST stores the name under "attributes"; older compilers may
        # omit canonicalName, so fall back to Contract.name qualification.
        name = enum["attributes"][self.get_key()]
        if "canonicalName" in enum["attributes"]:
            canonicalName = enum["attributes"]["canonicalName"]
        else:
            canonicalName = self._contract.name + "." + name
    values = []
    for child in enum[self.get_children("members")]:
        assert child[self.get_key()] == "EnumValue"
        if self.is_compact_ast:
            values.append(child["name"])
        else:
            values.append(child["attributes"][self.get_key()])
    new_enum = EnumContract(name, canonicalName, values)
    new_enum.set_contract(self._contract)
    new_enum.set_offset(enum["src"], self._contract.compilation_unit)
    self._contract.enums_as_dict[canonicalName] = new_enum
def _analyze_struct(self, struct: StructureContractSolc) -> None:
    # Delegate: the structure parser resolves its own member types.
    struct.analyze()
def analyze_structs(self) -> None:
    """Analyze all parsed structure declarations, tolerating partial ASTs."""
    try:
        for struct_parser in self._structures_parser:
            self._analyze_struct(struct_parser)
    except (VariableNotFound, KeyError) as e:
        self.log_incorrect_parsing(f"Missing struct {e}")
def analyze_custom_errors(self) -> None:
    """Resolve parameter types for every parsed custom error."""
    for error_parser in self._custom_errors_parser:
        error_parser.analyze_params()
def analyze_events(self) -> None:
    """Inherit ancestor events, then parse and analyze the declared ones."""
    try:
        for ancestor in self._contract.inheritance_reverse:
            self._contract.events_as_dict.update(ancestor.events_as_dict)
        for raw_event in self._eventsNotParsed:
            event = EventContract()
            event.set_contract(self._contract)
            event.set_offset(raw_event["src"], self._contract.compilation_unit)
            event_parser = EventContractSolc(event, raw_event, self)  # type: ignore
            event_parser.analyze()  # type: ignore
            self._contract.events_as_dict[event.full_name] = event
    except (VariableNotFound, KeyError) as e:
        self.log_incorrect_parsing(f"Missing event {e}")
    # Cleared unconditionally, even after a handled failure.
    self._eventsNotParsed = []
# endregion
###################################################################################
###################################################################################
# region Internal
###################################################################################
###################################################################################
def delete_content(self) -> None:
    """
    Remove everything not parsed from the contract.
    Used only when something went wrong with the inheritance parsing.
    :return:
    """
    for attribute in (
        "_functionsNotParsed",
        "_modifiersNotParsed",
        "_functions_no_params",
        "_modifiers_no_params",
        "_eventsNotParsed",
        "_variablesNotParsed",
        "_enumsNotParsed",
        "_structuresNotParsed",
        "_usingForNotParsed",
        "_customErrorsNotParsed",
    ):
        setattr(self, attribute, [])
def _handle_comment(self, attributes: Dict) -> None:
    """
    Save the contract comment in self.comments
    And handle custom slither comments
    Args:
        attributes: raw AST attributes of the contract node
    Returns:
        None
    """
    # Old solc versions store the comment in attributes["documentation"]
    # More recent ones store it in attributes["documentation"]["text"]
    if (
        "documentation" in attributes
        and attributes["documentation"] is not None
        and (
            "text" in attributes["documentation"]
            or isinstance(attributes["documentation"], str)
        )
    ):
        text = (
            attributes["documentation"]
            if isinstance(attributes["documentation"], str)
            else attributes["documentation"]["text"]
        )
        self._contract.comments = text

        # Look for custom comments
        # Split on both newlines and commas so several annotations can share
        # one NatSpec line.
        candidates = text.replace("\n", ",").split(",")
        for candidate in candidates:
            if "@custom:security isDelegatecallProxy" in candidate:
                self._contract.is_upgradeable_proxy = True
            if "@custom:security isUpgradeable" in candidate:
                self._contract.is_upgradeable = True

            version_name = re.search(r"@custom:version name=([\w-]+)", candidate)
            if version_name:
                self._contract.upgradeable_version = version_name.group(1)
# endregion
###################################################################################
###################################################################################
# region Built in definitions
###################################################################################
###################################################################################
def __hash__(self) -> int:
    # Delegate hashing to the underlying contract's id.
    return self._contract.id
# endregion
| ContractSolc |
python | pypa__warehouse | tests/unit/manage/test_views.py | {
"start": 102475,
"end": 155162
} | class ____:
    @pytest.mark.parametrize("enabled", [False, True])
    def test_manage_project_settings(self, enabled, monkeypatch):
        """Settings view returns both forms whether or not orgs are enabled."""
        request = pretend.stub(organization_access=enabled)
        project = pretend.stub(organization=None, lifecycle_status=None)
        view = views.ManageProjectSettingsViews(project, request)
        form = pretend.stub()
        view.transfer_organization_project_form_class = lambda *a, **kw: form
        view.add_alternate_repository_form_class = lambda *a, **kw: form

        user_organizations = pretend.call_recorder(
            lambda *a, **kw: {
                "organizations_managed": [],
                "organizations_owned": [],
                "organizations_billing": [],
            }
        )
        monkeypatch.setattr(views, "user_organizations", user_organizations)

        assert view.manage_project_settings() == {
            "project": project,
            "MAX_FILESIZE": MAX_FILESIZE,
            "MAX_PROJECT_SIZE": MAX_PROJECT_SIZE,
            "transfer_organization_project_form": form,
            "add_alternate_repository_form_class": form,
        }
    def test_manage_project_settings_in_organization_managed(self, monkeypatch):
        """A project in a managed org offers transfer only to owned orgs."""
        request = pretend.stub(organization_access=True)
        organization_managed = pretend.stub(name="managed-org", is_active=True)
        organization_owned = pretend.stub(name="owned-org", is_active=True)
        project = pretend.stub(organization=organization_managed, lifecycle_status=None)
        view = views.ManageProjectSettingsViews(project, request)
        form = pretend.stub()
        view.transfer_organization_project_form_class = pretend.call_recorder(
            lambda *a, **kw: form
        )
        view.add_alternate_repository_form_class = lambda *a, **kw: form

        user_organizations = pretend.call_recorder(
            lambda *a, **kw: {
                "organizations_managed": [organization_managed],
                "organizations_owned": [organization_owned],
                "organizations_billing": [],
            }
        )
        monkeypatch.setattr(views, "user_organizations", user_organizations)

        assert view.manage_project_settings() == {
            "project": project,
            "MAX_FILESIZE": MAX_FILESIZE,
            "MAX_PROJECT_SIZE": MAX_PROJECT_SIZE,
            "transfer_organization_project_form": form,
            "add_alternate_repository_form_class": form,
        }
        assert view.transfer_organization_project_form_class.calls == [
            pretend.call(organization_choices={organization_owned})
        ]
    def test_manage_project_settings_in_organization_owned(self, monkeypatch):
        """A project in an owned org offers transfer only to managed orgs."""
        request = pretend.stub(organization_access=True)
        organization_managed = pretend.stub(name="managed-org", is_active=True)
        organization_owned = pretend.stub(name="owned-org", is_active=True)
        project = pretend.stub(organization=organization_owned, lifecycle_status=None)
        view = views.ManageProjectSettingsViews(project, request)
        form = pretend.stub()
        view.transfer_organization_project_form_class = pretend.call_recorder(
            lambda *a, **kw: form
        )
        view.add_alternate_repository_form_class = lambda *a, **kw: form

        user_organizations = pretend.call_recorder(
            lambda *a, **kw: {
                "organizations_managed": [organization_managed],
                "organizations_owned": [organization_owned],
                "organizations_billing": [],
            }
        )
        monkeypatch.setattr(views, "user_organizations", user_organizations)

        assert view.manage_project_settings() == {
            "project": project,
            "MAX_FILESIZE": MAX_FILESIZE,
            "MAX_PROJECT_SIZE": MAX_PROJECT_SIZE,
            "transfer_organization_project_form": form,
            "add_alternate_repository_form_class": form,
        }
        assert view.transfer_organization_project_form_class.calls == [
            pretend.call(organization_choices={organization_managed})
        ]
    def test_add_alternate_repository(self, monkeypatch, db_request):
        """Valid alternate-repository data is added and flashed as success."""
        project = ProjectFactory.create(name="foo")
        db_request.POST = MultiDict(
            {
                "display_name": "foo alt repo",
                "link_url": "https://example.org",
                "description": "foo alt repo descr",
                "alternate_repository_location": "add",
            }
        )
        db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False))
        db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
        db_request.session = pretend.stub(
            flash=pretend.call_recorder(lambda *a, **kw: None)
        )
        db_request.user = UserFactory.create()
        RoleFactory.create(project=project, user=db_request.user, role_name="Owner")

        add_alternate_repository_form_class = pretend.call_recorder(
            views.AddAlternateRepositoryForm
        )
        monkeypatch.setattr(
            views,
            "AddAlternateRepositoryForm",
            add_alternate_repository_form_class,
        )

        settings_views = views.ManageProjectSettingsViews(project, db_request)
        result = settings_views.add_project_alternate_repository()

        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == "/the-redirect"
        assert db_request.session.flash.calls == [
            pretend.call("Added alternate repository 'foo alt repo'", queue="success")
        ]
        assert db_request.route_path.calls == [
            pretend.call("manage.project.settings", project_name="foo")
        ]
        assert add_alternate_repository_form_class.calls == [
            pretend.call(db_request.POST)
        ]
    def test_add_alternate_repository_invalid(self, monkeypatch, db_request):
        """An invalid link_url is rejected and flashed as an error."""
        project = ProjectFactory.create(name="foo")
        db_request.POST = MultiDict(
            {
                "display_name": "foo alt repo",
                "link_url": "invalid link",
                "description": "foo alt repo descr",
                "alternate_repository_location": "add",
            }
        )
        db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False))
        db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
        db_request.session = pretend.stub(
            flash=pretend.call_recorder(lambda *a, **kw: None)
        )
        db_request.user = UserFactory.create()
        RoleFactory.create(project=project, user=db_request.user, role_name="Owner")

        add_alternate_repository_form_class = pretend.call_recorder(
            views.AddAlternateRepositoryForm
        )
        monkeypatch.setattr(
            views,
            "AddAlternateRepositoryForm",
            add_alternate_repository_form_class,
        )

        settings_views = views.ManageProjectSettingsViews(project, db_request)
        result = settings_views.add_project_alternate_repository()

        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == "/the-redirect"
        assert db_request.session.flash.calls == [
            pretend.call("Invalid alternate repository location details", queue="error")
        ]
        assert db_request.route_path.calls == [
            pretend.call("manage.project.settings", project_name="foo")
        ]
        assert add_alternate_repository_form_class.calls == [
            pretend.call(db_request.POST)
        ]
    def test_delete_alternate_repository(self, db_request):
        """A confirmed delete removes the alternate repository with success flash."""
        project = ProjectFactory.create(name="foo")
        alt_repo = AlternateRepositoryFactory.create(project=project)
        db_request.POST = MultiDict(
            {
                "alternate_repository_id": str(alt_repo.id),
                "confirm_alternate_repository_name": alt_repo.name,
                "alternate_repository_location": "delete",
            }
        )
        db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False))
        db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
        db_request.session = pretend.stub(
            flash=pretend.call_recorder(lambda *a, **kw: None)
        )
        db_request.user = UserFactory.create()
        RoleFactory.create(project=project, user=db_request.user, role_name="Owner")

        settings_views = views.ManageProjectSettingsViews(project, db_request)
        result = settings_views.delete_project_alternate_repository()

        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == "/the-redirect"
        assert db_request.session.flash.calls == [
            pretend.call(
                f"Deleted alternate repository '{alt_repo.name}'", queue="success"
            )
        ]
        assert db_request.route_path.calls == [
            pretend.call("manage.project.settings", project_name="foo")
        ]
    @pytest.mark.parametrize("alt_repo_id", [None, "", "blah"])
    def test_delete_alternate_repository_invalid_id(self, db_request, alt_repo_id):
        """Non-UUID alternate repository ids are rejected with an error flash."""
        project = ProjectFactory.create(name="foo")
        alt_repo = AlternateRepositoryFactory.create(project=project)
        db_request.POST = MultiDict(
            {
                "alternate_repository_id": alt_repo_id,
                "confirm_alternate_repository_name": alt_repo.name,
                "alternate_repository_location": "delete",
            }
        )
        db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False))
        db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
        db_request.session = pretend.stub(
            flash=pretend.call_recorder(lambda *a, **kw: None)
        )
        db_request.user = UserFactory.create()
        RoleFactory.create(project=project, user=db_request.user, role_name="Owner")

        settings_views = views.ManageProjectSettingsViews(project, db_request)
        result = settings_views.delete_project_alternate_repository()

        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == "/the-redirect"
        assert db_request.session.flash.calls == [
            pretend.call("Invalid alternate repository id", queue="error")
        ]
        assert db_request.route_path.calls == [
            pretend.call("manage.project.settings", project_name="foo")
        ]
    def test_delete_alternate_repository_wrong_id(self, db_request):
        """A valid UUID that matches no repo of the project flashes an error."""
        project = ProjectFactory.create(name="foo")
        alt_repo = AlternateRepositoryFactory.create(project=project)
        db_request.POST = MultiDict(
            {
                "alternate_repository_id": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
                "confirm_alternate_repository_name": alt_repo.name,
                "alternate_repository_location": "delete",
            }
        )
        db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False))
        db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
        db_request.session = pretend.stub(
            flash=pretend.call_recorder(lambda *a, **kw: None)
        )
        db_request.user = UserFactory.create()
        RoleFactory.create(project=project, user=db_request.user, role_name="Owner")

        settings_views = views.ManageProjectSettingsViews(project, db_request)
        result = settings_views.delete_project_alternate_repository()

        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == "/the-redirect"
        assert db_request.session.flash.calls == [
            pretend.call("Invalid alternate repository for project", queue="error")
        ]
        assert db_request.route_path.calls == [
            pretend.call("manage.project.settings", project_name="foo")
        ]
    def test_delete_alternate_repository_no_confirm(self, db_request):
        """Deleting without the confirmation field flashes 'Confirm the request'."""
        project = ProjectFactory.create(name="foo")
        alt_repo = AlternateRepositoryFactory.create(project=project)
        db_request.POST = MultiDict(
            {
                "alternate_repository_id": str(alt_repo.id),
                "alternate_repository_location": "delete",
            }
        )
        db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False))
        db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
        db_request.session = pretend.stub(
            flash=pretend.call_recorder(lambda *a, **kw: None)
        )
        db_request.user = UserFactory.create()
        RoleFactory.create(project=project, user=db_request.user, role_name="Owner")

        settings_views = views.ManageProjectSettingsViews(project, db_request)
        result = settings_views.delete_project_alternate_repository()

        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == "/the-redirect"
        assert db_request.session.flash.calls == [
            pretend.call("Confirm the request", queue="error")
        ]
        assert db_request.route_path.calls == [
            pretend.call("manage.project.settings", project_name="foo")
        ]
    def test_delete_alternate_repository_wrong_confirm(self, db_request):
        """A mismatched confirmation name aborts the delete with an error flash."""
        project = ProjectFactory.create(name="foo")
        alt_repo = AlternateRepositoryFactory.create(project=project)
        db_request.POST = MultiDict(
            {
                "alternate_repository_id": str(alt_repo.id),
                "confirm_alternate_repository_name": f"invalid-confirm-{alt_repo.name}",
                "alternate_repository_location": "delete",
            }
        )
        db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False))
        db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
        db_request.session = pretend.stub(
            flash=pretend.call_recorder(lambda *a, **kw: None)
        )
        db_request.user = UserFactory.create()
        RoleFactory.create(project=project, user=db_request.user, role_name="Owner")

        settings_views = views.ManageProjectSettingsViews(project, db_request)
        result = settings_views.delete_project_alternate_repository()

        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == "/the-redirect"
        assert db_request.session.flash.calls == [
            pretend.call(
                f"Could not delete alternate repository - "
                f"invalid-confirm-{alt_repo.name} is not the same as {alt_repo.name}",
                queue="error",
            )
        ]
        assert db_request.route_path.calls == [
            pretend.call("manage.project.settings", project_name="foo")
        ]
    def test_remove_organization_project_no_confirm(self):
        """Removing a project from an org without confirmation raises a redirect."""
        user = pretend.stub()
        project = pretend.stub(
            name="foo",
            normalized_name="foo",
            organization=pretend.stub(owners=[user]),
            owners=[user],
        )
        request = pretend.stub(
            POST={},
            user=user,
            organization_access=True,
            session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
            route_path=lambda *a, **kw: "/foo/bar/",
        )

        with pytest.raises(HTTPSeeOther) as exc:
            org_views.remove_organization_project(project, request)
            assert exc.value.status_code == 303
            assert exc.value.headers["Location"] == "/foo/bar/"

        assert request.session.flash.calls == [
            pretend.call("Confirm the request", queue="error")
        ]
    def test_remove_organization_project_wrong_confirm(self):
        """A mismatched confirmation name aborts the removal with an error flash."""
        user = pretend.stub()
        project = pretend.stub(
            name="foo",
            normalized_name="foo",
            organization=pretend.stub(owners=[user]),
            owners=[user],
        )
        request = pretend.stub(
            POST={"confirm_remove_organization_project_name": "FOO"},
            user=user,
            organization_access=True,
            session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
            route_path=lambda *a, **kw: "/foo/bar/",
        )

        with pytest.raises(HTTPSeeOther) as exc:
            org_views.remove_organization_project(project, request)
            assert exc.value.status_code == 303
            assert exc.value.headers["Location"] == "/foo/bar/"

        assert request.session.flash.calls == [
            pretend.call(
                (
                    "Could not remove project from organization - "
                    "'FOO' is not the same as 'foo'"
                ),
                queue="error",
            )
        ]
    def test_remove_organization_project_disable_organizations(self):
        """With organization access disabled the removal is refused outright."""
        project = pretend.stub(name="foo", normalized_name="foo")
        request = pretend.stub(
            organization_access=False,
            route_path=pretend.call_recorder(lambda *a, **kw: "/the-redirect"),
            session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
        )

        result = org_views.remove_organization_project(project, request)

        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == "/the-redirect"
        assert request.session.flash.calls == [
            pretend.call("Organizations are disabled", queue="error")
        ]
        assert request.route_path.calls == [
            pretend.call("manage.project.settings", project_name="foo")
        ]
    def test_remove_organization_project_no_current_organization(
        self, monkeypatch, db_request
    ):
        """Removal of a project that is not in any org flashes an error and
        sends no notification email."""
        project = ProjectFactory.create(name="foo")
        db_request.POST = MultiDict(
            {
                "confirm_remove_organization_project_name": project.name,
            }
        )
        db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False))
        db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
        db_request.session = pretend.stub(
            flash=pretend.call_recorder(lambda *a, **kw: None)
        )
        db_request.user = UserFactory.create()
        RoleFactory.create(project=project, user=db_request.user, role_name="Owner")

        send_organization_project_removed_email = pretend.call_recorder(
            lambda req, user, **k: None
        )
        monkeypatch.setattr(
            org_views,
            "send_organization_project_removed_email",
            send_organization_project_removed_email,
        )

        result = org_views.remove_organization_project(project, db_request)

        assert db_request.session.flash.calls == [
            pretend.call(
                "Could not remove project from organization - no organization found",
                queue="error",
            )
        ]
        assert db_request.route_path.calls == [
            pretend.call("manage.project.settings", project_name="foo")
        ]
        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == "/the-redirect"

        assert send_organization_project_removed_email.calls == []
    def test_remove_organization_project_not_organization_owner(self):
        """A user who does not own the organization may not remove the project."""
        user = pretend.stub()
        project = pretend.stub(
            name="foo",
            normalized_name="foo",
            organization=pretend.stub(owners=[]),
            owners=[user],
        )
        request = pretend.stub(
            POST={},
            user=user,
            organization_access=True,
            session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
            route_path=lambda *a, **kw: "/foo/bar/",
        )

        result = org_views.remove_organization_project(project, request)

        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == "/foo/bar/"
        assert request.session.flash.calls == [
            pretend.call(
                (
                    "Could not remove project from organization - "
                    "you do not have the required permissions"
                ),
                queue="error",
            )
        ]
    def test_remove_organization_project_no_individual_owner(
        self, monkeypatch, db_request
    ):
        """An org owner who is not also a project owner may not remove it."""
        project = ProjectFactory.create(name="foo")
        OrganizationProjectFactory.create(
            organization=OrganizationFactory.create(name="bar"), project=project
        )
        db_request.POST = MultiDict(
            {
                "confirm_remove_organization_project_name": project.name,
            }
        )
        db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False))
        db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
        db_request.session = pretend.stub(
            flash=pretend.call_recorder(lambda *a, **kw: None)
        )
        db_request.user = UserFactory.create()
        OrganizationRoleFactory.create(
            organization=project.organization, user=db_request.user, role_name="Owner"
        )

        result = org_views.remove_organization_project(project, db_request)

        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == "/the-redirect"
        assert db_request.session.flash.calls == [
            pretend.call(
                (
                    "Could not remove project from organization - "
                    "you do not have the required permissions"
                ),
                queue="error",
            )
        ]
        assert db_request.route_path.calls == [
            pretend.call("manage.project.settings", project_name="foo")
        ]
def test_remove_organization_project(self, monkeypatch, db_request):
project = ProjectFactory.create(name="foo")
OrganizationProjectFactory.create(
organization=OrganizationFactory.create(name="bar"), project=project
)
db_request.POST = MultiDict(
{
"confirm_remove_organization_project_name": project.name,
}
)
db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False))
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.user = UserFactory.create()
OrganizationRoleFactory.create(
organization=project.organization, user=db_request.user, role_name="Owner"
)
RoleFactory.create(project=project, user=db_request.user, role_name="Owner")
send_organization_project_removed_email = pretend.call_recorder(
lambda req, user, **k: None
)
monkeypatch.setattr(
org_views,
"send_organization_project_removed_email",
send_organization_project_removed_email,
)
result = org_views.remove_organization_project(project, db_request)
assert db_request.session.flash.calls == [
pretend.call("Removed the project 'foo' from 'bar'", queue="success")
]
assert db_request.route_path.calls == [
pretend.call(
"manage.organization.projects",
organization_name=project.organization.normalized_name,
)
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
assert send_organization_project_removed_email.calls == [
pretend.call(
db_request,
{db_request.user},
organization_name=project.organization.name,
project_name=project.name,
),
]
def test_transfer_organization_project_no_confirm(self):
user = pretend.stub()
project = pretend.stub(
name="foo",
normalized_name="foo",
organization=pretend.stub(owners=[user]),
)
request = pretend.stub(
POST={},
user=user,
organization_access=True,
session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
route_path=lambda *a, **kw: "/foo/bar/",
)
with pytest.raises(HTTPSeeOther) as exc:
org_views.transfer_organization_project(project, request)
assert exc.value.status_code == 303
assert exc.value.headers["Location"] == "/foo/bar/"
assert request.session.flash.calls == [
pretend.call("Confirm the request", queue="error")
]
def test_transfer_organization_project_wrong_confirm(self):
user = pretend.stub()
project = pretend.stub(
name="foo",
normalized_name="foo",
organization=pretend.stub(owners=[user]),
)
request = pretend.stub(
POST={"confirm_transfer_organization_project_name": "FOO"},
user=user,
organization_access=True,
session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
route_path=lambda *a, **kw: "/foo/bar/",
)
with pytest.raises(HTTPSeeOther) as exc:
org_views.transfer_organization_project(project, request)
assert exc.value.status_code == 303
assert exc.value.headers["Location"] == "/foo/bar/"
assert request.session.flash.calls == [
pretend.call(
"Could not transfer project - 'FOO' is not the same as 'foo'",
queue="error",
)
]
def test_transfer_organization_project_disable_organizations(self):
project = pretend.stub(name="foo", normalized_name="foo")
request = pretend.stub(
organization_access=False,
route_path=pretend.call_recorder(lambda *a, **kw: "/the-redirect"),
session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
)
result = org_views.transfer_organization_project(project, request)
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
assert request.session.flash.calls == [
pretend.call("Organizations are disabled", queue="error")
]
assert request.route_path.calls == [
pretend.call("manage.project.settings", project_name="foo")
]
def test_transfer_organization_project_no_current_organization(
self, monkeypatch, db_request
):
organization = OrganizationFactory.create(name="baz")
project = ProjectFactory.create(name="foo")
db_request.POST = MultiDict(
{
"organization": str(organization.id),
"confirm_transfer_organization_project_name": project.name,
}
)
db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False))
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.user = UserFactory.create()
OrganizationRoleFactory.create(
organization=organization, user=db_request.user, role_name="Owner"
)
RoleFactory.create(project=project, user=db_request.user, role_name="Owner")
send_organization_project_removed_email = pretend.call_recorder(
lambda req, user, **k: None
)
monkeypatch.setattr(
org_views,
"send_organization_project_removed_email",
send_organization_project_removed_email,
)
send_organization_project_added_email = pretend.call_recorder(
lambda req, user, **k: None
)
monkeypatch.setattr(
org_views,
"send_organization_project_added_email",
send_organization_project_added_email,
)
result = org_views.transfer_organization_project(project, db_request)
assert db_request.session.flash.calls == [
pretend.call("Transferred the project 'foo' to 'baz'", queue="success")
]
assert db_request.route_path.calls == [
pretend.call("manage.project.settings", project_name="foo")
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
assert send_organization_project_removed_email.calls == []
assert send_organization_project_added_email.calls == [
pretend.call(
db_request,
{db_request.user},
organization_name=organization.name,
project_name=project.name,
)
]
def test_transfer_organization_project_not_organization_owner(self):
user = pretend.stub()
project = pretend.stub(
name="foo",
normalized_name="foo",
organization=pretend.stub(owners=[]),
)
request = pretend.stub(
POST={},
user=user,
organization_access=True,
session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
route_path=lambda *a, **kw: "/foo/bar/",
)
result = org_views.transfer_organization_project(project, request)
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/foo/bar/"
assert request.session.flash.calls == [
pretend.call(
(
"Could not transfer project - "
"you do not have the required permissions"
),
queue="error",
)
]
def test_transfer_organization_project_no_individual_owner(
self, monkeypatch, db_request
):
organization = OrganizationFactory.create(name="baz")
project = ProjectFactory.create(name="foo")
OrganizationProjectFactory.create(
organization=OrganizationFactory.create(name="bar"), project=project
)
db_request.POST = MultiDict(
{
"organization": str(organization.id),
"confirm_transfer_organization_project_name": project.name,
}
)
db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False))
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.user = UserFactory.create()
OrganizationRoleFactory.create(
organization=organization, user=db_request.user, role_name="Owner"
)
OrganizationRoleFactory.create(
organization=project.organization, user=db_request.user, role_name="Owner"
)
send_organization_project_removed_email = pretend.call_recorder(
lambda req, user, **k: None
)
monkeypatch.setattr(
org_views,
"send_organization_project_removed_email",
send_organization_project_removed_email,
)
send_organization_project_added_email = pretend.call_recorder(
lambda req, user, **k: None
)
monkeypatch.setattr(
org_views,
"send_organization_project_added_email",
send_organization_project_added_email,
)
result = org_views.transfer_organization_project(project, db_request)
assert db_request.session.flash.calls == [
pretend.call("Transferred the project 'foo' to 'baz'", queue="success")
]
assert db_request.route_path.calls == [
pretend.call("manage.project.settings", project_name="foo")
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
assert send_organization_project_removed_email.calls == [
pretend.call(
db_request,
{db_request.user},
organization_name=project.organization.name,
project_name=project.name,
)
]
assert send_organization_project_added_email.calls == [
pretend.call(
db_request,
{db_request.user},
organization_name=organization.name,
project_name=project.name,
)
]
def test_transfer_organization_project_invalid(self, monkeypatch, db_request):
project = ProjectFactory.create(name="foo")
OrganizationProjectFactory.create(
organization=OrganizationFactory.create(name="bar"), project=project
)
db_request.POST = MultiDict(
{
"organization": "",
"confirm_transfer_organization_project_name": project.name,
}
)
db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False))
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.user = UserFactory.create()
OrganizationRoleFactory.create(
organization=project.organization, user=db_request.user, role_name="Owner"
)
RoleFactory.create(project=project, user=db_request.user, role_name="Owner")
result = org_views.transfer_organization_project(project, db_request)
assert db_request.session.flash.calls == [
pretend.call("Select organization", queue="error")
]
assert db_request.route_path.calls == [
pretend.call("manage.project.settings", project_name="foo")
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
def test_transfer_organization_project_from_organization_managed(
self, monkeypatch, db_request
):
organization = OrganizationFactory.create(name="baz")
organization_managed = OrganizationFactory.create(name="bar-managed")
organization_owned = OrganizationFactory.create(name="bar-owned")
project = ProjectFactory.create(name="foo")
OrganizationProjectFactory.create(
organization=organization_managed, project=project
)
db_request.POST = MultiDict(
{
"organization": str(organization.id),
"confirm_transfer_organization_project_name": project.name,
}
)
db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False))
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.user = UserFactory.create()
OrganizationRoleFactory.create(
organization=organization, user=db_request.user, role_name="Owner"
)
OrganizationRoleFactory.create(
organization=project.organization, user=db_request.user, role_name="Owner"
)
RoleFactory.create(project=project, user=db_request.user, role_name="Owner")
user_organizations = pretend.call_recorder(
lambda *a, **kw: {
"organizations_managed": [organization_managed],
"organizations_owned": [organization_owned, organization],
"organizations_billing": [],
}
)
monkeypatch.setattr(org_views, "user_organizations", user_organizations)
transfer_organization_project_form_class = pretend.call_recorder(
views.TransferOrganizationProjectForm
)
monkeypatch.setattr(
org_views,
"TransferOrganizationProjectForm",
transfer_organization_project_form_class,
)
send_organization_project_removed_email = pretend.call_recorder(
lambda req, user, **k: None
)
monkeypatch.setattr(
org_views,
"send_organization_project_removed_email",
send_organization_project_removed_email,
)
send_organization_project_added_email = pretend.call_recorder(
lambda req, user, **k: None
)
monkeypatch.setattr(
org_views,
"send_organization_project_added_email",
send_organization_project_added_email,
)
result = org_views.transfer_organization_project(project, db_request)
assert db_request.session.flash.calls == [
pretend.call("Transferred the project 'foo' to 'baz'", queue="success")
]
assert db_request.route_path.calls == [
pretend.call("manage.project.settings", project_name="foo")
]
assert transfer_organization_project_form_class.calls == [
pretend.call(
db_request.POST, organization_choices={organization, organization_owned}
)
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
assert send_organization_project_removed_email.calls == [
pretend.call(
db_request,
{db_request.user},
organization_name=project.organization.name,
project_name=project.name,
)
]
assert send_organization_project_added_email.calls == [
pretend.call(
db_request,
{db_request.user},
organization_name=organization.name,
project_name=project.name,
)
]
def test_transfer_organization_project_from_organization_owned(
self, monkeypatch, db_request
):
organization = OrganizationFactory.create(name="baz")
organization_managed = OrganizationFactory.create(name="bar-managed")
organization_owned = OrganizationFactory.create(name="bar-owned")
project = ProjectFactory.create(name="foo")
OrganizationProjectFactory.create(
organization=organization_owned, project=project
)
db_request.POST = MultiDict(
{
"organization": str(organization.id),
"confirm_transfer_organization_project_name": project.name,
}
)
db_request.flags = pretend.stub(enabled=pretend.call_recorder(lambda *a: False))
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.user = UserFactory.create()
OrganizationRoleFactory.create(
organization=organization, user=db_request.user, role_name="Owner"
)
OrganizationRoleFactory.create(
organization=project.organization, user=db_request.user, role_name="Owner"
)
RoleFactory.create(project=project, user=db_request.user, role_name="Owner")
user_organizations = pretend.call_recorder(
lambda *a, **kw: {
"organizations_managed": [organization_managed],
"organizations_owned": [organization_owned, organization],
"organizations_billing": [],
}
)
monkeypatch.setattr(org_views, "user_organizations", user_organizations)
transfer_organization_project_form_class = pretend.call_recorder(
views.TransferOrganizationProjectForm
)
monkeypatch.setattr(
org_views,
"TransferOrganizationProjectForm",
transfer_organization_project_form_class,
)
send_organization_project_removed_email = pretend.call_recorder(
lambda req, user, **k: None
)
monkeypatch.setattr(
org_views,
"send_organization_project_removed_email",
send_organization_project_removed_email,
)
send_organization_project_added_email = pretend.call_recorder(
lambda req, user, **k: None
)
monkeypatch.setattr(
org_views,
"send_organization_project_added_email",
send_organization_project_added_email,
)
result = org_views.transfer_organization_project(project, db_request)
assert db_request.session.flash.calls == [
pretend.call("Transferred the project 'foo' to 'baz'", queue="success")
]
assert db_request.route_path.calls == [
pretend.call("manage.project.settings", project_name="foo")
]
assert transfer_organization_project_form_class.calls == [
pretend.call(
db_request.POST,
organization_choices={organization_managed, organization},
)
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
assert send_organization_project_removed_email.calls == [
pretend.call(
db_request,
{db_request.user},
organization_name=project.organization.name,
project_name=project.name,
)
]
assert send_organization_project_added_email.calls == [
pretend.call(
db_request,
{db_request.user},
organization_name=organization.name,
project_name=project.name,
)
]
def test_delete_project_no_confirm(self):
project = pretend.stub(normalized_name="foo")
request = pretend.stub(
POST={},
flags=pretend.stub(enabled=pretend.call_recorder(lambda *a: False)),
session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
route_path=lambda *a, **kw: "/foo/bar/",
)
with pytest.raises(HTTPSeeOther) as exc:
views.delete_project(project, request)
assert exc.value.status_code == 303
assert exc.value.headers["Location"] == "/foo/bar/"
assert request.flags.enabled.calls == [
pretend.call(AdminFlagValue.DISALLOW_DELETION)
]
assert request.session.flash.calls == [
pretend.call("Confirm the request", queue="error")
]
def test_delete_project_wrong_confirm(self):
project = pretend.stub(name="foo", normalized_name="foo")
request = pretend.stub(
POST={"confirm_project_name": "FOO"},
flags=pretend.stub(enabled=pretend.call_recorder(lambda *a: False)),
session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
route_path=lambda *a, **kw: "/foo/bar/",
)
with pytest.raises(HTTPSeeOther) as exc:
views.delete_project(project, request)
assert exc.value.status_code == 303
assert exc.value.headers["Location"] == "/foo/bar/"
assert request.flags.enabled.calls == [
pretend.call(AdminFlagValue.DISALLOW_DELETION)
]
assert request.session.flash.calls == [
pretend.call(
"Could not delete project - 'FOO' is not the same as 'foo'",
queue="error",
)
]
def test_delete_project_disallow_deletion(self, pyramid_request):
project = pretend.stub(name="foo", normalized_name="foo")
pyramid_request.flags = pretend.stub(
enabled=pretend.call_recorder(lambda *a: True)
)
pyramid_request.route_path = pretend.call_recorder(
lambda *a, **kw: "/the-redirect"
)
pyramid_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
result = views.delete_project(project, pyramid_request)
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
assert pyramid_request.flags.enabled.calls == [
pretend.call(AdminFlagValue.DISALLOW_DELETION)
]
assert pyramid_request.session.flash.calls == [
pretend.call(
(
"Project deletion temporarily disabled. "
"See https://pypi.org/help#admin-intervention for details."
),
queue="error",
)
]
assert pyramid_request.route_path.calls == [
pretend.call("manage.project.settings", project_name="foo")
]
def test_get_user_role_in_project_single_role_owner(self, db_request):
project = ProjectFactory.create(name="foo")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None),
)
db_request.user = UserFactory.create()
RoleFactory(user=db_request.user, project=project, role_name="Owner")
res = views.get_user_role_in_project(project, db_request.user, db_request)
assert res == "Owner"
def test_get_user_role_in_project_single_role_maintainer(self, db_request):
project = ProjectFactory.create(name="foo")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None),
)
db_request.user = UserFactory.create()
RoleFactory(user=db_request.user, project=project, role_name="Maintainer")
res = views.get_user_role_in_project(project, db_request.user, db_request)
assert res == "Maintainer"
def test_get_user_role_in_project_org_owner(self, db_request):
organization = OrganizationFactory.create(name="baz")
project = ProjectFactory.create(name="foo")
OrganizationProjectFactory.create(organization=organization, project=project)
db_request.user = UserFactory.create()
OrganizationRoleFactory.create(
organization=organization, user=db_request.user, role_name="Owner"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None),
)
res = views.get_user_role_in_project(project, db_request.user, db_request)
assert res == "Owner"
def test_get_user_role_in_project_team_project_owner(self, db_request):
organization = OrganizationFactory.create(name="baz")
team = TeamFactory(organization=organization)
project = ProjectFactory.create(name="foo")
OrganizationProjectFactory.create(organization=organization, project=project)
db_request.user = UserFactory.create()
OrganizationRoleFactory.create(
organization=organization,
user=db_request.user,
role_name=OrganizationRoleType.Member,
)
TeamRoleFactory.create(team=team, user=db_request.user)
TeamProjectRoleFactory.create(
team=team,
project=project,
role_name=TeamProjectRoleType.Owner,
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None),
)
res = views.get_user_role_in_project(project, db_request.user, db_request)
assert res == "Owner"
def test_delete_project(self, monkeypatch, db_request):
project = ProjectFactory.create(name="foo")
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.POST["confirm_project_name"] = project.name
db_request.user = UserFactory.create()
RoleFactory.create(project=project, user=db_request.user, role_name="Owner")
get_user_role_in_project = pretend.call_recorder(
lambda project, user, req: "Owner"
)
monkeypatch.setattr(views, "get_user_role_in_project", get_user_role_in_project)
send_removed_project_email = pretend.call_recorder(lambda req, user, **k: None)
monkeypatch.setattr(
views, "send_removed_project_email", send_removed_project_email
)
result = views.delete_project(project, db_request)
assert db_request.session.flash.calls == [
pretend.call("Deleted the project 'foo'", queue="success")
]
assert db_request.route_path.calls == [pretend.call("manage.projects")]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
assert get_user_role_in_project.calls == [
pretend.call(project, db_request.user, db_request),
pretend.call(project, db_request.user, db_request),
]
assert send_removed_project_email.calls == [
pretend.call(
db_request,
db_request.user,
project_name=project.name,
submitter_name=db_request.user.username,
submitter_role="Owner",
recipient_role="Owner",
)
]
assert not (db_request.db.query(Project).filter(Project.name == "foo").count())
def test_delete_project_sends_emails_to_owners(self, monkeypatch, db_request):
organization = OrganizationFactory.create(name="baz")
project = ProjectFactory.create(name="foo")
OrganizationProjectFactory.create(organization=organization, project=project)
db_request.user = UserFactory.create(username="owner1")
OrganizationRoleFactory.create(
organization=organization,
user=db_request.user,
role_name=OrganizationRoleType.Owner,
)
# Add a second Owner
owner2 = UserFactory.create(username="owner2")
OrganizationRoleFactory.create(
organization=organization,
user=owner2,
role_name=OrganizationRoleType.Owner,
)
# Add a Manager, who won't receive the email
manager = UserFactory.create()
OrganizationRoleFactory.create(
organization=organization,
user=manager,
role_name=OrganizationRoleType.Manager,
)
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.POST["confirm_project_name"] = project.name
get_user_role_in_project = pretend.call_recorder(
lambda project, user, req: "Owner"
)
monkeypatch.setattr(views, "get_user_role_in_project", get_user_role_in_project)
send_removed_project_email = pretend.call_recorder(lambda req, user, **k: None)
monkeypatch.setattr(
views, "send_removed_project_email", send_removed_project_email
)
result = views.delete_project(project, db_request)
assert db_request.session.flash.calls == [
pretend.call("Deleted the project 'foo'", queue="success")
]
assert db_request.route_path.calls == [pretend.call("manage.projects")]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
assert get_user_role_in_project.calls == [
pretend.call(project, db_request.user, db_request),
pretend.call(project, db_request.user, db_request),
pretend.call(project, owner2, db_request),
]
assert send_removed_project_email.calls == [
pretend.call(
db_request,
db_request.user,
project_name=project.name,
submitter_name=db_request.user.username,
submitter_role="Owner",
recipient_role="Owner",
),
pretend.call(
db_request,
owner2,
project_name=project.name,
submitter_name=db_request.user.username,
submitter_role="Owner",
recipient_role="Owner",
),
]
assert not (db_request.db.query(Project).filter(Project.name == "foo").count())
| TestManageProjectSettings |
python | getsentry__sentry | src/sentry/hybridcloud/rpc/caching/impl.py | {
"start": 2164,
"end": 2519
} | class ____:
"""
'Exposes' the underlying caching and its versioning system so that tests can fully validate the
concurrent consistency of the implementation. Not intended as a public interface.
"""
get_cache = staticmethod(_get_cache)
delete_cache = staticmethod(_delete_cache)
set_cache = staticmethod(_set_cache)
| CacheBackend |
python | pallets__werkzeug | examples/plnt/database.py | {
"start": 1310,
"end": 1623
} | class ____:
query = session.query_property()
def __init__(self, name, url, feed_url, description=""):
self.name = name
self.url = url
self.feed_url = feed_url
self.description = description
def __repr__(self):
return f"<{type(self).__name__} {self.url!r}>"
| Blog |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/dag_run.py | {
"start": 3194,
"end": 5291
} | class ____(StrictBaseModel):
"""Trigger DAG Run Serializer for POST body."""
dag_run_id: str | None = None
data_interval_start: AwareDatetime | None = None
data_interval_end: AwareDatetime | None = None
logical_date: AwareDatetime | None
run_after: datetime | None = Field(default_factory=timezone.utcnow)
conf: dict | None = Field(default_factory=dict)
note: str | None = None
partition_key: str | None = None
@model_validator(mode="after")
def check_data_intervals(self):
if (self.data_interval_start is None) != (self.data_interval_end is None):
raise ValueError(
"Either both data_interval_start and data_interval_end must be provided or both must be None"
)
return self
def validate_context(self, dag: SerializedDAG) -> dict:
coerced_logical_date = timezone.coerce_datetime(self.logical_date)
run_after = self.run_after or timezone.utcnow()
data_interval = None
if coerced_logical_date:
if self.data_interval_start and self.data_interval_end:
data_interval = DataInterval(
start=timezone.coerce_datetime(self.data_interval_start),
end=timezone.coerce_datetime(self.data_interval_end),
)
else:
data_interval = dag.timetable.infer_manual_data_interval(
run_after=coerced_logical_date or timezone.coerce_datetime(run_after)
)
run_after = data_interval.end
run_id = self.dag_run_id or dag.timetable.generate_run_id(
run_type=DagRunType.MANUAL,
run_after=timezone.coerce_datetime(run_after),
data_interval=data_interval,
)
return {
"run_id": run_id,
"logical_date": coerced_logical_date,
"data_interval": data_interval,
"run_after": run_after,
"conf": self.conf,
"note": self.note,
"partition_key": self.partition_key,
}
| TriggerDAGRunPostBody |
python | pytorch__pytorch | torch/ao/quantization/observer.py | {
"start": 63096,
"end": 63521
} | class ____(Enum):
"""Enum that indicate whether zero_point is in integer domain or floating point domain
integer domain: quantized_val = (float_val / scale) (integer) + zero_point (integer)
float domain: quantized_val = (float_val - (zero_point (float) - scale * mid_point)) / scale
none domain: quantized_val = (float_val / scale)
"""
INT = auto()
FLOAT = auto()
NONE = auto()
| ZeroPointDomain |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-gitbook/llama_index/readers/gitbook/base.py | {
"start": 273,
"end": 3727
} | class ____(BaseReader):
"""
Simple gitbook reader.
Convert each gitbook page into Document used by LlamaIndex.
Args:
api_token (str): Gitbook API Token.
api_url (str): Gitbook API Endpoint.
"""
def __init__(self, api_token: str, api_url: str = None) -> None:
"""Initialize with parameters."""
self.client = GitbookClient(api_token, api_url)
def load_data(
self,
space_id: str,
metadata_names: Optional[List[str]] = None,
show_progress=False,
) -> List[Document]:
"""
Load data from the input directory.
Args:
space_id (str): Gitbook space id
metadata_names (Optional[List[str]]): names of the fields to be added
to the metadata attribute of the Document.
only 'path', 'title', 'description', 'parent' are available
Defaults to None
show_progress (bool, optional): Show progress bar. Defaults to False
Returns:
List[Document]: A list of documents.
"""
if metadata_names:
invalid_fields = set(metadata_names) - VALID_METADATA_FIELDS
if invalid_fields:
raise ValueError(
f"Invalid metadata fields: {', '.join(invalid_fields)}"
)
documents = []
pages = self.client.list_pages(space_id)
if show_progress:
from tqdm import tqdm
iterator = tqdm(pages, desc="Downloading pages")
else:
iterator = pages
for page in iterator:
id = page.get("id")
content = self.client.get_page_markdown(space_id, id)
if not content:
print(f"Warning: No content found for page ID {id}. Skipping...")
continue
if metadata_names is None:
documents.append(
Document(text=content, id_=id, metadata={"path": page.get("path")})
)
else:
try:
metadata = {name: page.get(name) for name in metadata_names}
except KeyError as err:
raise ValueError(
f"{err.args[0]} field is not available. Choose from {', '.join(VALID_METADATA_FIELDS)}"
) from err
documents.append(Document(text=content, id_=id, metadata=metadata))
return documents
if __name__ == "__main__":
import os
import sys
def load_env_file():
"""Load environment variables from .env file."""
current_dir = os.path.dirname(os.path.abspath(__file__))
env_path = os.path.join(current_dir, "../../../.env")
if os.path.exists(env_path):
with open(env_path) as f:
for line in f:
line = line.strip()
if line and not line.startswith("#"):
key, value = line.split("=", 1)
os.environ[key.strip()] = value.strip()
load_env_file()
api_token = os.getenv("GITBOOK_API_TOKEN")
space_id = os.getenv("GITBOOK_SPACE_ID")
if not api_token or not space_id:
print("Error: GITBOOK_API_TOKEN and GITBOOK_SPACE_ID must be set in .env file")
sys.exit(1)
reader = SimpleGitbookReader(api_token)
print(reader.load_data(space_id, show_progress=True))
| SimpleGitbookReader |
python | explosion__spaCy | spacy/lang/pt/__init__.py | {
"start": 282,
"end": 539
} | class ____(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
infixes = TOKENIZER_INFIXES
prefixes = TOKENIZER_PREFIXES
lex_attr_getters = LEX_ATTRS
syntax_iterators = SYNTAX_ITERATORS
stop_words = STOP_WORDS
| PortugueseDefaults |
python | ansible__ansible | lib/ansible/utils/version.py | {
"start": 1987,
"end": 3107
} | class ____:
"""Class to easily allow comparing numbers
Largely this exists to make comparing an integer and a string on py3
so that it works like py2.
"""
def __init__(self, specifier):
self.specifier = int(specifier)
def __repr__(self):
return repr(self.specifier)
def __eq__(self, other):
if isinstance(other, _Numeric):
return self.specifier == other.specifier
elif isinstance(other, int):
return self.specifier == other
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, _Numeric):
return self.specifier < other.specifier
elif isinstance(other, int):
return self.specifier < other
elif isinstance(other, _Alpha):
return True
raise ValueError
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __gt__(self, other):
return not self.__le__(other)
def __ge__(self, other):
return not self.__lt__(other)
| _Numeric |
python | imageio__imageio | imageio/plugins/_bsdf.py | {
"start": 31907,
"end": 32753
} | class ____(Extension):
name = "ndarray"
def __init__(self):
if "numpy" in sys.modules:
import numpy as np
self.cls = np.ndarray
def match(self, s, v): # pragma: no cover - e.g. work for nd arrays in JS
return hasattr(v, "shape") and hasattr(v, "dtype") and hasattr(v, "tobytes")
def encode(self, s, v):
return dict(shape=v.shape, dtype=text_type(v.dtype), data=v.tobytes())
def decode(self, s, v):
try:
import numpy as np
except ImportError: # pragma: no cover
return v
a = np.frombuffer(v["data"], dtype=v["dtype"])
a.shape = v["shape"]
return a
standard_extensions = [ComplexExtension, NDArrayExtension]
if __name__ == "__main__":
# Invoke CLI
import bsdf_cli
bsdf_cli.main()
| NDArrayExtension |
python | openai__openai-python | src/openai/resources/responses/input_items.py | {
"start": 8531,
"end": 8774
} | class ____:
def __init__(self, input_items: AsyncInputItems) -> None:
self._input_items = input_items
self.list = async_to_streamed_response_wrapper(
input_items.list,
)
| AsyncInputItemsWithStreamingResponse |
python | huggingface__transformers | src/transformers/models/oneformer/image_processing_oneformer_fast.py | {
"start": 10282,
"end": 40076
} | class ____(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"shortest_edge": 800, "longest_edge": 1333}
crop_size = None
do_resize = True
do_rescale = True
do_normalize = True
default_to_square = False
do_center_crop = False
do_convert_rgb = True
rescale_factor = 1 / 255
ignore_index = None
do_reduce_labels = False
repo_path = "shi-labs/oneformer_demo"
class_info_file = None
num_text = None
num_labels = None
valid_kwargs = OneFormerImageProcessorKwargs
model_input_names = ["pixel_values", "pixel_mask", "task_inputs"]
def __init__(self, **kwargs: Unpack[OneFormerImageProcessorKwargs]):
super().__init__(**kwargs)
if self.class_info_file:
self.metadata = prepare_metadata(load_metadata(self.repo_path, self.class_info_file))
@auto_docstring
def preprocess(
self,
images: ImageInput,
task_inputs: Optional[list[str]] = None,
segmentation_maps: Optional[ImageInput] = None,
instance_id_to_semantic_id: Optional[Union[list[dict[int, int]], dict[int, int]]] = None,
**kwargs: Unpack[OneFormerImageProcessorKwargs],
) -> BatchFeature:
r"""
task_inputs (`list[str]`, *optional*):
List of tasks (`"panoptic"`, `"instance"`, `"semantic"`) for each image in the batch.
segmentation_maps (`ImageInput`, *optional*):
The segmentation maps.
instance_id_to_semantic_id (`Union[list[dict[int, int]], dict[int, int]]`, *optional*):
A mapping from instance IDs to semantic IDs.
"""
return super().preprocess(
images,
task_inputs,
segmentation_maps,
instance_id_to_semantic_id,
**kwargs,
)
def _preprocess_image_like_inputs(
self,
images: ImageInput,
task_inputs: Optional[list[str]],
segmentation_maps: ImageInput,
instance_id_to_semantic_id: Optional[Union[list[dict[int, int]], dict[int, int]]],
do_convert_rgb: bool,
input_data_format: ChannelDimension,
device: Optional[Union[str, "torch.device"]] = None,
**kwargs: Unpack[OneFormerImageProcessorKwargs],
) -> BatchFeature:
"""
Preprocess image-like inputs.
To be overridden by subclasses when image-like inputs other than images should be processed.
It can be used for segmentation maps, depth maps, etc.
"""
# Prepare input images
images = self._prepare_image_like_inputs(
images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
)
if segmentation_maps is not None:
segmentation_maps = self._prepare_image_like_inputs(
images=segmentation_maps,
expected_ndims=2,
do_convert_rgb=False,
input_data_format=ChannelDimension.FIRST,
)
return self._preprocess(images, task_inputs, segmentation_maps, instance_id_to_semantic_id, **kwargs)
def _preprocess(
self,
images: list["torch.Tensor"],
task_inputs: Optional[list[str]],
segmentation_maps: list["torch.Tensor"],
instance_id_to_semantic_id: Optional[Union[list[dict[int, int]], dict[int, int]]],
do_resize: bool,
size: SizeDict,
interpolation: Optional["F.InterpolationMode"],
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
ignore_index: Optional[int],
do_reduce_labels: Optional[bool],
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_resize:
stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation)
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
processed_segmentation_maps = None
if segmentation_maps is not None:
grouped_segmentation_maps, grouped_segmentation_maps_index = group_images_by_shape(
segmentation_maps, disable_grouping=disable_grouping
)
processed_segmentation_maps_grouped = {}
for shape, stacked_segmentation_maps in grouped_segmentation_maps.items():
if do_resize:
stacked_segmentation_maps = self.resize(
stacked_segmentation_maps, size=size, interpolation=F.InterpolationMode.NEAREST_EXACT
)
processed_segmentation_maps_grouped[shape] = stacked_segmentation_maps
processed_segmentation_maps = reorder_images(
processed_segmentation_maps_grouped, grouped_segmentation_maps_index
)
encoded_inputs = self._encode_inputs_fast(
processed_images,
task_inputs,
segmentation_maps=processed_segmentation_maps,
instance_id_to_semantic_id=instance_id_to_semantic_id,
ignore_index=ignore_index,
do_reduce_labels=do_reduce_labels,
return_tensors=return_tensors,
)
return encoded_inputs
def _pad_image_fast(
self,
image: "torch.Tensor",
output_size: tuple[int, int],
constant_values: float = 0,
) -> "torch.Tensor":
"""
Pad an image with zeros to the given size using torch operations.
Args:
image (`torch.Tensor`):
Image tensor in channel-first format (C, H, W).
output_size (`tuple[int, int]`):
Target output size (height, width).
constant_values (`float`, *optional*, defaults to 0):
The value to use for padding.
Returns:
`torch.Tensor`: The padded image.
"""
input_height, input_width = image.shape[1], image.shape[2]
output_height, output_width = output_size
pad_bottom = output_height - input_height
pad_right = output_width - input_width
padded_image = F.pad(image, padding=[0, 0, pad_right, pad_bottom], fill=constant_values)
return padded_image
def pad(
self,
images: list["torch.Tensor"],
return_pixel_mask: bool = True,
return_tensors: Optional[Union[str, TensorType]] = None,
) -> BatchFeature:
"""
Pad a batch of images to the same size using torch operations.
Args:
images (`List[torch.Tensor]`):
List of image tensors in channel-first format.
return_pixel_mask (`bool`, *optional*, defaults to `True`):
Whether to return pixel masks.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return.
Returns:
`BatchFeature`: Padded images and optional pixel masks.
"""
outputs = super().pad(images, return_mask=return_pixel_mask)
padded_images = outputs[0] if return_pixel_mask else outputs
pixel_masks = outputs[1] if return_pixel_mask else None
if return_tensors:
padded_images = torch.stack(padded_images, dim=0)
if return_pixel_mask:
pixel_masks = torch.stack(pixel_masks, dim=0)
data = {"pixel_values": padded_images}
if return_pixel_mask:
data["pixel_mask"] = pixel_masks
return BatchFeature(data=data, tensor_type=return_tensors)
def convert_segmentation_map_to_binary_masks(
self,
segmentation_map: "torch.Tensor",
instance_id_to_semantic_id: Optional[dict[int, int]] = None,
ignore_index: Optional[int] = None,
do_reduce_labels: bool = False,
):
return convert_segmentation_map_to_binary_masks_fast(
segmentation_map=segmentation_map,
instance_id_to_semantic_id=instance_id_to_semantic_id,
ignore_index=ignore_index,
do_reduce_labels=do_reduce_labels,
)
def get_semantic_annotations(self, label, num_class_obj):
annotation_classes = label["classes"]
annotation_masks = label["masks"]
texts = ["a semantic photo"] * self.num_text
classes = []
masks = []
for idx in range(len(annotation_classes)):
class_id = annotation_classes[idx]
mask = annotation_masks[idx]
if not torch.all(mask == 0):
if class_id not in classes:
cls_name = self.metadata[str(class_id.cpu().item())]
classes.append(class_id)
masks.append(mask)
num_class_obj[cls_name] += 1
else:
idx = classes.index(class_id)
masks[idx] += mask
masks[idx] = torch.clamp(masks[idx], 0, 1)
num = 0
for i, cls_name in enumerate(self.metadata["class_names"]):
if num_class_obj[cls_name] > 0:
for _ in range(num_class_obj[cls_name]):
if num >= len(texts):
break
texts[num] = f"a photo with a {cls_name}"
num += 1
classes = torch.stack(classes)
masks = torch.stack(masks)
return classes, masks, texts
def get_instance_annotations(self, label, num_class_obj):
annotation_classes = label["classes"]
annotation_masks = label["masks"]
texts = ["an instance photo"] * self.num_text
classes = []
masks = []
for idx in range(len(annotation_classes)):
class_id = annotation_classes[idx]
mask = annotation_masks[idx]
if class_id in self.metadata["thing_ids"]:
if not torch.all(mask == 0):
cls_name = self.metadata[str(class_id.cpu().item())]
classes.append(class_id)
masks.append(mask)
num_class_obj[cls_name] += 1
num = 0
for i, cls_name in enumerate(self.metadata["class_names"]):
if num_class_obj[cls_name] > 0:
for _ in range(num_class_obj[cls_name]):
if num >= len(texts):
break
texts[num] = f"a photo with a {cls_name}"
num += 1
classes = torch.stack(classes)
masks = torch.stack(masks)
return classes, masks, texts
def get_panoptic_annotations(self, label, num_class_obj):
annotation_classes = label["classes"]
annotation_masks = label["masks"]
texts = ["an panoptic photo"] * self.num_text
classes = []
masks = []
for idx in range(len(annotation_classes)):
class_id = annotation_classes[idx]
mask = annotation_masks[idx] if hasattr(annotation_masks[idx], "data") else annotation_masks[idx]
if not torch.all(mask == 0):
cls_name = self.metadata[str(class_id.cpu().item())]
classes.append(class_id)
masks.append(mask)
num_class_obj[cls_name] += 1
num = 0
for i, cls_name in enumerate(self.metadata["class_names"]):
if num_class_obj[cls_name] > 0:
for _ in range(num_class_obj[cls_name]):
if num >= len(texts):
break
texts[num] = f"a photo with a {cls_name}"
num += 1
classes = torch.stack(classes)
masks = torch.stack(masks)
return classes, masks, texts
def _encode_inputs_fast(
self,
pixel_values_list: list["torch.Tensor"],
task_inputs: Optional[list[str]] = None,
segmentation_maps: Optional[list["torch.Tensor"]] = None,
instance_id_to_semantic_id: Optional[Union[list[dict[int, int]], dict[int, int]]] = None,
ignore_index: Optional[int] = None,
do_reduce_labels: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
) -> BatchFeature:
if task_inputs is None:
task_inputs = ["panoptic"]
pad_size = get_max_height_width(pixel_values_list)
encoded_inputs = self.pad(pixel_values_list, return_tensors=return_tensors)
annotations = None
if segmentation_maps is not None:
annotations = []
for idx, segmentation_map in enumerate(segmentation_maps):
# Use instance2class_id mapping per image
if isinstance(instance_id_to_semantic_id, list):
instance_id = instance_id_to_semantic_id[idx]
else:
instance_id = instance_id_to_semantic_id
# Convert segmentation map to binary masks using torch operations
masks, classes = self.convert_segmentation_map_to_binary_masks(
segmentation_map,
instance_id,
ignore_index=ignore_index,
do_reduce_labels=do_reduce_labels,
)
annotations.append({"masks": masks, "classes": classes})
if annotations is not None:
mask_labels = []
class_labels = []
text_inputs = []
num_class_obj = dict.fromkeys(self.metadata["class_names"], 0)
for i, label in enumerate(annotations):
task = task_inputs[i]
if task == "semantic":
classes, masks, texts = self.get_semantic_annotations(label, num_class_obj)
elif task == "instance":
classes, masks, texts = self.get_instance_annotations(label, num_class_obj)
elif task == "panoptic":
classes, masks, texts = self.get_panoptic_annotations(label, num_class_obj)
else:
raise ValueError(f"{task} was not expected, expected `semantic`, `instance` or `panoptic`")
# Pad masks to max size using torch operations
padded_masks = [
self._pad_image_fast(image=mask, output_size=pad_size, constant_values=ignore_index)
for mask in masks
]
padded_masks = torch.cat(padded_masks, dim=0)
mask_labels.append(padded_masks)
class_labels.append(classes)
text_inputs.append(texts)
encoded_inputs["mask_labels"] = mask_labels
encoded_inputs["class_labels"] = class_labels
encoded_inputs["text_inputs"] = text_inputs
encoded_inputs["task_inputs"] = [f"the task is {task_input}" for task_input in task_inputs]
return encoded_inputs
def post_process_semantic_segmentation(
self, outputs, target_sizes: Optional[list[tuple[int, int]]] = None
) -> "torch.Tensor":
"""
Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports
PyTorch.
Args:
outputs ([`MaskFormerForInstanceSegmentation`]):
Raw outputs of the model.
target_sizes (`List[Tuple[int, int]]`, *optional*):
List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction. If left to None, predictions will not be resized.
Returns:
`List[torch.Tensor]`:
A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
`torch.Tensor` correspond to a semantic class id.
"""
class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1]
masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width]
# Remove the null class `[..., :-1]`
masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
# Semantic segmentation logits of shape (batch_size, num_classes, height, width)
segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs)
batch_size = class_queries_logits.shape[0]
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if batch_size != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
semantic_segmentation = []
for idx in range(batch_size):
resized_logits = F.resize(
segmentation[idx].unsqueeze(dim=0),
size=target_sizes[idx],
interpolation=F.InterpolationMode.BILINEAR,
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = segmentation.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
def post_process_instance_segmentation(
self,
outputs,
task_type: str = "instance",
is_demo: bool = True,
threshold: float = 0.5,
mask_threshold: float = 0.5,
overlap_mask_area_threshold: float = 0.8,
target_sizes: Optional[list[tuple[int, int]]] = None,
return_coco_annotation: Optional[bool] = False,
):
"""
Converts the output of [`OneFormerForUniversalSegmentationOutput`] into image instance segmentation
predictions. Only supports PyTorch.
Args:
outputs ([`OneFormerForUniversalSegmentationOutput`]):
The outputs from [`OneFormerForUniversalSegmentationOutput`].
task_type (`str`, *optional*, defaults to "instance"):
The post processing depends on the task token input. If the `task_type` is "panoptic", we need to
ignore the stuff predictions.
is_demo (`bool`, *optional)*, defaults to `True`):
Whether the model is in demo mode. If true, use threshold to predict final masks.
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
target_sizes (`List[Tuple]`, *optional*):
List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction in batch. If left to None, predictions will not be
resized.
return_coco_annotation (`bool`, *optional)*, defaults to `False`):
Whether to return predictions in COCO format.
Returns:
`List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set
to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized
to the corresponding `target_sizes` entry.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- an integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
Multiple instances of the same class / label were fused and assigned a single `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
"""
class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1]
masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width]
device = masks_queries_logits.device
batch_size = class_queries_logits.shape[0]
num_queries = class_queries_logits.shape[1]
num_classes = class_queries_logits.shape[-1] - 1
# Loop over items in batch size
results: list[dict[str, torch.Tensor]] = []
for i in range(batch_size):
# [Q, K]
scores = nn.functional.softmax(class_queries_logits[i], dim=-1)[:, :-1]
labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1)
# scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.num_queries, sorted=False)
scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False)
labels_per_image = labels[topk_indices]
topk_indices = torch.div(topk_indices, num_classes, rounding_mode="floor")
# mask_pred = mask_pred.unsqueeze(1).repeat(1, self.sem_seg_head.num_classes, 1).flatten(0, 1)
mask_pred = masks_queries_logits[i][topk_indices]
# Only consider scores with confidence over [threshold] for demo
if is_demo:
keep = scores_per_image > threshold
scores_per_image = scores_per_image[keep]
labels_per_image = labels_per_image[keep]
mask_pred = mask_pred[keep]
# if this is panoptic segmentation, we only keep the "thing" classes
if task_type == "panoptic":
keep = torch.zeros_like(scores_per_image).bool()
for j, lab in enumerate(labels_per_image):
keep[j] = lab in self.metadata["thing_ids"]
scores_per_image = scores_per_image[keep]
labels_per_image = labels_per_image[keep]
mask_pred = mask_pred[keep]
if mask_pred.shape[0] <= 0:
height, width = target_sizes[i] if target_sizes is not None else mask_pred.shape[1:]
segmentation = torch.zeros((height, width)) - 1
results.append({"segmentation": segmentation, "segments_info": []})
continue
if "ade20k" in self.class_info_file and not is_demo and "instance" in task_type:
for j in range(labels_per_image.shape[0]):
labels_per_image[j] = self.metadata["thing_ids"].index(labels_per_image[j].item())
# Get segmentation map and segment information of batch item
target_size = target_sizes[i] if target_sizes is not None else None
segmentation, segments = compute_segments(
mask_pred,
scores_per_image,
labels_per_image,
mask_threshold,
overlap_mask_area_threshold,
set(),
target_size,
)
# Return segmentation map in run-length encoding (RLE) format
if return_coco_annotation:
segmentation = convert_segmentation_to_rle(segmentation)
results.append({"segmentation": segmentation, "segments_info": segments})
return results
# Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.post_process_panoptic_segmentation
def post_process_panoptic_segmentation(
self,
outputs,
threshold: float = 0.5,
mask_threshold: float = 0.5,
overlap_mask_area_threshold: float = 0.8,
label_ids_to_fuse: Optional[set[int]] = None,
target_sizes: Optional[list[tuple[int, int]]] = None,
) -> list[dict]:
"""
Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image panoptic segmentation
predictions. Only supports PyTorch.
Args:
outputs ([`MaskFormerForInstanceSegmentationOutput`]):
The outputs from [`MaskFormerForInstanceSegmentation`].
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
label_ids_to_fuse (`Set[int]`, *optional*):
The labels in this state will have all their instances be fused together. For instance we could say
there can only be one sky in an image, but several persons, so the label ID for sky would be in that
set, but not the one for person.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction in batch. If left to None, predictions will not be
resized.
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set
to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized
to the corresponding `target_sizes` entry.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- an integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
Multiple instances of the same class / label were fused and assigned a single `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
"""
if label_ids_to_fuse is None:
logger.warning("`label_ids_to_fuse` unset. No instance will be fused.")
label_ids_to_fuse = set()
class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1]
masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width]
batch_size = class_queries_logits.shape[0]
num_labels = class_queries_logits.shape[-1] - 1
mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
# Predicted label and score of each query (batch_size, num_queries)
pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
# Loop over items in batch size
results: list[dict[str, TensorType]] = []
for i in range(batch_size):
mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(
mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels
)
# No mask found
if mask_probs_item.shape[0] <= 0:
height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
segmentation = torch.zeros((height, width)) - 1
results.append({"segmentation": segmentation, "segments_info": []})
continue
# Get segmentation map and segment information of batch item
target_size = target_sizes[i] if target_sizes is not None else None
segmentation, segments = compute_segments(
mask_probs=mask_probs_item,
pred_scores=pred_scores_item,
pred_labels=pred_labels_item,
mask_threshold=mask_threshold,
overlap_mask_area_threshold=overlap_mask_area_threshold,
label_ids_to_fuse=label_ids_to_fuse,
target_size=target_size,
)
results.append({"segmentation": segmentation, "segments_info": segments})
return results
__all__ = ["OneFormerImageProcessorFast"]
| OneFormerImageProcessorFast |
python | RaRe-Technologies__gensim | gensim/corpora/textcorpus.py | {
"start": 15917,
"end": 23920
} | class ____(TextCorpus):
"""Read documents recursively from a directory.
Each file/line (depends on `lines_are_documents`) is interpreted as a plain text document.
"""
def __init__(self, input, dictionary=None, metadata=False, min_depth=0, max_depth=None,
pattern=None, exclude_pattern=None, lines_are_documents=False, encoding='utf-8', **kwargs):
"""
Parameters
----------
input : str
Path to input file/folder.
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`, optional
If a dictionary is provided, it will not be updated with the given corpus on initialization.
If None - new dictionary will be built for the given corpus.
If `input` is None, the dictionary will remain uninitialized.
metadata : bool, optional
If True - yield metadata with each document.
min_depth : int, optional
Minimum depth in directory tree at which to begin searching for files.
max_depth : int, optional
Max depth in directory tree at which files will no longer be considered.
If None - not limited.
pattern : str, optional
Regex to use for file name inclusion, all those files *not* matching this pattern will be ignored.
exclude_pattern : str, optional
Regex to use for file name exclusion, all files matching this pattern will be ignored.
lines_are_documents : bool, optional
If True - each line is considered a document, otherwise - each file is one document.
encoding : str, optional
Encoding used to read the specified file or files in the specified directory.
kwargs: keyword arguments passed through to the `TextCorpus` constructor.
See :meth:`gemsim.corpora.textcorpus.TextCorpus.__init__` docstring for more details on these.
"""
self._min_depth = min_depth
self._max_depth = sys.maxsize if max_depth is None else max_depth
self.pattern = pattern
self.exclude_pattern = exclude_pattern
self.lines_are_documents = lines_are_documents
self.encoding = encoding
super(TextDirectoryCorpus, self).__init__(input, dictionary, metadata, **kwargs)
@property
def lines_are_documents(self):
return self._lines_are_documents
@lines_are_documents.setter
def lines_are_documents(self, lines_are_documents):
self._lines_are_documents = lines_are_documents
self.length = None
@property
def pattern(self):
return self._pattern
@pattern.setter
def pattern(self, pattern):
self._pattern = None if pattern is None else re.compile(pattern)
self.length = None
@property
def exclude_pattern(self):
return self._exclude_pattern
@exclude_pattern.setter
def exclude_pattern(self, pattern):
self._exclude_pattern = None if pattern is None else re.compile(pattern)
self.length = None
@property
def min_depth(self):
return self._min_depth
@min_depth.setter
def min_depth(self, min_depth):
self._min_depth = min_depth
self.length = None
@property
def max_depth(self):
return self._max_depth
@max_depth.setter
def max_depth(self, max_depth):
self._max_depth = max_depth
self.length = None
def iter_filepaths(self):
"""Generate (lazily) paths to each file in the directory structure within the specified range of depths.
If a filename pattern to match was given, further filter to only those filenames that match.
Yields
------
str
Path to file
"""
for depth, dirpath, dirnames, filenames in walk(self.input):
if self.min_depth <= depth <= self.max_depth:
if self.pattern is not None:
filenames = (n for n in filenames if self.pattern.match(n) is not None)
if self.exclude_pattern is not None:
filenames = (n for n in filenames if self.exclude_pattern.match(n) is None)
for name in filenames:
yield os.path.join(dirpath, name)
def getstream(self):
"""Generate documents from the underlying plain text collection (of one or more files).
Yields
------
str
One document (if lines_are_documents - True), otherwise - each file is one document.
"""
num_texts = 0
for path in self.iter_filepaths():
with open(path, 'rt', encoding=self.encoding) as f:
if self.lines_are_documents:
for line in f:
yield line.strip()
num_texts += 1
else:
yield f.read().strip()
num_texts += 1
self.length = num_texts
def __len__(self):
"""Get length of corpus.
Returns
-------
int
Length of corpus.
"""
if self.length is None:
self._cache_corpus_length()
return self.length
def _cache_corpus_length(self):
"""Calculate length of corpus and cache it to `self.length`."""
if not self.lines_are_documents:
self.length = sum(1 for _ in self.iter_filepaths())
else:
self.length = sum(1 for _ in self.getstream())
def walk(top, topdown=True, onerror=None, followlinks=False, depth=0):
"""Generate the file names in a directory tree by walking the tree either top-down or bottom-up.
For each directory in the tree rooted at directory top (including top itself), it yields a 4-tuple
(depth, dirpath, dirnames, filenames).
Parameters
----------
top : str
Root directory.
topdown : bool, optional
If True - you can modify dirnames in-place.
onerror : function, optional
Some function, will be called with one argument, an OSError instance.
It can report the error to continue with the walk, or raise the exception to abort the walk.
Note that the filename is available as the filename attribute of the exception object.
followlinks : bool, optional
If True - visit directories pointed to by symlinks, on systems that support them.
depth : int, optional
Height of file-tree, don't pass it manually (this used as accumulator for recursion).
Notes
-----
This is a mostly copied version of `os.walk` from the Python 2 source code.
The only difference is that it returns the depth in the directory tree structure
at which each yield is taking place.
Yields
------
(int, str, list of str, list of str)
Depth, current path, visited directories, visited non-directories.
See Also
--------
`os.walk documentation <https://docs.python.org/2/library/os.html#os.walk>`_
"""
islink, join, isdir = os.path.islink, os.path.join, os.path.isdir
try:
# Should be O(1) since it's probably just reading your filesystem journal
names = os.listdir(top)
except OSError as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
# O(n) where n = number of files in the directory
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield depth, top, dirs, nondirs
# Again O(n), where n = number of directories in the directory
for name in dirs:
new_path = join(top, name)
if followlinks or not islink(new_path):
# Generator so besides the recursive `walk()` call, no additional cost here.
for x in walk(new_path, topdown, onerror, followlinks, depth + 1):
yield x
if not topdown:
yield depth, top, dirs, nondirs
| TextDirectoryCorpus |
python | gevent__gevent | src/greentest/3.10/test_smtpd.py | {
"start": 37185,
"end": 41262
} | class ____(unittest.TestCase):
def setUp(self):
smtpd.socket = asyncore.socket = mock_socket
self.old_debugstream = smtpd.DEBUGSTREAM
self.debug = smtpd.DEBUGSTREAM = io.StringIO()
self.server = DummyServer((socket_helper.HOST, 0), ('b', 0),
enable_SMTPUTF8=True)
conn, addr = self.server.accept()
self.channel = smtpd.SMTPChannel(self.server, conn, addr,
enable_SMTPUTF8=True)
def tearDown(self):
asyncore.close_all()
asyncore.socket = smtpd.socket = socket
smtpd.DEBUGSTREAM = self.old_debugstream
def write_line(self, line):
self.channel.socket.queue_recv(line)
self.channel.handle_read()
def test_MAIL_command_accepts_SMTPUTF8_when_announced(self):
self.write_line(b'EHLO example')
self.write_line(
'MAIL from: <naïve@example.com> BODY=8BITMIME SMTPUTF8'.encode(
'utf-8')
)
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
def test_process_smtputf8_message(self):
self.write_line(b'EHLO example')
for mail_parameters in [b'', b'BODY=8BITMIME SMTPUTF8']:
self.write_line(b'MAIL from: <a@example> ' + mail_parameters)
self.assertEqual(self.channel.socket.last[0:3], b'250')
self.write_line(b'rcpt to:<b@example.com>')
self.assertEqual(self.channel.socket.last[0:3], b'250')
self.write_line(b'data')
self.assertEqual(self.channel.socket.last[0:3], b'354')
self.write_line(b'c\r\n.')
if mail_parameters == b'':
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
else:
self.assertEqual(self.channel.socket.last,
b'250 SMTPUTF8 message okish\r\n')
def test_utf8_data(self):
self.write_line(b'EHLO example')
self.write_line(
'MAIL From: naïve@examplé BODY=8BITMIME SMTPUTF8'.encode('utf-8'))
self.assertEqual(self.channel.socket.last[0:3], b'250')
self.write_line('RCPT To:späm@examplé'.encode('utf-8'))
self.assertEqual(self.channel.socket.last[0:3], b'250')
self.write_line(b'DATA')
self.assertEqual(self.channel.socket.last[0:3], b'354')
self.write_line(b'utf8 enriched text: \xc5\xbc\xc5\xba\xc4\x87')
self.write_line(b'.')
self.assertEqual(
self.channel.received_data,
b'utf8 enriched text: \xc5\xbc\xc5\xba\xc4\x87')
def test_MAIL_command_limit_extended_with_SIZE_and_SMTPUTF8(self):
self.write_line(b'ehlo example')
fill_len = (512 + 26 + 10) - len('mail from:<@example>')
self.write_line(b'MAIL from:<' +
b'a' * (fill_len + 1) +
b'@example>')
self.assertEqual(self.channel.socket.last,
b'500 Error: line too long\r\n')
self.write_line(b'MAIL from:<' +
b'a' * fill_len +
b'@example>')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
def test_multiple_emails_with_extended_command_length(self):
self.write_line(b'ehlo example')
fill_len = (512 + 26 + 10) - len('mail from:<@example>')
for char in [b'a', b'b', b'c']:
self.write_line(b'MAIL from:<' + char * fill_len + b'a@example>')
self.assertEqual(self.channel.socket.last[0:3], b'500')
self.write_line(b'MAIL from:<' + char * fill_len + b'@example>')
self.assertEqual(self.channel.socket.last[0:3], b'250')
self.write_line(b'rcpt to:<hans@example.com>')
self.assertEqual(self.channel.socket.last[0:3], b'250')
self.write_line(b'data')
self.assertEqual(self.channel.socket.last[0:3], b'354')
self.write_line(b'test\r\n.')
self.assertEqual(self.channel.socket.last[0:3], b'250')
| SMTPDChannelTestWithEnableSMTPUTF8True |
python | python-openxml__python-docx | tests/oxml/unitdata/text.py | {
"start": 847,
"end": 948
} | class ____(BaseBuilder):
__tag__ = "w:rPr"
__nspfxs__ = ("w",)
__attrs__ = ()
| CT_RPrBuilder |
python | rapidsai__cudf | python/cudf/cudf/core/udf/masked_typing.py | {
"start": 10078,
"end": 10429
} | class ____(AbstractTemplate):
def generic(self, args, kws):
if len(args) == 1 and isinstance(args[0], MaskedType):
return_type = self.context.resolve_function_type(
self.key, (args[0].value_type,), kws
).return_type
return nb_signature(MaskedType(return_type), args[0])
| MaskedScalarUnaryOp |
python | h5py__h5py | setup_build.py | {
"start": 2169,
"end": 8022
} | class ____(build_ext):
"""
Custom setuptools command which encapsulates api_gen pre-building,
Cython building, and C compilation.
Also handles making the Extension modules, since we can't rely on
NumPy being present in the main body of the setup script.
"""
@classmethod
def _make_extensions(cls, config, templ_config):
""" Produce a list of Extension instances which can be passed to
cythonize().
This is the point at which custom directories, MPI options, etc.
enter the build process.
"""
import numpy
settings = COMPILER_SETTINGS.copy()
settings['include_dirs'][:0] = config.hdf5_includedirs
settings['library_dirs'][:0] = config.hdf5_libdirs
settings['define_macros'].extend(config.hdf5_define_macros)
if config.msmpi:
settings['include_dirs'].extend(config.msmpi_inc_dirs)
settings['library_dirs'].extend(config.msmpi_lib_dirs)
settings['libraries'].append('msmpi')
try:
numpy_includes = numpy.get_include()
except AttributeError:
# if numpy is not installed get the headers from the .egg directory
import numpy.core
numpy_includes = os.path.join(os.path.dirname(numpy.core.__file__), 'include')
settings['include_dirs'] += [numpy_includes]
if config.mpi:
import mpi4py
settings['include_dirs'] += [mpi4py.get_include()]
# TODO: should this only be done on UNIX?
if os.name != 'nt':
settings['runtime_library_dirs'] = settings['library_dirs']
for module in ALL_MODULES:
raw_path = Path(localpath("h5py")).joinpath(module).resolve()
for ext in ['.pyx', '.pxd', '.pxi']:
if not (templ := raw_path.with_suffix(f'.templ{ext}')).exists():
continue
if (target := raw_path.with_suffix(ext)).exists():
current_text = target.read_text('utf-8')
else:
current_text = ""
new_text = tempita.sub(templ.read_text(), **templ_config)
if new_text != current_text:
target.write_text(new_text, 'utf-8')
settings['define_macros'].append(('NPY_TARGET_VERSION', 'NPY_1_21_API_VERSION'))
extensions = [cls._make_extension(m, settings) for m in MODULES]
if int(numpy.__version__.split('.')[0]) >= 2:
# Enable NumPy 2.0 C API for modules that require it.
# NUMPY2_MODULES will not be importable when NumPy 1.x is installed.
settings['define_macros'].append(('NPY_TARGET_VERSION', 'NPY_2_0_API_VERSION'))
extensions.extend(cls._make_extension(m, settings) for m in MODULES_NUMPY2_ONLY)
return extensions
@staticmethod
def _make_extension(module, settings):
sources = [localpath('h5py', module + '.pyx')] + EXTRA_SRC.get(module, [])
settings = copy.deepcopy(settings)
settings['libraries'] += EXTRA_LIBRARIES.get(module, [])
return Extension('h5py.' + module, sources, **settings)
def run(self):
""" Distutils calls this method to run the command """
from Cython import __version__ as cython_version
from Cython.Build import cythonize
import numpy
complex256_support = hasattr(numpy, 'complex256')
# This allows ccache to recognise the files when pip builds in a temp
# directory. It speeds up repeatedly running tests through tox with
# ccache configured (CC="ccache gcc"). It should have no effect if
# ccache is not in use.
os.environ['CCACHE_BASEDIR'] = op.dirname(op.abspath(__file__))
os.environ['CCACHE_NOHASHDIR'] = '1'
# Get configuration from environment variables
config = BuildConfig.from_env()
config.summarise()
if config.hdf5_version < (1, 10, 7) or config.hdf5_version == (1, 12, 0):
raise Exception(
f"This version of h5py requires HDF5 >= 1.10.7 and != 1.12.0 (got version "
f"{config.hdf5_version} from environment variable or library)"
)
# Refresh low-level defs if missing or stale
print("Executing api_gen rebuild of defs")
api_gen.run()
templ_config = {
"MPI": bool(config.mpi),
"ROS3": bool(config.ros3),
"HDF5_VERSION": config.hdf5_version,
"DIRECT_VFD": bool(config.direct_vfd),
"VOL_MIN_HDF5_VERSION": (1, 11, 5),
"COMPLEX256_SUPPORT": complex256_support,
"NUMPY_BUILD_VERSION": numpy.__version__,
"NUMPY_BUILD_VERSION_TUPLE": tuple(int(x) for x in numpy.__version__.split('.')[:3]),
"CYTHON_BUILD_VERSION": cython_version,
"PLATFORM_SYSTEM": platform.system(),
"OBJECTS_USE_LOCKING": True,
"OBJECTS_DEBUG_ID": False,
"FREE_THREADING": sysconfig.get_config_var("Py_GIL_DISABLED") == 1,
}
compiler_directives = {}
if Version(cython_version) >= Version("3.1.0b1"):
compiler_directives["freethreading_compatible"] = True
# Run Cython
print("Executing cythonize()")
self.extensions = self.distribution.ext_modules = cythonize(
self._make_extensions(config, templ_config),
force=config.changed() or self.force,
compiler_directives=compiler_directives,
language_level=3
)
# Perform the build
self.swig_opts = None # workaround https://github.com/pypa/setuptools/pull/5083
self.finalize_options()
super().run()
# Record the configuration we built
config.record_built()
| h5py_build_ext |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_bool_returned.py | {
"start": 582,
"end": 713
} | class ____:
""" __bool__ returns an integer """
def __bool__(self): # [invalid-bool-returned]
return 1
| FirstBadBool |
python | weaviate__weaviate-python-client | weaviate/collections/grpc/query.py | {
"start": 1473,
"end": 1581
} | class ____:
force: float
concepts: List[str]
objects: List[uuid_lib.UUID]
A = TypeVar("A")
| _Move |
python | google__jax | tests/mosaic/matmul_test.py | {
"start": 1791,
"end": 6955
} | class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if matmul is None:
self.skipTest("Mosaic GPU not available.")
if not jtu.test_device_matches(["cuda"]):
self.skipTest("Test needs a GPU device")
self.context = mlir.make_ir_context()
mgpu_dialect.register_dialect(self.context)
self.enter_context(config.traceback_filtering("off"))
self.enter_context(self.context)
self.enter_context(ir.Location.unknown())
@parameterized.named_parameters(
(f"_shard{i}", i) for i in range(5)
)
@seed_hypothesis
@hp.settings(max_examples=100) # Add verbosity=hp.Verbosity.verbose to debug
@hp.given(hps.data())
def test_matmul_sm90(self, data):
if not jtu.is_cuda_compute_capability_equal("9.0"):
self.skipTest("Only works on GPU with capability sm90a")
in_dtype = data.draw(
hps.sampled_from([jnp.float16, jnp.bfloat16, jnp.float32]),
label="in_dtype",
)
out_dtype = jnp.float32
if in_dtype != jnp.float32:
out_dtype = data.draw(
hps.sampled_from([in_dtype, jnp.float32]),
label="out_dtype",
)
bytewidth = jnp.dtype(in_dtype).itemsize
m, n, k = (
data.draw(hps.sampled_from([128, 256, 512, 2048]), label=d)
for d in "mnk"
)
stages = data.draw(hps.integers(2, 5), label="stages")
swizzle = data.draw(hps.sampled_from([32, 64, 128]), label="swizzle")
tile_m = data.draw(
hps.sampled_from([t for t in [64, 128, 256] if t <= m]), label="tile_m"
)
tile_n = data.draw(
hps.sampled_from([t for t in [64, 128, 256] if t <= n]), label="tile_n"
)
grid_m, grid_n = m // tile_m, n // tile_n
grid_tile_n = data.draw(hps.sampled_from([1, 2, 4, 8, 16]), label="grid_tile_n")
hp.assume(grid_n % grid_tile_n == 0)
cluster_m = data.draw(hps.sampled_from([1, 2, 4]), label="cluster_m")
hp.assume(grid_m % cluster_m == 0)
cluster_n = data.draw(hps.sampled_from([1, 2, 4]), label="cluster_n")
hp.assume(grid_n % cluster_n == 0)
# TODO(apaszke): Non-portable clusters (16 blocks) sometimes deadlock.
hp.assume(cluster_m * cluster_n <= 8)
if bytewidth == 4:
rhs_transpose = True
else:
rhs_transpose = data.draw(hps.booleans(), label="rhs_transpose")
try:
matmul.verify(
m,
k,
n,
stages=stages,
tile_m=tile_m,
tile_n=tile_n,
in_dtype=in_dtype,
out_dtype=out_dtype,
cluster_m=cluster_m,
cluster_n=cluster_n,
grid_tile_n=grid_tile_n,
swizzle=swizzle,
rhs_transpose=rhs_transpose,
)
except ValueError as e:
if "Mosaic GPU kernel exceeds available shared memory" in str(e):
hp.assume(False)
raise e
@parameterized.named_parameters(
# TODO(apaszke): Increase shard count once we have more B200s in CI.
(f"_shard{i}", i) for i in range(1)
)
@seed_hypothesis
@hp.settings(max_examples=100) # Add verbosity=hp.Verbosity.verbose to debug
@hp.given(hps.data())
def test_matmul_sm100(self, data):
if not jtu.is_cuda_compute_capability_equal("10.0"):
self.skipTest("Only works on GPU with capability sm100a")
dtype = data.draw(
hps.sampled_from([jnp.float16, jnp.bfloat16]),
label="dtype",
)
m, n, k = (
data.draw(hps.sampled_from([128, 256, 512, 2048, 8192]), label=d) for d in "mnk"
)
max_concurrent_steps = data.draw(
hps.integers(2, 5), label="max_concurrent_steps"
)
collective = data.draw(hps.booleans(), label="collective")
num_ctas = 2 if collective else 1
hp.assume(not (m == 128 and collective)) # Too small for collective MMA.
tile_m = data.draw(
hps.sampled_from([t for t in [128] if t * num_ctas <= m]), label="tile_m"
)
tmem_cols = 512
tile_n = data.draw(
hps.sampled_from([
t
for t in [64, 128, 256]
# We're double buffering TMEM in the kernel, hence the 2x.
if t * num_ctas <= n and 2 * t * num_ctas <= tmem_cols
]),
label="tile_n",
)
grid_m = m // (num_ctas * tile_m)
grid_tile_m = data.draw(hps.sampled_from([1, 2, 4, 8, 16]), label="grid_tile_m")
hp.assume(grid_m % grid_tile_m == 0)
try:
kernel = matmul_blackwell.build_kernel(
m,
k,
n,
dtype=dtype,
tile_m=tile_m,
tile_n=tile_n,
grid_tile_m=grid_tile_m,
max_concurrent_steps=max_concurrent_steps,
collective=collective,
)
except ValueError as e:
if "Mosaic GPU kernel exceeds available shared memory" in str(e):
hp.assume(False)
raise
ka, kb = jax.random.split(jax.random.key(0), 2)
a = jax.random.normal(key=ka, shape=(m, k), dtype=dtype)
b = jax.random.normal(key=kb, shape=(n, k), dtype=dtype)
out = kernel(a, b)
out_ref = jnp.dot(a, b.T)
np.testing.assert_allclose(
out, out_ref, atol=2e-3, rtol=1e-2
)
if __name__ == "__main__":
absltest.main(argv=["python"], testLoader=jtu.JaxTestLoader())
| MatmulTestCase |
python | getsentry__sentry | src/sentry/shared_integrations/exceptions/__init__.py | {
"start": 2905,
"end": 3428
} | class ____(ApiError):
code = 503
@classmethod
def from_exception(cls, exception: Exception) -> ApiHostError:
maybe_request = getattr(exception, "request", None)
if maybe_request is not None:
return cls.from_request(maybe_request)
return cls("Unable to reach host")
@classmethod
def from_request(cls, request: _RequestHasUrl) -> ApiHostError:
host = urlparse(request.url).netloc
return cls(f"Unable to reach host: {host}", url=request.url)
| ApiHostError |
python | pytorch__pytorch | torch/distributed/pipelining/schedules.py | {
"start": 123568,
"end": 141596
} | class ____(_PipelineScheduleRuntime):
"""
The DualPipeV schedule. A more efficient schedule variant based on the
DualPipe schedule introduced by DeepSeek in https://arxiv.org/pdf/2412.19437
Based on the open sourced code from https://github.com/deepseek-ai/DualPipe
"""
def __init__(
self,
stages: list[_PipelineStageBase],
n_microbatches: int,
loss_fn: Callable | None = None,
args_chunk_spec: tuple[TensorChunkSpec, ...] | None = None,
kwargs_chunk_spec: dict[str, TensorChunkSpec] | None = None,
output_merge_spec: dict[str, Any] | tuple[Any] | None = None,
scale_grads: bool = True,
backward_requires_autograd: bool = True,
):
# TODO: we dont support input/weight backward split with torch.compile
_check_torch_compile_compatibility(stages, self.__class__.__name__)
self.pp_group_size = stages[0].group_size
super().__init__(
stages=stages,
n_microbatches=n_microbatches,
loss_fn=loss_fn,
args_chunk_spec=args_chunk_spec,
kwargs_chunk_spec=kwargs_chunk_spec,
output_merge_spec=output_merge_spec,
scale_grads=scale_grads,
backward_requires_autograd=backward_requires_autograd,
)
self.stage_index_to_group_rank = generate_stage_to_rank_mapping(
self.pp_group_size, self._num_stages, style="v"
)
for stage in self._stages:
stage.stage_index_to_group_rank = self.stage_index_to_group_rank
self.n_local_stages = len(stages)
if self.n_local_stages != 2:
raise ValueError(
"ZBV requires exactly 2 stages per rank, but got "
f"{self.n_local_stages}."
)
if n_microbatches < self._num_stages:
raise ValueError(
"DualPipeV requires at least as many microbatches as stages, but got "
f"{n_microbatches} microbatches and {self._num_stages} stages."
)
self.rank = stages[0].group_rank
self.num_stages = stages[0].num_stages
# 1. Create the pipeline_order (all ranks do this calculation)
# This will be used to keep track of the current state of the entire pipeline
# pipeline_order[rank] = [Action(computation_type, microbatch_index, stage_index), ...]
self.pipeline_order: dict[int, list[_Action | None]] = {}
for rank in range(self.pp_group_size):
rank_ops = self._calculate_single_rank_operations(rank)
self.pipeline_order[rank] = rank_ops
# Initialize the pipeline order with communication necessary to run with _PipelineScheduleRuntime
self._prepare_schedule_with_comms(self.pipeline_order)
def _calculate_single_rank_operations(self, rank) -> list[_Action | None]:
actions: list[_Action | None] = []
counters: dict[
tuple[int, _ComputationType], int
] = {} # (stage_index, computation_type) -> mb_index
weight_queue = [] # Queue of (stage_index, mb_index) for pending weight actions
num_ranks = self.pp_group_size
num_chunks = self._n_microbatches
rank_to_stages = generate_rank_to_stage_mapping(
num_ranks, num_ranks * 2, style="v"
)
stage0_index, stage1_index = rank_to_stages[rank]
def increment_backward_counts(stage_index: int):
"""Helper method to increment BACKWARD_INPUT and BACKWARD_WEIGHT counters when FULL_BACKWARD is used."""
input_key = (stage_index, BACKWARD_INPUT)
weight_key = (stage_index, BACKWARD_WEIGHT)
counters[input_key] = counters.get(input_key, 0) + 1
counters[weight_key] = counters.get(weight_key, 0) + 1
def add_overlap_f_b(
actions: list,
forward_stage: int,
backward_stage: int,
):
"""Helper method to add an overlapped forward+backward action which tracks microbatch index."""
# Create new overlapped forward+backward action with sub_actions
forward_key = (forward_stage, FORWARD)
backward_key = (backward_stage, BACKWARD_INPUT)
forward_mb = counters.get(forward_key, 0)
backward_mb = counters.get(backward_key, 0)
sub_actions = (
_Action(forward_stage, FORWARD, forward_mb),
_Action(backward_stage, FULL_BACKWARD, backward_mb),
)
actions.append(_Action(-1, OVERLAP_F_B, None, sub_actions))
# Update counters for sub_actions
counters[forward_key] = forward_mb + 1
increment_backward_counts(backward_stage)
def add_action(
actions: list,
stage_index: int,
computation_type: _ComputationType,
):
# Regular single action, for FULL_BACKWARD we only use the BACKWARD_INPUT counter
key = (
(stage_index, computation_type)
if computation_type != FULL_BACKWARD
else (stage_index, BACKWARD_INPUT)
)
mb_index = counters.get(key, 0)
actions.append(_Action(stage_index, computation_type, mb_index))
# If FULL_BACKWARD is used, just increment the separate BACKWARD_INPUT and BACKWARD_WEIGHT counters
if computation_type == FULL_BACKWARD:
increment_backward_counts(stage_index)
else:
# If BACKWARD_INPUT is updated, add corresponding weight action to queue
if computation_type == BACKWARD_INPUT:
# Add weight action to queue for later processing
weight_queue.append((stage_index, mb_index))
counters[key] = mb_index + 1
def add_weight_action_if_pending(actions: list):
"""Helper method to add a weight action from the queue."""
if not weight_queue:
return # No pending weight actions, skip
# Pop the oldest weight action from the queue
actual_stage_index, weight_mb_index = weight_queue.pop(0)
actions.append(
_Action(
actual_stage_index,
BACKWARD_WEIGHT,
weight_mb_index,
)
)
# Update the counter for the actual stage that was processed
weight_key = (actual_stage_index, BACKWARD_WEIGHT)
counters[weight_key] = counters.get(weight_key, 0) + 1
# Step 1: F0
step_1 = (num_ranks - rank - 1) * 2
for _ in range(step_1):
add_action(actions, stage0_index, FORWARD)
# Step 2: F0F1
step_2 = rank + 1
for _ in range(step_2):
add_action(actions, stage0_index, FORWARD)
add_action(actions, stage1_index, FORWARD)
# Step 3: I1W1F1 (Use zero bubble)
step_3 = num_ranks - rank - 1
for _ in range(step_3):
add_action(actions, stage1_index, BACKWARD_INPUT)
add_weight_action_if_pending(actions)
add_action(actions, stage1_index, FORWARD)
# Step 4 (Main step): F0B1-F1B0 (combined, overlapped forward+backward)
step_4 = num_chunks - num_ranks * 2 + rank + 1
for i in range(step_4):
if i == 0 and rank == num_ranks - 1:
# NOTE: We don't overlap these two chunks to further reduce bubble size.
add_action(actions, stage0_index, FORWARD)
add_action(actions, stage1_index, FULL_BACKWARD)
else:
add_overlap_f_b(
actions,
forward_stage=stage0_index,
backward_stage=stage1_index,
)
add_overlap_f_b(
actions,
forward_stage=stage1_index,
backward_stage=stage0_index,
)
# Step 5: B1-F1B0
step_5 = num_ranks - rank - 1
for _ in range(step_5):
add_action(actions, stage1_index, FULL_BACKWARD)
add_overlap_f_b(
actions,
forward_stage=stage1_index,
backward_stage=stage0_index,
)
# Step 6: B1B0 (The second half of the chunks use zero bubble)
step_6 = rank + 1
enable_zb = False
for i in range(step_6):
if i == step_6 // 2 and rank % 2 == 1:
enable_zb = True
comp_type = BACKWARD_INPUT if enable_zb else FULL_BACKWARD
add_action(actions, stage1_index, comp_type)
if i == step_6 // 2 and rank % 2 == 0:
enable_zb = True
comp_type = BACKWARD_INPUT if enable_zb else FULL_BACKWARD
add_action(actions, stage0_index, comp_type)
# Step 7: W0B0
step_7 = num_ranks - rank - 1
for _ in range(step_7):
add_weight_action_if_pending(actions)
comp_type = BACKWARD_INPUT if enable_zb else FULL_BACKWARD
add_action(actions, stage0_index, comp_type)
# Step 8: W0
step_8 = rank + 1
for _ in range(step_8):
add_weight_action_if_pending(actions)
return actions
def get_schedule_class(schedule_name: str):
"""
Maps a schedule name (case insensitive) to its corresponding class object.
Args:
schedule_name (str): The name of the schedule.
"""
schedule_map = {
"1F1B": Schedule1F1B,
"Interleaved1F1B": ScheduleInterleaved1F1B,
"GPipe": ScheduleGPipe,
"LoopedBFS": ScheduleLoopedBFS,
"InterleavedZeroBubble": ScheduleInterleavedZeroBubble,
"PipelineScheduleSingle": PipelineScheduleSingle,
"PipelineScheduleMulti": PipelineScheduleMulti,
"ZBVZeroBubble": ScheduleZBVZeroBubble,
"DualPipeV": ScheduleDualPipeV,
}
lowercase_keys = {k.lower(): k for k in schedule_map}
lowercase_schedule_name = schedule_name.lower()
if lowercase_schedule_name not in lowercase_keys:
raise ValueError(
f"Unknown schedule name '{schedule_name}'. The valid options are {list(schedule_map.keys())}"
)
return schedule_map[lowercase_keys[lowercase_schedule_name]]
def _simulate_comms_compute(
pipeline_order, stage_to_rank: Callable[[int], int], num_stages: int
):
"""This function dry-run simulates the actions in the schedule from the perspective of all ranks, and flags
any deadlocks caused by missing or misordered communications. It also simulates any bubbles in time where a rank
can not execute any action due to waiting for unmet dependencies. The total number of simulator steps can be used
as a metric for unit tests involving IR optimization passes as reordering and merging of IR can reduce the number
of simulated steps.
The simulation is not high-fidelity and does not model overlapping of compute and communication, or cuda streams.
Future work may be to enhance this and model the compute time, comms overlap, and even memory.
"""
pipeline_order = {
rank: [a for a in pipeline_order[rank] if a is not None]
for rank in sorted(pipeline_order)
}
_schedule: dict[int, list[_Action | None]] = {
rank: [] for rank in sorted(pipeline_order)
}
_prev_ops_rank: dict[int, set[_Action]] = {rank: set() for rank in _schedule}
def add_to_schedule(rank: int, action: _Action | None):
_schedule[rank].append(action)
if action is not None:
_prev_ops_rank[rank].add(action)
def _ready_to_schedule(action: _Action | None) -> bool:
if action is None:
return True
stage_idx = action.stage_index
prev_ops = _prev_ops_rank[stage_to_rank(stage_idx)]
if action.computation_type == F:
if action.stage_index == 0:
return True
elif (
_Action(action.stage_index, RECV_F, action.microbatch_index) in prev_ops
):
return True
elif (
_Action(action.stage_index - 1, F, action.microbatch_index) in prev_ops
):
return True
return False
elif action.computation_type in (BACKWARD_INPUT, FULL_BACKWARD):
if action.stage_index == num_stages - 1:
return True
if _Action(action.stage_index, RECV_B, action.microbatch_index) in prev_ops:
return True
if (
_Action(action.stage_index + 1, BACKWARD_INPUT, action.microbatch_index)
in prev_ops
):
return True
if (
_Action(action.stage_index + 1, FULL_BACKWARD, action.microbatch_index)
in prev_ops
):
return True
return False
elif action.computation_type == BACKWARD_WEIGHT:
return True
elif action.computation_type == SEND_F:
expected_f = _Action(action.stage_index, F, action.microbatch_index)
return expected_f in prev_ops
elif action.computation_type == RECV_F:
peer_stage_idx = stage_idx - 1
expected_send = _Action(peer_stage_idx, SEND_F, action.microbatch_index)
return expected_send in _prev_ops_rank[stage_to_rank(peer_stage_idx)]
elif action.computation_type == SEND_B:
expected_b = _Action(
action.stage_index, BACKWARD_INPUT, action.microbatch_index
)
expected_bw = _Action(
action.stage_index, FULL_BACKWARD, action.microbatch_index
)
return expected_b in prev_ops or expected_bw in prev_ops
elif action.computation_type == RECV_B:
peer_stage_idx = stage_idx + 1
expected_send = _Action(peer_stage_idx, SEND_B, action.microbatch_index)
return expected_send in _prev_ops_rank[stage_to_rank(peer_stage_idx)]
else:
raise ValueError(f"Unsupported action type {action}")
while pipeline_order:
progress = False
for rank in sorted(pipeline_order):
if len(pipeline_order[rank]) == 0:
continue
action = pipeline_order[rank][0]
if _ready_to_schedule(action):
if action is not None:
add_to_schedule(rank, action)
pipeline_order[rank].pop(0)
progress = True
else:
add_to_schedule(rank, None)
for i in sorted(pipeline_order, reverse=True):
if len(pipeline_order[i]) == 0:
del pipeline_order[i]
# hacky, but do a second pass to replace any 'none' at this timestep with a real action, if it got unblocked
# by one of the later ranks
for rank in sorted(pipeline_order):
if len(pipeline_order[rank]) == 0:
continue
if _schedule[rank][-1] is not None:
continue
action = pipeline_order[rank][0]
if _ready_to_schedule(action):
if action is not None:
_schedule[rank][-1] = action
_prev_ops_rank[rank].add(action)
pipeline_order[rank].pop(0)
for i in sorted(pipeline_order, reverse=True):
if len(pipeline_order[i]) == 0:
del pipeline_order[i]
if not progress:
print("WIP comms schedule:\n", _format_pipeline_order(_schedule))
for rank in pipeline_order:
print(f"{rank=} next action= {pipeline_order[rank][0]}")
raise ValueError("Schedule is not progressing")
return _schedule
def _dump_chrometrace(schedule, filename):
"""
This function dumps a schedule IR into a chrometrace format so it can be visualized.
It is currently very basic and only serves as a graphical alternative to dumping the schedule IR as text.
As future work we may extend this to include more accurate heuristics for durations, or let users input durations,
add 'flow events' to let the UI show the connection between sends and recvs, and model cuda streams for comm/compute
as separate streams on the chrometrace view.
"""
events = []
for rank in sorted(schedule):
for timestep, action in enumerate(schedule[rank]):
if action is None:
continue
events.append(
{
"name": str(action),
"cat": (
"computation"
if action.computation_type in (F, B, W)
else "communication"
),
"ph": "X",
"pid": rank,
"tid": rank,
"ts": timestep,
"dur": 1,
}
)
import json
with open(filename, "w") as f:
json.dump({"traceEvents": events}, f)
def _check_torch_compile_compatibility(
stages: list[_PipelineStageBase], schedule_name: str
):
"""
Check if the schedule is compatible with torch.compile.
Args:
stages: List of pipeline stages to check
schedule_name: Name of the schedule for error message
Raises:
RuntimeError: If any stage uses torch.compile
"""
for stage in stages:
if not isinstance(stage.submod, torch.nn.Module):
continue
for module in stage.submod.modules():
if isinstance(module, OptimizedModule):
raise RuntimeError(
f"The {schedule_name} schedule is not supported with "
"stage modules that have used torch.compile. "
f"Found OptimizedModule in {type(module).__name__}"
)
| ScheduleDualPipeV |
python | huggingface__transformers | src/transformers/models/helium/configuration_helium.py | {
"start": 774,
"end": 8106
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`HeliumModel`]. It is used to instantiate an Helium
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Helium 2b model.
e.g. [kyutai/helium-2b](https://huggingface.co/kyutai/helium-2b)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 48000):
Vocabulary size of the Helium model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`HeliumModel`]
hidden_size (`int`, *optional*, defaults to 2560):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 7040):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 20):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 20):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
head_dim (`int`, *optional*, defaults to 128):
The attention head dimension.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The legacy activation function. It is overwritten by the `hidden_activation`.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-08):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
pad_token_id (`int`, *optional*, defaults to 3):
Padding token id.
eos_token_id (`int` | `list`, *optional*, defaults to 2):
End of stream token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
```python
>>> from transformers import HeliumModel, HeliumConfig
>>> # Initializing a Helium 2b style configuration
>>> configuration = HeliumConfig()
>>> # Initializing a model from the Helium 2b style configuration
>>> model = HeliumModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "helium"
keys_to_ignore_at_inference = ["past_key_values"]
default_theta = 100000.0
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 48000,
hidden_size: Optional[int] = 2560,
intermediate_size: Optional[int] = 7040,
num_hidden_layers: Optional[int] = 24,
num_attention_heads: Optional[int] = 20,
num_key_value_heads: Optional[int] = 20,
head_dim: Optional[int] = 128,
hidden_act: Optional[str] = "silu",
attention_dropout: Optional[float] = 0.0,
max_position_embeddings: Optional[int] = 4096,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-8,
use_cache: Optional[bool] = True,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
pad_token_id: Optional[int] = 3,
eos_token_id: Optional[int] = 2,
bos_token_id: Optional[int] = 1,
attention_bias: Optional[bool] = False,
mlp_bias: Optional[bool] = False,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.head_dim = head_dim
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.mlp_bias = mlp_bias
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["HeliumConfig"]
| HeliumConfig |
python | apache__airflow | providers/apache/tinkerpop/src/airflow/providers/apache/tinkerpop/hooks/gremlin.py | {
"start": 1161,
"end": 5637
} | class ____(BaseHook):
"""
Interact with Graph DB using the Gremlin Client.
This hook creates a connection to Graph DB and allows you to run Gremlin queries.`
:param gremlin_conn_id: Reference to the connection ID configured in Airflow.
"""
conn_name_attr = "gremlin__conn_id"
default_conn_name = "gremlin_default"
conn_type = "gremlin"
hook_name = "Gremlin"
default_port = 443
traversal_source = "g"
def __init__(self, conn_id: str = default_conn_name, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.gremlin_conn_id = conn_id
self.connection = kwargs.pop("connection", None)
self.client: Client | None = None
def get_conn(self, serializer=None) -> Client:
"""
Establish a connection to Graph DB with the Gremlin Client.
:param serializer: Message serializer to use for the client.
:return: An instance of the Gremlin Client.
"""
if self.client is not None:
return self.client
self.connection = self.get_connection(self.gremlin_conn_id)
uri = self.get_uri(self.connection)
self.log.info("Connecting to URI: %s", uri)
self.client = self.get_client(
self.connection,
self.traversal_source,
uri,
message_serializer=serializer,
)
return self.client
def get_uri(self, conn: Connection) -> str:
"""
Build the URI from the connection object and extra parameters.
:param conn: Airflow Connection object.
:return: URI string.
"""
# For Graph DB using Gremlin, the secure WebSocket scheme is typically "wss"
scheme = "wss" if conn.conn_type == "gremlin" else "ws"
host = conn.host
port = conn.port if conn.port is not None else self.default_port
schema = "" if conn.conn_type == "gremlin" else "gremlin"
return f"{scheme}://{host}:{port}/{schema}"
def get_client(
self, conn: Connection, traversal_source: str, uri: str, message_serializer=None
) -> Client:
"""
Create and return a new Gremlin client.
:param conn: Airflow Connection object.
:param traversal_source: Traversal source for the Gremlin client.
:param uri: URI string for connecting to Graph DB.
:param message_serializer: Message serializer to use for the client.
:return: An instance of the Gremlin Client.
"""
# Build the username. This example uses the connection's schema and login.
login = conn.login if conn.login not in ["mylogin", None] else ""
schema = conn.schema if conn.schema not in ["gremlin", None] else ""
password = conn.password if conn.password not in ["mysecret", None] else ""
username = f"/dbs/{login}/colls/{schema}" if login and schema else ""
# Build the kwargs for the Client.
client_kwargs = {
"url": uri,
"traversal_source": traversal_source,
"username": username,
"password": password,
}
# If a serializer is provided, check if it's a type and instantiate it.
if message_serializer is not None:
if isinstance(message_serializer, type):
message_serializer = message_serializer()
client_kwargs["message_serializer"] = message_serializer
return Client(**client_kwargs)
def run(self, query: str, serializer=None, bindings=None, request_options=None) -> list[Any]:
"""
Execute a Gremlin query and return the results.
:param query: Gremlin query string.
:param serializer: Message serializer to use for the query.
:param bindings: Bindings to use for the query.
:param request_options: Request options to use for the query.
:return: List containing the query results.
"""
client = self.get_conn(serializer)
try:
results_list = (
client.submit(message=query, bindings=bindings, request_options=request_options)
.all()
.result()
)
except Exception as e:
logger.error("An error occurred while running the query: %s", str(e))
raise e
finally:
if client is not None:
client.close()
self.client = None
return results_list
| GremlinHook |
python | plotly__plotly.py | plotly/graph_objs/layout/scene/zaxis/_title.py | {
"start": 235,
"end": 2861
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.scene.zaxis"
_path_str = "layout.scene.zaxis.title"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this axis' title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.zaxis.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.layout.scene.zaxis.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of this axis.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this axis' title font.
text
Sets the title of this axis.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.zaxis.Title`
font
Sets this axis' title font.
text
Sets the title of this axis.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.scene.zaxis.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.zaxis.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Title |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/model_tests/model_handler.py | {
"start": 21490,
"end": 25059
} | class ____(metaclass=abc.ABCMeta):
"""Manages a series of ModelHandlers for aggregated testing/benchmarking."""
def __init__(
self, name: str, model_config: ModelConfig,
default_trt_convert_params: trt.TrtConversionParams,
trt_convert_params_updater: Callable[[trt.TrtConversionParams],
Iterable[trt.TrtConversionParams]]):
self._ori_model = self.model_handler_cls(model_config)
self._trt_models = []
for trt_convert_params in trt_convert_params_updater(
default_trt_convert_params):
trt_model = self.trt_model_handler_cls(
model_config, trt_convert_params=trt_convert_params)
self._trt_models.append(trt_model)
self._name = name
self._result_collection = None
def __str__(self) -> str:
return "Input Model: {}".format(str(self._ori_model))
def __repr__(self) -> str:
return "{}({})".format(self.__class__.__name__, str(self))
@property
@classmethod
@abc.abstractmethod
def model_handler_cls(cls):
"""The model handler class. ModelHandleV1/ModelHandlerV2."""
@property
@classmethod
@abc.abstractmethod
def trt_model_handler_cls(cls):
"""The TensorRT model handler class. TrtModelHandleV1/TrtModelHandlerV2."""
@property
def name(self) -> str:
return self._name
@property
def model_config(self) -> ModelConfig:
return self._ori_model.model_config
def generate_random_inputs(self, batch_size: Optional[int] = None):
return self._ori_model.generate_random_inputs(batch_size)
def convert(self, calibration_inputs=None, num_runs=1) -> None:
"""Converts models with TensorRT and calibrates if using INT8 precision mode.
Args:
calibration_inputs: Mapping from input names to ndarrays in TF1. Or a
sequence of tensors in TF2. Used as calibration data.
num_runs: Number of calibration runs.
"""
for trt_model in self._trt_models:
trt_model.convert(calibration_inputs, num_runs)
def run(self,
inputs=None,
warmup_iterations: int = 10,
benchmark_iterations: int = 100) -> TestResultCollection:
"""Runs model inference with provided or randomly generated input tensors.
Args:
inputs: Mapping from names to input ndarrays in TF1. Or a sequence of
tensors in TF2. If `None`, ramdomly generated input tensors will be used
instead.
warmup_iterations: Number of inferences to warm up the runtime.
benchmark_iterations: Number of inferences to measure the latency.
Returns:
`TestResultCollection` summarizing latency and numerics information for
different TensorRT conversion settings.
"""
inputs = inputs or self.generate_random_inputs()
def run_model(model, **kwargs):
return model.run(inputs, warmup_iterations, benchmark_iterations,
**kwargs)
# Some models include operations that can only run on GPU.
try:
cpu_base_result = run_model(self._ori_model, enable_gpu=False)
except RuntimeError as err:
logging.info("%s cannot run on CPU. Reason: %s.",
self._ori_model.model_config, err)
cpu_base_result = None
gpu_base_result = run_model(self._ori_model, enable_gpu=True)
trt_results = list(map(run_model, self._trt_models))
return TestResultCollection(
test_name=self._name,
model_config=self.model_config,
cpu_base_result=cpu_base_result,
gpu_base_result=gpu_base_result,
trt_results=trt_results)
| _ModelHandlerManagerBase |
python | has2k1__plotnine | plotnine/iapi.py | {
"start": 3643,
"end": 3787
} | class ____:
"""
Information from the trained position scales in a panel
"""
x: scale_view
y: scale_view
@dataclass
| panel_view |
python | plotly__plotly.py | plotly/graph_objs/funnelarea/_hoverlabel.py | {
"start": 233,
"end": 11262
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "funnelarea"
_path_str = "funnelarea.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
"showarrow",
}
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.funnelarea.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.funnelarea.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
@property
def showarrow(self):
"""
Sets whether or not to show the hover label arrow/triangle
pointing to the data point.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
showarrow=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.funnelarea.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.funnelarea.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.funnelarea.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("align", arg, align)
self._set_property("alignsrc", arg, alignsrc)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("bordercolorsrc", arg, bordercolorsrc)
self._set_property("font", arg, font)
self._set_property("namelength", arg, namelength)
self._set_property("namelengthsrc", arg, namelengthsrc)
self._set_property("showarrow", arg, showarrow)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Hoverlabel |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec38.py | {
"start": 184,
"end": 564
} | class ____(Generic[P, R]):
def __init__(self, callback: Callable[P, R]):
self.callback = callback
def method(self, *args: P.args, **kwargs: P.kwargs) -> R:
return self.callback(*args, **kwargs)
def func1(obj: object, **kwargs: object) -> object: ...
reveal_type(
ClassA(func1).method, expected_text="(obj: object, **kwargs: object) -> object"
)
| ClassA |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 20034,
"end": 20301
} | class ____(BaseModel):
"""
Provider serializer for responses.
"""
package_name: Annotated[str, Field(title="Package Name")]
description: Annotated[str, Field(title="Description")]
version: Annotated[str, Field(title="Version")]
| ProviderResponse |
python | conda__conda | conda/cli/conda_argparse.py | {
"start": 7494,
"end": 8734
} | class ____(ArgumentParserBase):
def __init__(self, *args, add_help=True, **kwargs):
kwargs.setdefault("formatter_class", RawDescriptionHelpFormatter)
super().__init__(*args, add_help=False, **kwargs)
if add_help:
add_parser_help(self)
def _check_value(self, action, value):
# For our greedy subparsers, sort the choices by their repr for stable output
if isinstance(action, _GreedySubParsersAction) and isinstance(
action.choices, dict
):
action.choices = dict(sorted(action.choices.items()))
# extend to properly handle when we accept multiple choices and the default is a list
if action.choices is not None and isiterable(value):
for element in value:
super()._check_value(action, element)
else:
super()._check_value(action, value)
def parse_args(self, *args, override_args=None, **kwargs):
parsed_args = super().parse_args(*args, **kwargs)
for name, value in (override_args or {}).items():
if value is not NULL and getattr(parsed_args, name, NULL) is NULL:
setattr(parsed_args, name, value)
return parsed_args
| ArgumentParser |
python | Netflix__metaflow | metaflow/runner/subprocess_manager.py | {
"start": 1915,
"end": 2011
} | class ____(Exception):
"""Exception raised when reading logs times out."""
| LogReadTimeoutError |
python | ray-project__ray | python/ray/tests/test_exceptions.py | {
"start": 254,
"end": 2098
} | class ____:
"""Tests for AuthenticationError exception."""
auth_doc_url = "https://docs.ray.io/en/latest/ray-security/auth.html"
def test_basic_creation(self):
"""Test basic AuthenticationError creation and message format."""
error = AuthenticationError("Token is missing")
error_str = str(error)
# Original message preserved
assert "Token is missing" in error_str
# Doc URL included
assert self.auth_doc_url in error_str
def test_is_ray_error_subclass(self):
"""Test that AuthenticationError is a RayError subclass."""
error = AuthenticationError("Test")
assert isinstance(error, RayError)
@pytest.mark.parametrize(
"auth_mode,expected_note",
[
(FakeAuthMode.DISABLED, "RAY_AUTH_MODE is currently 'disabled'"),
(FakeAuthMode.K8S, "RAY_AUTH_MODE is currently 'k8s'"),
(FakeAuthMode.TOKEN, None),
],
ids=["disabled", "k8s", "token"],
)
def test_auth_mode_note_in_message(self, auth_mode, expected_note):
"""Test that error message includes auth mode note when not in token mode."""
with patch.dict(
"sys.modules",
{
"ray._raylet": MagicMock(
AuthenticationMode=FakeAuthMode,
get_authentication_mode=lambda: auth_mode,
)
},
):
error = AuthenticationError("Token is missing")
error_str = str(error)
assert "Token is missing" in error_str
if expected_note:
assert expected_note in error_str
else:
assert "RAY_AUTH_MODE is currently" not in error_str
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| TestAuthenticationError |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF066.py | {
"start": 27,
"end": 510
} | class ____: # Test normal class properties
@property
def name(self): # ERROR: No return
f"{self.first_name} {self.last_name}"
@property
def age(self): # OK: Returning something
return 100
def method(self): # OK: Not a property
x = 1
@property
def nested(self): # ERROR: Property itself doesn't return
def inner():
return 0
@property
def stub(self): ... # OK: A stub; doesn't return anything
| User |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_cloud_memorystore.py | {
"start": 11149,
"end": 12648
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.cloud_memorystore.CloudMemorystoreHook")
def test_assert_valid_hook_call(self, mock_hook):
mock_hook.return_value.update_instance.return_value.name = TEST_UPDATE_INSTANCE_NAME.format(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
instance_id=TEST_INSTANCE_ID,
)
task = CloudMemorystoreUpdateInstanceOperator(
task_id=TEST_TASK_ID,
update_mask=TEST_UPDATE_MASK,
instance=TEST_INSTANCE,
location=TEST_LOCATION,
instance_id=TEST_INSTANCE_ID,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
task.execute(mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.update_instance.assert_called_once_with(
update_mask=TEST_UPDATE_MASK,
instance=TEST_INSTANCE,
location=TEST_LOCATION,
instance_id=TEST_INSTANCE_ID,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
| TestCloudMemorystoreUpdateInstanceOperator |
python | encode__django-rest-framework | rest_framework/fields.py | {
"start": 64651,
"end": 66288
} | class ____(Field):
default_error_messages = {
'invalid': _('Value must be valid JSON.')
}
# Workaround for isinstance calls when importing the field isn't possible
_is_jsonfield = True
def __init__(self, **kwargs):
self.binary = kwargs.pop('binary', False)
self.encoder = kwargs.pop('encoder', None)
self.decoder = kwargs.pop('decoder', None)
super().__init__(**kwargs)
def get_value(self, dictionary):
if html.is_html_input(dictionary) and self.field_name in dictionary:
# When HTML form input is used, mark up the input
# as being a JSON string, rather than a JSON primitive.
class JSONString(str):
def __new__(cls, value):
ret = str.__new__(cls, value)
ret.is_json_string = True
return ret
return JSONString(dictionary[self.field_name])
return dictionary.get(self.field_name, empty)
def to_internal_value(self, data):
try:
if self.binary or getattr(data, 'is_json_string', False):
if isinstance(data, bytes):
data = data.decode()
return json.loads(data, cls=self.decoder)
else:
json.dumps(data, cls=self.encoder)
except (TypeError, ValueError):
self.fail('invalid')
return data
def to_representation(self, value):
if self.binary:
value = json.dumps(value, cls=self.encoder)
value = value.encode()
return value
# Miscellaneous field types...
| JSONField |
python | pytorch__pytorch | torch/testing/_internal/common_modules.py | {
"start": 8797,
"end": 210467
} | class ____:
""" Module information to be used in testing. """
def __init__(self,
module_cls, # Class object for the module under test
*,
module_inputs_func, # Function to generate module inputs
skips=(), # Indicates which tests to skip
decorators=None, # Additional decorators to apply to generated tests
dtypes=floating_types(), # dtypes this function is expected to work with
dtypesIfMPS=(torch.float16, torch.float32,), # dtypes this function is expected to work with on MPS
dtypesIfHpu=(torch.bfloat16, torch.float32,),
supports_gradgrad=True, # whether the op supports second order gradients
gradcheck_nondet_tol=0.0, # tolerance for nondeterminism while performing gradcheck
module_memformat_affects_out=False, # whether converting module to channels last will generate
# channels last output
train_and_eval_differ=False, # whether the module has differing behavior between train and eval
module_error_inputs_func=None, # Function to generate module inputs that error
gradcheck_fast_mode=None, # Whether to use the fast implementation for gradcheck/gradgradcheck.
# When set to None, defers to the default value provided by the wrapper
# function around gradcheck (testing._internal.common_utils.gradcheck)
):
self.module_cls = module_cls
self.module_inputs_func = module_inputs_func
self.decorators = (*(decorators if decorators else []), *(skips if skips else []))
self.dtypes = dtypes
self.dtypesIfMPS = dtypesIfMPS
self.dtypesIfHpu = dtypesIfHpu
self.supports_gradgrad = supports_gradgrad
self.gradcheck_nondet_tol = gradcheck_nondet_tol
self.module_memformat_affects_out = module_memformat_affects_out
self.train_and_eval_differ = train_and_eval_differ
self.module_error_inputs_func = module_error_inputs_func
self.gradcheck_fast_mode = gradcheck_fast_mode
self.is_lazy = issubclass(module_cls, torch.nn.modules.lazy.LazyModuleMixin)
def get_decorators(self, test_class, test_name, device, dtype, param_kwargs):
result = []
for decorator in self.decorators:
if isinstance(decorator, DecorateInfo):
if decorator.is_active(test_class, test_name, device, dtype, param_kwargs):
result.extend(decorator.decorators)
else:
result.append(decorator)
return result
def supported_dtypes(self, device_type):
if device_type == 'mps':
return self.dtypesIfMPS
elif device_type == 'hpu':
return self.dtypesIfHpu
else:
return self.dtypes
@property
def name(self):
return get_module_common_name(self.module_cls)
@property
def formatted_name(self):
return self.name.replace('.', '_')
# Start of module inputs functions.
def module_inputs_torch_nn_Linear(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
module_inputs = [
ModuleInput(constructor_input=FunctionInput(10, 8),
forward_input=FunctionInput(input=make_input((4, 10))),
reference_fn=lambda m, p, input: torch.mm(input, p[0].t()) + p[1].view(1, -1).expand(4, 8)),
ModuleInput(constructor_input=FunctionInput(10, 8, bias=False),
forward_input=FunctionInput(make_input((4, 10))),
desc='no_bias',
reference_fn=lambda m, p, i: torch.mm(i, p[0].t())),
ModuleInput(constructor_input=FunctionInput(3, 5),
forward_input=FunctionInput(make_input(3)),
desc='no_batch_dim',
reference_fn=lambda m, p, i: torch.mm(i.view(1, -1), p[0].t()).view(-1) + p[1])
]
return module_inputs
def module_inputs_torch_nn_Bilinear(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def bilinear_reference_fn(m, p, x1, x2, bias=True):
result = torch.einsum('bn,anm,bm->ba', x1, p[0], x2)
if bias:
if x1.shape[0] == 1:
result = result.view(-1) + p[1]
else:
result = result + p[1].view(1, -1).expand(x1.shape[0], p[0].shape[0])
return result
module_inputs = [
ModuleInput(constructor_input=FunctionInput(2, 3, 4),
forward_input=FunctionInput(make_input((8, 2)), make_input((8, 3))),
reference_fn=bilinear_reference_fn),
ModuleInput(constructor_input=FunctionInput(2, 3, 4, bias=False),
forward_input=FunctionInput(make_input((8, 2)), make_input((8, 3))),
desc='no_bias',
reference_fn=lambda m, p, x1, x2: bilinear_reference_fn(m, p, x1, x2, bias=False)),
ModuleInput(constructor_input=FunctionInput(2, 3, 4),
forward_input=FunctionInput(make_input(2), make_input(3)),
desc='no_batch_dim',
reference_fn=lambda m, p, x1, x2: bilinear_reference_fn(m, p, x1.view(1, -1), x2.view(1, -1))),
]
return module_inputs
def module_inputs_torch_nn_KLDivLoss(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases: list[tuple[str, dict]] = [
('', {}),
('reduction_sum', {'reduction': 'sum'}),
('reduction_batchmean', {'reduction': 'batchmean'}),
('reduction_none', {'reduction': 'none'}),
('log_target', {'log_target': True})
]
module_inputs = []
for desc, constructor_kwargs in cases:
def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs):
return kldivloss_reference(i, t, **constructor_kwargs)
input = make_input((10, 10)).log()
target = make_input((10, 10)) if kwargs.get('log_target', False) else make_input((10, 10)).log()
module_inputs.append(
ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
forward_input=FunctionInput(input, target),
desc=desc,
reference_fn=reference_fn)
)
scalar_input = make_input(()).log()
# FIXME(rec): scalar_target is unused, perhaps should be argument to FunctionInput?
scalar_target = ( # noqa: F841
make_input(()) if kwargs.get('log_target', False) else make_input(()).log()
)
module_inputs.append(
ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
forward_input=FunctionInput(scalar_input, scalar_input),
desc='scalar_' + desc,
reference_fn=reference_fn)
)
return module_inputs
def module_inputs_torch_nn_NLLLoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Build ModuleInputs for torch.nn.NLLLoss.

    Covers reduction modes, ignore_index, and class weights, for 2d inputs
    (checked against nllloss_reference) as well as 4d, 6d, and 3d inputs
    (checked against nlllossNd_reference). Inputs are passed through
    log_softmax(dim=1) so they are valid log-probabilities; targets are
    random class indices in [0, 4).
    """
    def make_input(shape, device=device, dtype=dtype, requires_grad=requires_grad):
        # Build without grad, normalize via log_softmax, then mark the final
        # tensor as the autograd leaf.
        return make_tensor(shape, device=device, dtype=dtype,
                           requires_grad=False).log_softmax(dim=1).requires_grad_(requires_grad)
    make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
    cases: list[tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_none', {'reduction': 'none'}),
        ('ignore_index', {'ignore_index': 2}),
        ('weights', {'weight': make_weight(4).abs()}),
        ('weights_ignore_index', {'weight': make_weight(4).abs(), 'ignore_index': 2}),
        ('weights_ignore_index_neg', {'weight': make_weight(4).abs(), 'ignore_index': -1})
    ]
    # TODO: Uncomment when negative weights is supported.
    # negative_weight = make_weight(10)
    # negative_weight[0] = -1
    # cases.append(('weights_negative', {'weight': negative_weight}))
    module_inputs = []
    for desc, constructor_kwargs in cases:
        # Bind constructor_kwargs as a default to avoid the late-binding
        # closure pitfall inside the loop.
        def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs):
            return nllloss_reference(i, t, **constructor_kwargs)
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input((15, 4)),
                                                    torch.empty(15, device=device).uniform_().mul(4).floor().long()),
                        desc=desc,
                        reference_fn=reference_fn)
        )
        def nd_reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs):
            return nlllossNd_reference(i, t, **constructor_kwargs)
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(
                            make_input((2, 4, 5, 5)),
                            torch.empty(2, 5, 5, device=device).uniform_().mul(4).floor().long()),
                        desc=f"nd_{desc}",
                        reference_fn=nd_reference_fn)
        )
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(
                            make_input((2, 4, 5, 5, 2, 2)),
                            torch.empty(2, 5, 5, 2, 2, device=device).uniform_().mul(4).floor().long()),
                        desc=f"higher_dim_{desc}",
                        reference_fn=nd_reference_fn)
        )
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(
                            make_input((2, 4, 5)),
                            torch.empty(2, 5, device=device).uniform_().mul(4).floor().long()),
                        desc=f"3d_{desc}",
                        reference_fn=nd_reference_fn)
        )
    return module_inputs
def module_inputs_torch_nn_GaussianNLLLoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Build ModuleInputs for torch.nn.GaussianNLLLoss.

    Covers the reduction modes plus a 'homoscedastic' case where a single
    shared variance row of shape (1, 3) is broadcast over the batch instead
    of the per-sample variance column of shape (4, 1).
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
    cases: list[tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_mean', {'reduction': 'mean'}),
        ('reduction_none', {'reduction': 'none'}),
        ('homoscedastic', {'homoscedastic': True}),
    ]
    module_inputs = []
    for desc, constructor_kwargs in cases:
        # 'homoscedastic' only selects the variance shape; pop it so it is
        # never forwarded to the GaussianNLLLoss constructor.
        homoscedastic = constructor_kwargs.pop('homoscedastic', False)
        var_input = make_input(1, 3).abs() if homoscedastic else make_input(4, 1).abs()
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input(4, 3),
                                                    make_target(4, 3),
                                                    var_input),
                        desc=desc,
                        # NOTE(review): reuses no_batch_dim_reference_fn on
                        # already-batched (4, 3) inputs -- presumably the extra
                        # singleton batch dim round-trips harmlessly for this
                        # loss; confirm against the test harness.
                        reference_fn=no_batch_dim_reference_fn)
        )
    return module_inputs
def module_inputs_torch_nn_PoissonNLLLoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Build ModuleInputs for torch.nn.PoissonNLLLoss.

    Covers the reduction modes and the full / log_input constructor flag
    combinations, checked against a locally-defined reference that mirrors
    the Poisson negative log-likelihood formula.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
    cases: list[tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_mean', {'reduction': 'mean'}),
        ('reduction_none', {'reduction': 'none'}),
        ('full', {'full': True}),
        ('no_log_input', {'log_input': False}),
        ('full_no_log_input', {'full': True, 'log_input': False}),
    ]
    def poissonnllloss_reference_fn(i, t, log_input=True, full=False, reduction='mean', eps=1e-8):
        # exp(i) - t*i when the input is log-rate; i - t*log(i + eps) for a
        # raw rate (eps guards log(0)).
        if log_input:
            result = i.exp() - t.mul(i)
        else:
            result = i - t.mul((i + eps).log())
        if full:
            # Stirling approximation term, zeroed out wherever target <= 1.
            result += (t.mul(t.log()) - t + 0.5 * (2. * math.pi * t).log()).masked_fill(t <= 1, 0)
        if reduction == 'none':
            return result
        elif reduction == 'mean':
            return result.sum() / i.numel()
        else:
            return result.sum()
    module_inputs = []
    for desc, constructor_kwargs in cases:
        # Bind constructor_kwargs as a default to avoid the late-binding
        # closure pitfall inside the loop.
        def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs):
            return poissonnllloss_reference_fn(i, t, **constructor_kwargs)
        log_input = constructor_kwargs.get('log_input', True)
        # Raw-rate inputs must be strictly positive for the log in the
        # reference / module to be well defined.
        input = make_input((2, 3, 4, 5)) if log_input else make_input((2, 3, 4, 5)).abs().add(0.001)
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(input,
                                                    make_target((2, 3, 4, 5)).floor_().abs_()),
                        desc=desc,
                        reference_fn=reference_fn)
        )
    return module_inputs
def module_inputs_torch_nn_MSELoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Build ModuleInputs for torch.nn.MSELoss.

    Covers the reduction modes for a 4d input and for scalar input/target
    pairs, checked against a locally-defined squared-error reference.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
    cases: list[tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_mean', {'reduction': 'mean'}),
        ('reduction_none', {'reduction': 'none'}),
    ]
    def mse_loss_reference_fn(m, p, i, t, reduction='mean'):
        if reduction == 'none':
            return (i - t).pow(2)
        elif reduction == 'mean':
            # Mean over every element of the input.
            return (i - t).pow(2).sum() / i.numel()
        else:
            return (i - t).pow(2).sum()
    module_inputs = []
    for desc, constructor_kwargs in cases:
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input((2, 3, 4, 5)),
                                                    make_target((2, 3, 4, 5))),
                        desc=desc,
                        reference_fn=partial(mse_loss_reference_fn, **constructor_kwargs))
        )
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input(()),
                                                    make_target(())),
                        desc=f'{desc}_scalar',
                        reference_fn=partial(mse_loss_reference_fn, **constructor_kwargs))
        )
    return module_inputs
def no_batch_dim_reference_fn(m, p, *args, **kwargs):
    """Reference for modules that accept unbatched inputs.

    Each positional argument is given a singleton batch dimension, the
    module is run on the batched inputs under a frozen RNG state, and the
    batch dimension is squeezed back out of the result so it can be
    compared with the module's unbatched output. Only modules returning a
    single Tensor are supported.

    Recognized (and consumed) kwargs:
        batch_first (bool): unsqueeze/squeeze at dim 0 when True (default),
            otherwise at dim 1.
        kwargs_to_batchify (dict): maps kwarg name -> dim at which that
            kwarg tensor should be unsqueezed, for arguments whose batch
            dimension differs from the one chosen by ``batch_first``.
        is_criterion (bool): when True and the criterion's reduction is
            'none', additionally squeeze dim 0 of the output.
    """
    batch_dim = 0 if kwargs.pop('batch_first', True) else 1
    kwargs_to_batchify = kwargs.pop('kwargs_to_batchify', None)
    is_criterion = kwargs.pop('is_criterion', False)

    if kwargs_to_batchify is not None:
        assert isinstance(kwargs_to_batchify, dict)
        for name, value in kwargs.items():
            if name in kwargs_to_batchify and value is not None:
                kwargs[name] = value.unsqueeze(kwargs_to_batchify[name])

    batched_args = [arg.unsqueeze(batch_dim) for arg in args]
    with freeze_rng_state():
        output = m(*batched_args, **kwargs).squeeze(batch_dim)

    if is_criterion and get_reduction(m) == 'none':
        return output.squeeze(0)
    return output
def no_batch_dim_reference_mha(m, p, *args, **kwargs):
    """Reference for MultiheadAttention with unbatched inputs.

    Adds a singleton batch dimension to every positional input (and to
    ``key_padding_mask`` when provided), runs the module under a frozen RNG
    state, and strips the batch dimension from both returned tensors.
    """
    batch_dim = 0 if kwargs.pop('batch_first', True) else 1
    padding_mask = kwargs.get('key_padding_mask')
    if padding_mask is not None:
        kwargs['key_padding_mask'] = padding_mask.unsqueeze(0)
    batched_args = [arg.unsqueeze(batch_dim) for arg in args]
    with freeze_rng_state():
        attn_output, attn_weights = m(*batched_args, **kwargs)
    return (attn_output.squeeze(batch_dim), attn_weights.squeeze(0))
def no_batch_dim_reference_rnn_gru(m, p, *args, **kwargs):
    """Reference function for RNN and GRU supporting no batch dimensions.

    Unbatched inputs are unsqueezed to form a
    single batch input before passing them to the module.
    The output is squeezed to compare with the
    output of unbatched input to the module.

    ``args`` must be ``(input,)`` or ``(input, hidden)``; any other arity
    leaves ``inp`` unbound. ``kwargs`` must contain 'batch_first', which is
    consumed here and not forwarded to the module.
    """
    if len(args) == 1:
        inp, = args
        h = None
    elif len(args) == 2:
        inp, h = args
        # The hidden state's batch dimension is always dim 1, regardless
        # of batch_first.
        h = h.unsqueeze(1)
    batch_dim = 0 if kwargs['batch_first'] else 1
    kwargs.pop('batch_first')
    inp = inp.unsqueeze(batch_dim)
    single_batch_input_args = (inp, h)
    with freeze_rng_state():
        output = m(*single_batch_input_args, **kwargs)
    return (output[0].squeeze(batch_dim), output[1].squeeze(1))
def no_batch_dim_reference_lstm(m, p, *args, **kwargs):
    """Reference function for LSTM supporting no batch dimensions.

    Unbatched inputs are unsqueezed to form a
    single batch input before passing them to the module.
    The output is squeezed to compare with the
    output of unbatched input to the module.

    ``args`` must be ``(input,)`` or ``(input, (h, c))``; any other arity
    leaves ``inp`` unbound. ``kwargs`` must contain 'batch_first', which is
    consumed here and not forwarded to the module.
    """
    if len(args) == 1:
        inp, = args
        h = None
    elif len(args) == 2:
        inp, h = args
        # Both hidden and cell state carry their batch dimension at dim 1,
        # regardless of batch_first.
        h = (h[0].unsqueeze(1), h[1].unsqueeze(1))
    batch_dim = 0 if kwargs['batch_first'] else 1
    kwargs.pop('batch_first')
    inp = inp.unsqueeze(batch_dim)
    single_batch_input_args = (inp, h)
    with freeze_rng_state():
        output = m(*single_batch_input_args, **kwargs)
    return (output[0].squeeze(batch_dim), (output[1][0].squeeze(1), output[1][1].squeeze(1)))
def no_batch_dim_reference_lstmcell(m, p, *args, **kwargs):
    """Reference for LSTMCell with unbatched inputs.

    Runs the cell on a batch of size one (input and both states
    unsqueezed at dim 0) under a frozen RNG state, then squeezes the batch
    dimension out of both returned states.
    """
    inp, (h0, c0) = args
    batched_args = (inp.unsqueeze(0), (h0.unsqueeze(0), c0.unsqueeze(0)))
    with freeze_rng_state():
        h_out, c_out = m(*batched_args, **kwargs)
    return (h_out.squeeze(0), c_out.squeeze(0))
def generate_regression_criterion_inputs(make_input):
    """Return no-batch-dim ModuleInputs for a regression criterion, one per
    reduction mode ('none', 'mean', 'sum')."""
    inputs = []
    for reduction in ('none', 'mean', 'sum'):
        inputs.append(
            ModuleInput(
                constructor_input=FunctionInput(reduction=reduction),
                forward_input=FunctionInput(make_input((4,)), make_input((4,))),
                reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True),
                desc=f'no_batch_dim_{reduction}'
            )
        )
    return inputs
def module_inputs_torch_nn_AvgPool1d(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.AvgPool1d: no-batch-dim, default,
    stride, and stride+padding configurations."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(kernel_size=2),
                               forward_input=FunctionInput(make_input((3, 6))),
                               desc='no_batch_dim',
                               reference_fn=no_batch_dim_reference_fn))
    samples.append(ModuleInput(constructor_input=FunctionInput(2),
                               forward_input=FunctionInput(make_input((2, 3, 6)))))
    samples.append(ModuleInput(constructor_input=FunctionInput((2,), (2,)),
                               forward_input=FunctionInput(make_input((2, 3, 6))),
                               desc='stride'))
    samples.append(ModuleInput(constructor_input=FunctionInput(2, 2, 1),
                               forward_input=FunctionInput(make_input((2, 3, 6))),
                               desc='stride_pad'))
    return samples
def module_inputs_torch_nn_AvgPool2d(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.AvgPool2d: no-batch-dim, default, stride,
    stride+padding, and the same configurations with divisor_override=1."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(constructor_input=FunctionInput((2, 2)),
                    forward_input=FunctionInput(make_input((3, 6, 6))),
                    desc='no_batch_dim',
                    reference_fn=no_batch_dim_reference_fn),
        ModuleInput(constructor_input=FunctionInput((2, 2)),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6)))),
        ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2)),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6))),
                    desc='stride'),
        ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2), (1, 1)),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6))),
                    desc='stride_pad'),
        # divisor_override=1 turns the average into a plain window sum.
        ModuleInput(constructor_input=FunctionInput((2, 2), divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6))),
                    desc='divisor'),
        ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2), divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6))),
                    desc='divisor_stride'),
        ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2), (1, 1), divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6))),
                    desc='divisor_stride_pad')]
def module_inputs_torch_nn_AvgPool3d(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.AvgPool3d.

    Covers no-batch-dim, default, stride, and stride+padding cases,
    several GPU-kernel-shape cases (descs mirror the CUDA code paths they
    were added for), and the same set again with divisor_override=1.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(constructor_input=FunctionInput((2, 2, 2)),
                    forward_input=FunctionInput(make_input((3, 4, 4, 4))),
                    desc='no_batch_dim',
                    reference_fn=no_batch_dim_reference_fn),
        ModuleInput(constructor_input=FunctionInput((2, 2, 2)),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4)))),
        ModuleInput(constructor_input=FunctionInput(2, (2, 2, 2)),
                    forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))),
                    desc='stride'),
        ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1)),
                    forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))),
                    desc='stride_pad'),
        ModuleInput(constructor_input=FunctionInput(4, 2, (1, 2, 1)),
                    forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))),
                    desc='stride_pad_gpu_fixedkw_output'),
        ModuleInput(constructor_input=FunctionInput((2, 4, 8), 1, (1, 1, 2)),
                    forward_input=FunctionInput(make_input((2, 3, 2, 4, 8))),
                    desc='stride_pad_gpu_general_output'),
        ModuleInput(constructor_input=FunctionInput(3, 1, 0),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))),
                    desc='stride1_pad0_gpu_input'),
        ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1)),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))),
                    desc='stride_pad_gpu_input_nooverlap'),
        # divisor_override=1 turns the average into a plain window sum.
        ModuleInput(constructor_input=FunctionInput((2, 2, 2), divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))),
                    desc='divisor'),
        ModuleInput(constructor_input=FunctionInput(2, (2, 2, 2), divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))),
                    desc='divisor_stride'),
        ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1), divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))),
                    desc='divisor_stride_pad'),
        ModuleInput(constructor_input=FunctionInput(4, 2, (1, 2, 1), divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))),
                    desc='divisor_stride_pad_gpu_fixedkw_output'),
        ModuleInput(constructor_input=FunctionInput((2, 4, 8), 1, (1, 1, 2), divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 2, 4, 8))),
                    desc='divisor_stride_pad_gpu_general_output'),
        ModuleInput(constructor_input=FunctionInput(3, 1, 0, divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))),
                    desc='divisor_stride1_pad0_gpu_input'),
        ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1), divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))),
                    desc='divisor_stride_pad_gpu_input_nooverlap')]
def module_inputs_torch_nn_AdaptiveAvgPool1d(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.AdaptiveAvgPool1d: single output size,
    no-batch-dim, and output size 1."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(3,),
                               forward_input=FunctionInput(make_input((1, 3, 5))),
                               desc='single'))
    samples.append(ModuleInput(constructor_input=FunctionInput(3,),
                               forward_input=FunctionInput(make_input((3, 5))),
                               reference_fn=no_batch_dim_reference_fn,
                               desc='no_batch_dim'))
    samples.append(ModuleInput(constructor_input=FunctionInput(1,),
                               forward_input=FunctionInput(make_input((1, 3, 5))),
                               desc='one_output'))
    return samples
def module_inputs_torch_nn_AdaptiveAvgPool2d(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.AdaptiveAvgPool2d: int, 1x1, tuple, and
    tuple-with-None output sizes, plus a no-batch-dim input."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((1, 3, 5, 6))),
                    desc='single'),
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((3, 5, 6))),
                    reference_fn=no_batch_dim_reference_fn,
                    desc='no_batch_dim'),
        ModuleInput(constructor_input=FunctionInput(1,),
                    forward_input=FunctionInput(make_input((1, 3, 5, 6))),
                    desc='single_1x1output'),
        ModuleInput(constructor_input=FunctionInput((3, 4)),
                    forward_input=FunctionInput(make_input((1, 3, 5, 6))),
                    desc='tuple'),
        # None keeps that spatial dimension's input size unchanged.
        ModuleInput(constructor_input=FunctionInput((3, None)),
                    forward_input=FunctionInput(make_input((1, 3, 5, 6))),
                    desc='tuple_none')]
def module_inputs_torch_nn_AdaptiveAvgPool3d(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.AdaptiveAvgPool3d: int, tuple, and
    tuple-with-None output sizes, a no-batch-dim input, and a last-dim
    shape case."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((2, 3, 5, 2, 7))),
                    desc='single'),
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((3, 5, 2, 7))),
                    reference_fn=no_batch_dim_reference_fn,
                    desc='no_batch_dim'),
        ModuleInput(constructor_input=FunctionInput((3, 4, 5)),
                    forward_input=FunctionInput(make_input((2, 3, 5, 3, 7))),
                    desc='tuple'),
        # None keeps that spatial dimension's input size unchanged.
        ModuleInput(constructor_input=FunctionInput((None, 4, 5)),
                    forward_input=FunctionInput(make_input((2, 3, 5, 3, 7))),
                    desc='tuple_none'),
        ModuleInput(constructor_input=FunctionInput((3, 2, 2)),
                    forward_input=FunctionInput(make_input((1, 1, 3, 2, 6))),
                    desc='last_dim')]
def module_inputs_torch_nn_AdaptiveMaxPool1d(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.AdaptiveMaxPool1d: a single batched case
    and a no-batch-dim case."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(3,),
                               forward_input=FunctionInput(make_input((1, 3, 5))),
                               desc='single'))
    samples.append(ModuleInput(constructor_input=FunctionInput(3,),
                               forward_input=FunctionInput(make_input((3, 5))),
                               reference_fn=no_batch_dim_reference_fn,
                               desc='no_batch_dim'))
    return samples
def module_inputs_torch_nn_AdaptiveMaxPool2d(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.AdaptiveMaxPool2d: int, tuple, and
    tuple-with-None output sizes, plus a no-batch-dim input."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((1, 3, 5, 6))),
                    desc='single'),
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((3, 5, 6))),
                    reference_fn=no_batch_dim_reference_fn,
                    desc='no_batch_dim'),
        ModuleInput(constructor_input=FunctionInput((3, 4)),
                    forward_input=FunctionInput(make_input((1, 3, 5, 6))),
                    desc='tuple'),
        # None keeps that spatial dimension's input size unchanged.
        ModuleInput(constructor_input=FunctionInput((3, None)),
                    forward_input=FunctionInput(make_input((1, 3, 5, 6))),
                    desc='tuple_none')]
def module_inputs_torch_nn_AdaptiveMaxPool3d(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.AdaptiveMaxPool3d: int, tuple, and
    tuple-with-None output sizes, a no-batch-dim input, and 'nonatomic'
    cases where the input size divides the output size evenly."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((2, 3, 5, 6, 7))),
                    desc='single'),
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((3, 5, 6, 7))),
                    reference_fn=no_batch_dim_reference_fn,
                    desc='no_batch_dim'),
        ModuleInput(constructor_input=FunctionInput((3, 4, 5)),
                    forward_input=FunctionInput(make_input((2, 3, 5, 6, 7))),
                    desc='tuple'),
        # None keeps that spatial dimension's input size unchanged.
        ModuleInput(constructor_input=FunctionInput((3, None, 5)),
                    forward_input=FunctionInput(make_input((2, 3, 5, 6, 7))),
                    desc='tuple_none'),
        ModuleInput(constructor_input=FunctionInput(3),
                    forward_input=FunctionInput(make_input((2, 3, 12, 9, 3))),
                    desc='single_nonatomic'),
        ModuleInput(constructor_input=FunctionInput((3, 4, 5)),
                    forward_input=FunctionInput(make_input((2, 3, 6, 4, 10))),
                    desc='tuple_nonatomic')]
def module_inputs_torch_nn_BatchNorm1d(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.BatchNorm1d.

    Positional constructor args are (num_features, eps, momentum, affine,
    track_running_stats). Covers 2d and 3d inputs, momentum=None (simple
    average), affine off, stats tracking off, and a zero-batch input.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(constructor_input=FunctionInput(10,),
                    forward_input=FunctionInput(make_input((4, 10))),
                    desc='affine'),
        ModuleInput(constructor_input=FunctionInput(5,),
                    forward_input=FunctionInput(make_input((4, 5, 3))),
                    desc='3d_input'),
        ModuleInput(constructor_input=FunctionInput(10, 1e-3, None),
                    forward_input=FunctionInput(make_input((4, 10))),
                    desc='affine_simple_average'),
        ModuleInput(constructor_input=FunctionInput(10, 1e-3, 0.3, False),
                    forward_input=FunctionInput(make_input((4, 10))),
                    desc='not_affine'),
        ModuleInput(constructor_input=FunctionInput(10, 1e-3, 0.3, True, False),
                    forward_input=FunctionInput(make_input((4, 10))),
                    desc='not_tracking_stats'),
        ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False),
                    forward_input=FunctionInput(make_input((4, 5, 3))),
                    desc='3d_input_not_affine'),
        ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False),
                    forward_input=FunctionInput(make_input((0, 5, 9))),
                    desc='zero_batch')]
def module_inputs_torch_nn_BatchNorm2d(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.BatchNorm2d.

    Positional constructor args are (num_features, eps, momentum, affine,
    track_running_stats). Covers momentum=None (simple average), explicit
    momentum, affine off, stats tracking off, and a zero-batch input.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6)))),
        ModuleInput(constructor_input=FunctionInput(3, 1e-3, None),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6))),
                    desc='2d_simple_average'),
        ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.8),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6))),
                    desc='momentum'),
        ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.8, False),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6))),
                    desc='not_affine'),
        ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.8, True, False),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6))),
                    desc='not_tracking_stats'),
        ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False),
                    forward_input=FunctionInput(make_input((0, 5, 2, 2))),
                    desc='zero_batch')]
def module_inputs_torch_nn_BatchNorm3d(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.BatchNorm3d.

    Positional constructor args are (num_features, eps, momentum, affine,
    track_running_stats). Covers momentum=None (simple average), explicit
    momentum, affine off, stats tracking off, and a zero-batch input.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4)))),
        ModuleInput(constructor_input=FunctionInput(3, 1e-3, None),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))),
                    desc='3d_simple_average'),
        ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.7),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))),
                    desc='momentum'),
        ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.7, False),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))),
                    desc='not_affine'),
        ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.7, True, False),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))),
                    desc='not_tracking_stats'),
        ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False),
                    forward_input=FunctionInput(make_input((0, 5, 2, 2, 2))),
                    desc='zero_batch')]
def module_error_inputs_torch_nn_BatchNorm1d_2d_3d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Error inputs shared by BatchNorm1d/2d/3d: non-positive ``eps``
    values must raise ValueError("eps must be positive") at forward time.

    The input shape is chosen to match whichever BatchNorm variant this
    module_info describes.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    if module_info.module_cls == torch.nn.BatchNorm1d:
        input_shape = (2, 10)
    elif module_info.module_cls == torch.nn.BatchNorm2d:
        input_shape = (2, 10, 5, 5)
    else:
        input_shape = (2, 10, 4, 4, 4)
    return [
        ErrorModuleInput(
            ModuleInput(
                constructor_input=FunctionInput(10, eps=-1.0),
                forward_input=FunctionInput(make_input(input_shape)),
            ),
            error_on=ModuleErrorEnum.FORWARD_ERROR,
            error_type=ValueError,
            error_regex="eps must be positive"
        ),
        ErrorModuleInput(
            ModuleInput(
                constructor_input=FunctionInput(10, eps=0.0),
                forward_input=FunctionInput(make_input(input_shape)),
            ),
            error_on=ModuleErrorEnum.FORWARD_ERROR,
            error_type=ValueError,
            error_regex="eps must be positive"
        ),
    ]
def module_inputs_torch_nn_ConvNd(module_info, device, dtype, requires_grad, training, **kwargs):
    """Shared ModuleInput builder for Conv1d/2d/3d and their lazy /
    transposed variants.

    kwargs:
        N (int): spatial dimensionality (1, 2, or 3).
        lazy (bool): lazy modules infer in_channels, so C_in is omitted
            from the constructor.
        transposed (bool): transposed convs do not support padding='same',
            so only the default-kwargs case is emitted for them.

    Emits the cartesian product of {batched, unbatched} x conv kwargs.
    """
    N = kwargs['N']
    lazy = kwargs.get('lazy', False)
    transposed = kwargs.get('transposed', False)
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    conv_kwargs_list = [{}] if transposed else [{}, {'padding': 'same'}]
    kernel_size, C_in, C_out = 3, 4, 5
    # Spatial extents 4, 5, ... so each dim of the input is distinct.
    input_no_batch_shape = (C_in,) + tuple(i + 3 for i in range(N))
    input_batch_shape = (2,) + input_no_batch_shape
    return [
        ModuleInput(constructor_input=(FunctionInput(C_out, kernel_size, **conv_kwargs) if lazy else
                                       FunctionInput(C_in, C_out, kernel_size, **conv_kwargs)),
                    forward_input=FunctionInput(make_input(
                        input_batch_shape if with_batch else input_no_batch_shape)),
                    desc=('' if with_batch else 'no_batch_dim'),
                    reference_fn=(None if with_batch else no_batch_dim_reference_fn))
        for with_batch, conv_kwargs in itertools.product([True, False], conv_kwargs_list)
    ]
def module_inputs_torch_nn_CosineEmbeddingLoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.CosineEmbeddingLoss: reduction modes and a
    non-default margin, checked against cosineembeddingloss_reference.
    Targets are random signs (+1/-1)."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
    cases: list[tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_mean', {'reduction': 'mean'}),
        ('reduction_none', {'reduction': 'none'}),
        ('margin', {'margin': 0.7})
    ]
    module_inputs = []
    for desc, constructor_kwargs in cases:
        # Bind constructor_kwargs as a default to avoid the late-binding
        # closure pitfall inside the loop.
        def reference_fn(m, p, i1, i2, t, constructor_kwargs=constructor_kwargs):
            return cosineembeddingloss_reference(i1, i2, t, **constructor_kwargs)
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input((15, 10)), make_input((15, 10)),
                                                    make_target((15,)).sign()),
                        desc=desc,
                        reference_fn=reference_fn)
        )
    return module_inputs
def module_inputs_torch_nn_ELU(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.ELU with alpha=2: 3d, scalar, no-batch-dim,
    and 4d inputs. The 3d case checks the closed form
    alpha * (exp(x) - 1) for x < 0, x otherwise."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(constructor_input=FunctionInput(alpha=2.),
                    forward_input=FunctionInput(make_input((3, 2, 5))),
                    reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2 * (i.exp() - 1))),
        ModuleInput(constructor_input=FunctionInput(alpha=2.),
                    forward_input=FunctionInput(make_input(())),
                    desc='scalar'),
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(make_input((3,))),
                    desc='no_batch_dim',
                    reference_fn=no_batch_dim_reference_fn),
        ModuleInput(constructor_input=FunctionInput(alpha=2.),
                    forward_input=FunctionInput(make_input((2, 3, 2, 5))),
                    desc='4d_input')]
def module_inputs_torch_nn_CELU(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.CELU with alpha=2: 3d, scalar, and
    no-batch-dim inputs. References check the closed form
    alpha * (exp(x / alpha) - 1) for x < 0, x otherwise."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(constructor_input=FunctionInput(alpha=2.),
                    forward_input=FunctionInput(make_input((3, 2, 5))),
                    reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2. * ((.5 * i).exp() - 1))),
        ModuleInput(constructor_input=FunctionInput(alpha=2.),
                    forward_input=FunctionInput(make_input(())),
                    reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2. * ((.5 * i).exp() - 1)),
                    desc='scalar'),
        ModuleInput(constructor_input=FunctionInput(alpha=2.),
                    forward_input=FunctionInput(make_input((3,))),
                    desc='no_batch_dim',
                    reference_fn=no_batch_dim_reference_fn)]
def module_inputs_torch_nn_GLU(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.GLU: default dim, explicit dim=1, and a
    no-batch-dim input."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(make_input((5, 6)))))
    samples.append(ModuleInput(constructor_input=FunctionInput(1),
                               forward_input=FunctionInput(make_input((5, 6, 7))),
                               desc='dim'))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(make_input((4,))),
                               desc='no_batch_dim',
                               reference_fn=no_batch_dim_reference_fn))
    return samples
def module_inputs_torch_nn_GELU(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.GELU: scalar and 3d inputs with the exact
    ('none' approximation) erf-based reference, plus a no-batch-dim case."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(constructor_input=FunctionInput('none'),
                    forward_input=FunctionInput(make_input(())),
                    reference_fn=lambda m, p, x, *_: x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))),
                    desc='scalar'),
        ModuleInput(constructor_input=FunctionInput('none'),
                    forward_input=FunctionInput(make_input((3, 2, 5))),
                    reference_fn=lambda m, p, x, *_: x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))),
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(make_input((3,))),
                    desc='no_batch_dim',
                    reference_fn=no_batch_dim_reference_fn)]
def module_inputs_torch_nn_ReLU(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.ReLU: scalar, no-batch-dim, and 4d/5d
    (channels-last memory format) inputs."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(make_input(())),
                               desc='scalar'))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(make_input(4)),
                               reference_fn=no_batch_dim_reference_fn,
                               desc='no_batch_dim'))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(make_input((2, 3, 4, 5))),
                               desc='channels_last_mem_format'))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))),
                               desc='channels_last_3d_mem_format'))
    return samples
def module_inputs_torch_nn_ReLU6(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.ReLU6: scalar, no-batch-dim, and 4d/5d
    (channels-last memory format) inputs."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(make_input(())),
                    desc='scalar'),
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(make_input(4)),
                    reference_fn=no_batch_dim_reference_fn,
                    desc='no_batch_dim'),
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(make_input((2, 3, 4, 5))),
                    desc='channels_last_mem_format'),
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))),
                    desc='channels_last_3d_mem_format')]
def module_inputs_torch_nn_LeakyReLU(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.LeakyReLU: default slope, no-batch-dim,
    explicit negative slopes (0.5 and 0.0), and a scalar input."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(make_input((3, 2, 5)))),
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(make_input(4)),
                    reference_fn=no_batch_dim_reference_fn,
                    desc='no_batch_dim'),
        ModuleInput(constructor_input=FunctionInput(0.5),
                    forward_input=FunctionInput(make_input((3, 2, 5))),
                    desc='with_negval'),
        # negative_slope=0 makes LeakyReLU behave exactly like ReLU.
        ModuleInput(constructor_input=FunctionInput(0.0),
                    forward_input=FunctionInput(make_input((10, 10))),
                    desc='with_zero_negval'),
        ModuleInput(constructor_input=FunctionInput(0.5),
                    forward_input=FunctionInput(make_input(())),
                    desc='with_negval_scalar')]
def module_inputs_torch_nn_PReLU(module_info, device, dtype, requires_grad, training, **kwargs):
    """ModuleInputs for torch.nn.PReLU: scalar, no-batch-dim, and 1d/2d/3d
    inputs with one shared slope or num_parameters=3 per-channel slopes.

    NOTE(review): the references use only the first learned slope
    (p[0][0]) even in the multiparam cases -- presumably valid because all
    slopes share the same initial value; confirm against the harness.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(make_input(())),
                    desc='scalar'),
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(make_input(4)),
                    reference_fn=no_batch_dim_reference_fn,
                    desc='no_batch_dim'),
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(make_input((2, 3, 4))),
                    reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
                    desc='1d'),
        ModuleInput(constructor_input=FunctionInput(3),
                    forward_input=FunctionInput(make_input((2, 3, 4))),
                    reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
                    desc='1d_multiparam'),
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(make_input((2, 3, 4, 5))),
                    reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
                    desc='2d'),
        ModuleInput(constructor_input=FunctionInput(3),
                    forward_input=FunctionInput(make_input((2, 3, 4, 5))),
                    reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
                    desc='2d_multiparam'),
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(make_input((2, 3, 4, 5, 6))),
                    reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
                    desc='3d'),
        ModuleInput(constructor_input=FunctionInput(3),
                    forward_input=FunctionInput(make_input((2, 3, 4, 5, 6))),
                    reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
                    desc='3d_multiparam')]
def module_inputs_torch_nn_SELU(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.SELU: batched, unbatched, and scalar tensors."""
    def _mk(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    samples = [
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(_mk((3, 2, 5)))),
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(_mk(4)),
                    reference_fn=no_batch_dim_reference_fn,
                    desc='no_batch_dim'),
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(_mk(())),
                    desc='scalar'),
    ]
    return samples
def module_inputs_torch_nn_SiLU(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.SiLU; the reference is ``x * sigmoid(x)``."""
    def _mk(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    def _silu_ref(m, p, x, *_):
        return x * torch.sigmoid(x)

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk(())),
                               reference_fn=_silu_ref,
                               desc='scalar'))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk(4)),
                               reference_fn=no_batch_dim_reference_fn,
                               desc='no_batch_dim'))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk((5, 6, 7))),
                               reference_fn=_silu_ref))
    return samples
def module_inputs_torch_nn_Softmax(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.Softmax over positive, zero, and negative dims."""
    def _mk(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    samples = []
    # exp(x) normalized over dim 1, broadcast back to the input shape.
    samples.append(ModuleInput(constructor_input=FunctionInput(1),
                               forward_input=FunctionInput(_mk((10, 20))),
                               reference_fn=lambda m, p, i: torch.exp(i).div(torch.exp(i).sum(1, True).expand(10, 20))))
    samples.append(ModuleInput(constructor_input=FunctionInput(0),
                               forward_input=FunctionInput(_mk(())),
                               reference_fn=lambda m, p, i: torch.exp(i).div(torch.exp(i).sum(0, True)),
                               desc='scalar'))
    samples.append(ModuleInput(constructor_input=FunctionInput(-1),
                               forward_input=FunctionInput(_mk((4, 5))),
                               reference_fn=no_batch_dim_reference_fn,
                               desc='no_batch_dim'))
    return samples
def module_inputs_torch_nn_Softmax2d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.Softmax2d (softmax over the channel dim)."""
    def _mk(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk((1, 3, 10, 20))),
                               reference_fn=lambda m, p, i: torch.exp(i).div(torch.exp(i).sum(1, False))))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk((3, 4, 5))),
                               reference_fn=no_batch_dim_reference_fn,
                               desc='no_batch_dim'))
    return samples
def module_inputs_torch_nn_LogSoftmax(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.LogSoftmax.

    The references compute ``log(softmax(x))`` explicitly with in-place ops on
    freshly allocated ``torch.exp(i)`` tensors, so the input is not mutated.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(constructor_input=FunctionInput(1),
                    forward_input=FunctionInput(make_input((10, 20))),
                    reference_fn=lambda m, p, i: torch.exp(i).div_(torch.exp(i).sum(1, True).expand(10, 20)).log_()),
        ModuleInput(constructor_input=FunctionInput(1),
                    forward_input=FunctionInput(make_input((1, 3, 10, 20))),
                    reference_fn=lambda m, p, i: torch.exp(i).div_(torch.exp(i).sum(1, False)).log_(),
                    desc='multiparam'),
        ModuleInput(constructor_input=FunctionInput(0),
                    forward_input=FunctionInput(make_input(())),
                    reference_fn=lambda m, p, i: torch.exp(i).div_(torch.exp(i).sum(0, False)).log_(),
                    desc='multiparam_scalar'),
        ModuleInput(constructor_input=FunctionInput(-1),
                    forward_input=FunctionInput(make_input((4, 5))),
                    reference_fn=no_batch_dim_reference_fn,
                    desc='no_batch_dim')]
def module_inputs_torch_nn_Softmin(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.Softmin on 2d, 4d, scalar, and unbatched tensors."""
    def _mk(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(1),
                               forward_input=FunctionInput(_mk((10, 20)))))
    samples.append(ModuleInput(constructor_input=FunctionInput(1),
                               forward_input=FunctionInput(_mk((2, 3, 5, 10))),
                               desc='multidim'))
    samples.append(ModuleInput(constructor_input=FunctionInput(0),
                               forward_input=FunctionInput(_mk(())),
                               desc='scalar'))
    samples.append(ModuleInput(constructor_input=FunctionInput(-1),
                               forward_input=FunctionInput(_mk((3, 4, 10))),
                               reference_fn=no_batch_dim_reference_fn,
                               desc='no_batch_dim'))
    return samples
def module_inputs_torch_nn_Softplus(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.Softplus.

    Covers the default beta, a custom beta, and a custom (beta, threshold)
    pair.  With a threshold, the reference switches to the identity where
    ``beta * x > threshold`` and otherwise uses ``log1p(exp(beta * x)) / beta``.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(make_input((10, 20))),
                    reference_fn=lambda m, p, i: torch.log1p(torch.exp(i))),
        ModuleInput(constructor_input=FunctionInput(2),
                    forward_input=FunctionInput(make_input((10, 20))),
                    reference_fn=lambda m, p, i: 1. / 2. * torch.log1p(torch.exp(2 * i)),
                    desc='beta'),
        # threshold=-100: blend identity and softplus branches elementwise via
        # 0/1 masks built from the comparison results.
        ModuleInput(constructor_input=FunctionInput(2, -100),
                    forward_input=FunctionInput(make_input((10, 20))),
                    reference_fn=(
                        lambda m, p, i: ((i * 2) > -100).type_as(i) * i
                        + ((i * 2) <= -100).type_as(i) * 1. / 2. * torch.log1p(torch.exp(2 * i))),
                    desc='beta_threshold'),
        ModuleInput(constructor_input=FunctionInput(2, -100),
                    forward_input=FunctionInput(make_input(())),
                    reference_fn=(
                        lambda m, p, i: ((i * 2) > -100).type_as(i) * i
                        + ((i * 2) <= -100).type_as(i) * 1. / 2. * torch.log1p(torch.exp(2 * i))),
                    desc='beta_threshold_scalar'),
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(make_input(4)),
                    reference_fn=no_batch_dim_reference_fn,
                    desc='no_batch_dim')]
def module_inputs_torch_nn_Softshrink(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.Softshrink with default and custom lambda."""
    def _mk(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk((3, 2, 5)))))
    samples.append(ModuleInput(constructor_input=FunctionInput(1,),
                               forward_input=FunctionInput(_mk((3, 2, 5))),
                               desc='lambda'))
    samples.append(ModuleInput(constructor_input=FunctionInput(1,),
                               forward_input=FunctionInput(_mk(())),
                               desc='lambda_scalar'))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk(4)),
                               reference_fn=no_batch_dim_reference_fn,
                               desc='no_batch_dim'))
    return samples
def module_inputs_torch_nn_Softsign(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.Softsign; the reference is ``x / (1 + |x|)``."""
    def _mk(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    def _softsign_ref(m, p, i):
        return i.div(1 + torch.abs(i))

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk((3, 2, 5))),
                               reference_fn=_softsign_ref))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk(())),
                               reference_fn=_softsign_ref,
                               desc='scalar'))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk(4)),
                               reference_fn=no_batch_dim_reference_fn,
                               desc='no_batch_dim'))
    return samples
def module_inputs_torch_nn_Tanh(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.Tanh: 4d, scalar, and unbatched tensors."""
    def _mk(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk((2, 3, 4, 5)))))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk(())),
                               desc='scalar'))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk(4)),
                               reference_fn=no_batch_dim_reference_fn,
                               desc='no_batch_dim'))
    return samples
def module_inputs_torch_nn_Tanhshrink(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.Tanhshrink: 4d, scalar, and unbatched tensors."""
    def _mk(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk((2, 3, 4, 5)))))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk(())),
                               desc='scalar'))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk(4)),
                               reference_fn=no_batch_dim_reference_fn,
                               desc='no_batch_dim'))
    return samples
def module_inputs_torch_nn_Threshold(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.Threshold with assorted (threshold, value) pairs."""
    def _mk(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(2., 1.),
                               forward_input=FunctionInput(_mk((2, 3, 4, 5))),
                               desc='threshold_value'))
    samples.append(ModuleInput(constructor_input=FunctionInput(2., 10.),
                               forward_input=FunctionInput(_mk((2, 3, 4, 5))),
                               desc='large_value'))
    samples.append(ModuleInput(constructor_input=FunctionInput(2., 1.),
                               forward_input=FunctionInput(_mk(())),
                               desc='threshold_value_scalar'))
    samples.append(ModuleInput(constructor_input=FunctionInput(2., 1.),
                               forward_input=FunctionInput(_mk(4)),
                               reference_fn=no_batch_dim_reference_fn,
                               desc='no_batch_dim'))
    return samples
def module_inputs_torch_nn_Mish(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.Mish; the reference is ``x * tanh(softplus(x))``."""
    def _mk(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    def _mish_ref(m, p, i):
        return i * torch.tanh(F.softplus(i))

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk((5, 6, 7))),
                               reference_fn=_mish_ref))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk(())),
                               reference_fn=_mish_ref,
                               desc='scalar'))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk(4)),
                               reference_fn=no_batch_dim_reference_fn,
                               desc='no_batch_dim'))
    return samples
def module_inputs_torch_nn_L1Loss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.L1Loss (mean absolute error).

    The reference computes the mean of ``|input - target|`` over all elements.
    The shared regression-criterion cases are appended as well.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(make_input((2, 3, 4)),
                                                make_input((2, 3, 4))),
                    reference_fn=lambda m, p, i, t: 1. / i.numel() * sum((a - b).abs().sum()
                                                                         for a, b in zip(i, t, strict=True))),
        ModuleInput(constructor_input=FunctionInput(),
                    forward_input=FunctionInput(make_input(()), make_input(())),
                    reference_fn=lambda m, p, i, t: 1. / i.numel() * (i - t).abs().sum(),
                    desc='scalar')] + generate_regression_criterion_inputs(make_input)
def module_inputs_torch_nn_SmoothL1Loss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.SmoothL1Loss across reductions, with 2d and scalar tensors."""
    def _mk(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    reduction_cases: list[tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_mean', {'reduction': 'mean'}),
        ('reduction_none', {'reduction': 'none'}),
    ]

    samples = []
    for desc, ctor_kwargs in reduction_cases:
        # Bind ctor_kwargs at definition time so each closure keeps its own reduction.
        def ref(m, p, i, t, ctor_kwargs=ctor_kwargs):
            return smoothl1loss_reference(i, t, **ctor_kwargs)

        samples.append(
            ModuleInput(constructor_input=FunctionInput(**ctor_kwargs),
                        forward_input=FunctionInput(_mk((5, 10)), _mk((5, 10))),
                        desc=desc,
                        reference_fn=ref))
        samples.append(
            ModuleInput(constructor_input=FunctionInput(**ctor_kwargs),
                        forward_input=FunctionInput(_mk(()), _mk(())),
                        desc=f'scalar_{desc}',
                        reference_fn=ref))
    return samples
def module_inputs_torch_nn_BCELoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.BCELoss.

    Inputs are kept strictly inside (0, 1) so the reference log terms are
    finite; targets are random booleans cast to ``dtype``.  Covers all
    reductions, a per-element weight, and a scalar weight.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
    make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)

    cases: list[tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_mean', {'reduction': 'mean'}),
        ('reduction_none', {'reduction': 'none'}),
        ('weights', {'weight': make_weight((10,))}),
    ]

    def bce_loss_reference_fn(m, p, i, t, reduction='mean', weight=None):
        # Standard binary cross-entropy: -(t*log(i) + (1-t)*log(1-i)).
        result = -(t * i.log() + (1 - t) * (1 - i).log())

        if weight is not None:
            result = result * weight

        if reduction == 'none':
            return result
        elif reduction == 'mean':
            return result.sum() / i.numel()
        else:
            return result.sum()

    module_inputs = []
    for desc, constructor_kwargs in cases:
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input((15, 10), low=1e-2, high=1 - 1e-2),
                                                    make_target((15, 10)).gt(0).to(dtype)),
                        desc=desc,
                        reference_fn=partial(bce_loss_reference_fn, **constructor_kwargs))
        )

    scalar_weight = make_weight(())
    module_inputs.append(
        ModuleInput(constructor_input=FunctionInput(weight=scalar_weight),
                    forward_input=FunctionInput(make_input((), low=1e-2, high=1 - 1e-2),
                                                make_target(()).gt(0).to(dtype)),
                    desc='scalar_weight',
                    reference_fn=partial(bce_loss_reference_fn, weight=scalar_weight))
    )

    return module_inputs
def module_inputs_torch_nn_BCEWithLogitsLoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.BCEWithLogitsLoss.

    Covers all reductions plus per-element and scalar weights.  The reference
    uses the numerically stable log-sum-exp formulation of BCE on logits.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
    make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)

    cases: list[tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_mean', {'reduction': 'mean'}),
        ('reduction_none', {'reduction': 'none'}),
        ('weights', {'weight': make_weight((10,))}),
        ('scalar_weights', {'weight': make_weight(())})
    ]

    def bce_withlogitsloss_reference_fn(m, p, i, t, reduction='mean', weight=None):
        # TODO: add pos_weight to the definition here and corresponding SampleInputs
        # Stable form: (1-t)*i + max(-i, 0) + log(exp(-max) + exp(-i-max)).
        max_val = (-i).clamp(min=0)
        result = (1 - t).mul_(i).add_(max_val).add_((-max_val).exp_().add_((-i - max_val).exp_()).log_())

        if weight is not None:
            result = result * weight

        if reduction == 'none':
            return result
        elif reduction == 'mean':
            return result.sum() / i.numel()
        else:
            return result.sum()

    module_inputs = []
    for desc, constructor_kwargs in cases:
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input((15, 10), low=1e-2, high=1 - 1e-2),
                                                    make_target((15, 10)).gt(0).to(dtype)),
                        desc=desc,
                        reference_fn=partial(bce_withlogitsloss_reference_fn, **constructor_kwargs))
        )

    return module_inputs
def module_inputs_torch_nn_CrossEntropyLoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.CrossEntropyLoss.

    Takes the cross product of all reductions with weight / ignore_index /
    label_smoothing options, over 2d-6d inputs with class-index targets.
    Probability (soft) targets are also generated, but only when ignore_index
    is unset since ignore_index is undefined for probability targets.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False)
    make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)

    reductions: list[str] = ['mean', 'sum', 'none']
    cases: list[tuple[str, dict]] = [
        ('', {}),
        ('weights', {'weight': make_weight((3,))}),
        ('ignore_index', {'ignore_index': 1}),
        ('label_smoothing', {'label_smoothing': 0.15}),
        ('ignore_index_label_smoothing', {'ignore_index': 1, 'label_smoothing': 0.15})
    ]

    module_inputs = []
    for reduction, (desc, constructor_kwargs) in product(reductions, cases):
        # Bind reduction/constructor_kwargs as defaults so each closure keeps
        # the values from its own iteration.
        def reference_fn(m, p, i, t, reduction=reduction, constructor_kwargs=constructor_kwargs):
            return cross_entropy_loss_reference(i, t, reduction=reduction, **constructor_kwargs)

        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs),
                        forward_input=FunctionInput(make_input((2, 3, 5, 5)),
                                                    make_target((2, 5, 5), low=0, high=3)),
                        desc=f"4d_{desc}_{reduction}",
                        reference_fn=reference_fn)
        )
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs),
                        forward_input=FunctionInput(make_input((2, 3, 5)),
                                                    make_target((2, 5), low=0, high=3)),
                        desc=f"3d_{desc}_{reduction}",
                        reference_fn=reference_fn)
        )
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs),
                        forward_input=FunctionInput(make_input((2, 3)),
                                                    make_target((2), low=0, high=3)),
                        desc=f"2d_{desc}_{reduction}",
                        reference_fn=reference_fn)
        )
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs),
                        forward_input=FunctionInput(make_input((2, 3, 5, 5, 2, 2)),
                                                    make_target((2, 5, 5, 2, 2), low=0, high=3)),
                        desc=f"higher_dim_{desc}_{reduction}",
                        reference_fn=reference_fn)
        )

        # Probability targets are only valid without ignore_index.
        if constructor_kwargs.get('ignore_index', None) is None:
            module_inputs.append(
                ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs),
                            forward_input=FunctionInput(make_input((5, 3, 4, 2)),
                                                        make_input((5, 3, 4, 2)).softmax(dim=1)),
                            desc=f"4d_prob_target_{desc}_{reduction}",
                            reference_fn=reference_fn)
            )
            module_inputs.append(
                ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs),
                            forward_input=FunctionInput(make_input((5, 3, 4)),
                                                        make_input((5, 3, 4)).softmax(dim=1)),
                            desc=f"3d_prob_target_{desc}_{reduction}",
                            reference_fn=reference_fn)
            )
            module_inputs.append(
                ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs),
                            forward_input=FunctionInput(make_input((5, 3)),
                                                        make_input((5, 3)).softmax(dim=1)),
                            desc=f"2d_prob_target_{desc}_{reduction}",
                            reference_fn=reference_fn)
            )
            module_inputs.append(
                ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs),
                            forward_input=FunctionInput(make_input((2, 3, 5, 5, 2, 2)),
                                                        make_input((2, 3, 5, 5, 2, 2)).softmax(dim=1)),
                            desc=f"higher_dim_prob_target_{desc}_{reduction}",
                            reference_fn=reference_fn)
            )

        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs),
                        forward_input=FunctionInput(make_input((3,)),
                                                    make_target((), low=0, high=3)),
                        desc=f"no_batch_dim_{desc}_{reduction}",
                        reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True))
        )

    return module_inputs
def module_inputs_torch_nn_CTCLoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.CTCLoss.

    Takes the cross product of int/long target dtypes with all reductions and
    a non-default blank index, passing lengths both as plain int tuples and as
    tensors, with 2d and concatenated 1d target layouts.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_target = partial(make_tensor, device=device, requires_grad=False)

    cases: list[tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_mean', {'reduction': 'mean'}),
        ('reduction_none', {'reduction': 'none'}),
        ('blank', {'blank': 14})
    ]
    target_dtypes = [torch.int, torch.long]

    module_inputs = []
    for target_dtype, (desc, constructor_kwargs) in product(target_dtypes, cases):
        def reference_fn(m, p, i, t, il, tl, constructor_kwargs=constructor_kwargs):
            return ctcloss_reference(i, t, il, tl, **constructor_kwargs)

        # Keep generated targets away from the blank index: labels are drawn
        # from [low, high) chosen so the blank class never appears.
        blank = constructor_kwargs.get('blank', 0)
        low = 0 if blank == 14 else 1
        high = 14 if blank == 14 else 15

        module_inputs.append(
            ModuleInput(
                constructor_input=FunctionInput(**constructor_kwargs),
                forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2),
                                            make_target((3, 30), dtype=target_dtype, low=low, high=high),
                                            (50, 50, 50), (30, 25, 20)),
                desc=f'{desc}_lengths_intlists',
                reference_fn=reference_fn)
        )
        module_inputs.append(
            ModuleInput(
                constructor_input=FunctionInput(**constructor_kwargs),
                forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2),
                                            make_target((3, 30), dtype=target_dtype, low=low, high=high),
                                            torch.tensor((50, 50, 50), device=device),
                                            torch.tensor((30, 25, 20), device=device)),
                desc=f'{desc}_lengths_tensors',
                reference_fn=reference_fn)
        )
        module_inputs.append(
            ModuleInput(
                constructor_input=FunctionInput(**constructor_kwargs),
                forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2),
                                            make_target((30 + 25 + 20,), dtype=target_dtype, low=low, high=high),
                                            (50, 50, 50), (30, 25, 20)),
                desc=f'{desc}_1d_target_lengths_intlists',
                reference_fn=reference_fn)
        )
        module_inputs.append(
            ModuleInput(
                constructor_input=FunctionInput(**constructor_kwargs),
                forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2),
                                            make_target((30 + 25 + 20,), dtype=target_dtype, low=low, high=high),
                                            torch.tensor((50, 50, 50), device=device),
                                            torch.tensor((30, 25, 20), device=device)),
                desc=f'{desc}_1d_target_lengths_tensors',
                reference_fn=reference_fn)
        )

    return module_inputs
def module_inputs_torch_nn_GroupNorm(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.GroupNorm.

    Constructor args are (num_groups, num_channels, eps[, affine]).  Cases
    include the degenerate configurations equivalent to InstanceNorm
    (num_groups == num_channels) and LayerNorm (num_groups == 1).
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(
            constructor_input=FunctionInput(3, 6, 1e-3),
            forward_input=FunctionInput(make_input((4, 6, 5))),
            desc='1d_affine'),
        ModuleInput(
            constructor_input=FunctionInput(3, 12, 1e-3),
            forward_input=FunctionInput(make_input((4, 12))),
            desc='1d_affine_GN'),
        ModuleInput(
            constructor_input=FunctionInput(1, 6, 1e-3),
            forward_input=FunctionInput(make_input((150, 6))),
            desc='1d_affine_large_batch'),
        ModuleInput(
            constructor_input=FunctionInput(5, 5, 1e-3, False),
            forward_input=FunctionInput(make_input((4, 5, 5))),
            desc='1d_no_affine_IN'),
        ModuleInput(
            constructor_input=FunctionInput(1, 10, 1e-3, False),
            forward_input=FunctionInput(make_input((4, 10))),
            desc='1d_no_affine_LN'),
        ModuleInput(
            constructor_input=FunctionInput(3, 6, 1e-3),
            forward_input=FunctionInput(make_input((4, 6, 2, 3))),
            desc='2d_affine'),
        ModuleInput(
            constructor_input=FunctionInput(3, 3, 1e-3, False),
            forward_input=FunctionInput(make_input((4, 3, 2, 3))),
            desc='2d_no_affine_IN'),
        ModuleInput(
            constructor_input=FunctionInput(1, 3, 1e-3, False),
            forward_input=FunctionInput(make_input((4, 3, 2, 3))),
            desc='2d_no_affine_LN'),
    ]
def module_error_inputs_torch_nn_GroupNorm(module_info, device, dtype, requires_grad, training, **kwargs):
    """
    Error inputs for GroupNorm that test error messages include actual values.
    """
    # (num_groups, num_channels) pairs where channels are not divisible by groups,
    # so construction must fail with a message quoting both numbers.
    bad_combos = [
        (3, 10),
        (5, 13),
    ]
    return [
        ErrorModuleInput(
            ModuleInput(
                constructor_input=FunctionInput(groups, channels),
                forward_input=FunctionInput(),  # construction fails before any forward
            ),
            error_on=ModuleErrorEnum.CONSTRUCTION_ERROR,
            error_type=ValueError,
            error_regex=fr"num_channels \({channels}\) must be divisible by num_groups \({groups}\)",
        )
        for groups, channels in bad_combos
    ]
def module_inputs_torch_nn_Hardshrink(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.Hardshrink with a custom lambda and default config."""
    def _mk(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(2.),
                               forward_input=FunctionInput(_mk((4, 3, 2, 4)))))
    samples.append(ModuleInput(constructor_input=FunctionInput(2.),
                               forward_input=FunctionInput(_mk(())),
                               desc='scalar'))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk(4)),
                               reference_fn=no_batch_dim_reference_fn,
                               desc='no_batch_dim'))
    return samples
def module_inputs_torch_nn_Hardswish(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.Hardswish: unbatched and 4d tensors."""
    def _mk(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk(4)),
                               reference_fn=no_batch_dim_reference_fn,
                               desc='no_batch_dim'))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk((2, 3, 2, 5))),
                               desc='4d_input'))
    return samples
def module_inputs_torch_nn_Hardtanh(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.Hardtanh; the default reference clamps to [-1, 1]."""
    def _mk(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    def _clamp_ref(m, p, i):
        return i.clamp(-1, 1)

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk((3, 2, 5))),
                               reference_fn=_clamp_ref))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk(())),
                               reference_fn=_clamp_ref,
                               desc='scalar'))
    samples.append(ModuleInput(constructor_input=FunctionInput(),
                               forward_input=FunctionInput(_mk(4)),
                               reference_fn=no_batch_dim_reference_fn,
                               desc='no_batch_dim'))
    return samples
def module_inputs_torch_nn_HingeEmbeddingLoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.HingeEmbeddingLoss.

    Covers all reductions plus a non-default margin; targets are random +/-1
    labels built via ``gt(0) * 2 - 1``.  Each case also gets a scalar variant.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)

    cases: list[tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_mean', {'reduction': 'mean'}),
        ('reduction_none', {'reduction': 'none'}),
        ('margin', {'margin': 0.5})
    ]

    module_inputs = []
    for desc, constructor_kwargs in cases:
        # Default-arg binding keeps each iteration's kwargs in its own closure.
        def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs):
            return hingeembeddingloss_reference(i, t, **constructor_kwargs)

        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input((10,)),
                                                    make_target((10,)).gt(0).to(dtype).mul_(2).sub_(1)),
                        desc=desc,
                        reference_fn=reference_fn)
        )
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input(()),
                                                    make_target(()).gt(0).to(dtype).mul_(2).sub_(1)),
                        desc=f'scalar_{desc}',
                        reference_fn=reference_fn)
        )

    return module_inputs
def module_inputs_torch_nn_HuberLoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.HuberLoss across all reduction modes."""
    def _mk(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    reduction_cases: list[tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_mean', {'reduction': 'mean'}),
        ('reduction_none', {'reduction': 'none'}),
    ]

    def _sample(desc, ctor_kwargs):
        # Bind ctor_kwargs at definition time so each sample validates its own reduction.
        def ref(m, p, i, t, ctor_kwargs=ctor_kwargs):
            return huberloss_reference(i, t, **ctor_kwargs)

        return ModuleInput(constructor_input=FunctionInput(**ctor_kwargs),
                           forward_input=FunctionInput(_mk((5, 10)), _mk((5, 10))),
                           desc=desc,
                           reference_fn=ref)

    return [_sample(desc, kw) for desc, kw in reduction_cases]
def module_inputs_torch_nn_InstanceNormNd(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs shared by InstanceNorm1d/2d/3d (and their Lazy variants).

    ``kwargs['N']`` selects the dimensionality; ``kwargs['lazy']`` drops the
    num_features constructor argument, which Lazy modules infer from input.
    Covers batched and unbatched inputs, with and without running stats.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    lazy = kwargs.get('lazy', False)
    N = kwargs['N']
    num_features, eps, momentum, affine, track_running_stats = 3, 1e-3, 0.3, False, True
    # Unbatched input shapes keyed by spatial dimensionality N.
    input_no_batch_shape_dict = {1: (3, 15), 2: (3, 6, 6), 3: (3, 4, 4, 4)}
    input_no_batch_shape = input_no_batch_shape_dict[N]
    input_batch_shape = (4,) + input_no_batch_shape

    return [
        ModuleInput(
            constructor_input=(
                FunctionInput(eps, momentum) if lazy else FunctionInput(num_features, eps, momentum)
            ),
            forward_input=FunctionInput(make_input(input_batch_shape))),
        ModuleInput(
            constructor_input=(
                FunctionInput(eps, momentum, affine, track_running_stats) if lazy else
                FunctionInput(num_features, eps, momentum, affine, track_running_stats)
            ),
            forward_input=FunctionInput(make_input(input_batch_shape)),
            desc='tracking_stats'),
        ModuleInput(
            constructor_input=(
                FunctionInput(eps, momentum) if lazy else FunctionInput(num_features, eps, momentum)
            ),
            forward_input=FunctionInput(make_input(input_no_batch_shape)),
            reference_fn=no_batch_dim_reference_fn,
            desc='tracking_stats_no_batch_dim'),
        ModuleInput(
            constructor_input=(
                FunctionInput(eps, momentum, affine, track_running_stats) if lazy else
                FunctionInput(num_features, eps, momentum, affine, track_running_stats)
            ),
            forward_input=FunctionInput(make_input(input_no_batch_shape)),
            reference_fn=no_batch_dim_reference_fn,
            desc='no_batch_dim')
    ]
def module_inputs_torch_nn_LayerNorm(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.LayerNorm.

    Constructor args are (normalized_shape, eps[, elementwise_affine]).
    Covers 1d and 3d normalized shapes, with/without affine parameters,
    an empty-batch input, and affine-without-bias.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    return [
        ModuleInput(
            constructor_input=FunctionInput([5], 1e-3),
            forward_input=FunctionInput(make_input((4, 5, 5))),
            desc='1d_elementwise_affine'),
        ModuleInput(
            constructor_input=FunctionInput([5], 1e-3),
            forward_input=FunctionInput(make_input((128, 5, 5))),
            desc='1d_elementwise_affine_large_batch'),
        ModuleInput(
            constructor_input=FunctionInput([5], 1e-3, False),
            forward_input=FunctionInput(make_input((4, 5, 5))),
            desc='1d_no_elementwise_affine'),
        ModuleInput(
            constructor_input=FunctionInput([2, 2, 5], 1e-3),
            forward_input=FunctionInput(make_input((4, 2, 2, 5))),
            desc='3d_elementwise_affine'),
        ModuleInput(
            constructor_input=FunctionInput([2, 2, 5], 1e-3, False),
            forward_input=FunctionInput(make_input((4, 2, 2, 5))),
            desc='3d_no_elementwise_affine'),
        ModuleInput(
            constructor_input=FunctionInput([5], 1e-3),
            forward_input=FunctionInput(make_input((0, 5))),
            desc='1d_empty_elementwise_affine'),
        ModuleInput(
            constructor_input=FunctionInput([2, 2, 5], 1e-3, elementwise_affine=True, bias=False),
            forward_input=FunctionInput(make_input((4, 2, 2, 5))),
            desc='3d_elementwise_affine_no_bias'),
    ]
def module_inputs_torch_nn_RMSNorm(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.RMSNorm with a pure-Python reference.

    Covers 1d and 3d normalized shapes, with and without elementwise affine
    weights, plus an empty-batch input.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    def rms_norm_reference_fn(m, p, i):
        # Mirror nn.RMSNorm's default: when the module was built with
        # eps=None, fall back to the machine epsilon of the input dtype.
        eps = m.eps
        if eps is None:
            eps = torch.finfo(i.dtype).eps
        ndim = i.ndim
        normalized_shape = m.normalized_shape
        weight = m.weight
        # Normalize over the trailing len(normalized_shape) dimensions.
        # (Use a distinct loop variable: the original shadowed the input `i`.)
        dims = [ndim - k - 1 for k in range(len(normalized_shape))]
        # Compute in float32 for accuracy, then cast back to the input dtype.
        upcasted_i = i.float()
        # Bug fix: use the resolved `eps`, not `m.eps` — with eps=None the
        # original would have added None to a tensor and raised.
        result = upcasted_i * torch.rsqrt(upcasted_i.pow(2).mean(dim=dims, keepdim=True) + eps)
        if weight is not None:
            result *= weight
        return result.type_as(i)

    return [
        ModuleInput(
            constructor_input=FunctionInput([5], 1e-3),
            forward_input=FunctionInput(make_input((4, 5, 5))),
            desc='1d_elementwise_affine',
            reference_fn=rms_norm_reference_fn),
        ModuleInput(
            constructor_input=FunctionInput([5], 1e-3),
            forward_input=FunctionInput(make_input((128, 5, 5))),
            desc='1d_elementwise_affine_large_batch',
            reference_fn=rms_norm_reference_fn),
        ModuleInput(
            constructor_input=FunctionInput([5], 1e-3, False),
            forward_input=FunctionInput(make_input((4, 5, 5))),
            desc='1d_no_elementwise_affine',
            reference_fn=rms_norm_reference_fn),
        ModuleInput(
            constructor_input=FunctionInput([2, 2, 5], 1e-3),
            forward_input=FunctionInput(make_input((4, 2, 2, 5))),
            desc='3d_elementwise_affine',
            reference_fn=rms_norm_reference_fn),
        ModuleInput(
            constructor_input=FunctionInput([2, 2, 5], 1e-3, False),
            forward_input=FunctionInput(make_input((4, 2, 2, 5))),
            desc='3d_no_elementwise_affine',
            reference_fn=rms_norm_reference_fn),
        ModuleInput(
            constructor_input=FunctionInput([5], 1e-3),
            forward_input=FunctionInput(make_input((0, 5))),
            desc='1d_empty_elementwise_affine',
            reference_fn=rms_norm_reference_fn),
    ]
def module_inputs_torch_nn_LocalResponseNorm(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.LocalResponseNorm on 1d/2d/3d feature maps."""
    def _mk(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(3,),
                               forward_input=FunctionInput(_mk((1, 5, 7))),
                               desc='1d'))
    samples.append(ModuleInput(constructor_input=FunctionInput(2,),
                               forward_input=FunctionInput(_mk((1, 5, 7, 7))),
                               desc='2d_uneven_pad'))
    # Custom (size, alpha, beta, k) parameters for the 3d case.
    samples.append(ModuleInput(constructor_input=FunctionInput(1, 1., 0.5, 2.),
                               forward_input=FunctionInput(_mk((1, 5, 7, 7, 7))),
                               desc='3d_custom_params'))
    return samples
def module_inputs_torch_nn_LPPool1d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.LPPool1d with fractional and integer norms."""
    def _mk(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(1.5, 2),
                               forward_input=FunctionInput(_mk((1, 3, 7))),
                               desc='norm'))
    samples.append(ModuleInput(constructor_input=FunctionInput(2, 2, 3),
                               forward_input=FunctionInput(_mk((1, 3, 7)))))
    samples.append(ModuleInput(constructor_input=FunctionInput(2, 2, 3),
                               forward_input=FunctionInput(_mk((3, 7))),
                               reference_fn=no_batch_dim_reference_fn,
                               desc='no_batch_dim'))
    return samples
def module_inputs_torch_nn_LPPool2d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.LPPool2d with fractional and integer norms."""
    def _mk(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    samples = []
    samples.append(ModuleInput(constructor_input=FunctionInput(2, 2, 2),
                               forward_input=FunctionInput(_mk((1, 3, 7, 7)))))
    samples.append(ModuleInput(constructor_input=FunctionInput(2, 2, 2),
                               forward_input=FunctionInput(_mk((3, 7, 7))),
                               reference_fn=no_batch_dim_reference_fn,
                               desc='no_batch_dim'))
    samples.append(ModuleInput(constructor_input=FunctionInput(1.5, 2),
                               forward_input=FunctionInput(_mk((1, 3, 7, 7))),
                               desc='norm'))
    return samples
def module_inputs_torch_nn_LPPool3d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.LPPool3d: batched, no-batch-dim, and norm-power cases."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    batched = ModuleInput(
        constructor_input=FunctionInput(2, 2, 2),
        forward_input=FunctionInput(make_input((1, 3, 7, 7, 7))),
    )
    unbatched = ModuleInput(
        constructor_input=FunctionInput(2, 2, 2),
        forward_input=FunctionInput(make_input((3, 7, 7, 7))),
        reference_fn=no_batch_dim_reference_fn,
        desc='no_batch_dim',
    )
    norm_case = ModuleInput(
        constructor_input=FunctionInput(1.5, 2),
        forward_input=FunctionInput(make_input((1, 3, 7, 7, 7))),
        desc='norm',
    )
    return [batched, unbatched, norm_case]
def module_inputs_torch_nn_MaxPool1d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.MaxPool1d: kernel-only, explicit stride, and return_indices."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    shape = (2, 10, 4)
    specs = [
        (FunctionInput(4), '3d_input'),
        (FunctionInput(4, 4), 'stride'),
        (FunctionInput(4, return_indices=True), 'return_indices'),
    ]
    return [
        ModuleInput(constructor_input=ctor,
                    forward_input=FunctionInput(make_input(shape)),
                    desc=desc)
        for ctor, desc in specs
    ]
def module_inputs_torch_nn_MaxPool2d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.MaxPool2d: unbatched (3D) / batched (4D) inputs and return_indices."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    kernel, stride, padding = (3, 3), (2, 2), (1, 1)
    specs = [
        (FunctionInput(kernel, stride, padding), (3, 7, 7), '3d_input'),
        (FunctionInput(kernel, stride, padding), (1, 3, 7, 7), '4d_input'),
        (FunctionInput(kernel, stride, padding, return_indices=True), (1, 3, 7, 7), 'return_indices'),
    ]
    return [
        ModuleInput(constructor_input=ctor,
                    forward_input=FunctionInput(make_input(shape)),
                    desc=desc)
        for ctor, shape, desc in specs
    ]
def module_inputs_torch_nn_MaxPool3d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.MaxPool3d: kernel/stride/padding variants and return_indices."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    shape = (2, 3, 5, 5, 5)
    # First sample intentionally carries no desc (default description).
    samples = [ModuleInput(constructor_input=FunctionInput((2, 2, 2)),
                           forward_input=FunctionInput(make_input(shape)))]
    for ctor, desc in (
            (FunctionInput(2, (2, 2, 2)), 'stride'),
            (FunctionInput(2, 2, (1, 1, 1)), 'stride_padding'),
            (FunctionInput(2, 2, (1, 1, 1), return_indices=True), 'return_indices')):
        samples.append(ModuleInput(constructor_input=ctor,
                                   forward_input=FunctionInput(make_input(shape)),
                                   desc=desc))
    return samples
def module_inputs_torch_nn_FractionalMaxPool2d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.FractionalMaxPool2d.

    Covers output_ratio vs. output_size construction, return_indices=True,
    and unbatched (3D) inputs checked against no_batch_dim_reference_fn.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    def make_random_samples():
        # Fresh double-precision `_random_samples` tensor per ModuleInput.
        return torch.empty((1, 3, 2), dtype=torch.double, device=device).uniform_()
    return [
        ModuleInput(
            constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()),
            forward_input=FunctionInput(make_input((1, 3, 5, 7))),
            desc='ratio'),
        ModuleInput(
            constructor_input=FunctionInput((2, 3), output_size=(4, 3), _random_samples=make_random_samples()),
            forward_input=FunctionInput(make_input((1, 3, 7, 6))),
            desc='size'),
        ModuleInput(
            constructor_input=FunctionInput(
                2, output_ratio=0.5, _random_samples=make_random_samples(), return_indices=True
            ),
            forward_input=FunctionInput(make_input((1, 3, 5, 7))),
            desc='ratio_return_indices'),
        ModuleInput(
            constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()),
            forward_input=FunctionInput(make_input((3, 5, 7))),
            reference_fn=no_batch_dim_reference_fn,
            desc='ratio_no_batch_dim'),
        ModuleInput(
            constructor_input=FunctionInput((2, 3), output_size=(4, 3), _random_samples=make_random_samples()),
            forward_input=FunctionInput(make_input((3, 7, 6))),
            reference_fn=no_batch_dim_reference_fn,
            desc='size_no_batch_dim'),
    ]
def module_inputs_torch_nn_FractionalMaxPool3d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.FractionalMaxPool3d.

    Covers output_ratio vs. output_size construction (including asymmetric
    sizes), return_indices=True, and unbatched (4D) inputs checked against
    no_batch_dim_reference_fn.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    def make_random_samples():
        # Fresh double-precision `_random_samples` tensor per ModuleInput.
        return torch.empty((2, 4, 3), dtype=torch.double, device=device).uniform_()
    return [
        ModuleInput(
            constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()),
            forward_input=FunctionInput(make_input((2, 4, 5, 5, 5))),
            desc='ratio'),
        ModuleInput(
            constructor_input=FunctionInput((2, 2, 2), output_size=(4, 4, 4), _random_samples=make_random_samples()),
            forward_input=FunctionInput(make_input((2, 4, 7, 7, 7))),
            desc='size'),
        ModuleInput(
            constructor_input=FunctionInput((4, 2, 3), output_size=(10, 3, 2), _random_samples=make_random_samples()),
            forward_input=FunctionInput(make_input((2, 4, 16, 7, 5))),
            desc='asymsize'),
        ModuleInput(
            constructor_input=FunctionInput(
                2, output_ratio=0.5, _random_samples=make_random_samples(), return_indices=True
            ),
            forward_input=FunctionInput(make_input((2, 4, 5, 5, 5))),
            desc='ratio_return_indices'),
        ModuleInput(
            constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()),
            forward_input=FunctionInput(make_input((4, 5, 5, 5))),
            reference_fn=no_batch_dim_reference_fn,
            desc='ratio_no_batch_dim'),
        ModuleInput(
            constructor_input=FunctionInput((2, 2, 2), output_size=(4, 4, 4), _random_samples=make_random_samples()),
            forward_input=FunctionInput(make_input((4, 7, 7, 7))),
            reference_fn=no_batch_dim_reference_fn,
            desc='size_no_batch_dim'),
    ]
def module_inputs_torch_nn_Sigmoid(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.Sigmoid: scalar, no-batch-dim, and channels-last layouts."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    scalar = ModuleInput(
        constructor_input=FunctionInput(),
        forward_input=FunctionInput(make_input(())),
        desc='scalar',
    )
    no_batch_dim = ModuleInput(
        constructor_input=FunctionInput(),
        forward_input=FunctionInput(make_input(4)),
        reference_fn=no_batch_dim_reference_fn,
        desc='no_batch_dim',
    )
    channels_last = ModuleInput(
        constructor_input=FunctionInput(),
        forward_input=FunctionInput(make_input((2, 3, 4, 5))),
        desc='channels_last_mem_format',
    )
    channels_last_3d = ModuleInput(
        constructor_input=FunctionInput(),
        forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))),
        desc='channels_last_3d_mem_format',
    )
    return [scalar, no_batch_dim, channels_last, channels_last_3d]
def module_inputs_torch_nn_LogSigmoid(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.LogSigmoid, checked against log(sigmoid(x))."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    def logsigmoid_ref(m, p, i):
        # Reference implementation: log(sigmoid(i)).
        return i.sigmoid().log()
    return [
        ModuleInput(
            constructor_input=FunctionInput(),
            forward_input=FunctionInput(make_input(())),
            reference_fn=logsigmoid_ref,
            desc='scalar',
        ),
        ModuleInput(
            constructor_input=FunctionInput(),
            forward_input=FunctionInput(make_input((2, 3, 4))),
            reference_fn=logsigmoid_ref,
        ),
        ModuleInput(
            constructor_input=FunctionInput(),
            forward_input=FunctionInput(make_input(4)),
            reference_fn=no_batch_dim_reference_fn,
            desc='no_batch_dim',
        ),
    ]
def module_inputs_torch_nn_MarginRankingLoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.MarginRankingLoss across reductions and a custom margin."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False)
    cases: list[tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_mean', {'reduction': 'mean'}),
        ('reduction_none', {'reduction': 'none'}),
        ('margin', {'margin': 0.5})
    ]

    module_inputs = []
    for desc, constructor_kwargs in cases:
        # constructor_kwargs is bound as a default argument to avoid the
        # late-binding closure pitfall across loop iterations.
        def reference_fn(m, p, i1, i2, t, constructor_kwargs=constructor_kwargs):
            return marginrankingloss_reference(i1, i2, t, **constructor_kwargs)

        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input((50,)), make_input((50,)),
                                                    make_target((50,)).sign()),
                        desc=desc,
                        reference_fn=reference_fn)
        )
    return module_inputs
def module_inputs_torch_nn_MultiLabelMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.MultiLabelMarginLoss across reductions, in 1D and 2D form."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False)
    cases: list[tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_mean', {'reduction': 'mean'}),
        ('reduction_none', {'reduction': 'none'}),
    ]

    module_inputs = []
    for desc, constructor_kwargs in cases:
        # constructor_kwargs bound as a default to avoid late-binding closures.
        def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs):
            return multilabelmarginloss_reference(i, t, **constructor_kwargs)

        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input((10,)),
                                                    make_target((10), low=0, high=10)),
                        desc=f'1d_{desc}',
                        reference_fn=reference_fn)
        )

        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input((5, 10)),
                                                    make_target((5, 10), low=0, high=10)),
                        desc=desc,
                        reference_fn=reference_fn)
        )

    return module_inputs
def module_inputs_torch_nn_MultiMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.MultiMarginLoss: reductions, p=2, margin, and class weights."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False)
    make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
    cases: list[tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_mean', {'reduction': 'mean'}),
        ('reduction_none', {'reduction': 'none'}),
        ('p', {'p': 2}),
        ('margin', {'margin': 0.5}),
        ('weights', {'weight': make_weight(10)})
    ]

    module_inputs = []
    for desc, constructor_kwargs in cases:
        # constructor_kwargs bound as a default to avoid late-binding closures.
        def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs):
            return multimarginloss_reference(i, t, **constructor_kwargs)

        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input((5, 10)),
                                                    make_target((5), low=0, high=10)),
                        desc=desc,
                        reference_fn=reference_fn)
        )
    return module_inputs
def module_inputs_torch_nn_MultiLabelSoftMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.MultiLabelSoftMarginLoss with an inline reference implementation."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False)
    make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
    cases: list[tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_mean', {'reduction': 'mean'}),
        ('reduction_none', {'reduction': 'none'}),
        ('weight', {'weight': make_weight(10)}),
    ]

    def multilabelsoftmargin_loss_reference_fn(m, p, i, t, reduction='mean', weight=None):
        # Per-element binary cross entropy on sigmoid(i), averaged over the
        # class dimension, then reduced per the `reduction` mode.
        result = t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log()
        if weight is not None:
            result *= weight
        result = (-result).sum(i.dim() - 1) / i.size(-1)

        if reduction == 'none':
            return result
        elif reduction == 'mean':
            return result.mean()
        else:
            return result.sum()

    module_inputs = []
    for desc, constructor_kwargs in cases:
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input((5, 10)),
                                                    make_target((5, 10), low=0, high=2)),
                        desc=desc,
                        reference_fn=partial(multilabelsoftmargin_loss_reference_fn, **constructor_kwargs))
        )

    return module_inputs
def module_inputs_torch_nn_SoftMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.SoftMarginLoss across reduction modes."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
    cases: list[tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_mean', {'reduction': 'mean'}),
        ('reduction_none', {'reduction': 'none'}),
    ]

    module_inputs = []
    for desc, constructor_kwargs in cases:
        # constructor_kwargs bound as a default to avoid late-binding closures.
        def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs):
            return softmarginloss_reference(i, t, **constructor_kwargs)

        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input((5, 5)),
                                                    make_target((5, 5)).sign()),
                        desc=desc,
                        reference_fn=reference_fn)
        )
    return module_inputs
def module_inputs_torch_nn_TransformerEncoder(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.TransformerEncoder, derived from the EncoderLayer samples."""
    # Reuse the TransformerEncoderLayer samples since the forward args are nearly the same.
    samples = []
    for layer_module_input in module_inputs_torch_nn_TransformerEncoderLayer(
            None, device, dtype, requires_grad, training):
        # Construct a TransformerEncoderLayer object to pass to TransformerEncoder.
        l_args, l_kwargs = (layer_module_input.constructor_input.args,
                            layer_module_input.constructor_input.kwargs)
        l_kwargs['device'] = device
        l_kwargs['dtype'] = dtype
        encoder_layer = torch.nn.TransformerEncoderLayer(*l_args, **l_kwargs)
        num_layers = 2
        # Note: TransformerEncoderLayer takes a "src_mask" while
        # TransformerEncoder takes a "mask"; rename kwarg appropriately.
        # (The layer sample's forward_input is mutated in place; it is not
        # reused afterwards, since a fresh set of samples is built per call.)
        forward_input = layer_module_input.forward_input
        if 'src_mask' in forward_input.kwargs:
            forward_input.kwargs['mask'] = forward_input.kwargs['src_mask']
            del forward_input.kwargs['src_mask']
        samples.append(ModuleInput(
            constructor_input=FunctionInput(encoder_layer, num_layers),
            forward_input=forward_input,
            desc=layer_module_input.desc
        ))
    return samples
def module_inputs_torch_nn_TransformerEncoderLayer(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.TransformerEncoderLayer.

    Includes activation/bias variants, no-batch-dim coverage over the product of
    mask / padding-mask / norm_first / batch_first / bias options, and (in
    training mode) fastpath-vs-training comparisons.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    samples = [
        ModuleInput(
            constructor_input=FunctionInput(4, 2, 16, 0.0),
            forward_input=FunctionInput(
                make_input((2, 3, 4))
            ),
            desc='relu_activation'
        ),
        ModuleInput(
            constructor_input=FunctionInput(4, 2, 8, 0.0, F.gelu),
            forward_input=FunctionInput(
                make_input((2, 3, 4))
            ),
            desc='gelu_activation'
        ),
        ModuleInput(
            constructor_input=FunctionInput(4, 2, 8, 0.0, bias=False),
            forward_input=FunctionInput(
                make_input((2, 3, 4))
            ),
            desc='no_bias'
        ), ]

    # Samples below are for validating the no-batch-dim support.
    key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool))
    attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3)))
    for src_mask, src_key_padding_mask, norm_first, batch_first, bias in \
            itertools.product(attn_masks, key_padding_masks, (True, False), (True, False), (True, False)):
        samples.append(
            ModuleInput(
                constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8,
                                                dropout=0.0, batch_first=batch_first,
                                                norm_first=norm_first, bias=bias),
                forward_input=FunctionInput(
                    make_input((3, 4)), src_mask=src_mask, src_key_padding_mask=src_key_padding_mask
                ),
                reference_fn=partial(no_batch_dim_reference_fn,
                                     batch_first=batch_first, kwargs_to_batchify={'src_key_padding_mask': 0}),
                desc=f'no_batch_dim_batch_first_{batch_first}'
            ))

    # Samples below where we pass reference_fn are for validating the fast path,
    # since the fast path requires no_grad mode, we run the fast path in .eval()
    # and no_grad() in the reference_fn and verify that against the results in train mode.
    def fast_path_reference_fn(module, parameters, *args, **kwargs):
        assert module.training
        module.train(False)
        with torch.no_grad():
            output = module(*args, **kwargs)
        module.train(True)
        return output

    if training:
        for norm_first, bias in itertools.product((True, False), (True, False)):
            samples.append(
                ModuleInput(
                    constructor_input=FunctionInput(
                        4, 2, 8, dropout=0.0, batch_first=True, norm_first=norm_first, bias=bias
                    ),
                    forward_input=FunctionInput(
                        make_input((2, 3, 4)),
                    ),
                    # fastpath doesn't run when bias=False
                    reference_fn=fast_path_reference_fn if bias else None,
                    desc=f'fastpath_{bias}_norm_first_{norm_first}'
                )
            )

    return samples
def module_inputs_torch_nn_TransformerDecoderLayer(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.TransformerDecoderLayer.

    Includes activation/bias variants plus the full product of mask /
    padding-mask / norm_first / bias / batch_first options, covering both
    no-batch-dim and batched forms (the same masks are used for tgt and memory).
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    samples = [
        ModuleInput(
            constructor_input=FunctionInput(4, 2, 16, 0.0),
            forward_input=FunctionInput(
                make_input((2, 3, 4)), make_input((2, 3, 4))
            ),
            desc='relu_activation'
        ),
        ModuleInput(
            constructor_input=FunctionInput(4, 2, 8, 0.0, F.gelu),
            forward_input=FunctionInput(
                make_input((2, 3, 4)), make_input((2, 3, 4))
            ),
            desc='gelu_activation'
        ),
        ModuleInput(
            constructor_input=FunctionInput(4, 2, 8, 0.0, bias=False),
            forward_input=FunctionInput(
                make_input((2, 3, 4)), make_input((2, 3, 4))
            ),
            desc='no_bias'
        ), ]

    key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool))
    attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3)))
    for tgt_mask, tgt_key_padding_mask, norm_first, bias, batch_first in \
            itertools.product(attn_masks, key_padding_masks, (True, False), (True, False), (True, False)):
        # Using same mask for tgt and memory
        memory_mask = tgt_mask
        memory_key_padding_mask = tgt_key_padding_mask
        samples.append(
            ModuleInput(
                constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8,
                                                dropout=0.0, batch_first=batch_first,
                                                norm_first=norm_first, bias=bias),
                forward_input=FunctionInput(
                    make_input((3, 4)), make_input((3, 4)), tgt_mask=tgt_mask, memory_mask=memory_mask,
                    tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask
                ),
                reference_fn=partial(no_batch_dim_reference_fn,
                                     batch_first=batch_first,
                                     kwargs_to_batchify={'tgt_key_padding_mask': 0, 'memory_key_padding_mask': 0}),
                desc=f'no_batch_dim_batch_first_{batch_first}'
            ))
        src, tgt = make_input((2, 3, 4)), make_input((2, 3, 4))
        if not batch_first:
            src, tgt = src.transpose(0, 1), tgt.transpose(0, 1)
        if tgt_key_padding_mask is not None:
            memory_key_padding_mask, tgt_key_padding_mask = (tgt_key_padding_mask.expand(2, 3),) * 2
        samples.append(
            ModuleInput(
                constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8,
                                                dropout=0.0, batch_first=batch_first,
                                                norm_first=norm_first, bias=bias),
                forward_input=FunctionInput(
                    src, tgt, tgt_mask=tgt_mask, memory_mask=memory_mask,
                    tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask
                ),
                desc=f'norm_first_{norm_first}_batch_first_{batch_first}_bias_{bias}'
            ))

    return samples
def module_inputs_torch_nn_Transformer(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.Transformer over mask / padding-mask / norm_first /
    bias / batch_first combinations, in no-batch-dim and batched form.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    samples = []
    # Samples below are for validating the no-batch-dim support.
    key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool))
    attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3)))
    for mask, key_padding_mask, norm_first, bias, batch_first in \
            itertools.product(attn_masks, key_padding_masks, (True, False), (True, False), (True, False)):
        # Using same mask for tgt and memory
        src_mask , tgt_mask = (mask,) * 2
        src_key_padding_mask, tgt_key_padding_mask = (key_padding_mask,) * 2
        samples.append(
            ModuleInput(
                constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8,
                                                num_encoder_layers=1, num_decoder_layers=1,
                                                dropout=0.0, batch_first=batch_first, norm_first=norm_first, bias=bias),
                forward_input=FunctionInput(
                    make_input((3, 4)), make_input((3, 4)), tgt_mask=tgt_mask, src_mask=src_mask,
                    tgt_key_padding_mask=tgt_key_padding_mask, src_key_padding_mask=src_key_padding_mask
                ),
                reference_fn=partial(no_batch_dim_reference_fn,
                                     batch_first=batch_first,
                                     kwargs_to_batchify={'tgt_key_padding_mask': 0, 'src_key_padding_mask': 0}),
                desc=f'no_batch_dim_batch_first_{batch_first}'
            ))

        src, tgt = make_input((2, 3, 4)), make_input((2, 3, 4))
        if not batch_first:
            src = src.transpose(0, 1)
            tgt = tgt.transpose(0, 1)
        if key_padding_mask is not None:
            src_key_padding_mask, tgt_key_padding_mask = (key_padding_mask.expand(2, 3),) * 2

        samples.append(
            ModuleInput(
                constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8,
                                                num_encoder_layers=1, num_decoder_layers=1,
                                                dropout=0.0, batch_first=batch_first, norm_first=norm_first, bias=bias),
                forward_input=FunctionInput(
                    src, tgt, tgt_mask=tgt_mask, src_mask=src_mask,
                    tgt_key_padding_mask=tgt_key_padding_mask, src_key_padding_mask=src_key_padding_mask
                ),
            ))
    return samples
def module_inputs_torch_nn_Embedding(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.Embedding, including a discontiguous (expanded) index tensor."""
    make_empty = partial(torch.empty, device=device, dtype=torch.long, requires_grad=False)
    contiguous = ModuleInput(
        constructor_input=FunctionInput(num_embeddings=4, embedding_dim=3),
        forward_input=FunctionInput(make_empty(2, 3).random_(4)),
    )
    # expand() yields a non-contiguous index tensor without copying.
    discontiguous = ModuleInput(
        constructor_input=FunctionInput(num_embeddings=4, embedding_dim=3),
        forward_input=FunctionInput(make_empty(1, 512).random_(4).expand(7, 512)),
        desc='discontiguous',
    )
    return [contiguous, discontiguous]
def module_inputs_torch_nn_MultiheadAttention(module_info, device, dtype, requires_grad, training, **kwargs):
    """No-batch-dim sample inputs for nn.MultiheadAttention over the product of
    bias / add_bias_kv / add_zero_attn / key_padding_mask / attn_mask options,
    in both batch_first=True and batch_first=False form.
    """
    # Currently all samples below are for validating the no-batch-dim support.
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    samples = []
    bool_vals = (True, False)
    key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool))
    attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3, 3)))
    products = itertools.product(bool_vals, bool_vals, bool_vals, key_padding_masks, attn_masks)
    for bias, add_bias_kv, add_zero_attn, key_padding_mask, attn_mask in products:
        samples.append(
            ModuleInput(
                constructor_input=FunctionInput(embed_dim=3, num_heads=3, batch_first=True,
                                                bias=bias, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn),
                forward_input=FunctionInput(make_input((3, 3)), make_input((3, 3)), make_input((3, 3)),
                                            key_padding_mask=key_padding_mask, attn_mask=attn_mask),
                reference_fn=no_batch_dim_reference_mha,
            )
        )
        samples.append(
            ModuleInput(
                constructor_input=FunctionInput(embed_dim=3, num_heads=3, batch_first=False,
                                                bias=bias, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn),
                forward_input=FunctionInput(make_input((3, 3)), make_input((3, 3)), make_input((3, 3)),
                                            key_padding_mask=key_padding_mask, attn_mask=attn_mask),
                reference_fn=partial(no_batch_dim_reference_mha, batch_first=False),
            )
        )

    return samples
def module_inputs_torch_nn_RNN_GRU_Cell(module_info, device, dtype, requires_grad, training, **kwargs):
    """No-batch-dim sample inputs shared by nn.RNNCell and nn.GRUCell.

    Pass is_rnn=True via kwargs to add the RNNCell-only `nonlinearity` case.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    def cell_sample(**ctor_kwargs):
        # One (input, hidden) sample for a cell built with the given kwargs.
        return ModuleInput(
            constructor_input=FunctionInput(5, 10, **ctor_kwargs),
            forward_input=FunctionInput(make_input(5), make_input(10)),
            reference_fn=no_batch_dim_reference_fn,
        )

    samples = [cell_sample(), cell_sample(bias=True)]
    if kwargs.get('is_rnn', False):
        # RNN also supports `nonlinearity`; tanh is the default, so check relu.
        samples.append(cell_sample(bias=True, nonlinearity='relu'))
    return samples
def module_inputs_torch_nn_LSTMCell(module_info, device, dtype, requires_grad, training, **kwargs):
    """No-batch-dim sample inputs for nn.LSTMCell (with and without explicit bias)."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    def cell_sample(**ctor_kwargs):
        # LSTMCell takes (input, (h, c)) in its forward call.
        return ModuleInput(
            constructor_input=FunctionInput(5, 10, **ctor_kwargs),
            forward_input=FunctionInput(make_input(5), (make_input(10), make_input(10))),
            reference_fn=no_batch_dim_reference_lstmcell,
        )

    # Return a tuple, matching the original container type.
    return (cell_sample(), cell_sample(bias=True))
def make_packed_sequence(inp, batch_sizes):
    """Pack `inp` with pack_padded_sequence, moving requires_grad onto the packed data.

    The caller never gets `inp` back, so gradients must flow through
    `seq.data` instead of the original tensor.
    """
    wants_grad = inp.requires_grad
    inp.requires_grad_(False)  # user won't have access to inp so won't be able to get its grads
    packed = pack_padded_sequence(inp, batch_sizes)
    packed.data.requires_grad_(wants_grad)
    return packed
def module_inputs_torch_nn_RNN_GRU(module_info, device, dtype, requires_grad, training, with_packed_sequence=False, **kwargs):
    """No-batch-dim sample inputs shared by nn.RNN and nn.GRU.

    Requires kwargs['is_rnn'] (True adds the RNN-only `nonlinearity` axis to
    the product of bias/batch_first/bidirectional options). When
    with_packed_sequence=True, PackedSequence inputs are also generated.
    """
    # Currently all samples below are for validating the no-batch-dim support.
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # NOTE: 'is_rnn' is a required kwarg here (KeyError if absent).
    is_rnn = kwargs['is_rnn']
    nonlinearity = ('relu', 'tanh')
    bias = (False, True)
    batch_first = (False, True)
    bidirectional = (False, True)

    samples = []
    if is_rnn:
        prod_gen = product(nonlinearity, bias, batch_first, bidirectional)
    else:
        prod_gen = product(bias, batch_first, bidirectional)

    for args in prod_gen:
        if is_rnn:
            nl, b, b_f, bidir = args
        else:
            b, b_f, bidir = args

        cons_args = {'input_size': 2, 'hidden_size': 2, 'num_layers': 2,
                     'batch_first': b_f, 'bias': b, 'bidirectional': bidir}
        cons_args_hidden = {'input_size': 2, 'hidden_size': 3, 'num_layers': 2,
                            'batch_first': b_f, 'bias': b, 'bidirectional': bidir}

        if is_rnn:
            cons_args['nonlinearity'] = nl
            cons_args_hidden['nonlinearity'] = nl
        samples.append(
            ModuleInput(
                constructor_input=FunctionInput(**cons_args),
                forward_input=FunctionInput(make_input((3, 2))),
                reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f),
            )
        )
        samples.append(
            ModuleInput(
                constructor_input=FunctionInput(**cons_args_hidden),
                forward_input=FunctionInput(make_input((3, 2)), make_input((4 if bidir else 2, 3))),
                reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f),
            )
        )
        if with_packed_sequence:
            samples.append(
                ModuleInput(
                    constructor_input=FunctionInput(**cons_args),
                    forward_input=FunctionInput(make_packed_sequence(make_input((5, 2, 2)), torch.tensor([5, 3]))),
                    reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f),
                )
            )
            samples.append(
                ModuleInput(
                    constructor_input=FunctionInput(**cons_args),
                    forward_input=FunctionInput(make_packed_sequence(make_input((5, 5, 2)), torch.tensor([5, 3, 3, 2, 2]))),
                    reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f),
                )
            )

    return samples
def module_inputs_torch_nn_LSTM(module_info, device, dtype, requires_grad, training, **kwargs):
    """No-batch-dim sample inputs for nn.LSTM over the product of
    bias / batch_first / bidirectional / proj_size options, with and
    without an explicit initial hidden state.
    """
    # Currently all samples below are for validating the no-batch-dim support.
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    bias = (False, True)
    batch_first = (False, True)
    bidirectional = (False, True)
    proj_sizes = (0, 2)

    samples = []
    prod_gen = product(bias, batch_first, bidirectional, proj_sizes)

    for args in prod_gen:
        b, b_f, bidir, proj_size = args
        hidden_size = 3
        cons_args = {'input_size': 2, 'hidden_size': hidden_size, 'num_layers': 2, 'proj_size': proj_size,
                     'batch_first': b_f, 'bias': b, 'bidirectional': bidir}
        cons_args_hidden = {'input_size': 2, 'hidden_size': hidden_size, 'num_layers': 2, 'proj_size': proj_size,
                            'batch_first': b_f, 'bias': b, 'bidirectional': bidir}

        samples.append(
            ModuleInput(
                constructor_input=FunctionInput(**cons_args),
                forward_input=FunctionInput(make_input((2, 2))),
                reference_fn=partial(no_batch_dim_reference_lstm, batch_first=b_f),
            )
        )

        # With proj_size > 0 the hidden state h has the projected width.
        h_out = proj_size if proj_size > 0 else hidden_size
        hx = (make_input((4 if bidir else 2, h_out)), make_input((4 if bidir else 2, hidden_size)))
        samples.append(
            ModuleInput(
                constructor_input=FunctionInput(**cons_args_hidden),
                forward_input=FunctionInput(make_input((3, 2)), hx),
                reference_fn=partial(no_batch_dim_reference_lstm, batch_first=b_f),
            )
        )

    return samples
def module_inputs_torch_nn_ReflectionPad1d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.ReflectionPad1d (no-batch-dim and batched)."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    no_batch_dim = ModuleInput(
        constructor_input=FunctionInput(1),
        forward_input=FunctionInput(make_input((2, 3))),
        reference_fn=no_batch_dim_reference_fn,
    )
    batched = ModuleInput(
        constructor_input=FunctionInput((1, 2)),
        forward_input=FunctionInput(make_input((2, 3, 4))),
    )
    return [no_batch_dim, batched]
def module_inputs_torch_nn_ReflectionPad2d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.ReflectionPad2d (no-batch-dim and batched)."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    no_batch_dim = ModuleInput(
        constructor_input=FunctionInput(1),
        forward_input=FunctionInput(make_input((3, 4, 5))),
        reference_fn=no_batch_dim_reference_fn,
    )
    batched = ModuleInput(
        constructor_input=FunctionInput((1, 2, 3, 4)),
        forward_input=FunctionInput(make_input((3, 4, 5, 6))),
    )
    return [no_batch_dim, batched]
def module_inputs_torch_nn_ReflectionPad3d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.ReflectionPad3d (no-batch-dim and batched)."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    no_batch_dim = ModuleInput(
        constructor_input=FunctionInput(1),
        forward_input=FunctionInput(make_input((2, 3, 4, 5))),
        reference_fn=no_batch_dim_reference_fn,
    )
    batched = ModuleInput(
        constructor_input=FunctionInput((1, 2, 1, 2, 1, 2)),
        forward_input=FunctionInput(make_input((3, 3, 3, 3, 3))),
    )
    return [no_batch_dim, batched]
def module_inputs_torch_nn_ReplicationPad1d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.ReplicationPad1d (no-batch-dim and batched)."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    no_batch_dim = ModuleInput(
        constructor_input=FunctionInput(1),
        forward_input=FunctionInput(make_input((3, 4))),
        reference_fn=no_batch_dim_reference_fn,
    )
    batched = ModuleInput(
        constructor_input=FunctionInput((1, 2)),
        forward_input=FunctionInput(make_input((3, 4, 5))),
    )
    return [no_batch_dim, batched]
def module_inputs_torch_nn_ReplicationPad2d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.ReplicationPad2d (no-batch-dim and batched)."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    no_batch_dim = ModuleInput(
        constructor_input=FunctionInput(1),
        forward_input=FunctionInput(make_input((3, 4, 5))),
        reference_fn=no_batch_dim_reference_fn,
    )
    batched = ModuleInput(
        constructor_input=FunctionInput((1, 2, 3, 4)),
        forward_input=FunctionInput(make_input((3, 4, 5, 6))),
    )
    return [no_batch_dim, batched]
def module_inputs_torch_nn_ReplicationPad3d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.ReplicationPad3d (no-batch-dim and batched)."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    no_batch_dim = ModuleInput(
        constructor_input=FunctionInput(1),
        forward_input=FunctionInput(make_input((3, 4, 5, 6))),
        reference_fn=no_batch_dim_reference_fn,
    )
    batched = ModuleInput(
        constructor_input=FunctionInput((1, 2, 3, 4, 5, 6)),
        forward_input=FunctionInput(make_input((3, 4, 5, 6, 7))),
    )
    return [no_batch_dim, batched]
def module_inputs_torch_nn_ZeroPad1d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.ZeroPad1d (no-batch-dim and batched)."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    no_batch_dim = ModuleInput(
        constructor_input=FunctionInput(1),
        forward_input=FunctionInput(make_input((3, 4))),
        reference_fn=no_batch_dim_reference_fn,
    )
    batched = ModuleInput(
        constructor_input=FunctionInput((1, 2)),
        forward_input=FunctionInput(make_input((3, 4, 5))),
    )
    return [no_batch_dim, batched]
def module_inputs_torch_nn_ZeroPad2d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.ZeroPad2d (no-batch-dim and batched)."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    no_batch_dim = ModuleInput(
        constructor_input=FunctionInput(1),
        forward_input=FunctionInput(make_input((1, 2, 3))),
        reference_fn=no_batch_dim_reference_fn,
    )
    batched = ModuleInput(
        constructor_input=FunctionInput((1, 2, 3, 4)),
        forward_input=FunctionInput(make_input((1, 2, 3, 4))),
    )
    return [no_batch_dim, batched]
def module_inputs_torch_nn_ZeroPad3d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for nn.ZeroPad3d (no-batch-dim and batched)."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    no_batch_dim = ModuleInput(
        constructor_input=FunctionInput(1),
        forward_input=FunctionInput(make_input((3, 4, 5, 6))),
        reference_fn=no_batch_dim_reference_fn,
    )
    batched = ModuleInput(
        constructor_input=FunctionInput((1, 2, 3, 4, 5, 6)),
        forward_input=FunctionInput(make_input((1, 2, 3, 4, 5))),
    )
    return [no_batch_dim, batched]
def module_inputs_torch_nn_ConstantPad1d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.ConstantPad1d: one unbatched and one batched case."""
    mk = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # Constructor args are (padding, value). Symmetric padding on a
    # no-batch-dim (2D) input.
    unbatched = ModuleInput(
        constructor_input=FunctionInput(1, 2),
        forward_input=FunctionInput(mk((3, 4))),
        reference_fn=no_batch_dim_reference_fn,
    )
    # Asymmetric (left, right) padding with fill value 3 on a batched (3D) input.
    batched = ModuleInput(
        constructor_input=FunctionInput((1, 2), 3),
        forward_input=FunctionInput(mk((3, 4, 5))),
    )
    return [unbatched, batched]
def module_inputs_torch_nn_ConstantPad2d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.ConstantPad2d: one unbatched and one batched case."""
    mk = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # Constructor args are (padding, value). Symmetric padding on a
    # no-batch-dim (3D) input.
    unbatched = ModuleInput(
        constructor_input=FunctionInput(1, 3),
        forward_input=FunctionInput(mk((3, 4, 5))),
        reference_fn=no_batch_dim_reference_fn,
    )
    # Per-side padding with fill value 5 on a batched (4D) input.
    batched = ModuleInput(
        constructor_input=FunctionInput((1, 2, 3, 4), 5),
        forward_input=FunctionInput(mk((1, 2, 3, 4))),
    )
    return [unbatched, batched]
def module_inputs_torch_nn_ConstantPad3d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.ConstantPad3d: one unbatched and one batched case."""
    mk = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # Constructor args are (padding, value). Symmetric padding on a
    # no-batch-dim (4D) input.
    unbatched = ModuleInput(
        constructor_input=FunctionInput(1, 3),
        forward_input=FunctionInput(mk((3, 4, 5, 6))),
        reference_fn=no_batch_dim_reference_fn,
    )
    # Per-side padding with fill value 7 on a batched (5D) input.
    batched = ModuleInput(
        constructor_input=FunctionInput((1, 2, 3, 4, 5, 6), 7),
        forward_input=FunctionInput(mk((1, 2, 1, 2, 1))),
    )
    return [unbatched, batched]
def module_inputs_torch_nn_CircularPad1d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.CircularPad1d, checked against a hand-rolled
    wrap-around reference implementation."""
    mk = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    def circular_ref(inp, pad):
        """Circularly pad the last dim: prepend the trailing pad[0] columns and
        append the leading pad[1] columns, e.g.

            input:  [[[0., 1., 2.],
                      [3., 4., 5.]]]
            pad:    (1, 2)
            output: [[[2., 0., 1., 2., 0., 1.],
                      [5., 3., 4., 5., 3., 4.]]]
        """
        return torch.cat([inp[:, :, -pad[0]:], inp, inp[:, :, :pad[1]]], dim=2)

    def ref_fn(m, p, i):
        # Compare the module's output against the wrap-around reference.
        return circular_ref(i, m.padding)

    samples = [
        # Symmetric int padding on a no-batch-dim (2D) input.
        ModuleInput(
            constructor_input=FunctionInput(1),
            forward_input=FunctionInput(mk((3, 4))),
            reference_fn=no_batch_dim_reference_fn,
        ),
    ]
    # Several asymmetric paddings on the same batched (3D) shape.
    for pad in ((1, 2), (3, 1), (3, 3)):
        samples.append(
            ModuleInput(
                constructor_input=FunctionInput(pad),
                forward_input=FunctionInput(mk((1, 2, 3))),
                reference_fn=ref_fn,
            )
        )
    return samples
def module_inputs_torch_nn_CircularPad2d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.CircularPad2d, checked against a hand-rolled
    wrap-around reference implementation."""
    mk = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    def circular_ref(inp, pad):
        """Wrap-around pad of the last two dims; pad order is (left, right, top, bottom), e.g.

            input:  [[[[0., 1., 2],
                       [3., 4., 5.]]]]
            pad:    (1, 2, 2, 1)
            output: [[[[2., 0., 1., 2., 0., 1.],
                       [5., 3., 4., 5., 3., 4.],
                       [2., 0., 1., 2., 0., 1.],
                       [5., 3., 4., 5., 3., 4.],
                       [2., 0., 1., 2., 0., 1.]]]]
        """
        # Pad rows (dim 2) first, then columns (dim 3).
        inp = torch.cat([inp[:, :, -pad[2]:], inp, inp[:, :, :pad[3]]], dim=2)
        return torch.cat([inp[:, :, :, -pad[0]:], inp, inp[:, :, :, :pad[1]]], dim=3)

    def ref_fn(m, p, i):
        # Compare the module's output against the wrap-around reference.
        return circular_ref(i, m.padding)

    samples = [
        # Symmetric int padding on a no-batch-dim (3D) input.
        ModuleInput(
            constructor_input=FunctionInput(1),
            forward_input=FunctionInput(mk((3, 4, 5))),
            reference_fn=no_batch_dim_reference_fn,
        ),
    ]
    # Several asymmetric paddings on batched (4D) inputs.
    for pad, shape in (((1, 2, 2, 1), (1, 1, 2, 3)),
                       ((2, 3, 2, 2), (1, 1, 2, 3)),
                       ((3, 3, 3, 1), (1, 1, 3, 3))):
        samples.append(
            ModuleInput(
                constructor_input=FunctionInput(pad),
                forward_input=FunctionInput(mk(shape)),
                reference_fn=ref_fn,
            )
        )
    return samples
def module_inputs_torch_nn_CircularPad3d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Sample inputs for torch.nn.CircularPad3d, checked against a hand-rolled
    wrap-around reference implementation."""
    mk = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    def circular_ref(inp, pad):
        """Wrap-around pad of the last three dims; pad order is
        (left, right, top, bottom, front, back). Each dim is padded by
        concatenating the tensor's trailing slice in front of it and its
        leading slice behind it, innermost dim last.
        """
        inp = torch.cat([inp[:, :, -pad[4]:], inp, inp[:, :, :pad[5]]], dim=2)
        inp = torch.cat([inp[:, :, :, -pad[2]:], inp, inp[:, :, :, :pad[3]]], dim=3)
        return torch.cat([inp[:, :, :, :, -pad[0]:], inp, inp[:, :, :, :, :pad[1]]], dim=4)

    def ref_fn(m, p, i):
        # Compare the module's output against the wrap-around reference.
        return circular_ref(i, m.padding)

    samples = [
        # Symmetric int padding on a no-batch-dim (4D) input.
        ModuleInput(
            constructor_input=FunctionInput(1),
            forward_input=FunctionInput(mk((3, 4, 5, 6))),
            reference_fn=no_batch_dim_reference_fn,
        ),
    ]
    # Several asymmetric paddings on the same batched (5D) shape.
    for pad in ((1, 2, 1, 2, 1, 2),
                (3, 2, 2, 1, 1, 2),
                (3, 3, 2, 1, 2, 2)):
        samples.append(
            ModuleInput(
                constructor_input=FunctionInput(pad),
                forward_input=FunctionInput(mk((1, 1, 2, 2, 3))),
                reference_fn=ref_fn,
            )
        )
    return samples
# All these operators share similar issues on cuDNN and MIOpen, so one shared
# tuple of expected-failure decorators is reused by the RNN, GRU and LSTM
# ModuleInfo entries below.
rnn_gru_lstm_module_info_decorators = (
    # RuntimeError: Batching rule not implemented for aten::_cudnn_rnn_backward.
    # We could not generate a fallback
    DecorateInfo(
        unittest.expectedFailure, "TestModule", "test_grad",
        active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda'
    ),
    # NotImplementedError: the derivative for '_cudnn_rnn_backward' is not implemented.
    # Double backwards is not supported for CuDNN RNNs due to limitations in the CuDNN API
    DecorateInfo(
        unittest.expectedFailure, "TestModule", "test_gradgrad",
        active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda'
    ),
    # CUDNN GRU doesn't accept non-contiguous hx
    DecorateInfo(
        unittest.expectedFailure, "TestModule", "test_non_contiguous_tensors",
        active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda'
    ),
    # MIOPEN GRU doesn't accept non-contiguous hx (this is dispatched to miopen only for float).
    DecorateInfo(
        unittest.expectedFailure, "TestModule", "test_non_contiguous_tensors",
        active_if=(TEST_CUDNN and TEST_WITH_ROCM), dtypes=(torch.float,), device_type='cuda'
    )
)
# Start of module error inputs functions.
def module_error_inputs_torch_nn_RNN_GRU_Cell(module_info, device, dtype, requires_grad, training, **kwargs):
    """Error-producing inputs for RNNCell/GRUCell: mismatched input/hidden sizes,
    mismatched batch sizes, and an over-ranked hidden state."""
    mk = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    def forward_err(ctor_args, fwd_args, err_type, regex):
        # Every case here raises during forward(), never at construction time.
        return ErrorModuleInput(
            ModuleInput(
                constructor_input=FunctionInput(*ctor_args),
                forward_input=FunctionInput(*fwd_args),
            ),
            error_on=ModuleErrorEnum.FORWARD_ERROR,
            error_type=err_type,
            error_regex=regex,
        )

    return [
        # Input feature size disagrees with the cell's input_size.
        forward_err((10, 20), (mk(3, 11), mk(3, 20)), RuntimeError,
                    "input has inconsistent input_size: got 11 expected 10"),
        # Hidden feature size disagrees with the cell's hidden_size.
        forward_err((10, 20), (mk(3, 10), mk(3, 21)), RuntimeError,
                    "hidden0 has inconsistent hidden_size: got 21, expected 20"),
        # Input and hidden batch sizes disagree.
        forward_err((10, 20), (mk(3, 10), mk(5, 20)), RuntimeError,
                    "Input batch size 3 doesn't match hidden0 batch size 5"),
        # Hidden state must be 1D or 2D.
        forward_err((10, 20), (mk(3, 10), mk(3, 1, 1, 20)), ValueError,
                    "Expected hidden to be 1D or 2D, got 4D instead"),
        # Same hidden-size mismatch with explicit nonlinearities (RNNCell only).
        forward_err((10, 20, 'relu'), (mk(3, 10), mk(3, 21)), RuntimeError,
                    "hidden0 has inconsistent hidden_size: got 21, expected 20"),
        forward_err((10, 20, 'tanh'), (mk(3, 10), mk(3, 21)), RuntimeError,
                    "hidden0 has inconsistent hidden_size: got 21, expected 20"),
    ]
def module_error_inputs_torch_nn_LSTMCell(module_info, device, dtype, requires_grad, training, **kwargs):
    """Error-producing inputs for LSTMCell: mismatched input/hidden sizes,
    mismatched batch sizes, and an over-ranked hidden state."""
    mk = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    def forward_err(inp, hx, err_type, regex):
        # All cases construct an LSTMCell(10, 20) and fail in forward().
        return ErrorModuleInput(
            ModuleInput(
                constructor_input=FunctionInput(10, 20),
                forward_input=FunctionInput(inp, hx),
            ),
            error_on=ModuleErrorEnum.FORWARD_ERROR,
            error_type=err_type,
            error_regex=regex,
        )

    return [
        # Input feature size disagrees with input_size.
        forward_err(mk(3, 11), (mk(3, 20), mk(3, 20)), RuntimeError,
                    "input has inconsistent input_size: got 11 expected 10"),
        # Hidden feature size disagrees with hidden_size.
        forward_err(mk(3, 10), (mk(3, 21), mk(3, 21)), RuntimeError,
                    "hidden0 has inconsistent hidden_size: got 21, expected 20"),
        # Input and hidden batch sizes disagree.
        forward_err(mk(3, 10), (mk(5, 20), mk(5, 20)), RuntimeError,
                    "Input batch size 3 doesn't match hidden0 batch size 5"),
        # Hidden states must be 1D or 2D.
        forward_err(mk(3, 10), (mk(3, 1, 1, 20), mk(3, 1, 1, 20)), ValueError,
                    "Expected hx\\[0\\] to be 1D or 2D, got 4D instead"),
    ]
def module_error_inputs_torch_nn_RNN_GRU(module_info, device, dtype, requires_grad, training, **kwargs):
    """Construction-time errors for RNN/GRU: non-positive hidden_size / num_layers."""
    cases = (
        ((10, 0, 1), "hidden_size must be greater than zero"),
        ((10, 10, 0), "num_layers must be greater than zero"),
    )
    return [
        ErrorModuleInput(
            ModuleInput(constructor_input=FunctionInput(*ctor_args)),
            error_on=ModuleErrorEnum.CONSTRUCTION_ERROR,
            error_type=ValueError,
            error_regex=regex,
        )
        for ctor_args, regex in cases
    ]
def module_error_inputs_torch_nn_Pad1d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Error input for 1D padding modules: a 4D tensor must be rejected in forward()."""
    mk = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # ConstantPad1d's constructor takes (padding, value); the other 1D pad
    # modules take only padding.
    if kwargs.get('is_constant', False):
        ctor = FunctionInput(1, 3)
    else:
        ctor = FunctionInput(3)
    return [
        ErrorModuleInput(
            ModuleInput(
                constructor_input=ctor,
                forward_input=FunctionInput(mk((2, 3, 4, 5))),
            ),
            error_on=ModuleErrorEnum.FORWARD_ERROR,
            error_type=ValueError,
            error_regex=r"expected 2D or 3D input \(got 4D input\)",
        ),
    ]
def module_error_inputs_torch_nn_Pad2d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Error input for 2D padding modules: a 2D tensor must be rejected in forward()."""
    mk = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # ConstantPad2d's constructor takes (padding, value); the other 2D pad
    # modules take only padding.
    if kwargs.get('is_constant', False):
        ctor = FunctionInput(1, 3)
    else:
        ctor = FunctionInput(3)
    return [
        ErrorModuleInput(
            ModuleInput(
                constructor_input=ctor,
                forward_input=FunctionInput(mk((2, 3))),
            ),
            error_on=ModuleErrorEnum.FORWARD_ERROR,
            error_type=ValueError,
            error_regex=r"expected 3D or 4D input \(got 2D input\)",
        ),
    ]
def module_error_inputs_torch_nn_Pad3d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Error input for 3D padding modules: a 2D tensor must be rejected in forward()."""
    mk = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # ConstantPad3d's constructor takes (padding, value); the other 3D pad
    # modules take only padding.
    if kwargs.get('is_constant', False):
        ctor = FunctionInput(1, 3)
    else:
        ctor = FunctionInput(3)
    return [
        ErrorModuleInput(
            ModuleInput(
                constructor_input=ctor,
                forward_input=FunctionInput(mk((2, 3))),
            ),
            error_on=ModuleErrorEnum.FORWARD_ERROR,
            error_type=ValueError,
            error_regex=r"expected 4D or 5D input \(got 2D input\)",
        ),
    ]
# True only when an MPS device is available AND the host runs macOS 15.0 or
# newer; used below to gate MPS expected failures (e.g. LPPool2d memory-format).
_macos15_or_newer = torch.backends.mps.is_available() and torch.backends.mps.is_macos_or_newer(15, 0)
# Database of ModuleInfo entries in alphabetical order.
module_db: list[ModuleInfo] = [
ModuleInfo(torch.nn.AdaptiveAvgPool1d,
module_inputs_func=module_inputs_torch_nn_AdaptiveAvgPool1d,
skips=(
# Fails on MPS backend if input/output sizes are not divisible
DecorateInfo(skipMPS),)
),
ModuleInfo(torch.nn.AdaptiveAvgPool2d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_inputs_func=module_inputs_torch_nn_AdaptiveAvgPool2d,
skips=(
# Fails on MPS backend if input/output sizes are not divisible
DecorateInfo(skipMPS),
# Fails on backward check if output size is 1x1
DecorateInfo(
unittest.expectedFailure,
'TestModule',
'test_memory_format',
active_if=operator.itemgetter('training'),
),)
),
ModuleInfo(torch.nn.AdaptiveAvgPool3d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_inputs_func=module_inputs_torch_nn_AdaptiveAvgPool3d,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
# not supported on MPS backend
DecorateInfo(skipMPS),)
),
ModuleInfo(torch.nn.AdaptiveMaxPool1d,
module_inputs_func=module_inputs_torch_nn_AdaptiveMaxPool1d,
),
ModuleInfo(torch.nn.AdaptiveMaxPool2d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_inputs_func=module_inputs_torch_nn_AdaptiveMaxPool2d,
),
ModuleInfo(torch.nn.AdaptiveMaxPool3d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_inputs_func=module_inputs_torch_nn_AdaptiveMaxPool3d,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
# not supported on MPS backend
DecorateInfo(skipMPS),)
),
ModuleInfo(torch.nn.AvgPool1d,
module_inputs_func=module_inputs_torch_nn_AvgPool1d,
),
ModuleInfo(torch.nn.AvgPool2d,
module_inputs_func=module_inputs_torch_nn_AvgPool2d,
skips=(
# The difference between channels last backward and
# channels first backward of AvgPool2d on CUDA is too large
# See https://github.com/pytorch/pytorch/issues/107201
DecorateInfo(
unittest.expectedFailure,
'TestModule',
'test_memory_format',
active_if=operator.itemgetter('training'),
device_type='cuda',),
),),
ModuleInfo(torch.nn.AvgPool3d,
module_inputs_func=module_inputs_torch_nn_AvgPool3d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# No channels_last support for AvgPool1d as it does not take 4D inputs
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
# backward not supported on MPS backend
DecorateInfo(skipMPS, 'TestModule', 'test_non_contiguous_tensors'),)
),
ModuleInfo(torch.nn.BatchNorm1d,
train_and_eval_differ=True,
module_inputs_func=module_inputs_torch_nn_BatchNorm1d,
module_error_inputs_func=module_error_inputs_torch_nn_BatchNorm1d_2d_3d,
skips=(
# tracking here rather than in the list in test_aotdispatch.py as eval mode passes
# RuntimeError: tried to get Double out of SymInt
DecorateInfo(
unittest.expectedFailure, 'TestEagerFusionModuleInfo',
'test_aot_autograd_symbolic_module_exhaustive',
active_if=operator.itemgetter('training')
),
# torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default
DecorateInfo(
unittest.expectedFailure, 'TestEagerFusionModuleInfo',
'test_aot_autograd_module_exhaustive',
active_if=operator.itemgetter('training')
))
),
ModuleInfo(torch.nn.BatchNorm2d,
train_and_eval_differ=True,
module_inputs_func=module_inputs_torch_nn_BatchNorm2d,
module_error_inputs_func=module_error_inputs_torch_nn_BatchNorm1d_2d_3d,
skips=(
# See https://github.com/pytorch/pytorch/issues/134580
DecorateInfo(expectedFailureMPS, 'TestModule', 'test_memory_format', active_if=operator.itemgetter('training')),
# tracking here rather than in the list in test_aotdispatch.py as eval mode passes
# RuntimeError: tried to get Double out of SymInt
DecorateInfo(
unittest.expectedFailure, 'TestEagerFusionModuleInfo',
'test_aot_autograd_symbolic_module_exhaustive',
active_if=operator.itemgetter('training')
),
# torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default
DecorateInfo(
unittest.expectedFailure, 'TestEagerFusionModuleInfo',
'test_aot_autograd_module_exhaustive',
active_if=operator.itemgetter('training')
),)
),
ModuleInfo(torch.nn.BatchNorm3d,
train_and_eval_differ=True,
module_inputs_func=module_inputs_torch_nn_BatchNorm3d,
module_error_inputs_func=module_error_inputs_torch_nn_BatchNorm1d_2d_3d,
skips=(
# not supported on MPS backend
DecorateInfo(skipMPS),
# tracking here rather than in the list in test_aotdispatch.py as eval mode passes
# RuntimeError: tried to get Double out of SymInt
DecorateInfo(
unittest.expectedFailure, 'TestEagerFusionModuleInfo',
'test_aot_autograd_symbolic_module_exhaustive',
active_if=operator.itemgetter('training')
),
# torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default
DecorateInfo(
unittest.expectedFailure, 'TestEagerFusionModuleInfo',
'test_aot_autograd_module_exhaustive',
active_if=operator.itemgetter('training')
),)
),
ModuleInfo(torch.nn.CELU,
module_inputs_func=module_inputs_torch_nn_CELU,
# not MPS specific, will be xfailed for all devices in next PR
skips=(
DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_check_inplace',
device_type='mps', dtypes=[torch.float16]),)
),
ModuleInfo(torch.nn.Conv1d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=False),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.Conv2d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=False),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# This was wrongly being skipped before and needs investigation.
# See https://github.com/pytorch/pytorch/issues/80247
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format",
device_type='cuda', dtypes=[torch.float64]),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.Conv3d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=False),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# Conv3d is not supported on MPS backend
DecorateInfo(skipMPS, device_type="mps"),
# This was wrongly being skipped before and needs investigation.
# See https://github.com/pytorch/pytorch/issues/80247
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.ConvTranspose1d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=False, transposed=True),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
dtypes=floating_and_complex_types_and(torch.chalf),
skips=(
# Not implemented for chalf on CPU
DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity',
dtypes=(torch.chalf,), device_type='cuda'),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
DecorateInfo(precisionOverride({torch.chalf: 5e-03}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.ConvTranspose2d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=False, transposed=True),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
dtypes=floating_and_complex_types_and(torch.chalf),
skips=(
# Fails on backward check because ViewAsRealBackward apply contiguous for grad
DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_memory_format',
dtypes=(torch.complex32, torch.complex64, torch.complex128)),
# This was wrongly being skipped before and needs investigation.
# See https://github.com/pytorch/pytorch/issues/80247
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda',
dtypes=[torch.float64, torch.complex128]),
# Not implemented for chalf on CPU
DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity',
dtypes=(torch.chalf,), device_type='cuda'),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
DecorateInfo(precisionOverride({torch.chalf: 5e-03}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.ConvTranspose3d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=False, transposed=True),
dtypes=floating_and_complex_types_and(torch.chalf),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# ConvTranspose3d is not supported on MPS backend
DecorateInfo(skipMPS),
# This was wrongly being skipped before and needs investigation.
# See https://github.com/pytorch/pytorch/issues/80247
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"),
# Not implemented for chalf on CPU
DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity',
dtypes=(torch.chalf,), device_type='cuda'),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
DecorateInfo(precisionOverride({torch.complex64: 1e-04}), 'TestModule', 'test_cpu_gpu_parity'),
DecorateInfo(precisionOverride({torch.chalf: 5e-03}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.CosineEmbeddingLoss,
module_inputs_func=module_inputs_torch_nn_CosineEmbeddingLoss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.ELU,
module_inputs_func=module_inputs_torch_nn_ELU,
# not MPS specific, will be xfailed for all devices in next PR
skips=(
DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_check_inplace',
device_type='mps', dtypes=[torch.float16]),)
),
ModuleInfo(torch.nn.FractionalMaxPool2d,
module_inputs_func=module_inputs_torch_nn_FractionalMaxPool2d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# not supported on MPS backend
DecorateInfo(skipMPS),
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.FractionalMaxPool3d,
module_inputs_func=module_inputs_torch_nn_FractionalMaxPool3d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# not supported on MPS backend
DecorateInfo(skipMPS),
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.L1Loss,
module_inputs_func=module_inputs_torch_nn_L1Loss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.SmoothL1Loss,
module_inputs_func=module_inputs_torch_nn_SmoothL1Loss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
# See #119108: input types 'tensor<f32>' and 'tensor<15x10xf16>' are not broadcast compatible
# NS: Still fails on MacOS15.1
DecorateInfo(skipIfMPS, 'TestModule', 'test_non_contiguous_tensors',
dtypes=[torch.float16], device_type='mps'),),
),
ModuleInfo(torch.nn.LazyConv1d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=True),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# Lazy modules don't currently play well with ModuleInfo tests on the meta device.
# See https://github.com/pytorch/pytorch/issues/70505 for more info.
DecorateInfo(skipMeta),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.LazyConv2d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=True),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# Lazy modules don't currently play well with ModuleInfo tests on the meta device.
# See https://github.com/pytorch/pytorch/issues/70505 for more info.
DecorateInfo(skipMeta),
# This was wrongly being skipped before and needs investigation.
# See https://github.com/pytorch/pytorch/issues/80247
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format",
device_type='cuda', dtypes=[torch.float64]),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.LazyConv3d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=True),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# Lazy modules don't currently play well with ModuleInfo tests on the meta device.
# See https://github.com/pytorch/pytorch/issues/70505 for more info.
DecorateInfo(skipMeta),
# LazyConv3d is not supported on MPS backend
DecorateInfo(skipMPS),
# This was wrongly being skipped before and needs investigation.
# See https://github.com/pytorch/pytorch/issues/80247
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.LazyConvTranspose1d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=True, transposed=True),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# Lazy modules don't currently play well with ModuleInfo tests on the meta device.
# See https://github.com/pytorch/pytorch/issues/70505 for more info.
DecorateInfo(skipMeta),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.LazyConvTranspose2d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=True, transposed=True),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# Lazy modules don't currently play well with ModuleInfo tests on the meta device.
# See https://github.com/pytorch/pytorch/issues/70505 for more info.
DecorateInfo(skipMeta),
# This was wrongly being skipped before and needs investigation.
# See https://github.com/pytorch/pytorch/issues/80247
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda',
dtypes=[torch.float64]),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.LazyConvTranspose3d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=True, transposed=True),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# Lazy modules don't currently play well with ModuleInfo tests on the meta device.
# See https://github.com/pytorch/pytorch/issues/70505 for more info.
DecorateInfo(skipMeta),
# LazyConvTranspose3d is not supported on MPS backend
DecorateInfo(skipMPS),
# This was wrongly being skipped before and needs investigation.
# See https://github.com/pytorch/pytorch/issues/80247
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.Linear,
module_inputs_func=module_inputs_torch_nn_Linear,
skips=(
# No channels_last support for Linear currently.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.Bilinear,
module_inputs_func=module_inputs_torch_nn_Bilinear,
decorators=[
DecorateInfo(
toleranceOverride({
torch.float32: tol(atol=1e-4, rtol=1e-4),
torch.float64: tol(atol=1e-4, rtol=1e-4)}),
'TestModule', 'test_forward', device_type='cpu'),
],
skips=(
# No channels_last support for Bilinear currently.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
# See #119108: tolerance issue
DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward",
device_type='mps', dtypes=[torch.float16]),)
),
ModuleInfo(torch.nn.LPPool1d,
module_inputs_func=module_inputs_torch_nn_LPPool1d,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),)
),
ModuleInfo(torch.nn.LPPool2d,
module_inputs_func=module_inputs_torch_nn_LPPool2d,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),
# Fails on backward check on MPS
# See https://github.com/pytorch/pytorch/issues/107214
DecorateInfo(
unittest.expectedFailure,
'TestModule',
'test_memory_format',
active_if=operator.itemgetter('training') and not _macos15_or_newer,
device_type='mps',
),)
),
ModuleInfo(torch.nn.LPPool3d,
module_inputs_func=module_inputs_torch_nn_LPPool3d,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
DecorateInfo(skipIfMPS, device_type='mps'),)
),
ModuleInfo(torch.nn.MaxPool1d,
module_inputs_func=module_inputs_torch_nn_MaxPool1d,
),
ModuleInfo(torch.nn.MaxPool2d,
module_inputs_func=module_inputs_torch_nn_MaxPool2d,
),
ModuleInfo(torch.nn.MaxPool3d,
module_inputs_func=module_inputs_torch_nn_MaxPool3d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
),
ModuleInfo(torch.nn.KLDivLoss,
module_inputs_func=module_inputs_torch_nn_KLDivLoss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
# https://github.com/pytorch/pytorch/issues/115588
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_cpu_gpu_parity'),
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),)
),
ModuleInfo(torch.nn.MSELoss,
module_inputs_func=module_inputs_torch_nn_MSELoss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
# See #119108: tolerance issue
DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward",
device_type='mps', dtypes=[torch.float16]),)
),
ModuleInfo(torch.nn.MarginRankingLoss,
module_inputs_func=module_inputs_torch_nn_MarginRankingLoss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.MultiLabelMarginLoss,
module_inputs_func=module_inputs_torch_nn_MultiLabelMarginLoss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
# 'aten::multilabel_margin_loss_forward' is not currently implemented for the MPS device.
DecorateInfo(skipIfMPS, 'TestModule', device_type='mps'),
# derivative for aten::multilabel_margin_loss_backward is not implemented
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),)
),
ModuleInfo(torch.nn.MultiMarginLoss,
module_inputs_func=module_inputs_torch_nn_MultiMarginLoss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
# 'aten::multi_margin_loss' is not currently implemented for the MPS device.
DecorateInfo(skipIfMPS, 'TestModule', device_type='mps'),
# RuntimeError: derivative for aten::multi_margin_loss_backward is not implemented
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),)
),
ModuleInfo(torch.nn.SoftMarginLoss,
module_inputs_func=module_inputs_torch_nn_SoftMarginLoss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
# See #119108: tolerance issue
DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward",
device_type='mps', dtypes=[torch.float16]),)
),
ModuleInfo(torch.nn.MultiLabelSoftMarginLoss,
module_inputs_func=module_inputs_torch_nn_MultiLabelSoftMarginLoss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.NLLLoss,
module_inputs_func=module_inputs_torch_nn_NLLLoss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
# See #119108: tolerance issue
DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward",
device_type='mps', dtypes=[torch.float16]),)
),
ModuleInfo(torch.nn.GaussianNLLLoss,
module_inputs_func=module_inputs_torch_nn_GaussianNLLLoss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)),
ModuleInfo(torch.nn.PoissonNLLLoss,
module_inputs_func=module_inputs_torch_nn_PoissonNLLLoss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)),
ModuleInfo(torch.nn.HingeEmbeddingLoss,
module_inputs_func=module_inputs_torch_nn_HingeEmbeddingLoss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.HuberLoss,
module_inputs_func=module_inputs_torch_nn_HuberLoss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
# See #119108: seemingly incorrect output dtype
DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward",
device_type='mps', dtypes=[torch.float16]),)
),
ModuleInfo(torch.nn.BCELoss,
module_inputs_func=module_inputs_torch_nn_BCELoss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
# error: input types 'tensor<f32>' and 'tensor<15x10xf16>' are not broadcast compatible
DecorateInfo(skipIfMPS, 'TestModule', dtypes=[torch.float16], device_type='mps'),)
),
ModuleInfo(torch.nn.BCEWithLogitsLoss,
module_inputs_func=module_inputs_torch_nn_BCEWithLogitsLoss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
# see #119108: tolerance issue
DecorateInfo(skipIfMPS, 'TestModule', dtypes=[torch.float16], device_type='mps'),)
),
ModuleInfo(torch.nn.CrossEntropyLoss,
module_inputs_func=module_inputs_torch_nn_CrossEntropyLoss,
dtypes=get_all_fp_dtypes(include_half=True, include_bfloat16=False),
decorators=(
# No channels_last support for loss functions.
DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_memory_format'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=3e-2, rtol=1e-3)}), "TestModule",
"test_forward", dtypes=[torch.float16], device_type='cpu'),
DecorateInfo(unittest.expectedFailure, "TestModule", "test_cpu_gpu_parity", dtypes=[torch.float16],
device_type='cuda'),),
),
ModuleInfo(torch.nn.CTCLoss,
module_inputs_func=module_inputs_torch_nn_CTCLoss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
# The operator aten::_ctc_loss is not currently implemented for the MPS device.
DecorateInfo(skipIfMPS, 'TestModule', device_type='mps',),
# derivative for aten::_ctc_loss_backward is not implemented
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),
# https://github.com/pytorch/pytorch/issues/115585
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_non_contiguous_tensors'),)
),
ModuleInfo(torch.nn.GELU,
module_inputs_func=module_inputs_torch_nn_GELU,
skips=(
# See #119108: tolerance issue
DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward",
device_type='mps', dtypes=[torch.float16]),)
),
ModuleInfo(torch.nn.GLU,
module_inputs_func=module_inputs_torch_nn_GLU,
),
ModuleInfo(torch.nn.GroupNorm,
module_inputs_func=module_inputs_torch_nn_GroupNorm,
module_error_inputs_func=module_error_inputs_torch_nn_GroupNorm,
dtypes=get_all_fp_dtypes(include_bfloat16=True, include_half=True),
skips=(
# Tracking at https://github.com/pytorch/pytorch/issues/98089
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_cpu_gpu_parity'),
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}),
'TestModule', 'test_memory_format', device_type='cpu'),
# No channels_last support for GroupNorm currently.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='cuda'),
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='mps'),
DecorateInfo(unittest.skip("Skipped!"), "TestModule", "test_grad",
active_if=TEST_WITH_ROCM, device_type='cuda'),)
),
ModuleInfo(torch.nn.Hardshrink,
module_inputs_func=module_inputs_torch_nn_Hardshrink,
),
ModuleInfo(torch.nn.Hardswish,
module_inputs_func=module_inputs_torch_nn_Hardswish,
supports_gradgrad=False),
ModuleInfo(torch.nn.Hardtanh,
module_inputs_func=module_inputs_torch_nn_Hardtanh,
),
ModuleInfo(torch.nn.InstanceNorm1d,
module_inputs_func=partial(module_inputs_torch_nn_InstanceNormNd, N=1),
train_and_eval_differ=True,
skips=(
# No channels_last support for InstanceNorm1d currently.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.InstanceNorm2d,
module_inputs_func=partial(module_inputs_torch_nn_InstanceNormNd, N=2),
train_and_eval_differ=True,
skips=(
# No channels_last support for InstanceNorm2d currently.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.InstanceNorm3d,
module_inputs_func=partial(module_inputs_torch_nn_InstanceNormNd, N=3),
train_and_eval_differ=True,
skips=(
# not supported on MPS backend
DecorateInfo(expectedFailureMPS, 'TestModuleMPS', 'test_memory_format'),
DecorateInfo(expectedFailureMPS, 'TestModuleMPS', 'test_non_contiguous_tensors'),
DecorateInfo(expectedFailureMPS, 'TestModuleMPS', 'test_forward'),
DecorateInfo(expectedFailureMPS, 'TestModuleMPS', 'test_non_contiguous'),
DecorateInfo(expectedFailureMPS, 'TestModuleMPS', 'test_save_load'),
# No channels_last support for InstanceNorm3d currently.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.LocalResponseNorm,
module_inputs_func=module_inputs_torch_nn_LocalResponseNorm,
),
ModuleInfo(torch.nn.LayerNorm,
module_inputs_func=module_inputs_torch_nn_LayerNorm,
skips=(
# No channels_last support for LayerNorm currently.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.RMSNorm,
module_inputs_func=module_inputs_torch_nn_RMSNorm,
),
# TransformerEncoder takes the same inputs as TransformerEncoderLayer
ModuleInfo(torch.nn.TransformerEncoder,
train_and_eval_differ=True,
module_inputs_func=module_inputs_torch_nn_TransformerEncoder,
decorators=[
# Not implemented for SDPA backward derivative
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad',
device_type='cpu'),
],
skips=(
# No channels_last support for TransformerEncoderLayer currently.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
# Doesn't support device / dtype kwargs directly because it is just a
# container of TransformerEncoderLayers.
DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_factory_kwargs'),)
),
ModuleInfo(torch.nn.TransformerEncoderLayer,
train_and_eval_differ=True,
module_inputs_func=module_inputs_torch_nn_TransformerEncoderLayer,
decorators=[
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}),
'TestModule', 'test_non_contiguous_tensors',
device_type='cpu', active_if=IS_WINDOWS),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-4, rtol=2e-3)}),
'TestModule', 'test_forward',
device_type='mps'),
# Not implemented for SDPA backward derivative
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad',
device_type='cpu'),
],
skips=(
# No channels_last support for TransformerEncoderLayer currently.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.TransformerDecoderLayer,
module_inputs_func=module_inputs_torch_nn_TransformerDecoderLayer,
decorators=[
# Not implemented for SDPA backward derivative
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad',
device_type='cpu'),
],
skips=(
# No channels_last support for TransformerDecoderLayer currently.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.Transformer,
module_inputs_func=module_inputs_torch_nn_Transformer,
# Inputs are too large to run with slow gradcheck
# https://github.com/pytorch/pytorch/issues/117140
gradcheck_fast_mode=True,
decorators=[
# Not implemented for SDPA backward derivative
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad',
device_type='cpu'),
],
skips=(
# No channels_last support for Transformer currently.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.MultiheadAttention,
train_and_eval_differ=True,
module_inputs_func=module_inputs_torch_nn_MultiheadAttention,
skips=(
# No channels_last support for MultiheadAttention currently.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.Embedding,
module_inputs_func=module_inputs_torch_nn_Embedding,
decorators=[
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}),
'TestModule', 'test_non_contiguous_tensors',
device_type='mps')],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.ReLU,
module_inputs_func=module_inputs_torch_nn_ReLU,
skips=None if _macos15_or_newer else (
# Fails on backward check on MPS
# See https://github.com/pytorch/pytorch/issues/107214
DecorateInfo(
unittest.expectedFailure,
'TestModule',
'test_memory_format',
active_if=operator.itemgetter('training'),
device_type='mps',
),)
),
ModuleInfo(torch.nn.LeakyReLU,
module_inputs_func=module_inputs_torch_nn_LeakyReLU,
),
ModuleInfo(torch.nn.ReLU6,
module_inputs_func=module_inputs_torch_nn_ReLU6,
skips=(
# test fails on MPS backend and is being investigated.
# See https://github.com/pytorch/pytorch/issues/100914
DecorateInfo(skipMPS),)
),
ModuleInfo(torch.nn.PReLU,
module_inputs_func=module_inputs_torch_nn_PReLU,
skips=(
# test fails on MPS backend and is being investigated.
# See https://github.com/pytorch/pytorch/issues/100914
DecorateInfo(skipMPS),)
),
ModuleInfo(torch.nn.RNNCell,
module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU_Cell, is_rnn=True),
module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU_Cell,
),
ModuleInfo(torch.nn.GRUCell,
module_inputs_func=module_inputs_torch_nn_RNN_GRU_Cell,
module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU_Cell,
),
ModuleInfo(torch.nn.LSTMCell,
module_inputs_func=module_inputs_torch_nn_LSTMCell,
module_error_inputs_func=module_error_inputs_torch_nn_LSTMCell,
),
ModuleInfo(torch.nn.Sigmoid,
module_inputs_func=module_inputs_torch_nn_Sigmoid,
skips=None if _macos15_or_newer else (
# Fails on backward check on MPS
# See https://github.com/pytorch/pytorch/issues/107214
DecorateInfo(
unittest.expectedFailure,
'TestModule',
'test_memory_format',
active_if=operator.itemgetter('training'),
device_type='mps',
),)
),
ModuleInfo(torch.nn.LogSigmoid,
module_inputs_func=module_inputs_torch_nn_LogSigmoid,
skips=(
# See #119108: tolerance issue
DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),)
),
ModuleInfo(torch.nn.SiLU,
module_inputs_func=module_inputs_torch_nn_SiLU,
),
ModuleInfo(torch.nn.Softmax,
module_inputs_func=module_inputs_torch_nn_Softmax,
),
ModuleInfo(torch.nn.Softmax2d,
module_inputs_func=module_inputs_torch_nn_Softmax2d,
skips=(
# no channels last support for Softmax2d currently
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
# See #119108: tolerance issue
DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),)
),
ModuleInfo(torch.nn.LogSoftmax,
module_inputs_func=module_inputs_torch_nn_LogSoftmax,
skips=(
# no channels last support for LogSoftmax currently
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
# See #119108: inf nan error
DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),)
),
ModuleInfo(torch.nn.Softmin,
module_inputs_func=module_inputs_torch_nn_Softmin,
skips=(
# no channels last support for Softmin currently
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.Softplus,
module_inputs_func=module_inputs_torch_nn_Softplus,
skips=(
# test fails on MPS backend and is being investigated.
# See https://github.com/pytorch/pytorch/issues/100914
DecorateInfo(skipMPS),)
),
ModuleInfo(torch.nn.Softshrink,
module_inputs_func=module_inputs_torch_nn_Softshrink,
skips=(
# not supported on MPS backend
DecorateInfo(skipMPS),)
),
ModuleInfo(torch.nn.Softsign,
module_inputs_func=module_inputs_torch_nn_Softsign,
),
ModuleInfo(torch.nn.Tanh,
module_inputs_func=module_inputs_torch_nn_Tanh,
skips=None if _macos15_or_newer else (
# Fails on backward check on MPS
# See https://github.com/pytorch/pytorch/issues/107214
DecorateInfo(
unittest.expectedFailure,
'TestModule',
'test_memory_format',
active_if=operator.itemgetter('training'),
device_type='mps',
),)
),
ModuleInfo(torch.nn.Tanhshrink,
module_inputs_func=module_inputs_torch_nn_Tanhshrink,
skips=None if _macos15_or_newer else (
# Fails on backward check on MPS
# See https://github.com/pytorch/pytorch/issues/107214
DecorateInfo(
unittest.expectedFailure,
'TestModule',
'test_memory_format',
active_if=operator.itemgetter('training'),
device_type='mps',
),)
),
ModuleInfo(torch.nn.Threshold,
module_inputs_func=module_inputs_torch_nn_Threshold,
skips=(
# test fails on MPS backend and is being investigated.
# See https://github.com/pytorch/pytorch/issues/100914
DecorateInfo(skipMPS),)
),
ModuleInfo(torch.nn.Mish,
module_inputs_func=module_inputs_torch_nn_Mish,
skips=(
# not supported on MPS backend
DecorateInfo(skipMPS),)
),
ModuleInfo(torch.nn.RNN,
train_and_eval_differ=True,
module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU, is_rnn=True),
module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU,
decorators=rnn_gru_lstm_module_info_decorators
),
ModuleInfo(torch.nn.GRU,
train_and_eval_differ=True,
module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU, is_rnn=False),
module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU,
decorators=rnn_gru_lstm_module_info_decorators),
ModuleInfo(torch.nn.LSTM,
train_and_eval_differ=True,
module_inputs_func=module_inputs_torch_nn_LSTM,
module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU,
skips=(
# LSTM with projections is not currently supported with MPS
DecorateInfo(skipMPS),),
decorators=rnn_gru_lstm_module_info_decorators),
ModuleInfo(torch.nn.ReflectionPad1d,
module_inputs_func=module_inputs_torch_nn_ReflectionPad1d,
),
ModuleInfo(torch.nn.ReflectionPad2d,
module_inputs_func=module_inputs_torch_nn_ReflectionPad2d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format',
device_type='cuda'),
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format',
device_type='mps'),)
),
ModuleInfo(torch.nn.ReflectionPad3d,
module_inputs_func=module_inputs_torch_nn_ReflectionPad3d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format',
device_type='cuda'),
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format',
device_type='mps'),)
),
ModuleInfo(torch.nn.ReplicationPad1d,
module_inputs_func=module_inputs_torch_nn_ReplicationPad1d,
),
ModuleInfo(torch.nn.ReplicationPad2d,
module_inputs_func=module_inputs_torch_nn_ReplicationPad2d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format',
device_type='cuda'),
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format',
device_type='mps'),)
),
ModuleInfo(torch.nn.ReplicationPad3d,
module_inputs_func=module_inputs_torch_nn_ReplicationPad3d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format',
device_type='cuda'),
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format',
device_type='mps'),)
),
ModuleInfo(torch.nn.SELU,
module_inputs_func=module_inputs_torch_nn_SELU,
skips=(
# test fails on MPS backend and is being investigated.
# See https://github.com/pytorch/pytorch/issues/100914
DecorateInfo(skipMPS),)
),
ModuleInfo(torch.nn.ZeroPad1d,
module_inputs_func=module_inputs_torch_nn_ZeroPad1d,
),
ModuleInfo(torch.nn.ZeroPad2d,
module_inputs_func=module_inputs_torch_nn_ZeroPad2d,
skips=(
# Fails with channels last test on MPS backend
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),)
),
ModuleInfo(torch.nn.ZeroPad3d,
module_inputs_func=module_inputs_torch_nn_ZeroPad3d,
skips=(
# Fails with channels last test on MPS backend
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),)
),
ModuleInfo(torch.nn.CircularPad1d,
module_inputs_func=module_inputs_torch_nn_CircularPad1d,
module_error_inputs_func=module_error_inputs_torch_nn_Pad1d,
),
ModuleInfo(torch.nn.CircularPad2d,
module_inputs_func=module_inputs_torch_nn_CircularPad2d,
module_error_inputs_func=module_error_inputs_torch_nn_Pad2d,
),
ModuleInfo(torch.nn.CircularPad3d,
module_inputs_func=module_inputs_torch_nn_CircularPad3d,
module_error_inputs_func=module_error_inputs_torch_nn_Pad3d,
skips=(
# Fails with channels last test on MPS backend
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"),)
),
ModuleInfo(torch.nn.ConstantPad1d,
module_inputs_func=module_inputs_torch_nn_ConstantPad1d,
),
ModuleInfo(torch.nn.ConstantPad2d,
module_inputs_func=module_inputs_torch_nn_ConstantPad2d,
skips=(
# Fails with channels last test on MPS backend
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),)
),
ModuleInfo(torch.nn.ConstantPad3d,
module_inputs_func=module_inputs_torch_nn_ConstantPad3d,
skips=(
# Fails with channels last test on MPS backend
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),)
)
]
| ModuleInfo |
python | pyca__cryptography | src/cryptography/x509/base.py | {
"start": 4741,
"end": 8488
} | class ____:
def __init__(
self,
subject_name: Name | None = None,
extensions: list[Extension[ExtensionType]] = [],
attributes: list[tuple[ObjectIdentifier, bytes, int | None]] = [],
):
"""
Creates an empty X.509 certificate request (v1).
"""
self._subject_name = subject_name
self._extensions = extensions
self._attributes = attributes
def subject_name(self, name: Name) -> CertificateSigningRequestBuilder:
"""
Sets the certificate requestor's distinguished name.
"""
if not isinstance(name, Name):
raise TypeError("Expecting x509.Name object.")
if self._subject_name is not None:
raise ValueError("The subject name may only be set once.")
return CertificateSigningRequestBuilder(
name, self._extensions, self._attributes
)
def add_extension(
self, extval: ExtensionType, critical: bool
) -> CertificateSigningRequestBuilder:
"""
Adds an X.509 extension to the certificate request.
"""
if not isinstance(extval, ExtensionType):
raise TypeError("extension must be an ExtensionType")
extension = Extension(extval.oid, critical, extval)
_reject_duplicate_extension(extension, self._extensions)
return CertificateSigningRequestBuilder(
self._subject_name,
[*self._extensions, extension],
self._attributes,
)
def add_attribute(
self,
oid: ObjectIdentifier,
value: bytes,
*,
_tag: _ASN1Type | None = None,
) -> CertificateSigningRequestBuilder:
"""
Adds an X.509 attribute with an OID and associated value.
"""
if not isinstance(oid, ObjectIdentifier):
raise TypeError("oid must be an ObjectIdentifier")
if not isinstance(value, bytes):
raise TypeError("value must be bytes")
if _tag is not None and not isinstance(_tag, _ASN1Type):
raise TypeError("tag must be _ASN1Type")
_reject_duplicate_attribute(oid, self._attributes)
if _tag is not None:
tag = _tag.value
else:
tag = None
return CertificateSigningRequestBuilder(
self._subject_name,
self._extensions,
[*self._attributes, (oid, value, tag)],
)
def sign(
self,
private_key: CertificateIssuerPrivateKeyTypes,
algorithm: _AllowedHashTypes | None,
backend: typing.Any = None,
*,
rsa_padding: padding.PSS | padding.PKCS1v15 | None = None,
ecdsa_deterministic: bool | None = None,
) -> CertificateSigningRequest:
"""
Signs the request using the requestor's private key.
"""
if self._subject_name is None:
raise ValueError("A CertificateSigningRequest must have a subject")
if rsa_padding is not None:
if not isinstance(rsa_padding, (padding.PSS, padding.PKCS1v15)):
raise TypeError("Padding must be PSS or PKCS1v15")
if not isinstance(private_key, rsa.RSAPrivateKey):
raise TypeError("Padding is only supported for RSA keys")
if ecdsa_deterministic is not None:
if not isinstance(private_key, ec.EllipticCurvePrivateKey):
raise TypeError(
"Deterministic ECDSA is only supported for EC keys"
)
return rust_x509.create_x509_csr(
self,
private_key,
algorithm,
rsa_padding,
ecdsa_deterministic,
)
| CertificateSigningRequestBuilder |
python | tensorflow__tensorflow | tensorflow/python/profiler/pprof_profiler.py | {
"start": 1931,
"end": 3117
} | class ____(object):
"""Keeps track of strings to add to string_table in pprof proto."""
def __init__(self):
# Pprof requires first entry in string_table to be ''.
self._string_table = ['']
self._string_to_index = {'': 0}
def index_of(self, value_str):
"""Get index of value_str in the string table.
If value_str is not in the string table, we will add it at the end
and then return the new index.
Args:
value_str: (string) Value to lookup/add in/to the string table.
Returns:
Index of value_str in the string table.
"""
if value_str is None:
value_str = ''
if value_str in self._string_to_index:
return self._string_to_index[value_str]
index = len(self._string_table)
self._string_table.append(value_str)
self._string_to_index[value_str] = index
return index
def next_index(self):
"""Gets index that would be assigned to the next added string.
Returns:
Index of the next string if it was added.
"""
return len(self._string_table)
def string_table(self):
"""Returns a list of strings to store in pprof's string_table."""
return self._string_table
| StringTable |
python | PrefectHQ__prefect | src/integrations/prefect-gcp/prefect_gcp/workers/cloud_run_v2.py | {
"start": 20608,
"end": 20719
} | class ____(BaseWorkerResult):
"""
The result of a Cloud Run worker V2 job.
"""
| CloudRunWorkerV2Result |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/dagster/subschema/ingress.py | {
"start": 434,
"end": 564
} | class ____(BaseModel):
path: str
pathType: IngressPathType
serviceName: str
servicePort: Union[str, int]
| IngressPath |
python | google__pytype | pytype/tests/test_import1.py | {
"start": 282,
"end": 378
} | class ____:
"""Fake options."""
def __init__(self):
self.open_function = open
| FakeOptions |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-typesense/llama_index/vector_stores/typesense/base.py | {
"start": 946,
"end": 9579
} | class ____(BasePydanticVectorStore):
"""
Typesense Vector Store.
In this vector store, embeddings and docs are stored within a
Typesense index.
During query time, the index uses Typesense to query for the top
k most similar nodes.
Args:
client (Any): Typesense client
tokenizer (Optional[Callable[[str], List]]): tokenizer function.
Examples:
`pip install llama-index-vector-stores-typesense`
```python
from llama_index.vector_stores.typesense import TypesenseVectorStore
from typesense import Client
# Sign up for Typesense and get your API key
typesense_client = Client(
{
"api_key": "your_api_key_here",
"nodes": [{"host": "localhost", "port": "8108", "protocol": "http"}],
"connection_timeout_seconds": 2,
}
)
# Create an instance of TypesenseVectorStore
vector_store = TypesenseVectorStore(typesense_client)
```
"""
stores_text: bool = True
is_embedding_query: bool = False
flat_metadata: bool = False
_tokenizer: Callable[[str], List] = PrivateAttr()
_text_key: str = PrivateAttr()
_collection_name: str = PrivateAttr()
_collection: Any = PrivateAttr()
_batch_size: int = PrivateAttr()
_metadata_key: str = PrivateAttr()
_client: typesense.Client = PrivateAttr()
def __init__(
self,
client: Any,
tokenizer: Optional[Callable[[str], List]] = None,
text_key: str = DEFAULT_TEXT_KEY,
collection_name: str = DEFAULT_COLLECTION_NAME,
batch_size: int = DEFAULT_BATCH_SIZE,
metadata_key: str = DEFAULT_METADATA_KEY,
**kwargs: Any,
) -> None:
"""Initialize params."""
super().__init__()
if client is not None:
if not isinstance(client, typesense.Client):
raise ValueError(
f"client should be an instance of typesense.Client, "
f"got {type(client)}"
)
self._client = cast(typesense.Client, client)
self._tokenizer = tokenizer or get_tokenizer()
self._text_key = text_key
self._collection_name = collection_name
self._collection = self._client.collections[self._collection_name]
self._batch_size = batch_size
self._metadata_key = metadata_key
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "TypesenseVectorStore"
@property
def client(self) -> Any:
"""Return Typesense client."""
return self._client
@property
def collection(self) -> Any:
"""Return Typesense collection."""
return self._collection
def _create_collection(self, num_dim: int) -> None:
fields = [
{"name": "vec", "type": "float[]", "num_dim": num_dim},
{"name": f"{self._text_key}", "type": "string"},
{"name": ".*", "type": "auto"},
]
self._client.collections.create(
{"name": self._collection_name, "fields": fields}
)
def _create_upsert_docs(self, nodes: List[BaseNode]) -> List[dict]:
upsert_docs = []
for node in nodes:
doc = {
"id": node.node_id,
"vec": node.get_embedding(),
f"{self._text_key}": node.get_content(metadata_mode=MetadataMode.NONE),
"ref_doc_id": node.ref_doc_id,
f"{self._metadata_key}": node_to_metadata_dict(
node, remove_text=True, flat_metadata=self.flat_metadata
),
}
upsert_docs.append(doc)
return upsert_docs
@staticmethod
def _to_typesense_filter(standard_filters: MetadataFilters) -> str:
"""Convert from standard dataclass to typesense filter dict."""
for filter in standard_filters.legacy_filters():
if filter.key == "filter_by":
return str(filter.value)
return ""
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""
Add nodes to index.
Args:
nodes: List[BaseNode]: list of nodes with embeddings
"""
docs = self._create_upsert_docs(nodes)
try:
collection = cast(Collection, self.collection)
collection.documents.import_(
docs, {"action": "upsert"}, batch_size=self._batch_size
)
except ObjectNotFound:
# Create the collection if it doesn't already exist
num_dim = len(nodes[0].get_embedding())
self._create_collection(num_dim)
collection.documents.import_(
docs, {"action": "upsert"}, batch_size=self._batch_size
)
return [node.node_id for node in nodes]
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
collection = cast(Collection, self.collection)
collection.documents.delete({"filter_by": f"ref_doc_id:={ref_doc_id}"})
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""
Query Typesense index for top k most similar nodes.
Args:
query (VectorStoreQuery): Vector store query object.
"""
if query.filters:
typesense_filter = self._to_typesense_filter(query.filters)
else:
typesense_filter = ""
if query.mode is not VectorStoreQueryMode.TEXT_SEARCH:
if query.query_embedding:
embedded_query = [str(x) for x in query.query_embedding]
search_requests = {
"searches": [
{
"collection": self._collection_name,
"q": "*",
"vector_query": f"vec:([{','.join(embedded_query)}],"
+ f"k:{query.similarity_top_k})",
"filter_by": typesense_filter,
}
]
}
else:
raise ValueError("Vector search requires a query embedding")
if query.mode is VectorStoreQueryMode.TEXT_SEARCH:
if query.query_str:
search_requests = {
"searches": [
{
"collection": self._collection_name,
"q": query.query_str,
"query_by": self._text_key,
"filter_by": typesense_filter,
}
]
}
else:
raise ValueError("Text search requires a query string")
response = self._client.multi_search.perform(search_requests, {})
top_k_nodes = []
top_k_ids = []
top_k_scores = None
if query.mode is not VectorStoreQueryMode.TEXT_SEARCH:
top_k_scores = []
for hit in response["results"][0]["hits"]:
document = hit["document"]
id = document["id"]
text = document[self._text_key]
# Note that typesense distances range from 0 to 2, \
# where 0 is most similar and 2 is most dissimilar
if query.mode is not VectorStoreQueryMode.TEXT_SEARCH:
score = hit["vector_distance"]
try:
node = metadata_dict_to_node(document[self._metadata_key])
node.text = text
except Exception:
extra_info, node_info, relationships = legacy_metadata_dict_to_node(
document[self._metadata_key], text_key=self._text_key
)
node = TextNode(
text=text,
id_=id,
metadata=extra_info,
start_chart_idx=node_info.get("start", None),
end_chart_idx=node_info.get("end", None),
relationships=relationships,
)
top_k_ids.append(id)
top_k_nodes.append(node)
if query.mode is not VectorStoreQueryMode.TEXT_SEARCH:
top_k_scores.append(score)
return VectorStoreQueryResult(
nodes=top_k_nodes, similarities=top_k_scores, ids=top_k_ids
)
| TypesenseVectorStore |
python | kamyu104__LeetCode-Solutions | Python/copy-list-with-random-pointer.py | {
"start": 150,
"end": 1106
} | class ____(object):
# @param head, a RandomListNode
# @return a RandomListNode
def copyRandomList(self, head):
# copy and combine copied list with original list
current = head
while current:
copied = Node(current.val)
copied.next = current.next
current.next = copied
current = copied.next
# update random node in copied list
current = head
while current:
if current.random:
current.next.random = current.random.next
current = current.next.next
# split copied list from combined one
dummy = Node(0)
copied_current, current = dummy, head
while current:
copied_current.next = current.next
current.next = current.next.next
copied_current, current = copied_current.next, current.next
return dummy.next
# Time: O(n)
# Space: O(n)
| Solution |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/fsevents.py | {
"start": 1393,
"end": 5131
} | class ____(EventEmitter):
"""
Mac OS X FSEvents Emitter class.
:param event_queue:
The event queue to fill with events.
:param watch:
A watch object representing the directory to monitor.
:type watch:
:class:`watchdog.observers.api.ObservedWatch`
:param timeout:
Read events blocking timeout (in seconds).
:type timeout:
``float``
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._lock = threading.Lock()
self.snapshot = DirectorySnapshot(watch.path, watch.is_recursive)
def on_thread_stop(self):
_fsevents.remove_watch(self.watch)
_fsevents.stop(self)
def queue_events(self, timeout):
with self._lock:
if not self.watch.is_recursive\
and self.watch.path not in self.pathnames:
return
new_snapshot = DirectorySnapshot(self.watch.path,
self.watch.is_recursive)
events = new_snapshot - self.snapshot
self.snapshot = new_snapshot
# Files.
for src_path in events.files_deleted:
self.queue_event(FileDeletedEvent(src_path))
for src_path in events.files_modified:
self.queue_event(FileModifiedEvent(src_path))
for src_path in events.files_created:
self.queue_event(FileCreatedEvent(src_path))
for src_path, dest_path in events.files_moved:
self.queue_event(FileMovedEvent(src_path, dest_path))
# Directories.
for src_path in events.dirs_deleted:
self.queue_event(DirDeletedEvent(src_path))
for src_path in events.dirs_modified:
self.queue_event(DirModifiedEvent(src_path))
for src_path in events.dirs_created:
self.queue_event(DirCreatedEvent(src_path))
for src_path, dest_path in events.dirs_moved:
self.queue_event(DirMovedEvent(src_path, dest_path))
def run(self):
try:
def callback(pathnames, flags, emitter=self):
emitter.queue_events(emitter.timeout)
# for pathname, flag in zip(pathnames, flags):
# if emitter.watch.is_recursive: # and pathname != emitter.watch.path:
# new_sub_snapshot = DirectorySnapshot(pathname, True)
# old_sub_snapshot = self.snapshot.copy(pathname)
# diff = new_sub_snapshot - old_sub_snapshot
# self.snapshot += new_subsnapshot
# else:
# new_snapshot = DirectorySnapshot(emitter.watch.path, False)
# diff = new_snapshot - emitter.snapshot
# emitter.snapshot = new_snapshot
# INFO: FSEvents reports directory notifications recursively
# by default, so we do not need to add subdirectory paths.
#pathnames = set([self.watch.path])
# if self.watch.is_recursive:
# for root, directory_names, _ in os.walk(self.watch.path):
# for directory_name in directory_names:
# full_path = absolute_path(
# os.path.join(root, directory_name))
# pathnames.add(full_path)
self.pathnames = [self.watch.path]
_fsevents.add_watch(self,
self.watch,
callback,
self.pathnames)
_fsevents.read_events(self)
except:
pass
| FSEventsEmitter |
python | tox-dev__tox | src/tox/report.py | {
"start": 8347,
"end": 8447
} | class ____(RuntimeError):
"""Error that has been handled so no need for stack trace."""
| HandledError |
python | rq__rq | rq/cron.py | {
"start": 5461,
"end": 21740
} | class ____:
"""Simple interval-based job scheduler for RQ"""
def __init__(
self,
connection: Redis,
logging_level: Union[str, int] = logging.INFO,
name: str = '',
):
self.connection: Redis = connection
self._cron_jobs: List[CronJob] = []
self.hostname: str = socket.gethostname()
self.pid: int = os.getpid()
self.name: str = name or f'{self.hostname}:{self.pid}:{uuid.uuid4().hex[:6]}'
self.config_file: str = ''
self.created_at: datetime = now()
self.serializer = resolve_serializer()
self.log: logging.Logger = logging.getLogger(__name__)
if not self.log.hasHandlers():
setup_loghandlers(
level=logging_level,
name=__name__,
log_format=DEFAULT_LOGGING_FORMAT,
date_format=DEFAULT_LOGGING_DATE_FORMAT,
)
self.log.propagate = False
def __eq__(self, other) -> bool:
"""Equality does not take the database/connection into account"""
if not isinstance(other, self.__class__):
return False
return self.name == other.name
def __hash__(self) -> int:
"""The hash does not take the database/connection into account"""
return hash(self.name)
def register(
self,
func: Callable,
queue_name: str,
args: Optional[Tuple] = None,
kwargs: Optional[Dict] = None,
interval: Optional[int] = None,
cron: Optional[str] = None,
job_timeout: Optional[int] = None,
result_ttl: int = 500,
ttl: Optional[int] = None,
failure_ttl: Optional[int] = None,
meta: Optional[dict] = None,
) -> CronJob:
"""Register a function to be run at regular intervals"""
cron_job = CronJob(
queue_name=queue_name,
func=func,
args=args,
kwargs=kwargs,
interval=interval,
cron=cron,
job_timeout=job_timeout,
result_ttl=result_ttl,
ttl=ttl,
failure_ttl=failure_ttl,
meta=meta,
)
self._cron_jobs.append(cron_job)
job_key = f'{func.__module__}.{func.__name__}'
if interval:
self.log.info(f"Registered '{job_key}' to run on {queue_name} every {interval} seconds")
elif cron:
self.log.info(f"Registered '{job_key}' to run on {queue_name} with cron schedule '{cron}'")
return cron_job
def get_jobs(self) -> List[CronJob]:
"""Get all registered cron jobs"""
return self._cron_jobs
def enqueue_jobs(self) -> List[CronJob]:
"""Enqueue all jobs that are due to run"""
enqueue_time = now()
enqueued_jobs: List[CronJob] = []
for job in self._cron_jobs:
if job.should_run():
job.enqueue(self.connection)
job.set_run_time(enqueue_time)
enqueued_jobs.append(job)
return enqueued_jobs
def calculate_sleep_interval(self) -> float:
"""Calculate how long to sleep until the next job is due.
Returns the number of seconds to sleep, with a maximum of 60 seconds
to ensure we check regularly.
"""
current_time = now()
# Find the next job to run
next_job_times = [job.next_run_time for job in self._cron_jobs if job.next_run_time]
if not next_job_times:
return 60 # Default sleep time of 60 seconds
# Find the closest job by next_run_time
closest_time = min(next_job_times)
# Calculate seconds until next job
seconds_until_next = (closest_time - current_time).total_seconds()
# If negative or zero, the job is overdue, so run immediately
if seconds_until_next <= 0:
return 0
# Cap maximum sleep time at 60 seconds
return min(seconds_until_next, 60)
def _install_signal_handlers(self):
"""Install signal handlers for graceful shutdown."""
signal.signal(signal.SIGINT, self._request_stop)
signal.signal(signal.SIGTERM, self._request_stop)
def _request_stop(self, signum, frame):
"""Handle shutdown signals gracefully."""
self.log.info('CronScheduler %s: received shutdown signal %s', self.name, signum)
raise StopRequested()
def start(self):
"""Start the cron scheduler"""
self.log.info('CronScheduler %s: starting...', self.name)
# Register birth and install signal handlers
self._install_signal_handlers()
self.register_birth()
try:
while True:
self.enqueue_jobs()
self.heartbeat()
sleep_time = self.calculate_sleep_interval()
if sleep_time > 0:
self.log.debug(f'Sleeping for {sleep_time} seconds...')
time.sleep(sleep_time)
except KeyboardInterrupt:
self.log.info('CronScheduler %s: received KeyboardInterrupt', self.name)
except StopRequested:
self.log.info('CronScheduler %s: stop requested', self.name)
finally:
# Register death before shutting down
self.register_death()
self.log.info('CronScheduler %s: shutdown complete', self.name)
def load_config_from_file(self, config_path: str):
"""
Dynamically load a cron config file and register all jobs with this Cron instance.
Supports both dotted import paths (e.g. 'app.cron_config') and file paths
(e.g. '/path/to/app/cron_config.py', 'app/cron_config.py'). The .py
extension is recommended for file paths for clarity.
Jobs defined in the config file must use the global `rq.cron.register` function.
Args:
config_path: Path to the cron_config.py file or module path.
"""
self.config_file = config_path
self.log.info(f'Loading cron configuration from {config_path}')
global _job_data_registry
_job_data_registry = [] # Clear global registry before loading module
if os.path.isabs(config_path):
# Absolute paths must be loaded by file path (cannot be converted to valid module paths)
self.log.debug(f'Loading absolute file path: {config_path}')
# Validate the file path
validate_absolute_path(config_path)
# Load the file as a module
module_name = f'rq_cron_config_{os.path.basename(config_path).replace(".", "_")}'
try:
spec = importlib.util.spec_from_file_location(module_name, config_path)
if spec is None or spec.loader is None:
error_msg = f'Could not create module spec for {config_path}'
self.log.error(error_msg)
raise ImportError(error_msg)
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
self.log.debug(f'Successfully loaded config from file: {config_path}')
except Exception as e:
if module_name in sys.modules:
del sys.modules[module_name]
error_msg = f"Failed to load configuration file '{config_path}': {e}"
self.log.error(error_msg)
raise ImportError(error_msg) from e
else:
# Relative paths and dotted paths - normalize to dotted module format
normalized_path = normalize_config_path(config_path)
self.log.debug(f'Normalized path: {normalized_path}')
# Import the module using the normalized dotted path
try:
if normalized_path in sys.modules:
importlib.reload(sys.modules[normalized_path])
else:
importlib.import_module(normalized_path)
self.log.debug(f'Successfully loaded config from module: {normalized_path}')
except ImportError as e:
error_msg = f"Failed to import configuration module '{normalized_path}' (from '{config_path}'): {e}"
self.log.error(error_msg)
raise ImportError(error_msg) from e
except Exception as e:
error_msg = f"An error occurred while importing '{normalized_path}' (from '{config_path}'): {e}"
self.log.error(error_msg)
raise Exception(error_msg) from e
# Now that the module has been loaded (which populated _job_data_registry
# via the global `register` function), register the jobs with *this* instance.
job_count = 0
for data in _job_data_registry:
self.log.debug(f'Registering job from config: {data["func"].__name__}')
try:
self.register(**data) # Calls the instance's register method
job_count += 1
except Exception as e:
self.log.error(f'Failed to register job {data["func"].__name__} from config: {e}', exc_info=True)
# Decide if loading should fail entirely or just skip the job
# For now, log the error and continue
# Clear the global registry after we're done
_job_data_registry = [] # type: ignore
self.log.info(f"Successfully registered {job_count} cron jobs from '{config_path}'")
# Method modifies the instance, no need to return self unless chaining is desired
@property
def key(self) -> str:
"""Redis key for this CronScheduler instance"""
return f'rq:cron_scheduler:{self.name}'
def to_dict(self) -> Dict:
"""Convert CronScheduler instance to a dictionary for Redis storage"""
obj = {
'hostname': self.hostname,
'pid': str(self.pid),
'name': self.name,
'created_at': utcformat(self.created_at),
'config_file': self.config_file or '',
}
return obj
def save(self, pipeline: Optional[Pipeline] = None) -> None:
"""Save CronScheduler instance to Redis hash with TTL"""
connection = pipeline if pipeline is not None else self.connection
connection.hset(self.key, mapping=self.to_dict())
connection.expire(self.key, 60)
def restore(self, raw_data: Dict) -> None:
"""Restore CronScheduler instance from Redis hash data"""
obj = decode_redis_hash(raw_data, decode_values=True)
self.hostname = obj['hostname']
self.pid = int(obj.get('pid', 0))
self.name = obj['name']
self.created_at = str_to_date(obj['created_at'])
self.config_file = obj['config_file']
@classmethod
def fetch(cls, name: str, connection: Redis) -> 'CronScheduler':
"""Fetch a CronScheduler instance from Redis by name"""
key = f'rq:cron_scheduler:{name}'
raw_data = connection.hgetall(key)
if not raw_data:
raise SchedulerNotFound(f"CronScheduler with name '{name}' not found")
scheduler = cls(connection=connection, name=name)
scheduler.restore(raw_data)
return scheduler
@classmethod
def all(cls, connection: Redis, cleanup: bool = True) -> List['CronScheduler']:
"""Returns all CronScheduler instances from the registry
Args:
connection: Redis connection to use
cleanup: If True, removes stale entries from registry before fetching schedulers
Returns:
List of CronScheduler instances
"""
from contextlib import suppress
if cleanup:
cron_scheduler_registry.cleanup(connection)
scheduler_names = cron_scheduler_registry.get_keys(connection)
schedulers = []
for name in scheduler_names:
with suppress(SchedulerNotFound):
scheduler = cls.fetch(name, connection)
schedulers.append(scheduler)
return schedulers
def register_birth(self) -> None:
"""Register this scheduler's birth in the scheduler registry and save data to Redis hash"""
self.log.info(f'CronScheduler {self.name}: registering birth...')
with self.connection.pipeline() as pipeline:
cron_scheduler_registry.register(self, pipeline)
self.save(pipeline)
pipeline.execute()
def register_death(self, pipeline: Optional[Pipeline] = None) -> None:
"""Register this scheduler's death by removing it from the scheduler registry"""
self.log.info(f'CronScheduler {self.name}: registering death...')
cron_scheduler_registry.unregister(self, pipeline)
def heartbeat(self) -> None:
"""Send a heartbeat to update this scheduler's last seen timestamp in the registry
and extend the scheduler's Redis hash TTL.
"""
with self.connection.pipeline() as pipe:
pipe.zadd(cron_scheduler_registry.get_registry_key(), {self.name: time.time()}, xx=True, ch=True)
pipe.expire(self.key, 120)
results = pipe.execute()
# Check zadd result (first command in pipeline)
zadd_result = results[0]
if zadd_result:
self.log.debug(f'CronScheduler {self.name}: heartbeat sent successfully')
else:
self.log.warning(f'CronScheduler {self.name}: heartbeat failed - scheduler not found in registry')
@property
def last_heartbeat(self) -> Optional[datetime]:
"""Return the UTC datetime of the last heartbeat, or None if no heartbeat recorded
Returns:
datetime: UTC datetime of the last heartbeat, or None if scheduler not found in registry
"""
score = self.connection.zscore(cron_scheduler_registry.get_registry_key(), self.name)
if score is None:
return None
# Convert Unix timestamp to UTC datetime
return datetime.fromtimestamp(score, tz=timezone.utc)
# Global registry to store job data before Cron instance is created
_job_data_registry: List[Dict] = []
def register(
func: Callable,
queue_name: str,
args: Optional[Tuple] = None,
kwargs: Optional[Dict] = None,
interval: Optional[int] = None,
cron: Optional[str] = None,
job_timeout: Optional[int] = None,
result_ttl: int = 500,
ttl: Optional[int] = None,
failure_ttl: Optional[int] = None,
meta: Optional[dict] = None,
) -> Dict:
"""
Register a function to be run as a cron job by adding its definition
to a temporary global registry.
This function should typically be called from within a cron configuration file
that will be loaded using `CronScheduler.load_config_from_file()`.
Example (in your cron_config.py):
from rq import cron
from my_app.tasks import my_func
cron.register(my_func, 'default', interval=60) # Run every 60 seconds
Returns:
dict: The job data dictionary added to the registry.
"""
# Store the job data in the global registry
job_data = {
'func': func,
'queue_name': queue_name,
'args': args,
'kwargs': kwargs,
'interval': interval,
'cron': cron,
'job_timeout': job_timeout,
'result_ttl': result_ttl,
'ttl': ttl,
'failure_ttl': failure_ttl,
'meta': meta,
}
# Add to the global registry
_job_data_registry.append(job_data)
# Log the registration attempt (optional)
logger = logging.getLogger(__name__)
job_key = f'{func.__module__}.{func.__name__}'
logger.debug(f"Cron config: Adding job '{job_key}' to registry for queue {queue_name}")
return job_data
def create_cron(connection: Redis) -> CronScheduler:
"""Create a CronScheduler instance with all registered jobs"""
cron_instance = CronScheduler(connection=connection)
# Register all previously registered jobs with the CronScheduler instance
for data in _job_data_registry:
logging.debug(f'Registering job: {data["func"].__name__}')
cron_instance.register(**data)
return cron_instance
| CronScheduler |
python | ray-project__ray | python/ray/tests/test_namespace.py | {
"start": 518,
"end": 3566
} | class ____:
def ping(self):
return "pong from other job"
actor = DetachedActor.options(name="Pinger", lifetime="detached").remote()
ray.get(actor.ping.remote())
"""
# Start a detached actor in a different namespace.
run_string_as_driver(driver_template.format(address, "different"))
@ray.remote
class Actor:
def ping(self):
return "pong"
# Create an actor. This should succeed because the other actor is in a
# different namespace.
probe = Actor.options(name="Pinger").remote()
assert ray.get(probe.ping.remote()) == "pong"
del probe
# Wait for actor removal
actor_removed = False
for _ in range(50): # Timeout after 5s
try:
ray.get_actor("Pinger")
except ValueError:
actor_removed = True
# This means the actor was removed.
break
else:
time.sleep(0.1)
assert actor_removed, "This is an anti-flakey test measure"
with pytest.raises(ValueError, match="Failed to look up actor with name"):
ray.get_actor("Pinger")
# Now make the actor in this namespace, from a different job.
run_string_as_driver(driver_template.format(address, "namespace"))
detached_actor = ray.get_actor("Pinger")
assert ray.get(detached_actor.ping.remote()) == "pong from other job"
with pytest.raises(ValueError, match="The name .* is already taken"):
Actor.options(name="Pinger", lifetime="detached").remote()
def test_placement_groups(shutdown_only):
info = ray.init(namespace="namespace")
address = info["address"]
# First param of template is the namespace. Second is the redis address.
driver_template = """
import ray
ray.init(address="{}", namespace="{}")
pg = ray.util.placement_group(bundles=[dict(CPU=1)], name="hello",
lifetime="detached")
ray.get(pg.ready())
"""
# Start a detached placement group in a different namespace.
run_string_as_driver(driver_template.format(address, "different"))
# Create an actor. This should succeed because the other actor is in a
# different namespace.
probe = ray.util.placement_group(bundles=[{"CPU": 1}], name="hello")
ray.get(probe.ready())
ray.util.remove_placement_group(probe)
removed = False
for _ in range(50): # Timeout after 5s
try:
ray.util.get_placement_group("hello")
except ValueError:
removed = True
# This means the actor was removed.
break
else:
time.sleep(0.1)
assert removed, "This is an anti-flakey test measure"
# Now make the actor in this namespace, from a different job.
run_string_as_driver(driver_template.format(address, "namespace"))
def test_default_namespace(shutdown_only):
info = ray.init(namespace="namespace")
address = info["address"]
# First param of template is the namespace. Second is the redis address.
driver_template = """
import ray
ray.init(address="{}")
@ray.remote
| DetachedActor |
python | ApeWorX__ape | src/ape/api/networks.py | {
"start": 49493,
"end": 51933
} | class ____(NetworkAPI):
@property
def upstream_network(self) -> NetworkAPI:
"""
The network being forked.
"""
network_name = self.name.replace("-fork", "").replace("_fork", "")
return self.ecosystem.get_network(network_name)
@property
def upstream_provider(self) -> "UpstreamProvider":
"""
The provider used when requesting data before the local fork.
Set this in your config under the network settings.
When not set, will attempt to use the default provider, if one
exists.
"""
config_choice: str = self.config.get("upstream_provider")
if provider_name := config_choice or self.upstream_network.default_provider_name:
return self.upstream_network.get_provider(provider_name)
raise NetworkError(f"Upstream network '{self.upstream_network}' has no providers.")
@property
def upstream_chain_id(self) -> int:
"""
The chain Id of the upstream network.
For example, when on ``mainnet-fork``, this should always
return the chain ID for ``mainnet``. Some providers may use
a different chain ID for forked networks while some do not.
This property should ALWAYS be that of the forked network, regardless.
"""
return self.upstream_network.chain_id
def use_upstream_provider(self) -> ProviderContextManager:
"""
Connect to the upstream provider.
Returns:
:class:`~ape.api.networks.ProviderContextManager`
"""
return self.upstream_network.use_provider(self.upstream_provider)
def create_network_type(chain_id: int, network_id: int, is_fork: bool = False) -> type[NetworkAPI]:
"""
Easily create a :class:`~ape.api.networks.NetworkAPI` subclass.
"""
BaseNetwork = ForkedNetworkAPI if is_fork else NetworkAPI
class network_def(BaseNetwork): # type: ignore
@property
def chain_id(self) -> int:
return chain_id
@property
def network_id(self) -> int:
return network_id
return network_def
# TODO: Can remove in 0.9 since `LOCAL_NETWORK_NAME` doesn't need to be here.
__all__ = [
"LOCAL_NETWORK_NAME", # Have to leave for backwards compat.
"EcosystemAPI",
"ForkedNetworkAPI",
"NetworkAPI",
"ProviderContextManager",
"ProxyInfoAPI",
"create_network_type",
]
| ForkedNetworkAPI |
python | ray-project__ray | python/ray/serve/tests/unit/test_deployment_state.py | {
"start": 191095,
"end": 205833
} | class ____:
"""End-to-end integration tests for rank functionality through deployment state manager."""
def _set_replicas_ready(
self, ds: DeploymentState, replica_states: List[ReplicaState]
):
"""Helper to set replicas in given states to ready."""
for replica in ds._replicas.get(replica_states):
replica._actor.set_ready()
def _set_replicas_done_stopping(self, ds: DeploymentState):
"""Helper to set stopping replicas as done stopping."""
for replica in ds._replicas.get([ReplicaState.STOPPING]):
replica._actor.set_done_stopping()
def test_scaling_up_and_down_scenario(self, mock_deployment_state_manager):
"""Test a realistic scaling scenario through deployment state manager."""
create_dsm, _, _, _ = mock_deployment_state_manager
dsm: DeploymentStateManager = create_dsm()
# Start with 3 replicas
info_1, v1 = deployment_info(num_replicas=3, version="1")
dsm.deploy(TEST_DEPLOYMENT_ID, info_1)
ds: DeploymentState = dsm._deployment_states[TEST_DEPLOYMENT_ID]
# Create initial replicas
dsm.update()
check_counts(ds, total=3, by_state=[(ReplicaState.STARTING, 3, v1)])
# Set replicas ready
self._set_replicas_ready(ds, [ReplicaState.STARTING])
dsm.update()
check_counts(ds, total=3, by_state=[(ReplicaState.RUNNING, 3, v1)])
assert ds.curr_status_info.status == DeploymentStatus.HEALTHY
# Check initial ranks are 0, 1, 2
ranks_mapping = ds._get_replica_ranks_mapping()
ranks = sorted(ranks_mapping.values())
assert ranks == [0, 1, 2], f"Expected ranks [0, 1, 2], got {ranks}"
# Scale down to 2 replicas - this should trigger rank reassignment
info_2, _ = deployment_info(num_replicas=2, version="1")
dsm.deploy(TEST_DEPLOYMENT_ID, info_2)
dsm.update()
# One replica should be stopping
check_counts(
ds,
total=3,
by_state=[(ReplicaState.RUNNING, 2, v1), (ReplicaState.STOPPING, 1, v1)],
)
# Complete the scale down
self._set_replicas_done_stopping(ds)
dsm.update()
check_counts(ds, total=2, by_state=[(ReplicaState.RUNNING, 2, v1)])
assert ds.curr_status_info.status == DeploymentStatus.HEALTHY
# Trigger rank consistency check with one more update
dsm.update()
# After scaling down and reaching healthy status, ranks should be contiguous [0, 1]
ranks_mapping = ds._get_replica_ranks_mapping()
ranks = sorted(ranks_mapping.values())
assert ranks == [0, 1], f"Expected ranks [0, 1] after scale down, got {ranks}"
# Scale back up to 3 replicas - new replica should reuse available rank
info_3, _ = deployment_info(num_replicas=3, version="1")
dsm.deploy(TEST_DEPLOYMENT_ID, info_3)
dsm.update()
# Should have one new starting replica
check_counts(
ds,
total=3,
by_state=[(ReplicaState.RUNNING, 2, v1), (ReplicaState.STARTING, 1, v1)],
)
# Set new replica ready
self._set_replicas_ready(ds, [ReplicaState.STARTING])
dsm.update()
check_counts(ds, total=3, by_state=[(ReplicaState.RUNNING, 3, v1)])
assert ds.curr_status_info.status == DeploymentStatus.HEALTHY
# Trigger rank consistency check with one more update
dsm.update()
# Final ranks should be contiguous [0, 1, 2]
ranks_mapping = ds._get_replica_ranks_mapping()
ranks = sorted(ranks_mapping.values())
assert ranks == [0, 1, 2], f"Expected final ranks [0, 1, 2], got {ranks}"
def test_controller_recovery_with_scattered_ranks(
self, mock_deployment_state_manager
):
"""Test controller recovery with existing replica ranks through deployment state manager."""
create_dsm, _, _, _ = mock_deployment_state_manager
dsm: DeploymentStateManager = create_dsm()
# Deploy with 3 replicas
info_1, v1 = deployment_info(num_replicas=3, version="1")
target_state_changed = dsm.deploy(TEST_DEPLOYMENT_ID, info_1)
assert target_state_changed
dsm.save_checkpoint()
ds: DeploymentState = dsm._deployment_states[TEST_DEPLOYMENT_ID]
# Create replicas and get them running
dsm.update()
check_counts(ds, total=3, by_state=[(ReplicaState.STARTING, 3, v1)])
self._set_replicas_ready(ds, [ReplicaState.STARTING])
dsm.update()
check_counts(ds, total=3, by_state=[(ReplicaState.RUNNING, 3, v1)])
# Get the actual replica objects (not just IDs)
replicas = ds._replicas.get([ReplicaState.RUNNING])
replica_ids = [replica.replica_id for replica in replicas]
# Simulate controller crashed! Create a new deployment state manager
# with the existing replica IDs to trigger recovery
new_dsm: DeploymentStateManager = create_dsm(
[replica_id.to_full_id_str() for replica_id in replica_ids]
)
# New deployment state should be created and replicas should be RECOVERING
new_ds = new_dsm._deployment_states[TEST_DEPLOYMENT_ID]
check_counts(new_ds, total=3, by_state=[(ReplicaState.RECOVERING, 3, v1)])
# Complete recovery - set replicas ready
self._set_replicas_ready(new_ds, [ReplicaState.RECOVERING])
new_dsm.update()
check_counts(new_ds, total=3, by_state=[(ReplicaState.RUNNING, 3, v1)])
assert new_ds.curr_status_info.status == DeploymentStatus.HEALTHY
# At this point ranks should be scattered but all values [0, 1, 2] should be present
ranks_mapping = new_ds._get_replica_ranks_mapping()
ranks = sorted(ranks_mapping.values())
assert ranks == [0, 1, 2], "Should have recovered scattered ranks"
# Trigger rank consistency check with one more update - this should reorder if needed
new_dsm.update()
# After rank consistency check, ranks should still be [0, 1, 2]
final_ranks_mapping = new_ds._get_replica_ranks_mapping()
final_ranks = sorted(final_ranks_mapping.values())
assert final_ranks == [
0,
1,
2,
], f"Expected contiguous ranks [0, 1, 2] after consistency check, got {final_ranks}"
# Clean up
replica_rank_context.clear()
def test_complex_reassignment_scenario(self, mock_deployment_state_manager):
"""Test complex reassignment with many gaps through deployment state manager."""
create_dsm, _, _, _ = mock_deployment_state_manager
dsm: DeploymentStateManager = create_dsm()
# Deploy with 4 replicas
info_1, v1 = deployment_info(num_replicas=4, version="1")
target_state_changed = dsm.deploy(TEST_DEPLOYMENT_ID, info_1)
assert target_state_changed
dsm.save_checkpoint()
ds: DeploymentState = dsm._deployment_states[TEST_DEPLOYMENT_ID]
# Create replicas and get them running
dsm.update()
check_counts(ds, total=4, by_state=[(ReplicaState.STARTING, 4, v1)])
self._set_replicas_ready(ds, [ReplicaState.STARTING])
dsm.update()
check_counts(ds, total=4, by_state=[(ReplicaState.RUNNING, 4, v1)])
# Get the actual replica objects
replicas = ds._replicas.get([ReplicaState.RUNNING])
replica_ids = [replica.replica_id for replica in replicas]
# Simulate very scattered ranks in global context: 0, 3, 7, 10
global replica_rank_context
replica_rank_context.clear()
replica_rank_context[replica_ids[0].unique_id] = ReplicaRank(
rank=0, node_rank=-1, local_rank=-1
)
replica_rank_context[replica_ids[1].unique_id] = ReplicaRank(
rank=3, node_rank=-1, local_rank=-1
)
replica_rank_context[replica_ids[2].unique_id] = ReplicaRank(
rank=7, node_rank=-1, local_rank=-1
)
replica_rank_context[replica_ids[3].unique_id] = ReplicaRank(
rank=10, node_rank=-1, local_rank=-1
)
# Simulate controller crashed! Create a new deployment state manager
# with the existing replica IDs to trigger recovery
new_dsm: DeploymentStateManager = create_dsm(
[replica_id.to_full_id_str() for replica_id in replica_ids]
)
# New deployment state should be created and replicas should be RECOVERING
new_ds = new_dsm._deployment_states[TEST_DEPLOYMENT_ID]
check_counts(new_ds, total=4, by_state=[(ReplicaState.RECOVERING, 4, v1)])
# Complete recovery - set replicas ready
self._set_replicas_ready(new_ds, [ReplicaState.RECOVERING])
new_dsm.update()
check_counts(new_ds, total=4, by_state=[(ReplicaState.RUNNING, 4, v1)])
assert new_ds.curr_status_info.status == DeploymentStatus.HEALTHY
# Trigger rank consistency check with one more update
new_dsm.update()
# After reassignment, ranks should be contiguous [0, 1, 2, 3]
ranks_mapping = new_ds._get_replica_ranks_mapping()
ranks = sorted(ranks_mapping.values())
assert ranks == [
0,
1,
2,
3,
], f"Expected reassigned ranks [0, 1, 2, 3], got {ranks}"
def test_rank_consistency_during_version_rollout(
self, mock_deployment_state_manager
):
"""Test that rank consistency is maintained during version rollouts."""
create_dsm, _, _, _ = mock_deployment_state_manager
dsm: DeploymentStateManager = create_dsm()
# Start with 3 replicas of version 1
info_1, v1 = deployment_info(num_replicas=3, version="1")
dsm.deploy(TEST_DEPLOYMENT_ID, info_1)
ds: DeploymentState = dsm._deployment_states[TEST_DEPLOYMENT_ID]
# Create and ready initial replicas
dsm.update()
check_counts(ds, total=3, by_state=[(ReplicaState.STARTING, 3, v1)])
self._set_replicas_ready(ds, [ReplicaState.STARTING])
dsm.update()
check_counts(ds, total=3, by_state=[(ReplicaState.RUNNING, 3, v1)])
assert ds.curr_status_info.status == DeploymentStatus.HEALTHY
# Verify initial ranks are contiguous
ranks_mapping = ds._get_replica_ranks_mapping()
initial_ranks = sorted(ranks_mapping.values())
assert initial_ranks == [0, 1, 2]
# Deploy version 2 - this should trigger rolling update
info_2, v2 = deployment_info(num_replicas=3, version="2")
dsm.deploy(TEST_DEPLOYMENT_ID, info_2)
dsm.update()
# Complete the rolling update step by step
while True:
# Set any new starting replicas ready
starting_replicas = ds._replicas.get([ReplicaState.STARTING])
if starting_replicas:
self._set_replicas_ready(ds, [ReplicaState.STARTING])
# Complete any stopping replicas
stopping_replicas = ds._replicas.get([ReplicaState.STOPPING])
if stopping_replicas:
self._set_replicas_done_stopping(ds)
dsm.update()
# Check if rolling update is complete
running_replicas = ds._replicas.get([ReplicaState.RUNNING])
if len(running_replicas) == 3 and all(
r.version == v2 for r in running_replicas
):
break
# After rolling update is complete, deployment should be healthy
assert ds.curr_status_info.status == DeploymentStatus.HEALTHY
# Trigger rank consistency check with one more update
dsm.update()
# After rolling update, verify ranks are still contiguous
final_ranks_mapping = ds._get_replica_ranks_mapping()
final_ranks = sorted(final_ranks_mapping.values())
assert final_ranks == [
0,
1,
2,
], f"Expected contiguous ranks [0, 1, 2] after rollout, got {final_ranks}"
def test_rank_assignment_with_replica_failures(self, mock_deployment_state_manager):
"""Test rank handling when replicas fail during startup."""
create_dsm, _, _, _ = mock_deployment_state_manager
dsm: DeploymentStateManager = create_dsm()
# Deploy with 3 replicas
info_1, v1 = deployment_info(num_replicas=3, version="1")
dsm.deploy(TEST_DEPLOYMENT_ID, info_1)
ds: DeploymentState = dsm._deployment_states[TEST_DEPLOYMENT_ID]
# Create initial replicas
dsm.update()
check_counts(ds, total=3, by_state=[(ReplicaState.STARTING, 3, v1)])
# Make first two replicas ready, but let the third fail
starting_replicas = ds._replicas.get([ReplicaState.STARTING])
starting_replicas[0]._actor.set_ready()
starting_replicas[1]._actor.set_ready()
starting_replicas[2]._actor.set_failed_to_start()
dsm.update()
running_count = ds._replicas.count(states=[ReplicaState.RUNNING])
stopping_count = ds._replicas.count(states=[ReplicaState.STOPPING])
assert running_count == 2, "Should have 2 running replicas"
assert stopping_count == 1, "Should have 1 stopping replica"
self._set_replicas_done_stopping(ds)
dsm.update()
starting_count = ds._replicas.count(states=[ReplicaState.STARTING])
assert starting_count == 1, "Should have 1 starting replica"
self._set_replicas_ready(ds, [ReplicaState.STARTING])
dsm.update()
# second update to reassign ranks
dsm.update()
# Final verification - should have 3 running replicas (ignore failed/stopping replicas)
running_replicas = ds._replicas.get([ReplicaState.RUNNING])
assert (
len(running_replicas) == 3
), f"Expected 3 running replicas, got {len(running_replicas)}"
# Verify that ranks are properly assigned and unique for running replicas
ranks_mapping = ds._get_replica_ranks_mapping()
# Filter ranks to only include those for running replicas
running_replica_ids = [
replica.replica_id.unique_id for replica in running_replicas
]
running_replica_ranks = [
ranks_mapping[replica_id]
for replica_id in running_replica_ids
if replica_id in ranks_mapping
]
# The ranks should be assigned to all running replicas
assert set(running_replica_ranks) == {
0,
1,
2,
}, f"Expected ranks [0, 1, 2], got {ranks_mapping.values()}"
| TestDeploymentRankManagerIntegrationE2E |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/d.py | {
"start": 991,
"end": 1043
} | class ____(dprogram):
inst_to = '${LIBDIR}'
| dshlib |
python | spyder-ide__spyder | spyder/plugins/run/widgets.py | {
"start": 2070,
"end": 2232
} | class ____:
Close = 0
Save = 1
Run = 2
# ---- Base class
# -----------------------------------------------------------------------------
| RunDialogStatus |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-github/tests/test_github_app_auth.py | {
"start": 15181,
"end": 16143
} | class ____:
"""Test GitHubIssuesClient with GitHub App authentication."""
def test_init_with_github_app(self):
"""Test initialization with GitHub App auth."""
app_auth = GitHubAppAuth(
app_id="123", private_key=TEST_PRIVATE_KEY, installation_id="456"
)
client = GitHubIssuesClient(github_app_auth=app_auth)
assert client._github_app_auth is app_auth
assert client._use_github_app
def test_init_with_both_raises_error(self):
"""Test that providing both PAT and GitHub App auth raises error."""
app_auth = GitHubAppAuth(
app_id="123", private_key=TEST_PRIVATE_KEY, installation_id="456"
)
with pytest.raises(ValueError, match="Cannot provide both"):
GitHubIssuesClient(github_token="ghp_token", github_app_auth=app_auth)
@pytest.mark.skipif(not HAS_GITHUB_APP_AUTH, reason="GitHub App auth not available")
| TestIssuesClientWithAppAuth |
python | astropy__astropy | astropy/units/format/base.py | {
"start": 623,
"end": 7628
} | class ____:
"""
The abstract base class of all unit formats.
"""
registry: ClassVar[dict[str, type["Base"]]] = {}
_space: ClassVar[str] = " "
_scale_unit_separator: ClassVar[str] = " "
_times: ClassVar[str] = "*"
name: ClassVar[str] # Set by __init_subclass__ by the latest
def __new__(cls, *args, **kwargs):
# This __new__ is to make it clear that there is no reason to
# instantiate a Formatter--if you try to you'll just get back the
# class
return cls
def __init_subclass__(cls, **kwargs):
# Keep a registry of all formats. Key by the class name unless a name
# is explicitly set (i.e., one *not* inherited from a superclass).
if "name" not in cls.__dict__:
cls.name = cls.__name__.lower()
Base.registry[cls.name] = cls
super().__init_subclass__(**kwargs)
@classmethod
def format_exponential_notation(
cls, val: UnitScale | np.number, format_spec: str = ".8g"
) -> str:
"""
Formats a value in exponential notation.
Parameters
----------
val : number
The value to be formatted
format_spec : str, optional
Format used to split up mantissa and exponent
Returns
-------
str
The value in exponential notation in a this class's format.
"""
x = format(val, format_spec).split("e")
if len(x) != 2:
return cls._format_mantissa(x[0]) # no exponent
ex = x[1].lstrip("0+")
if not ex:
return cls._format_mantissa(x[0]) # exponent was zero
if ex.startswith("-"):
ex = "-" + ex[1:].lstrip("0")
ex = f"10{cls._format_superscript(ex)}"
m = cls._format_mantissa("" if x[0].rstrip("0") == "1." else x[0])
return f"{m}{cls._times}{ex}" if m else ex
@classmethod
def _format_mantissa(cls, m: str) -> str:
return m
@classmethod
def _format_superscript(cls, number: str) -> str:
return f"({number})" if "/" in number or "." in number else number
@classmethod
def _format_unit_power(cls, unit: NamedUnit, power: UnitPower = 1) -> str:
"""Format the unit for this format class raised to the given power.
This is overridden in Latex where the name of the unit can depend on the power
(e.g., for degrees).
"""
name = unit._get_format_name(cls.name)
return name if power == 1 else name + cls._format_power(power)
@classmethod
def _format_power(cls, power: UnitPower) -> str:
# If the denominator of `power` is a power of 2 then `power` is stored
# as a `float` (see `units.utils.sanitize_power()`), but we still want
# to display it as a fraction.
return cls._format_superscript(
str(maybe_simple_fraction(power) if isinstance(power, float) else power)
)
@classmethod
def _format_unit_list(cls, units: Iterable[tuple[NamedUnit, UnitPower]]) -> str:
return cls._space.join(
cls._format_unit_power(base_, power) for base_, power in units
)
@classmethod
def _format_inline_fraction(
cls, scale: str, numerator: str, denominator: str
) -> str:
if cls._space in denominator:
denominator = f"({denominator})"
if scale and numerator == "1":
return f"{scale}/ {denominator}"
return f"{scale}{numerator} / {denominator}"
@classmethod
def _format_multiline_fraction(
cls, scale: str, numerator: str, denominator: str
) -> str:
# By default, we just warn that we do not have a multiline format.
warnings.warn(
f"{cls.name!r} format does not support multiline "
"fractions; using inline instead.",
UnitsWarning,
)
return cls._format_inline_fraction(scale, numerator, denominator)
@classmethod
def to_string(
cls,
unit: UnitBase,
*,
deprecations: DeprecatedUnitAction = DeprecatedUnitAction.WARN,
fraction: bool | Literal["inline", "multiline"] = True,
) -> str:
"""Convert a unit to its string representation.
Implementation for `~astropy.units.UnitBase.to_string`.
Parameters
----------
unit : |Unit|
The unit to convert.
deprecations : {"warn", "silent", "raise", "convert"}, optional, keyword-only
Whether deprecated units should emit a warning, be handled
silently or raise an error. The "convert" option replaces
the deprecated unit if possible and emits a warning otherwise.
fraction : {False|True|'inline'|'multiline'}, optional
Options are as follows:
- `False` : display unit bases with negative powers as they are
(e.g., ``km s-1``);
- 'inline' or `True` : use a single-line fraction (e.g., ``km / s``);
- 'multiline' : use a multiline fraction if possible (available for
the ``latex``, ``console`` and ``unicode`` formats; e.g.,
``$\\mathrm{\\frac{km}{s}}$``). If not possible, use 'inline'.
Raises
------
ValueError
If ``fraction`` is not recognized.
"""
# First the scale. Normally unity, in which case we omit
# it, but non-unity scale can happen, e.g., in decompositions
# like u.Ry.decompose(), which gives "2.17987e-18 kg m2 / s2".
s = "" if unit.scale == 1.0 else cls.format_exponential_notation(unit.scale)
# dimensionless does not have any bases, but can have a scale;
# e.g., u.percent.decompose() gives "0.01".
if not unit.bases:
return s
if s:
s += cls._scale_unit_separator
# Unit powers are monotonically decreasing
if not fraction or unit.powers[-1] > 0:
return s + cls._format_unit_list(zip(unit.bases, unit.powers, strict=True))
if fraction is True or fraction == "inline":
formatter = cls._format_inline_fraction
elif fraction == "multiline":
formatter = cls._format_multiline_fraction
else:
raise ValueError(
"fraction can only be False, 'inline', or 'multiline', "
f"not {fraction!r}."
)
positive = []
negative = []
for base, power in zip(unit.bases, unit.powers, strict=True):
if power > 0:
positive.append((base, power))
else:
negative.append((base, -power))
return formatter(
s, cls._format_unit_list(positive) or "1", cls._format_unit_list(negative)
)
@classmethod
def parse(cls, s: str) -> UnitBase:
"""
Convert a string to a unit object.
"""
raise NotImplementedError(f"Can not parse with {cls.__name__} format")
| Base |
python | openai__openai-python | src/openai/types/beta/chatkit/chat_session_rate_limits.py | {
"start": 160,
"end": 293
} | class ____(BaseModel):
max_requests_per_1_minute: int
"""Maximum allowed requests per one-minute window."""
| ChatSessionRateLimits |
python | walkccc__LeetCode | solutions/3469. Find Minimum Cost to Remove Array Elements/3469.py | {
"start": 0,
"end": 508
} | class ____:
def minCost(self, nums: list[int]) -> int:
n = len(nums)
@functools.lru_cache(None)
def dp(last: int, i: int) -> int:
if i == n: # Single element left.
return nums[last]
if i == n - 1: # Two elements left.
return max(nums[last], nums[i])
a = max(nums[i], nums[i + 1]) + dp(last, i + 2)
b = max(nums[last], nums[i]) + dp(i + 1, i + 2)
c = max(nums[last], nums[i + 1]) + dp(i, i + 2)
return min(a, b, c)
return dp(0, 1)
| Solution |
python | python__mypy | mypyc/ir/ops.py | {
"start": 17915,
"end": 18707
} | class ____(RegisterOp):
"""Decrease reference count and free object if zero (dec_ref src).
The is_xdec flag says to use an XDECREF, which checks if the
pointer is NULL first.
"""
error_kind = ERR_NEVER
def __init__(self, src: Value, is_xdec: bool = False, line: int = -1) -> None:
assert src.type.is_refcounted
super().__init__(line)
self.src = src
self.is_xdec = is_xdec
def __repr__(self) -> str:
return "<{}DecRef {!r}>".format("X" if self.is_xdec else "", self.src)
def sources(self) -> list[Value]:
return [self.src]
def set_sources(self, new: list[Value]) -> None:
(self.src,) = new
def accept(self, visitor: OpVisitor[T]) -> T:
return visitor.visit_dec_ref(self)
@final
| DecRef |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 66191,
"end": 67746
} | class ____(StatNode):
# name string
# cname string or None
# kind "struct" or "union"
# typedef_flag boolean
# visibility "public" or "private"
# api boolean
# in_pxd boolean
# attributes [CVarDefNode] or None
# entry Entry
# packed boolean
child_attrs = ["attributes"]
def declare(self, env, scope=None):
self.entry = env.declare_struct_or_union(
self.name, self.kind, scope, self.typedef_flag, self.pos,
self.cname, visibility=self.visibility, api=self.api,
packed=self.packed)
def analyse_declarations(self, env):
scope = None
if self.attributes is not None:
scope = StructOrUnionScope(self.name)
self.declare(env, scope)
if self.attributes is not None:
if self.in_pxd and not env.in_cinclude:
self.entry.defined_in_pxd = 1
for attr in self.attributes:
attr.analyse_declarations(env, scope)
if self.visibility != 'extern':
for attr in scope.var_entries:
type = attr.type
while type.is_array:
type = type.base_type
if type == self.entry.type:
error(attr.pos, "Struct cannot contain itself as a member.")
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
| CStructOrUnionDefNode |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/buffer.py | {
"start": 4694,
"end": 71298
} | class ____:
"""
The core data structure that holds the text and cursor position of the
current input line and implements all text manipulations on top of it. It
also implements the history, undo stack and the completion state.
:param completer: :class:`~prompt_toolkit.completion.Completer` instance.
:param history: :class:`~prompt_toolkit.history.History` instance.
:param tempfile_suffix: The tempfile suffix (extension) to be used for the
"open in editor" function. For a Python REPL, this would be ".py", so
that the editor knows the syntax highlighting to use. This can also be
a callable that returns a string.
:param tempfile: For more advanced tempfile situations where you need
control over the subdirectories and filename. For a Git Commit Message,
this would be ".git/COMMIT_EDITMSG", so that the editor knows the syntax
highlighting to use. This can also be a callable that returns a string.
:param name: Name for this buffer. E.g. DEFAULT_BUFFER. This is mostly
useful for key bindings where we sometimes prefer to refer to a buffer
by their name instead of by reference.
:param accept_handler: Called when the buffer input is accepted. (Usually
when the user presses `enter`.) The accept handler receives this
`Buffer` as input and should return True when the buffer text should be
kept instead of calling reset.
In case of a `PromptSession` for instance, we want to keep the text,
because we will exit the application, and only reset it during the next
run.
:param max_number_of_completions: Never display more than this number of
completions, even when the completer can produce more (limited by
default to 10k for performance).
Events:
:param on_text_changed: When the buffer text changes. (Callable or None.)
:param on_text_insert: When new text is inserted. (Callable or None.)
:param on_cursor_position_changed: When the cursor moves. (Callable or None.)
:param on_completions_changed: When the completions were changed. (Callable or None.)
:param on_suggestion_set: When an auto-suggestion text has been set. (Callable or None.)
Filters:
:param complete_while_typing: :class:`~prompt_toolkit.filters.Filter`
or `bool`. Decide whether or not to do asynchronous autocompleting while
typing.
:param validate_while_typing: :class:`~prompt_toolkit.filters.Filter`
or `bool`. Decide whether or not to do asynchronous validation while
typing.
:param enable_history_search: :class:`~prompt_toolkit.filters.Filter` or
`bool` to indicate when up-arrow partial string matching is enabled. It
is advised to not enable this at the same time as
`complete_while_typing`, because when there is an autocompletion found,
the up arrows usually browse through the completions, rather than
through the history.
:param read_only: :class:`~prompt_toolkit.filters.Filter`. When True,
changes will not be allowed.
:param multiline: :class:`~prompt_toolkit.filters.Filter` or `bool`. When
not set, pressing `Enter` will call the `accept_handler`. Otherwise,
pressing `Esc-Enter` is required.
"""
def __init__(
self,
completer: Completer | None = None,
auto_suggest: AutoSuggest | None = None,
history: History | None = None,
validator: Validator | None = None,
tempfile_suffix: str | Callable[[], str] = "",
tempfile: str | Callable[[], str] = "",
name: str = "",
complete_while_typing: FilterOrBool = False,
validate_while_typing: FilterOrBool = False,
enable_history_search: FilterOrBool = False,
document: Document | None = None,
accept_handler: BufferAcceptHandler | None = None,
read_only: FilterOrBool = False,
multiline: FilterOrBool = True,
max_number_of_completions: int = 10000,
on_text_changed: BufferEventHandler | None = None,
on_text_insert: BufferEventHandler | None = None,
on_cursor_position_changed: BufferEventHandler | None = None,
on_completions_changed: BufferEventHandler | None = None,
on_suggestion_set: BufferEventHandler | None = None,
) -> None:
# Accept both filters and booleans as input.
enable_history_search = to_filter(enable_history_search)
complete_while_typing = to_filter(complete_while_typing)
validate_while_typing = to_filter(validate_while_typing)
read_only = to_filter(read_only)
multiline = to_filter(multiline)
self.completer = completer or DummyCompleter()
self.auto_suggest = auto_suggest
self.validator = validator
self.tempfile_suffix = tempfile_suffix
self.tempfile = tempfile
self.name = name
self.accept_handler = accept_handler
# Filters. (Usually, used by the key bindings to drive the buffer.)
self.complete_while_typing = complete_while_typing
self.validate_while_typing = validate_while_typing
self.enable_history_search = enable_history_search
self.read_only = read_only
self.multiline = multiline
self.max_number_of_completions = max_number_of_completions
# Text width. (For wrapping, used by the Vi 'gq' operator.)
self.text_width = 0
#: The command buffer history.
# Note that we shouldn't use a lazy 'or' here. bool(history) could be
# False when empty.
self.history = InMemoryHistory() if history is None else history
self.__cursor_position = 0
# Events
self.on_text_changed: Event[Buffer] = Event(self, on_text_changed)
self.on_text_insert: Event[Buffer] = Event(self, on_text_insert)
self.on_cursor_position_changed: Event[Buffer] = Event(
self, on_cursor_position_changed
)
self.on_completions_changed: Event[Buffer] = Event(self, on_completions_changed)
self.on_suggestion_set: Event[Buffer] = Event(self, on_suggestion_set)
# Document cache. (Avoid creating new Document instances.)
self._document_cache: FastDictCache[
tuple[str, int, SelectionState | None], Document
] = FastDictCache(Document, size=10)
# Create completer / auto suggestion / validation coroutines.
self._async_suggester = self._create_auto_suggest_coroutine()
self._async_completer = self._create_completer_coroutine()
self._async_validator = self._create_auto_validate_coroutine()
# Asyncio task for populating the history.
self._load_history_task: asyncio.Future[None] | None = None
# Reset other attributes.
self.reset(document=document)
def __repr__(self) -> str:
if len(self.text) < 15:
text = self.text
else:
text = self.text[:12] + "..."
return f"<Buffer(name={self.name!r}, text={text!r}) at {id(self)!r}>"
def reset(
self, document: Document | None = None, append_to_history: bool = False
) -> None:
"""
:param append_to_history: Append current input to history first.
"""
if append_to_history:
self.append_to_history()
document = document or Document()
self.__cursor_position = document.cursor_position
# `ValidationError` instance. (Will be set when the input is wrong.)
self.validation_error: ValidationError | None = None
self.validation_state: ValidationState | None = ValidationState.UNKNOWN
# State of the selection.
self.selection_state: SelectionState | None = None
# Multiple cursor mode. (When we press 'I' or 'A' in visual-block mode,
# we can insert text on multiple lines at once. This is implemented by
# using multiple cursors.)
self.multiple_cursor_positions: list[int] = []
# When doing consecutive up/down movements, prefer to stay at this column.
self.preferred_column: int | None = None
# State of complete browser
# For interactive completion through Ctrl-N/Ctrl-P.
self.complete_state: CompletionState | None = None
# State of Emacs yank-nth-arg completion.
self.yank_nth_arg_state: YankNthArgState | None = None # for yank-nth-arg.
# Remember the document that we had *right before* the last paste
# operation. This is used for rotating through the kill ring.
self.document_before_paste: Document | None = None
# Current suggestion.
self.suggestion: Suggestion | None = None
# The history search text. (Used for filtering the history when we
# browse through it.)
self.history_search_text: str | None = None
# Undo/redo stacks (stack of `(text, cursor_position)`).
self._undo_stack: list[tuple[str, int]] = []
self._redo_stack: list[tuple[str, int]] = []
# Cancel history loader. If history loading was still ongoing.
# Cancel the `_load_history_task`, so that next repaint of the
# `BufferControl` we will repopulate it.
if self._load_history_task is not None:
self._load_history_task.cancel()
self._load_history_task = None
#: The working lines. Similar to history, except that this can be
#: modified. The user can press arrow_up and edit previous entries.
#: Ctrl-C should reset this, and copy the whole history back in here.
#: Enter should process the current command and append to the real
#: history.
self._working_lines: deque[str] = deque([document.text])
self.__working_index = 0
def load_history_if_not_yet_loaded(self) -> None:
"""
Create task for populating the buffer history (if not yet done).
Note::
This needs to be called from within the event loop of the
application, because history loading is async, and we need to be
sure the right event loop is active. Therefor, we call this method
in the `BufferControl.create_content`.
There are situations where prompt_toolkit applications are created
in one thread, but will later run in a different thread (Ptpython
is one example. The REPL runs in a separate thread, in order to
prevent interfering with a potential different event loop in the
main thread. The REPL UI however is still created in the main
thread.) We could decide to not support creating prompt_toolkit
objects in one thread and running the application in a different
thread, but history loading is the only place where it matters, and
this solves it.
"""
if self._load_history_task is None:
async def load_history() -> None:
async for item in self.history.load():
self._working_lines.appendleft(item)
self.__working_index += 1
self._load_history_task = get_app().create_background_task(load_history())
def load_history_done(f: asyncio.Future[None]) -> None:
"""
Handle `load_history` result when either done, cancelled, or
when an exception was raised.
"""
try:
f.result()
except asyncio.CancelledError:
# Ignore cancellation. But handle it, so that we don't get
# this traceback.
pass
except GeneratorExit:
# Probably not needed, but we had situations where
# `GeneratorExit` was raised in `load_history` during
# cancellation.
pass
except BaseException:
# Log error if something goes wrong. (We don't have a
# caller to which we can propagate this exception.)
logger.exception("Loading history failed")
self._load_history_task.add_done_callback(load_history_done)
# <getters/setters>
def _set_text(self, value: str) -> bool:
"""set text at current working_index. Return whether it changed."""
working_index = self.working_index
working_lines = self._working_lines
original_value = working_lines[working_index]
working_lines[working_index] = value
# Return True when this text has been changed.
if len(value) != len(original_value):
# For Python 2, it seems that when two strings have a different
# length and one is a prefix of the other, Python still scans
# character by character to see whether the strings are different.
# (Some benchmarking showed significant differences for big
# documents. >100,000 of lines.)
return True
elif value != original_value:
return True
return False
def _set_cursor_position(self, value: int) -> bool:
"""Set cursor position. Return whether it changed."""
original_position = self.__cursor_position
self.__cursor_position = max(0, value)
return self.__cursor_position != original_position
@property
def text(self) -> str:
return self._working_lines[self.working_index]
@text.setter
def text(self, value: str) -> None:
"""
Setting text. (When doing this, make sure that the cursor_position is
valid for this text. text/cursor_position should be consistent at any time,
otherwise set a Document instead.)
"""
# Ensure cursor position remains within the size of the text.
if self.cursor_position > len(value):
self.cursor_position = len(value)
# Don't allow editing of read-only buffers.
if self.read_only():
raise EditReadOnlyBuffer()
changed = self._set_text(value)
if changed:
self._text_changed()
# Reset history search text.
# (Note that this doesn't need to happen when working_index
# changes, which is when we traverse the history. That's why we
# don't do this in `self._text_changed`.)
self.history_search_text = None
@property
def cursor_position(self) -> int:
return self.__cursor_position
@cursor_position.setter
def cursor_position(self, value: int) -> None:
"""
Setting cursor position.
"""
assert isinstance(value, int)
# Ensure cursor position is within the size of the text.
if value > len(self.text):
value = len(self.text)
if value < 0:
value = 0
changed = self._set_cursor_position(value)
if changed:
self._cursor_position_changed()
@property
def working_index(self) -> int:
return self.__working_index
@working_index.setter
def working_index(self, value: int) -> None:
if self.__working_index != value:
self.__working_index = value
# Make sure to reset the cursor position, otherwise we end up in
# situations where the cursor position is out of the bounds of the
# text.
self.cursor_position = 0
self._text_changed()
def _text_changed(self) -> None:
# Remove any validation errors and complete state.
self.validation_error = None
self.validation_state = ValidationState.UNKNOWN
self.complete_state = None
self.yank_nth_arg_state = None
self.document_before_paste = None
self.selection_state = None
self.suggestion = None
self.preferred_column = None
# fire 'on_text_changed' event.
self.on_text_changed.fire()
# Input validation.
# (This happens on all change events, unlike auto completion, also when
# deleting text.)
if self.validator and self.validate_while_typing():
get_app().create_background_task(self._async_validator())
def _cursor_position_changed(self) -> None:
# Remove any complete state.
# (Input validation should only be undone when the cursor position
# changes.)
self.complete_state = None
self.yank_nth_arg_state = None
self.document_before_paste = None
# Unset preferred_column. (Will be set after the cursor movement, if
# required.)
self.preferred_column = None
# Note that the cursor position can change if we have a selection the
# new position of the cursor determines the end of the selection.
# fire 'on_cursor_position_changed' event.
self.on_cursor_position_changed.fire()
@property
def document(self) -> Document:
"""
Return :class:`~prompt_toolkit.document.Document` instance from the
current text, cursor position and selection state.
"""
return self._document_cache[
self.text, self.cursor_position, self.selection_state
]
@document.setter
def document(self, value: Document) -> None:
"""
Set :class:`~prompt_toolkit.document.Document` instance.
This will set both the text and cursor position at the same time, but
atomically. (Change events will be triggered only after both have been set.)
"""
self.set_document(value)
def set_document(self, value: Document, bypass_readonly: bool = False) -> None:
"""
Set :class:`~prompt_toolkit.document.Document` instance. Like the
``document`` property, but accept an ``bypass_readonly`` argument.
:param bypass_readonly: When True, don't raise an
:class:`.EditReadOnlyBuffer` exception, even
when the buffer is read-only.
.. warning::
When this buffer is read-only and `bypass_readonly` was not passed,
the `EditReadOnlyBuffer` exception will be caught by the
`KeyProcessor` and is silently suppressed. This is important to
keep in mind when writing key bindings, because it won't do what
you expect, and there won't be a stack trace. Use try/finally
around this function if you need some cleanup code.
"""
# Don't allow editing of read-only buffers.
if not bypass_readonly and self.read_only():
raise EditReadOnlyBuffer()
# Set text and cursor position first.
text_changed = self._set_text(value.text)
cursor_position_changed = self._set_cursor_position(value.cursor_position)
# Now handle change events. (We do this when text/cursor position is
# both set and consistent.)
if text_changed:
self._text_changed()
self.history_search_text = None
if cursor_position_changed:
self._cursor_position_changed()
@property
def is_returnable(self) -> bool:
"""
True when there is something handling accept.
"""
return bool(self.accept_handler)
# End of <getters/setters>
def save_to_undo_stack(self, clear_redo_stack: bool = True) -> None:
"""
Safe current state (input text and cursor position), so that we can
restore it by calling undo.
"""
# Safe if the text is different from the text at the top of the stack
# is different. If the text is the same, just update the cursor position.
if self._undo_stack and self._undo_stack[-1][0] == self.text:
self._undo_stack[-1] = (self._undo_stack[-1][0], self.cursor_position)
else:
self._undo_stack.append((self.text, self.cursor_position))
# Saving anything to the undo stack, clears the redo stack.
if clear_redo_stack:
self._redo_stack = []
def transform_lines(
self,
line_index_iterator: Iterable[int],
transform_callback: Callable[[str], str],
) -> str:
"""
Transforms the text on a range of lines.
When the iterator yield an index not in the range of lines that the
document contains, it skips them silently.
To uppercase some lines::
new_text = transform_lines(range(5,10), lambda text: text.upper())
:param line_index_iterator: Iterator of line numbers (int)
:param transform_callback: callable that takes the original text of a
line, and return the new text for this line.
:returns: The new text.
"""
# Split lines
lines = self.text.split("\n")
# Apply transformation
for index in line_index_iterator:
try:
lines[index] = transform_callback(lines[index])
except IndexError:
pass
return "\n".join(lines)
def transform_current_line(self, transform_callback: Callable[[str], str]) -> None:
"""
Apply the given transformation function to the current line.
:param transform_callback: callable that takes a string and return a new string.
"""
document = self.document
a = document.cursor_position + document.get_start_of_line_position()
b = document.cursor_position + document.get_end_of_line_position()
self.text = (
document.text[:a]
+ transform_callback(document.text[a:b])
+ document.text[b:]
)
def transform_region(
self, from_: int, to: int, transform_callback: Callable[[str], str]
) -> None:
"""
Transform a part of the input string.
:param from_: (int) start position.
:param to: (int) end position.
:param transform_callback: Callable which accepts a string and returns
the transformed string.
"""
assert from_ < to
self.text = "".join(
[
self.text[:from_]
+ transform_callback(self.text[from_:to])
+ self.text[to:]
]
)
def cursor_left(self, count: int = 1) -> None:
self.cursor_position += self.document.get_cursor_left_position(count=count)
def cursor_right(self, count: int = 1) -> None:
self.cursor_position += self.document.get_cursor_right_position(count=count)
def cursor_up(self, count: int = 1) -> None:
"""(for multiline edit). Move cursor to the previous line."""
original_column = self.preferred_column or self.document.cursor_position_col
self.cursor_position += self.document.get_cursor_up_position(
count=count, preferred_column=original_column
)
# Remember the original column for the next up/down movement.
self.preferred_column = original_column
def cursor_down(self, count: int = 1) -> None:
"""(for multiline edit). Move cursor to the next line."""
original_column = self.preferred_column or self.document.cursor_position_col
self.cursor_position += self.document.get_cursor_down_position(
count=count, preferred_column=original_column
)
# Remember the original column for the next up/down movement.
self.preferred_column = original_column
def auto_up(
self, count: int = 1, go_to_start_of_line_if_history_changes: bool = False
) -> None:
"""
If we're not on the first line (of a multiline input) go a line up,
otherwise go back in history. (If nothing is selected.)
"""
if self.complete_state:
self.complete_previous(count=count)
elif self.document.cursor_position_row > 0:
self.cursor_up(count=count)
elif not self.selection_state:
self.history_backward(count=count)
# Go to the start of the line?
if go_to_start_of_line_if_history_changes:
self.cursor_position += self.document.get_start_of_line_position()
def auto_down(
    self, count: int = 1, go_to_start_of_line_if_history_changes: bool = False
) -> None:
    """
    If we're not on the last line (of a multiline input) go a line down,
    otherwise go forward in history. (If nothing is selected.)

    :param count: Number of steps (menu entries, lines or history items).
    :param go_to_start_of_line_if_history_changes: When a different
        history entry is shown, move the cursor to the start of the line.
    """
    if self.complete_state:
        # Completion menu is open: "down" navigates the menu instead.
        self.complete_next(count=count)
    elif self.document.cursor_position_row < self.document.line_count - 1:
        self.cursor_down(count=count)
    elif not self.selection_state:
        self.history_forward(count=count)

        # Go to the start of the line?
        if go_to_start_of_line_if_history_changes:
            self.cursor_position += self.document.get_start_of_line_position()
def delete_before_cursor(self, count: int = 1) -> str:
    """
    Delete up to `count` characters before the cursor and return the
    deleted text.

    :param count: Maximum number of characters to delete (>= 0). If it
        exceeds the cursor position, deletion stops at the start of the
        buffer.
    """
    assert count >= 0
    deleted = ""

    if self.cursor_position > 0:
        # Clamp to the cursor position: a `count` larger than the cursor
        # position would make `cursor_position - count` negative, and the
        # negative slice index would wrap around, producing an empty
        # `deleted` string and corrupted new text.
        count = min(count, self.cursor_position)
        deleted = self.text[self.cursor_position - count : self.cursor_position]

        new_text = (
            self.text[: self.cursor_position - count]
            + self.text[self.cursor_position :]
        )
        new_cursor_position = self.cursor_position - len(deleted)

        # Set new Document atomically (text + cursor in one assignment, so
        # change events never observe an inconsistent intermediate state).
        self.document = Document(new_text, new_cursor_position)

    return deleted
def delete(self, count: int = 1) -> str:
    """
    Delete up to `count` characters after the cursor and return the
    deleted text.
    """
    # Nothing after the cursor: nothing to delete.
    if self.cursor_position >= len(self.text):
        return ""

    removed = self.document.text_after_cursor[:count]
    tail = self.text[self.cursor_position + len(removed) :]
    self.text = self.text[: self.cursor_position] + tail
    return removed
def join_next_line(self, separator: str = " ") -> None:
    """
    Join the next line to the current one by deleting the line ending after
    the current line.

    :param separator: Text inserted between the two joined lines.
    """
    if not self.document.on_last_line:
        # Delete the newline that terminates the current line.
        self.cursor_position += self.document.get_end_of_line_position()
        self.delete()

        # Remove spaces.
        # (Collapse the joined line's leading spaces into the separator.)
        self.text = (
            self.document.text_before_cursor
            + separator
            + self.document.text_after_cursor.lstrip(" ")
        )
def join_selected_lines(self, separator: str = " ") -> None:
    """
    Join the selected lines.

    :param separator: Text placed after each joined line.
    """
    assert self.selection_state

    # Get lines.
    # (Selection endpoints may be in either order; normalize them.)
    from_, to = sorted(
        [self.cursor_position, self.selection_state.original_cursor_position]
    )

    before = self.text[:from_]
    lines = self.text[from_:to].splitlines()
    after = self.text[to:]

    # Replace leading spaces with just one space.
    lines = [l.lstrip(" ") + separator for l in lines]

    # Set new document.
    # (Cursor lands just before the separator that follows the last
    # joined line.)
    self.document = Document(
        text=before + "".join(lines) + after,
        cursor_position=len(before + "".join(lines[:-1])) - 1,
    )
def swap_characters_before_cursor(self) -> None:
    """Swap the two characters just before the cursor (no-op near the start)."""
    pos = self.cursor_position
    if pos < 2:
        return

    prefix = self.text[: pos - 2]
    pair = self.text[pos - 2 : pos]
    self.text = prefix + pair[1] + pair[0] + self.text[pos:]
def go_to_history(self, index: int) -> None:
    """
    Switch to history entry `index` and put the cursor at the end of it.
    Out-of-range indices are ignored.
    """
    if index >= len(self._working_lines):
        return
    self.working_index = index
    self.cursor_position = len(self.text)
def complete_next(self, count: int = 1, disable_wrap_around: bool = False) -> None:
    """
    Browse to the next completions.
    (Does nothing if there are no completion.)

    :param count: Number of entries to advance.
    :param disable_wrap_around: When already at the last entry, stay
        there instead of cycling back to "no selection".
    """
    index: int | None

    if self.complete_state:
        completions_count = len(self.complete_state.completions)

        if self.complete_state.complete_index is None:
            # Nothing selected yet: start at the first completion.
            index = 0
        elif self.complete_state.complete_index == completions_count - 1:
            # Stepping past the last entry wraps to "no selection".
            index = None

            if disable_wrap_around:
                return
        else:
            index = min(
                completions_count - 1, self.complete_state.complete_index + count
            )
        self.go_to_completion(index)
def complete_previous(
    self, count: int = 1, disable_wrap_around: bool = False
) -> None:
    """
    Browse to the previous completions.
    (Does nothing if there are no completion.)

    :param count: Number of entries to step back.
    :param disable_wrap_around: When already at the first entry, stay
        there instead of cycling back to "no selection".
    """
    index: int | None

    if self.complete_state:
        if self.complete_state.complete_index == 0:
            # Stepping before the first entry wraps to "no selection".
            index = None

            if disable_wrap_around:
                return
        elif self.complete_state.complete_index is None:
            # Nothing selected yet: start at the last completion.
            index = len(self.complete_state.completions) - 1
        else:
            index = max(0, self.complete_state.complete_index - count)

        self.go_to_completion(index)
def cancel_completion(self) -> None:
    """Abort the active completion and restore the original text."""
    if not self.complete_state:
        return
    self.go_to_completion(None)
    self.complete_state = None
def _set_completions(self, completions: list[Completion]) -> CompletionState:
    """
    Start completions. (Generate list of completions and initialize.)

    By default, no completion will be selected.

    :param completions: Completions to show for the current document.
    :return: The newly installed completion state.
    """
    self.complete_state = CompletionState(
        original_document=self.document, completions=completions
    )

    # Trigger event. This should eventually invalidate the layout.
    self.on_completions_changed.fire()

    return self.complete_state
def start_history_lines_completion(self) -> None:
    """
    Start a completion based on all the other lines in the document and the
    history.
    """
    found_completions: set[str] = set()  # Deduplicate identical lines.
    completions = []

    # For every line of the whole history, find matches with the current line.
    current_line = self.document.current_line_before_cursor.lstrip()

    for i, string in enumerate(self._working_lines):
        for j, l in enumerate(string.split("\n")):
            l = l.strip()
            if l and l.startswith(current_line):
                # When a new line has been found.
                if l not in found_completions:
                    found_completions.add(l)

                    # Create completion.
                    if i == self.working_index:
                        display_meta = "Current, line %s" % (j + 1)
                    else:
                        display_meta = f"History {i + 1}, line {j + 1}"

                    completions.append(
                        Completion(
                            text=l,
                            start_position=-len(current_line),
                            display_meta=display_meta,
                        )
                    )
    # Reverse so the most recent matches come first, and select the first.
    self._set_completions(completions=completions[::-1])
    self.go_to_completion(0)
def go_to_completion(self, index: int | None) -> None:
    """
    Select a completion from the list of current completions.

    :param index: Index into the completion list, or `None` to restore
        the original (uncompleted) text.
    """
    assert self.complete_state

    # Set new completion
    state = self.complete_state
    state.go_to_index(index)

    # Set text/cursor position
    new_text, new_cursor_position = state.new_text_and_position()
    self.document = Document(new_text, new_cursor_position)

    # (changing text/cursor position will unset complete_state.)
    # Restore it, because this completion session is still active.
    self.complete_state = state
def apply_completion(self, completion: Completion) -> None:
    """
    Insert a given completion.

    :param completion: The completion to insert. Its (non-positive)
        `start_position` tells how many characters before the cursor it
        replaces.
    """
    # If there was already a completion active, cancel that one.
    if self.complete_state:
        self.go_to_completion(None)
        self.complete_state = None

    # Insert text from the given completion.
    self.delete_before_cursor(-completion.start_position)
    self.insert_text(completion.text)
def _set_history_search(self) -> None:
    """
    Set `history_search_text`.
    (The text before the cursor becomes the prefix used to filter the
    history; cleared entirely when history search is disabled.)
    """
    if not self.enable_history_search():
        self.history_search_text = None
        return

    # Only capture the prefix once per search session.
    if self.history_search_text is None:
        self.history_search_text = self.document.text_before_cursor
def _history_matches(self, i: int) -> bool:
    """
    True when working line `i` matches the active history search prefix.
    (Trivially true when no history search is active.)
    """
    prefix = self.history_search_text
    if prefix is None:
        return True
    return self._working_lines[i].startswith(prefix)
def history_forward(self, count: int = 1) -> None:
    """
    Move forwards through the history.

    :param count: Amount of items to move forward.
    """
    self._set_history_search()

    # Go forward in history.
    found_something = False

    for i in range(self.working_index + 1, len(self._working_lines)):
        if self._history_matches(i):
            self.working_index = i
            count -= 1
            found_something = True
        if count == 0:
            break

    # If we found an entry, move cursor to the end of the first line.
    if found_something:
        self.cursor_position = 0
        self.cursor_position += self.document.get_end_of_line_position()
def history_backward(self, count: int = 1) -> None:
    """
    Move backwards through history.

    :param count: Amount of items to move back.
    """
    self._set_history_search()

    # Go back in history.
    found_something = False

    for i in range(self.working_index - 1, -1, -1):
        if self._history_matches(i):
            self.working_index = i
            count -= 1
            found_something = True
        if count == 0:
            break

    # If we move to another entry, move cursor to the end of the line.
    if found_something:
        self.cursor_position = len(self.text)
def yank_nth_arg(self, n: int | None = None, _yank_last_arg: bool = False) -> None:
    """
    Pick nth word from previous history entry (depending on current
    `yank_nth_arg_state`) and insert it at current position. Rotate through
    history if called repeatedly. If no `n` has been given, take the first
    argument. (The second word.)

    :param n: (None or int), The index of the word from the previous line
        to take.
    :param _yank_last_arg: Internal flag: default to the last word
        (index -1) instead of the first argument.
    """
    assert n is None or isinstance(n, int)
    history_strings = self.history.get_strings()

    if not len(history_strings):
        return

    # Make sure we have a `YankNthArgState`.
    # (A fresh state starts one entry back; repeated calls rotate further.)
    if self.yank_nth_arg_state is None:
        state = YankNthArgState(n=-1 if _yank_last_arg else 1)
    else:
        state = self.yank_nth_arg_state

    if n is not None:
        state.n = n

    # Get new history position.
    # (Wrap to the most recent entry after passing the oldest one.)
    new_pos = state.history_position - 1
    if -new_pos > len(history_strings):
        new_pos = -1

    # Take argument from line.
    line = history_strings[new_pos]
    words = [w.strip() for w in _QUOTED_WORDS_RE.split(line)]
    words = [w for w in words if w]
    try:
        word = words[state.n]
    except IndexError:
        word = ""

    # Insert new argument.
    # (On repeated calls, first remove what the previous call inserted.)
    if state.previous_inserted_word:
        self.delete_before_cursor(len(state.previous_inserted_word))
    self.insert_text(word)

    # Save state again for next completion. (Note that the 'insert'
    # operation from above clears `self.yank_nth_arg_state`.)
    state.previous_inserted_word = word
    state.history_position = new_pos
    self.yank_nth_arg_state = state
def yank_last_arg(self, n: int | None = None) -> None:
    """
    Like `yank_nth_arg`, but if no argument has been given, yank the last
    word by default.

    :param n: Optional explicit word index; overrides the default.
    """
    self.yank_nth_arg(n=n, _yank_last_arg=True)
def start_selection(
    self, selection_type: SelectionType = SelectionType.CHARACTERS
) -> None:
    """
    Take the current cursor position as the start of this selection.

    :param selection_type: Characters, lines or block selection.
    """
    self.selection_state = SelectionState(self.cursor_position, selection_type)
def copy_selection(self, _cut: bool = False) -> ClipboardData:
    """
    Copy selected text and return :class:`.ClipboardData` instance.

    Notice that this doesn't store the copied data on the clipboard yet.
    You can store it like this:

    .. code:: python

        data = buffer.copy_selection()
        get_app().clipboard.set_data(data)

    :param _cut: Internal flag: when True, also remove the selected text
        from the buffer (see :meth:`cut_selection`).
    """
    new_document, clipboard_data = self.document.cut_selection()
    if _cut:
        self.document = new_document

    # Copying/cutting always ends the selection.
    self.selection_state = None
    return clipboard_data
def cut_selection(self) -> ClipboardData:
    """
    Delete selected text and return :class:`.ClipboardData` instance.
    """
    return self.copy_selection(_cut=True)
def paste_clipboard_data(
    self,
    data: ClipboardData,
    paste_mode: PasteMode = PasteMode.EMACS,
    count: int = 1,
) -> None:
    """
    Insert the data from the clipboard.

    :param data: The clipboard contents to paste.
    :param paste_mode: Emacs-style paste, or Vi paste before/after cursor.
    :param count: Paste the data this many times.
    """
    assert isinstance(data, ClipboardData)
    assert paste_mode in (PasteMode.VI_BEFORE, PasteMode.VI_AFTER, PasteMode.EMACS)

    original_document = self.document
    self.document = self.document.paste_clipboard_data(
        data, paste_mode=paste_mode, count=count
    )

    # Remember original document. This assignment should come at the end,
    # because assigning to 'document' will erase it.
    self.document_before_paste = original_document
def newline(self, copy_margin: bool = True) -> None:
    """
    Insert a line ending at the current position, optionally repeating
    the current line's leading whitespace on the new line.
    """
    margin = (
        self.document.leading_whitespace_in_current_line if copy_margin else ""
    )
    self.insert_text("\n" + margin)
def insert_line_above(self, copy_margin: bool = True) -> None:
    """
    Insert a new line above the current one and leave the cursor on it.
    """
    margin = (
        self.document.leading_whitespace_in_current_line if copy_margin else ""
    )

    self.cursor_position += self.document.get_start_of_line_position()
    self.insert_text(margin + "\n")
    # Step back over the newline so the cursor ends on the new line.
    self.cursor_position -= 1
def insert_line_below(self, copy_margin: bool = True) -> None:
    """
    Insert a new line below the current one and leave the cursor on it.
    """
    margin = (
        self.document.leading_whitespace_in_current_line if copy_margin else ""
    )

    self.cursor_position += self.document.get_end_of_line_position()
    self.insert_text("\n" + margin)
def insert_text(
    self,
    data: str,
    overwrite: bool = False,
    move_cursor: bool = True,
    fire_event: bool = True,
) -> None:
    """
    Insert characters at cursor position.

    :param data: The text to insert.
    :param overwrite: Replace the characters under the cursor instead of
        inserting (but never overwrite past a newline).
    :param move_cursor: Advance the cursor past the inserted text.
    :param fire_event: Fire `on_text_insert` event. This is mainly used to
        trigger autocompletion while typing.
    """
    # Original text & cursor position.
    otext = self.text
    ocpos = self.cursor_position

    # In insert/text mode.
    if overwrite:
        # Don't overwrite the newline itself. Just before the line ending,
        # it should act like insert mode.
        overwritten_text = otext[ocpos : ocpos + len(data)]
        if "\n" in overwritten_text:
            overwritten_text = overwritten_text[: overwritten_text.find("\n")]

        text = otext[:ocpos] + data + otext[ocpos + len(overwritten_text) :]
    else:
        text = otext[:ocpos] + data + otext[ocpos:]

    if move_cursor:
        cpos = self.cursor_position + len(data)
    else:
        cpos = self.cursor_position

    # Set new document.
    # (Set text and cursor position at the same time. Otherwise, setting
    # the text will fire a change event before the cursor position has been
    # set. It works better to have this atomic.)
    self.document = Document(text, cpos)

    # Fire 'on_text_insert' event.
    if fire_event:  # XXX: rename to `start_complete`.
        self.on_text_insert.fire()

        # Only complete when "complete_while_typing" is enabled.
        if self.completer and self.complete_while_typing():
            get_app().create_background_task(self._async_completer())

        # Call auto_suggest.
        if self.auto_suggest:
            get_app().create_background_task(self._async_suggester())
def undo(self) -> None:
    """Revert the buffer to the previous state on the undo stack."""
    # Pop from the undo-stack until we find a text that if different from
    # the current text. (The current logic of `save_to_undo_stack` will
    # cause that the top of the undo stack is usually the same as the
    # current text, so in that case we have to pop twice.)
    while self._undo_stack:
        text, pos = self._undo_stack.pop()

        if text != self.text:
            # Push current text to redo stack.
            self._redo_stack.append((self.text, self.cursor_position))

            # Set new text/cursor_position.
            self.document = Document(text, cursor_position=pos)
            break
def redo(self) -> None:
    """Re-apply the most recently undone change, if any."""
    if self._redo_stack:
        # Copy current state on undo stack.
        # (Don't clear the redo stack: that would drop the state we are
        # about to restore and anything beyond it.)
        self.save_to_undo_stack(clear_redo_stack=False)

        # Pop state from redo stack.
        text, pos = self._redo_stack.pop()
        self.document = Document(text, cursor_position=pos)
def validate(self, set_cursor: bool = False) -> bool:
    """
    Returns `True` if valid.

    :param set_cursor: Set the cursor position, if an error was found.
    """
    # Don't call the validator again, if it was already called for the
    # current input.
    if self.validation_state != ValidationState.UNKNOWN:
        return self.validation_state == ValidationState.VALID

    # Call validator.
    if self.validator:
        try:
            self.validator.validate(self.document)
        except ValidationError as e:
            # Set cursor position (don't allow invalid values.)
            # Clamp the reported position into the text's bounds.
            if set_cursor:
                self.cursor_position = min(
                    max(0, e.cursor_position), len(self.text)
                )

            self.validation_state = ValidationState.INVALID
            self.validation_error = e
            return False

    # Handle validation result.
    self.validation_state = ValidationState.VALID
    self.validation_error = None
    return True
async def _validate_async(self) -> None:
    """
    Asynchronous version of `validate()`.
    This one doesn't set the cursor position.

    We have both variants, because a synchronous version is required.
    Handling the ENTER key needs to be completely synchronous, otherwise
    stuff like type-ahead is going to give very weird results. (People
    could type input while the ENTER key is still processed.)

    An asynchronous version is required if we have `validate_while_typing`
    enabled.
    """
    while True:
        # Don't call the validator again, if it was already called for the
        # current input.
        if self.validation_state != ValidationState.UNKNOWN:
            return

        # Call validator.
        error = None
        document = self.document

        if self.validator:
            try:
                await self.validator.validate_async(self.document)
            except ValidationError as e:
                error = e

            # If the document changed during the validation, try again.
            # (The result would apply to stale text.)
            if self.document != document:
                continue

        # Handle validation result.
        if error:
            self.validation_state = ValidationState.INVALID
        else:
            self.validation_state = ValidationState.VALID

        self.validation_error = error
        get_app().invalidate()  # Trigger redraw (display error).
def append_to_history(self) -> None:
    """
    Append the current input to the history, unless it is empty or
    identical to the most recent entry.
    """
    if not self.text:
        return

    strings = self.history.get_strings()
    if strings and strings[-1] == self.text:
        return  # Avoid consecutive duplicates.

    self.history.append_string(self.text)
def _search(
    self,
    search_state: SearchState,
    include_current_position: bool = False,
    count: int = 1,
) -> tuple[int, int] | None:
    """
    Execute search. Return (working_index, cursor_position) tuple when this
    search is applied. Returns `None` when this text cannot be found.

    :param search_state: Text, direction and case options for the search.
    :param include_current_position: Allow a match at the cursor itself.
    :param count: Repeat the search this many times.
    """
    assert count > 0

    text = search_state.text
    direction = search_state.direction
    ignore_case = search_state.ignore_case()

    def search_once(
        working_index: int, document: Document
    ) -> tuple[int, Document] | None:
        """
        Do search one time.
        Return (working_index, document) or `None`
        """
        if direction == SearchDirection.FORWARD:
            # Try find at the current input.
            new_index = document.find(
                text,
                include_current_position=include_current_position,
                ignore_case=ignore_case,
            )

            if new_index is not None:
                return (
                    working_index,
                    Document(document.text, document.cursor_position + new_index),
                )
            else:
                # No match, go forward in the history. (Include len+1 to wrap around.)
                # (Here we should always include all cursor positions, because
                # it's a different line.)
                for i in range(working_index + 1, len(self._working_lines) + 1):
                    i %= len(self._working_lines)
                    document = Document(self._working_lines[i], 0)
                    new_index = document.find(
                        text, include_current_position=True, ignore_case=ignore_case
                    )
                    if new_index is not None:
                        return (i, Document(document.text, new_index))
        else:
            # Try find at the current input.
            new_index = document.find_backwards(text, ignore_case=ignore_case)

            if new_index is not None:
                return (
                    working_index,
                    Document(document.text, document.cursor_position + new_index),
                )
            else:
                # No match, go back in the history. (Include -1 to wrap around.)
                for i in range(working_index - 1, -2, -1):
                    i %= len(self._working_lines)
                    # Start searching from the end of each older entry.
                    document = Document(
                        self._working_lines[i], len(self._working_lines[i])
                    )
                    new_index = document.find_backwards(
                        text, ignore_case=ignore_case
                    )
                    if new_index is not None:
                        return (
                            i,
                            Document(document.text, len(document.text) + new_index),
                        )
        return None

    # Do 'count' search iterations.
    working_index = self.working_index
    document = self.document
    for _ in range(count):
        result = search_once(working_index, document)
        if result is None:
            return None  # Nothing found.
        else:
            working_index, document = result

    return (working_index, document.cursor_position)
def document_for_search(self, search_state: SearchState) -> Document:
    """
    Return a :class:`~prompt_toolkit.document.Document` instance that has
    the text/cursor position for this search, if we would apply it. This
    will be used in the
    :class:`~prompt_toolkit.layout.BufferControl` to display feedback while
    searching.

    Does not modify the buffer itself.
    """
    search_result = self._search(search_state, include_current_position=True)

    if search_result is None:
        return self.document
    else:
        working_index, cursor_position = search_result

        # Keep selection, when `working_index` was not changed.
        # (A different history entry cannot carry over the selection.)
        if working_index == self.working_index:
            selection = self.selection_state
        else:
            selection = None

        return Document(
            self._working_lines[working_index], cursor_position, selection=selection
        )
def get_search_position(
    self,
    search_state: SearchState,
    include_current_position: bool = True,
    count: int = 1,
) -> int:
    """
    Get the cursor position for this search.

    (This operation won't change the `working_index`. It's won't go through
    the history. Vi text objects can't span multiple items.)
    """
    result = self._search(
        search_state, include_current_position=include_current_position, count=count
    )

    if result is None:
        # No match: stay where we are.
        return self.cursor_position

    _, cursor_position = result
    return cursor_position
def apply_search(
    self,
    search_state: SearchState,
    include_current_position: bool = True,
    count: int = 1,
) -> None:
    """
    Apply search. If something is found, set `working_index` and
    `cursor_position`.
    """
    result = self._search(
        search_state, include_current_position=include_current_position, count=count
    )

    if result is None:
        return  # No match: leave the buffer untouched.

    self.working_index, self.cursor_position = result
def exit_selection(self) -> None:
    """Leave selection mode. (Keeps the text; only drops the selection.)"""
    self.selection_state = None
def _editor_simple_tempfile(self) -> tuple[str, Callable[[], None]]:
    """
    Simple (file) tempfile implementation.
    Return (tempfile, cleanup_func).

    The temp file is created with `tempfile_suffix`, so external editors
    can pick syntax highlighting from the extension.
    """
    suffix = to_str(self.tempfile_suffix)
    descriptor, filename = tempfile.mkstemp(suffix)

    os.write(descriptor, self.text.encode("utf-8"))
    os.close(descriptor)

    def cleanup() -> None:
        # Caller is responsible for invoking this after the editor exits.
        os.unlink(filename)

    return filename, cleanup
def _editor_complex_tempfile(self) -> tuple[str, Callable[[], None]]:
    # Complex (directory) tempfile implementation.
    # Creates a temp directory and places the buffer text at the relative
    # path given by `self.tempfile` inside it.
    # Return (filename, cleanup_func).
    headtail = to_str(self.tempfile)
    if not headtail:
        # Revert to simple case.
        return self._editor_simple_tempfile()
    headtail = str(headtail)

    # Try to make according to tempfile logic.
    head, tail = os.path.split(headtail)
    if os.path.isabs(head):
        # Treat an absolute path as relative to the temp directory.
        head = head[1:]

    dirpath = tempfile.mkdtemp()
    if head:
        dirpath = os.path.join(dirpath, head)
        # Assume there is no issue creating dirs in this temp dir.
        os.makedirs(dirpath)

    # Open the filename and write current text.
    filename = os.path.join(dirpath, tail)
    with open(filename, "w", encoding="utf-8") as fh:
        fh.write(self.text)

    def cleanup() -> None:
        # Remove the whole temp directory tree, not just the file.
        shutil.rmtree(dirpath)

    return filename, cleanup
def open_in_editor(self, validate_and_handle: bool = False) -> asyncio.Task[None]:
    """
    Open code in editor.

    This returns a future, and runs in a thread executor.

    :param validate_and_handle: After the editor exits successfully,
        also validate and accept the (possibly edited) input.
    :raises EditReadOnlyBuffer: When the buffer is read-only.
    """
    if self.read_only():
        raise EditReadOnlyBuffer()

    # Write current text to temporary file
    if self.tempfile:
        filename, cleanup_func = self._editor_complex_tempfile()
    else:
        filename, cleanup_func = self._editor_simple_tempfile()

    async def run() -> None:
        try:
            # Open in editor
            # (We need to use `run_in_terminal`, because not all editors go to
            # the alternate screen buffer, and some could influence the cursor
            # position.)
            success = await run_in_terminal(
                lambda: self._open_file_in_editor(filename), in_executor=True
            )

            # Read content again.
            if success:
                with open(filename, "rb") as f:
                    text = f.read().decode("utf-8")

                    # Drop trailing newline. (Editors are supposed to add it at the
                    # end, but we don't need it.)
                    if text.endswith("\n"):
                        text = text[:-1]

                    self.document = Document(text=text, cursor_position=len(text))

                # Accept the input.
                if validate_and_handle:
                    self.validate_and_handle()
        finally:
            # Clean up temp dir/file.
            cleanup_func()

    return get_app().create_background_task(run())
def _open_file_in_editor(self, filename: str) -> bool:
    """
    Call editor executable.

    Return True when we received a zero return code.

    :param filename: Path of the temp file to hand to the editor.
    """
    # If the 'VISUAL' or 'EDITOR' environment variable has been set, use that.
    # Otherwise, fall back to the first available editor that we can find.
    visual = os.environ.get("VISUAL")
    editor = os.environ.get("EDITOR")

    editors = [
        visual,
        editor,
        # Order of preference.
        "/usr/bin/editor",
        "/usr/bin/nano",
        "/usr/bin/pico",
        "/usr/bin/vi",
        "/usr/bin/emacs",
    ]

    for e in editors:
        if e:
            try:
                # Use 'shlex.split()', because $VISUAL can contain spaces
                # and quotes.
                returncode = subprocess.call(shlex.split(e) + [filename])
                return returncode == 0

            except OSError:
                # Executable does not exist, try the next one.
                pass

    return False
def start_completion(
    self,
    select_first: bool = False,
    select_last: bool = False,
    insert_common_part: bool = False,
    complete_event: CompleteEvent | None = None,
) -> None:
    """
    Start asynchronous autocompletion of this buffer.
    (This will do nothing if a previous completion was still in progress.)

    :param select_first: Pre-select the first completion once loaded.
    :param select_last: Pre-select the last completion once loaded.
    :param insert_common_part: Insert the common prefix of all completions.
    :param complete_event: Optional event; defaults to an explicit
        (user-requested) completion.
    """
    # Only one of these options can be selected.
    assert select_first + select_last + insert_common_part <= 1

    get_app().create_background_task(
        self._async_completer(
            select_first=select_first,
            select_last=select_last,
            insert_common_part=insert_common_part,
            complete_event=complete_event
            or CompleteEvent(completion_requested=True),
        )
    )
def _create_completer_coroutine(self) -> Callable[..., Coroutine[Any, Any, None]]:
    """
    Create function for asynchronous autocompletion.

    (This consumes the asynchronous completer generator, which possibly
    runs the completion algorithm in another thread.)

    :return: The coroutine function that drives one completion session.
    """

    def completion_does_nothing(document: Document, completion: Completion) -> bool:
        """
        Return `True` if applying this completion doesn't have any effect.
        (When it doesn't insert any new text.
        """
        text_before_cursor = document.text_before_cursor
        replaced_text = text_before_cursor[
            len(text_before_cursor) + completion.start_position :
        ]
        return replaced_text == completion.text

    @_only_one_at_a_time
    async def async_completer(
        select_first: bool = False,
        select_last: bool = False,
        insert_common_part: bool = False,
        complete_event: CompleteEvent | None = None,
    ) -> None:
        document = self.document
        complete_event = complete_event or CompleteEvent(text_inserted=True)

        # Don't complete when we already have completions.
        if self.complete_state or not self.completer:
            return

        # Create an empty CompletionState.
        complete_state = CompletionState(original_document=self.document)
        self.complete_state = complete_state

        def proceed() -> bool:
            """Keep retrieving completions. Input text has not yet changed
            while generating completions."""
            # (Identity check: any text change replaces `complete_state`.)
            return self.complete_state == complete_state

        refresh_needed = asyncio.Event()

        async def refresh_while_loading() -> None:
            """Background loop to refresh the UI at most 3 times a second
            while the completion are loading. Calling
            `on_completions_changed.fire()` for every completion that we
            receive is too expensive when there are many completions. (We
            could tune `Application.max_render_postpone_time` and
            `Application.min_redraw_interval`, but having this here is a
            better approach.)
            """
            while True:
                self.on_completions_changed.fire()
                refresh_needed.clear()
                await asyncio.sleep(0.3)
                await refresh_needed.wait()

        refresh_task = asyncio.ensure_future(refresh_while_loading())
        try:
            # Load.
            async with aclosing(
                self.completer.get_completions_async(document, complete_event)
            ) as async_generator:
                async for completion in async_generator:
                    complete_state.completions.append(completion)
                    refresh_needed.set()

                    # If the input text changes, abort.
                    if not proceed():
                        break

                    # Always stop at 10k completions.
                    if (
                        len(complete_state.completions)
                        >= self.max_number_of_completions
                    ):
                        break
        finally:
            refresh_task.cancel()

            # Refresh one final time after we got everything.
            self.on_completions_changed.fire()

        completions = complete_state.completions

        # When there is only one completion, which has nothing to add, ignore it.
        if len(completions) == 1 and completion_does_nothing(
            document, completions[0]
        ):
            del completions[:]

        # Set completions if the text was not yet changed.
        if proceed():
            # When no completions were found, or when the user selected
            # already a completion by using the arrow keys, don't do anything.
            if (
                not self.complete_state
                or self.complete_state.complete_index is not None
            ):
                return

            # When there are no completions, reset completion state anyway.
            if not completions:
                self.complete_state = None
                # Render the ui if the completion menu was shown
                # it is needed especially if there is one completion and it was deleted.
                self.on_completions_changed.fire()
                return

            # Select first/last or insert common part, depending on the key
            # binding. (For this we have to wait until all completions are
            # loaded.)

            if select_first:
                self.go_to_completion(0)

            elif select_last:
                self.go_to_completion(len(completions) - 1)

            elif insert_common_part:
                common_part = get_common_complete_suffix(document, completions)
                if common_part:
                    # Insert the common part, update completions.
                    self.insert_text(common_part)
                    if len(completions) > 1:
                        # (Don't call `async_completer` again, but
                        # recalculate completions. See:
                        # https://github.com/ipython/ipython/issues/9658)
                        completions[:] = [
                            c.new_completion_from_position(len(common_part))
                            for c in completions
                        ]

                        self._set_completions(completions=completions)
                    else:
                        self.complete_state = None
                else:
                    # When we were asked to insert the "common"
                    # prefix, but there was no common suffix but
                    # still exactly one match, then select the
                    # first. (It could be that we have a completion
                    # which does * expansion, like '*.py', with
                    # exactly one match.)
                    if len(completions) == 1:
                        self.go_to_completion(0)

        else:
            # If the last operation was an insert, (not a delete), restart
            # the completion coroutine.

            if self.document.text_before_cursor == document.text_before_cursor:
                return  # Nothing changed.

            if self.document.text_before_cursor.startswith(
                document.text_before_cursor
            ):
                # More text typed: let `_only_one_at_a_time` re-run us.
                raise _Retry

    return async_completer
def _create_auto_suggest_coroutine(self) -> Callable[[], Coroutine[Any, Any, None]]:
    """
    Create function for asynchronous auto suggestion.
    (This can be in another thread.)

    :return: The coroutine function that computes one suggestion.
    """

    @_only_one_at_a_time
    async def async_suggestor() -> None:
        document = self.document

        # Don't suggest when we already have a suggestion.
        if self.suggestion or not self.auto_suggest:
            return

        suggestion = await self.auto_suggest.get_suggestion_async(self, document)

        # Set suggestion only if the text was not yet changed.
        if self.document == document:
            # Set suggestion and redraw interface.
            self.suggestion = suggestion
            self.on_suggestion_set.fire()
        else:
            # Otherwise, restart thread.
            # (`_only_one_at_a_time` catches `_Retry` and re-runs us.)
            raise _Retry

    return async_suggestor
def _create_auto_validate_coroutine(
    self,
) -> Callable[[], Coroutine[Any, Any, None]]:
    """
    Create a function for asynchronous validation while typing.
    (This can be in another thread.)

    :return: The coroutine function driving background validation.
    """

    @_only_one_at_a_time
    async def async_validator() -> None:
        await self._validate_async()

    return async_validator
def validate_and_handle(self) -> None:
    """
    Validate buffer and handle the accept action.

    On success, the text is passed to the accept handler, appended to the
    history, and — unless the handler asked to keep it — the buffer is
    reset.
    """
    valid = self.validate(set_cursor=True)

    # When the validation succeeded, accept the input.
    if valid:
        if self.accept_handler:
            # The handler returns True to keep the text in the buffer.
            keep_text = self.accept_handler(self)
        else:
            keep_text = False

        self.append_to_history()

        if not keep_text:
            self.reset()
_T = TypeVar("_T", bound=Callable[..., Coroutine[Any, Any, None]])
def _only_one_at_a_time(coroutine: _T) -> _T:
"""
Decorator that only starts the coroutine only if the previous call has
finished. (Used to make sure that we have only one autocompleter, auto
suggestor and validator running at a time.)
When the coroutine raises `_Retry`, it is restarted.
"""
running = False
@wraps(coroutine)
async def new_coroutine(*a: Any, **kw: Any) -> Any:
nonlocal running
# Don't start a new function, if the previous is still in progress.
if running:
return
running = True
try:
while True:
try:
await coroutine(*a, **kw)
except _Retry:
continue
else:
return None
finally:
running = False
return cast(_T, new_coroutine)
| Buffer |
python | tensorflow__tensorflow | tensorflow/compiler/tests/async_comp_test.py | {
"start": 1796,
"end": 3510
} | class ____(test.TestCase):
# Asynchrobnous compilation uses the existing fallback path and existing
# compiler. This test only tests that asynchronous compilation is performed.
def testAsyncCompilationJit(self):
@function.Defun(compiled=True)
def CompiledFunction(x):
return math_ops.log(x)
with session_lib.Session() as sess:
x = array_ops.placeholder(dtypes.float32)
y = CompiledFunction(x)
run_metadata = config_pb2.RunMetadata()
sess.run(
y,
feed_dict={x: [0.] * 60},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
# For The first iteration, the fall back path is chosen.
hasXlaRunOp = MetadataHasXlaRunOp(run_metadata)
self.assertFalse(hasXlaRunOp)
# Execute the session until after asynchronous compilation is finished
# and the compiled cluster has been executed once.
while (not hasXlaRunOp):
run_metadata = config_pb2.RunMetadata()
sess.run(
y,
feed_dict={x: [0.] * 60},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
hasXlaRunOp = MetadataHasXlaRunOp(run_metadata)
if __name__ == "__main__":
os.environ["TF_XLA_FLAGS"] = ("--tf_xla_async_compilation=true " +
"--tf_xla_enable_lazy_compilation=true " +
os.environ.get("TF_XLA_FLAGS", ""))
# This test is using Tensorflow sessions which are not compatible with eager
# mode.
ops.disable_eager_execution()
test.main()
| AsyncCompilationTest |
python | pypa__pip | src/pip/_internal/index/package_finder.py | {
"start": 3442,
"end": 3709
} | class ____(enum.Enum):
candidate = enum.auto()
different_project = enum.auto()
yanked = enum.auto()
format_unsupported = enum.auto()
format_invalid = enum.auto()
platform_mismatch = enum.auto()
requires_python_mismatch = enum.auto()
| LinkType |
python | numpy__numpy | numpy/distutils/command/sdist.py | {
"start": 223,
"end": 733
} | class ____(old_sdist):
def add_defaults (self):
old_sdist.add_defaults(self)
dist = self.distribution
if dist.has_data_files():
for data in dist.data_files:
self.filelist.extend(get_data_files(data))
if dist.has_headers():
headers = []
for h in dist.headers:
if isinstance(h, str): headers.append(h)
else: headers.append(h[1])
self.filelist.extend(headers)
return
| sdist |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_to_timestamp.py | {
"start": 445,
"end": 5973
} | class ____:
def test_to_timestamp(self, frame_or_series):
K = 5
index = period_range(freq="Y", start="1/1/2001", end="12/1/2009")
obj = DataFrame(
np.random.default_rng(2).standard_normal((len(index), K)),
index=index,
columns=["A", "B", "C", "D", "E"],
)
obj["mix"] = "a"
obj = tm.get_obj(obj, frame_or_series)
exp_index = date_range("1/1/2001", end="12/31/2009", freq="YE-DEC", unit="ns")
exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns")
result = obj.to_timestamp("D", "end")
tm.assert_index_equal(result.index, exp_index)
tm.assert_numpy_array_equal(result.values, obj.values)
if frame_or_series is Series:
assert result.name == "A"
exp_index = date_range("1/1/2001", end="1/1/2009", freq="YS-JAN", unit="ns")
result = obj.to_timestamp("D", "start")
tm.assert_index_equal(result.index, exp_index)
result = obj.to_timestamp(how="start")
tm.assert_index_equal(result.index, exp_index)
delta = timedelta(hours=23)
result = obj.to_timestamp("H", "end")
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
delta = timedelta(hours=23, minutes=59)
result = obj.to_timestamp("T", "end")
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
result = obj.to_timestamp("S", "end")
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
def test_to_timestamp_columns(self):
K = 5
index = period_range(freq="Y", start="1/1/2001", end="12/1/2009")
df = DataFrame(
np.random.default_rng(2).standard_normal((len(index), K)),
index=index,
columns=["A", "B", "C", "D", "E"],
)
df["mix"] = "a"
# columns
df = df.T
exp_index = date_range("1/1/2001", end="12/31/2009", freq="YE-DEC", unit="ns")
exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns")
result = df.to_timestamp("D", "end", axis=1)
tm.assert_index_equal(result.columns, exp_index)
tm.assert_numpy_array_equal(result.values, df.values)
exp_index = date_range("1/1/2001", end="1/1/2009", freq="YS-JAN", unit="ns")
result = df.to_timestamp("D", "start", axis=1)
tm.assert_index_equal(result.columns, exp_index)
delta = timedelta(hours=23)
result = df.to_timestamp("H", "end", axis=1)
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns")
tm.assert_index_equal(result.columns, exp_index)
delta = timedelta(hours=23, minutes=59)
result = df.to_timestamp("min", "end", axis=1)
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns")
tm.assert_index_equal(result.columns, exp_index)
result = df.to_timestamp("S", "end", axis=1)
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
tm.assert_index_equal(result.columns, exp_index)
result1 = df.to_timestamp("5min", axis=1)
result2 = df.to_timestamp("min", axis=1)
expected = date_range("2001-01-01", "2009-01-01", freq="YS", unit="ns")
assert isinstance(result1.columns, DatetimeIndex)
assert isinstance(result2.columns, DatetimeIndex)
tm.assert_numpy_array_equal(result1.columns.asi8, expected.asi8)
tm.assert_numpy_array_equal(result2.columns.asi8, expected.asi8)
# PeriodIndex.to_timestamp always use 'infer'
assert result1.columns.freqstr == "YS-JAN"
assert result2.columns.freqstr == "YS-JAN"
def test_to_timestamp_invalid_axis(self):
index = period_range(freq="Y", start="1/1/2001", end="12/1/2009")
obj = DataFrame(
np.random.default_rng(2).standard_normal((len(index), 5)), index=index
)
# invalid axis
with pytest.raises(ValueError, match="axis"):
obj.to_timestamp(axis=2)
def test_to_timestamp_hourly(self, frame_or_series):
index = period_range(freq="h", start="1/1/2001", end="1/2/2001")
obj = Series(1, index=index, name="foo")
if frame_or_series is not Series:
obj = obj.to_frame()
exp_index = date_range(
"1/1/2001 00:59:59", end="1/2/2001 00:59:59", freq="h", unit="ns"
)
result = obj.to_timestamp(how="end")
exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
if frame_or_series is Series:
assert result.name == "foo"
def test_to_timestamp_raises(self, index, frame_or_series):
# GH#33327
obj = frame_or_series(index=index, dtype=object)
if not isinstance(index, PeriodIndex):
msg = f"unsupported Type {type(index).__name__}"
with pytest.raises(TypeError, match=msg):
obj.to_timestamp()
| TestToTimestamp |
python | mwaskom__seaborn | tests/test_miscplot.py | {
"start": 724,
"end": 914
} | class ____:
@_network(url="https://github.com/mwaskom/seaborn-data")
def test_dogplot(self):
misc.dogplot()
ax = plt.gca()
assert len(ax.images) == 1
| TestDogPlot |
python | pytorch__pytorch | torch/_inductor/codegen/wrapper.py | {
"start": 34551,
"end": 35555
} | class ____(WrapperLine):
wrapper: PythonWrapperCodegen
node: ir.ScatterFallback
def codegen(self, code: IndentedBuffer) -> None:
node = self.node
assert ir.is_node_sequence(node.inputs)
if node.src_is_tensor:
(x, index, src) = (t.codegen_reference() for t in node.inputs)
else:
(x, index) = (t.codegen_reference() for t in node.inputs)
src = node.constant_args[1]
device = d.type if (d := node.get_device()) else V.graph.device_type
self.wrapper._generate_scatter_fallback(
x,
[x, node.constant_args[0], index, src],
node.cpp_kernel_name,
node.python_kernel_name,
node.src_is_tensor,
node.kwargs["reduce"],
node.codegen_kwargs(),
device,
)
def codegen_fx(self, converter: FxConverter) -> FxConversionFunc:
return converter._generate_scatter_fallback
@dataclasses.dataclass
| ScatterFallbackLine |
python | huggingface__transformers | src/transformers/models/decision_transformer/modeling_decision_transformer.py | {
"start": 27437,
"end": 27887
} | class ____(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config: DecisionTransformerConfig
base_model_prefix = "decision_transformer"
main_input_name = "states"
supports_gradient_checkpointing = False
@auto_docstring(
custom_intro="""
The Decision Transformer Model
"""
)
| DecisionTransformerPreTrainedModel |
python | django__django | django/http/multipartparser.py | {
"start": 22148,
"end": 27423
} | class ____:
"""
A Producer that is sensitive to boundaries.
Will happily yield bytes until a boundary is found. Will yield the bytes
before the boundary, throw away the boundary bytes themselves, and push the
post-boundary bytes back on the stream.
The future calls to next() after locating the boundary will raise a
StopIteration exception.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
self._done = False
# rollback an additional six bytes because the format is like
# this: CRLF<boundary>[--CRLF]
self._rollback = len(boundary) + 6
# Try to use mx fast string search if available. Otherwise
# use Python find. Wrap the latter for consistency.
unused_char = self._stream.read(1)
if not unused_char:
raise InputStreamExhausted()
self._stream.unget(unused_char)
def __iter__(self):
return self
def __next__(self):
if self._done:
raise StopIteration()
stream = self._stream
rollback = self._rollback
bytes_read = 0
chunks = []
for bytes in stream:
bytes_read += len(bytes)
chunks.append(bytes)
if bytes_read > rollback:
break
if not bytes:
break
else:
self._done = True
if not chunks:
raise StopIteration()
chunk = b"".join(chunks)
boundary = self._find_boundary(chunk)
if boundary:
end, next = boundary
stream.unget(chunk[next:])
self._done = True
return chunk[:end]
else:
# make sure we don't treat a partial boundary (and
# its separators) as data
if not chunk[:-rollback]: # and len(chunk) >= (len(self._boundary) + 6):
# There's nothing left, we should just return and mark as done.
self._done = True
return chunk
else:
stream.unget(chunk[-rollback:])
return chunk[:-rollback]
def _find_boundary(self, data):
"""
Find a multipart boundary in data.
Should no boundary exist in the data, return None. Otherwise, return
a tuple containing the indices of the following:
* the end of current encapsulation
* the start of the next encapsulation
"""
index = data.find(self._boundary)
if index < 0:
return None
else:
end = index
next = index + len(self._boundary)
# backup over CRLF
last = max(0, end - 1)
if data[last : last + 1] == b"\n":
end -= 1
last = max(0, end - 1)
if data[last : last + 1] == b"\r":
end -= 1
return end, next
def exhaust(stream_or_iterable):
"""Exhaust an iterator or stream."""
try:
iterator = iter(stream_or_iterable)
except TypeError:
iterator = ChunkIter(stream_or_iterable, 16384)
collections.deque(iterator, maxlen=0) # consume iterator quickly.
def parse_boundary_stream(stream, max_header_size):
"""
Parse one and exactly one stream that encapsulates a boundary.
"""
# Look for the end of headers and if not found extend the search to double
# the size up to the MAX_TOTAL_HEADER_SIZE.
headers_chunk_size = 1024
while True:
if headers_chunk_size > max_header_size:
raise MultiPartParserError("Request max total header size exceeded.")
# Stream at beginning of header, look for end of header and parse it if
# found. The header must fit within one chunk.
chunk = stream.read(headers_chunk_size)
# 'find' returns the top of these four bytes, so munch them later to
# prevent them from polluting the payload.
header_end = chunk.find(b"\r\n\r\n")
if header_end != -1:
break
# Find no header, mark this fact and pass on the stream verbatim.
stream.unget(chunk)
# No more data to read.
if len(chunk) < headers_chunk_size:
return (RAW, {}, stream)
# Double the chunk size.
headers_chunk_size *= 2
header = chunk[:header_end]
# here we place any excess chunk back onto the stream, as
# well as throwing away the CRLFCRLF bytes from above.
stream.unget(chunk[header_end + 4 :])
TYPE = RAW
outdict = {}
# Eliminate blank lines
for line in header.split(b"\r\n"):
try:
header_name, value_and_params = line.decode().split(":", 1)
name = header_name.lower().rstrip(" ")
value, params = parse_header_parameters(value_and_params.lstrip(" "))
params = {k: v.encode() for k, v in params.items()}
except ValueError: # Invalid header.
continue
if name == "content-disposition":
TYPE = FIELD
if params.get("filename"):
TYPE = FILE
outdict[name] = value, params
if TYPE == RAW:
stream.unget(chunk)
return (TYPE, outdict, stream)
| BoundaryIter |
python | pytorch__pytorch | tools/experimental/torchfuzz/operators/argsort.py | {
"start": 189,
"end": 2642
} | class ____(Operator):
"""Operator for torch.argsort() operation."""
def __init__(self):
"""Initialize ArgsortOperator."""
super().__init__("argsort")
@property
def torch_op_name(self) -> str | None:
"""Return the torch operation name."""
return "torch.argsort"
def can_produce(self, output_spec: Spec) -> bool:
"""Argsort can produce tensor outputs with integer dtype (long)."""
if not isinstance(output_spec, TensorSpec):
return False
# argsort returns indices, so it must be integer type (long)
return output_spec.dtype == torch.long and len(output_spec.size) > 0
def fuzz_inputs_specs(self, output_spec: Spec) -> list[Spec]:
"""Generate input spec for argsort operation.
torch.argsort(input, dim=-1, descending=False) returns a tensor with:
- Same shape as input
- dtype is torch.long (indices)
"""
if not isinstance(output_spec, TensorSpec):
raise ValueError("ArgsortOperator can only produce TensorSpec outputs")
# Input tensor has the same shape as output but can have any numeric dtype
input_size = output_spec.size
# Generate a valid stride for the input
input_stride = fuzz_valid_stride(input_size)
# Choose a random float dtype for input (argsort works on numeric types)
# Using float32 as a reasonable default
input_dtype = torch.float32
return [TensorSpec(size=input_size, stride=input_stride, dtype=input_dtype)]
def codegen(
self, output_name: str, input_names: list[str], output_spec: Spec
) -> str:
"""Generate code for argsort operation."""
if not isinstance(output_spec, TensorSpec):
raise ValueError("ArgsortOperator can only produce TensorSpec outputs")
if len(input_names) != 1:
raise ValueError("ArgsortOperator requires exactly one input")
# Randomly choose a dimension to sort along
# Default to -1 (last dimension) as it's most common
if len(output_spec.size) > 1:
dim = random.randint(-len(output_spec.size), len(output_spec.size) - 1)
else:
dim = 0
# Randomly choose ascending or descending order
descending = random.choice([True, False])
return f"{output_name} = torch.argsort({input_names[0]}, dim={dim}, descending={descending})"
| ArgsortOperator |
python | ray-project__ray | rllib/algorithms/tests/test_algorithm_save_load_checkpoint_learner.py | {
"start": 3207,
"end": 4788
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
ray.init()
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_save_and_restore(self):
for algo_name in algorithms_and_configs:
config = algorithms_and_configs[algo_name]
with tempfile.TemporaryDirectory() as tmpdir:
# create an algorithm, checkpoint it, then train for 2 iterations
ray.get(save_and_train.remote(config, "CartPole-v1", tmpdir))
# load that checkpoint into a new algorithm and train for 2
# iterations
results_algo_2 = ray.get( # noqa
load_and_train.remote(config, "CartPole-v1", tmpdir)
)
# load that checkpoint into another new algorithm and train for 2
# iterations
results_algo_3 = ray.get( # noqa
load_and_train.remote(config, "CartPole-v1", tmpdir)
)
# check that the results are the same across loaded algorithms
# they won't be the same as the first algorithm since the random
# state that is used for each algorithm is not preserved across
# checkpoints.
# TODO (sven): Uncomment once seeding works on EnvRunners.
# check(results_algo_3, results_algo_2)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestAlgorithmWithLearnerSaveAndRestore |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/conflict_parent/package.py | {
"start": 217,
"end": 879
} | class ____(Package):
homepage = "https://github.com/tgamblin/callpath"
url = "http://github.com/tgamblin/callpath-1.0.tar.gz"
version("0.8", md5="0123456789abcdef0123456789abcdef")
version("0.9", md5="0123456789abcdef0123456789abcdef")
version("1.0", md5="0123456789abcdef0123456789abcdef")
depends_on("conflict")
depends_on("c", type="build")
conflicts("^conflict~foo", when="@0.9")
def install(self, spec, prefix):
configure("--prefix=%s" % prefix)
make()
make("install")
def setup_run_environment(self, env: EnvironmentModifications) -> None:
env.set("FOOBAR", self.name)
| ConflictParent |
python | pyinstaller__pyinstaller | bootloader/waflib/Build.py | {
"start": 24842,
"end": 25061
} | class ____(BuildContext):
'''installs the targets on the system'''
cmd = 'install'
def __init__(self, **kw):
super(InstallContext, self).__init__(**kw)
self.is_install = INSTALL
| InstallContext |
python | jazzband__django-oauth-toolkit | tests/test_mixins.py | {
"start": 3937,
"end": 6957
} | class ____(BaseTest):
def test_options_shall_pass(self):
class TestView(ProtectedResourceMixin, View):
server_class = Server
validator_class = OAuth2Validator
request = self.request_factory.options("/fake-req")
view = TestView.as_view()
response = view(request)
self.assertEqual(response.status_code, 200)
@pytest.fixture
def oidc_only_view():
class TView(OIDCOnlyMixin, View):
def get(self, *args, **kwargs):
return HttpResponse("OK")
return TView.as_view()
@pytest.fixture
def oidc_logout_only_view():
class TView(OIDCLogoutOnlyMixin, View):
def get(self, *args, **kwargs):
return HttpResponse("OK")
return TView.as_view()
@pytest.mark.oauth2_settings(presets.OIDC_SETTINGS_RW)
def test_oidc_only_mixin_oidc_enabled(oauth2_settings, rf, oidc_only_view):
assert oauth2_settings.OIDC_ENABLED
rsp = oidc_only_view(rf.get("/"))
assert rsp.status_code == 200
assert rsp.content.decode("utf-8") == "OK"
@pytest.mark.oauth2_settings(presets.OIDC_SETTINGS_RP_LOGOUT)
def test_oidc_logout_only_mixin_oidc_enabled(oauth2_settings, rf, oidc_only_view):
assert oauth2_settings.OIDC_RP_INITIATED_LOGOUT_ENABLED
rsp = oidc_only_view(rf.get("/"))
assert rsp.status_code == 200
assert rsp.content.decode("utf-8") == "OK"
def test_oidc_only_mixin_oidc_disabled_debug(oauth2_settings, rf, settings, oidc_only_view):
assert oauth2_settings.OIDC_ENABLED is False
settings.DEBUG = True
with pytest.raises(ImproperlyConfigured) as exc:
oidc_only_view(rf.get("/"))
assert "OIDC views are not enabled" in str(exc.value)
def test_oidc_logout_only_mixin_oidc_disabled_debug(oauth2_settings, rf, settings, oidc_logout_only_view):
assert oauth2_settings.OIDC_RP_INITIATED_LOGOUT_ENABLED is False
settings.DEBUG = True
with pytest.raises(ImproperlyConfigured) as exc:
oidc_logout_only_view(rf.get("/"))
assert str(exc.value) == OIDCLogoutOnlyMixin.debug_error_message
def test_oidc_only_mixin_oidc_disabled_no_debug(oauth2_settings, rf, settings, oidc_only_view, caplog):
assert oauth2_settings.OIDC_ENABLED is False
settings.DEBUG = False
with caplog.at_level(logging.WARNING, logger="oauth2_provider"):
rsp = oidc_only_view(rf.get("/"))
assert rsp.status_code == 404
assert len(caplog.records) == 1
assert "OIDC views are not enabled" in caplog.records[0].message
def test_oidc_logout_only_mixin_oidc_disabled_no_debug(
oauth2_settings, rf, settings, oidc_logout_only_view, caplog
):
assert oauth2_settings.OIDC_RP_INITIATED_LOGOUT_ENABLED is False
settings.DEBUG = False
with caplog.at_level(logging.WARNING, logger="oauth2_provider"):
rsp = oidc_logout_only_view(rf.get("/"))
assert rsp.status_code == 404
assert len(caplog.records) == 1
assert caplog.records[0].message == OIDCLogoutOnlyMixin.debug_error_message
| TestProtectedResourceMixin |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_reflection.py | {
"start": 91427,
"end": 95024
} | class ____(fixtures.TestBase):
class NTL:
def __init__(self, enums, domains):
self.enums = enums
self.domains = domains
class CustomType:
def __init__(self, arg1=None, arg2=None, collation=None):
self.arg1 = arg1
self.arg2 = arg2
self.collation = collation
ischema_names = None
def setup_test(self):
ischema_names = postgresql.PGDialect.ischema_names
postgresql.PGDialect.ischema_names = ischema_names.copy()
self.ischema_names = ischema_names
def teardown_test(self):
postgresql.PGDialect.ischema_names = self.ischema_names
self.ischema_names = None
def _assert_reflected(self, dialect):
for sch, args in [
("my_custom_type", (None, None)),
("my_custom_type()", (None, None)),
("my_custom_type(ARG1)", ("ARG1", None)),
("my_custom_type(ARG1, ARG2)", ("ARG1", "ARG2")),
]:
row_dict = {
"name": "colname",
"table_name": "tblname",
"format_type": sch,
"default": None,
"not_null": False,
"collation": "cc" if sch == "my_custom_type()" else None,
"comment": None,
"generated": "",
"identity_options": None,
}
column_info = dialect._get_columns_info(
[row_dict], self.NTL({}, {}), "public"
)
assert ("public", "tblname") in column_info
column_info = column_info[("public", "tblname")]
assert len(column_info) == 1
column_info = column_info[0]
assert isinstance(column_info["type"], self.CustomType)
eq_(column_info["type"].arg1, args[0])
eq_(column_info["type"].arg2, args[1])
if sch == "my_custom_type()":
eq_(column_info["type"].collation, "cc")
else:
eq_(column_info["type"].collation, None)
def test_clslevel(self):
postgresql.PGDialect.ischema_names["my_custom_type"] = self.CustomType
dialect = postgresql.PGDialect()
self._assert_reflected(dialect)
def test_instancelevel(self):
dialect = postgresql.PGDialect()
dialect.ischema_names = dialect.ischema_names.copy()
dialect.ischema_names["my_custom_type"] = self.CustomType
self._assert_reflected(dialect)
def test_no_format_type(self):
"""test #8748"""
dialect = postgresql.PGDialect()
dialect.ischema_names = dialect.ischema_names.copy()
dialect.ischema_names["my_custom_type"] = self.CustomType
with expect_warnings(
r"PostgreSQL format_type\(\) returned NULL for column 'colname'"
):
row_dict = {
"name": "colname",
"table_name": "tblname",
"format_type": None,
"default": None,
"not_null": False,
"collation": None,
"comment": None,
"generated": "",
"identity_options": None,
}
column_info = dialect._get_columns_info(
[row_dict], self.NTL({}, {}), "public"
)
assert ("public", "tblname") in column_info
column_info = column_info[("public", "tblname")]
assert len(column_info) == 1
column_info = column_info[0]
assert isinstance(column_info["type"], NullType)
| CustomTypeReflectionTest |
python | pytorch__pytorch | test/dynamo/mock_modules/mock_module2.py | {
"start": 72,
"end": 295
} | class ____:
def __init__(self, x, y):
self.x = x
self.y = y
def method2(self, x):
return mock_module3.method1([], x)
def method1(x, y):
torch.ones(1, 1)
x.append(y)
return x
| Class1 |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 48183,
"end": 48735
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("labelable_id", "label_ids", "client_mutation_id")
labelable_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="labelableId"
)
label_ids = sgqlc.types.Field(
sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null(ID))),
graphql_name="labelIds",
)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| AddLabelsToLabelableInput |
python | great-expectations__great_expectations | tests/metrics/test_metric.py | {
"start": 2960,
"end": 3553
} | class ____:
@pytest.mark.unit
def test_domain_kwarg_immutability_success(self):
column_values_above = ColumnValuesAbove(
column=COLUMN,
min_value=42,
)
with pytest.raises(TypeError):
column_values_above.column = "updated_column"
@pytest.mark.unit
def test_value_kwarg_immutability_success(self):
column_values_above = ColumnValuesAbove(
column=COLUMN,
min_value=42,
)
with pytest.raises(TypeError):
column_values_above.min_value = 42
| TestMetricImmutability |
python | PyCQA__pylint | pylint/reporters/ureports/nodes.py | {
"start": 3167,
"end": 3894
} | class ____(BaseLayout):
"""A section.
attributes :
* BaseLayout attributes
a title may also be given to the constructor, it'll be added
as a first element
a description may also be given to the constructor, it'll be added
as a first paragraph
"""
def __init__(
self,
title: str | None = None,
description: str | None = None,
children: Iterable[Text | str] = (),
) -> None:
super().__init__(children=children)
if description:
self.insert(0, Paragraph([Text(description)]))
if title:
self.insert(0, Title(children=(title,)))
self.report_id: str = "" # Used in ReportHandlerMixin.make_reports
| Section |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/variables/resource_variable_ops_test.py | {
"start": 3307,
"end": 4012
} | class ____(extension_type.ExtensionType):
v: resource_variable_ops.ResourceVariable
__composite_gradient__ = CompositeVariableGradient()
def _eager_safe_var_handle_op(*args, **kwargs):
# When running in eager mode the `shared_name` should be set to the
# `anonymous_name` to avoid spurious sharing issues. The runtime generates a
# unique name on our behalf when the reserved `anonymous_name` is used as the
# `shared_name`.
if context.executing_eagerly() and "shared_name" not in kwargs:
kwargs["shared_name"] = context.anonymous_name()
return resource_variable_ops.var_handle_op(*args, **kwargs)
@test_util.with_eager_op_as_function
@test_util.with_control_flow_v2
| CompositeVariable |
python | google__pytype | pytype/pytd/optimize.py | {
"start": 2719,
"end": 3071
} | class ____(visitors.Visitor):
"""Remove duplicate or redundant entries in union types.
For example, this transforms
a: Union[int, int]
b: Union[int, Any]
c: Union[int, int, float]
to
a: int
b: Any
c: Union[int, float]
"""
def VisitUnionType(self, union):
return pytd_utils.JoinTypes(union.type_list)
| SimplifyUnions |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/metaclass9.py | {
"start": 790,
"end": 860
} | class ____(metaclass=Meta1, param2="", param1=1, param4=3): ...
| Class1_5 |
python | run-llama__llama_index | llama-index-integrations/indices/llama-index-indices-managed-bge-m3/llama_index/indices/managed/bge_m3/base.py | {
"start": 522,
"end": 8039
} | class ____(BaseIndex[IndexDict]):
"""
Store for BGE-M3 with PLAID indexing.
BGE-M3 is a multilingual embedding model with multi-functionality:
Dense retrieval, Sparse retrieval and Multi-vector retrieval.
Parameters
----------
index_path: directory containing PLAID index files.
model_name: BGE-M3 hugging face model name.
Default: "BAAI/bge-m3".
show_progress: whether to show progress bar when building index.
Default: False. noop for BGE-M3 for now.
doc_maxlen: max document length. Default: 120.
query_maxlen: max query length. Default: 60.
"""
def __init__(
self,
nodes: Optional[Sequence[BaseNode]] = None,
objects: Optional[Sequence[IndexNode]] = None,
index_struct: Optional[IndexDict] = None,
storage_context: Optional[StorageContext] = None,
model_name: str = "BAAI/bge-m3",
index_name: str = "",
show_progress: bool = False,
pooling_method: str = "cls",
normalize_embeddings: bool = True,
use_fp16: bool = False,
batch_size: int = 32,
doc_maxlen: int = 8192,
query_maxlen: int = 8192,
weights_for_different_modes: List[float] = None,
**kwargs: Any,
) -> None:
self.index_path = "storage/bge_m3_index"
self.index_name = index_name
self.batch_size = batch_size
self.doc_maxlen = doc_maxlen
self.query_maxlen = query_maxlen
self.weights_for_different_modes = weights_for_different_modes
self._multi_embed_store = None
self._docs_pos_to_node_id: Dict[int, str] = {}
try:
from FlagEmbedding import BGEM3FlagModel
except ImportError as exc:
raise ImportError(
"Please install FlagEmbedding to use this feature from the repo:",
"https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/BGE_M3",
) from exc
self.model = BGEM3FlagModel(
model_name,
pooling_method=pooling_method,
normalize_embeddings=normalize_embeddings,
use_fp16=use_fp16,
)
super().__init__(
nodes=nodes,
index_struct=index_struct,
index_name=index_name,
storage_context=storage_context,
show_progress=show_progress,
objects=objects,
**kwargs,
)
def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None:
raise NotImplementedError("BGEM3Index does not support insertion yet.")
def _delete_node(self, node_id: str, **delete_kwargs: Any) -> None:
raise NotImplementedError("BGEM3Index does not support deletion yet.")
def as_retriever(self, **kwargs: Any) -> BaseRetriever:
from .retriever import BGEM3Retriever
return BGEM3Retriever(index=self, object_map=self._object_map, **kwargs)
@property
def ref_doc_info(self) -> Dict[str, RefDocInfo]:
raise NotImplementedError("BGEM3Index does not support ref_doc_info.")
def _build_index_from_nodes(
self, nodes: Sequence[BaseNode], **kwargs: Any
) -> IndexDict:
"""
Generate a PLAID index from the BGE-M3 checkpoint via its hugging face
model_name.
"""
index_struct = IndexDict()
docs_list = []
for i, node in enumerate(nodes):
docs_list.append(node.get_content())
self._docs_pos_to_node_id[i] = node.node_id
index_struct.add_node(node, text_id=str(i))
self._multi_embed_store = self.model.encode(
docs_list,
batch_size=self.batch_size,
max_length=self.doc_maxlen,
return_dense=True,
return_sparse=True,
return_colbert_vecs=True,
)
return index_struct
def persist(self, persist_dir: str) -> None:
# Check if the destination directory exists
if os.path.exists(persist_dir):
# Remove the existing destination directory
shutil.rmtree(persist_dir)
self._storage_context.persist(persist_dir=persist_dir)
# Save _multi_embed_store
# Use pickle protocol 4 which supports large objects better
with open(Path(persist_dir) / "multi_embed_store.pkl", "wb") as f:
pickler = pickle.Pickler(f, protocol=pickle.HIGHEST_PROTOCOL)
pickler.dump(self._multi_embed_store)
@classmethod
def load_from_disk(
cls,
persist_dir: str,
model_name: str = "BAAI/bge-m3",
index_name: str = "",
weights_for_different_modes: List[float] = None,
) -> "BGEM3Index":
sc = StorageContext.from_defaults(persist_dir=persist_dir)
index = BGEM3Index(
model_name=model_name,
index_name=index_name,
index_struct=sc.index_store.index_structs()[0],
storage_context=sc,
weights_for_different_modes=weights_for_different_modes,
)
docs_pos_to_node_id = {
int(k): v for k, v in index.index_struct.nodes_dict.items()
}
index._docs_pos_to_node_id = docs_pos_to_node_id
index._multi_embed_store = pickle.load(
open(Path(persist_dir) / "multi_embed_store.pkl", "rb")
)
return index
def query(self, query_str: str, top_k: int = 10) -> List[NodeWithScore]:
"""
Query the BGE-M3 + Plaid store.
Returns: list of NodeWithScore.
"""
query_embed = self.model.encode(
query_str,
batch_size=self.batch_size,
max_length=self.query_maxlen,
return_dense=True,
return_sparse=True,
return_colbert_vecs=True,
)
dense_scores = np.matmul(
query_embed["dense_vecs"], self._multi_embed_store["dense_vecs"].T
)
sparse_scores = np.array(
[
self.model.compute_lexical_matching_score(
query_embed["lexical_weights"], doc_lexical_weights
)
for doc_lexical_weights in self._multi_embed_store["lexical_weights"]
]
)
colbert_scores = np.array(
[
self.model.colbert_score(
query_embed["colbert_vecs"], doc_colbert_vecs
).item()
for doc_colbert_vecs in self._multi_embed_store["colbert_vecs"]
]
)
if self.weights_for_different_modes is None:
weights_for_different_modes = [1.0, 1.0, 1.0]
weight_sum = 3.0
else:
weights_for_different_modes = self.weights_for_different_modes
weight_sum = sum(weights_for_different_modes)
combined_scores = (
dense_scores * weights_for_different_modes[0]
+ sparse_scores * weights_for_different_modes[1]
+ colbert_scores * weights_for_different_modes[2]
) / weight_sum
topk_indices = np.argsort(combined_scores)[::-1][:top_k]
topk_scores = [combined_scores[idx] for idx in topk_indices]
node_doc_ids = [self._docs_pos_to_node_id[idx] for idx in topk_indices]
nodes = self.docstore.get_nodes(node_doc_ids)
nodes_with_score = []
for node, score in zip(nodes, topk_scores):
nodes_with_score.append(NodeWithScore(node=node, score=score))
return nodes_with_score
| BGEM3Index |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed1.py | {
"start": 1358,
"end": 1519
} | class ____(TypedDict, closed=True, extra_items=str):
pass
# This should generate an error because "closed" and
# "extra_items" cannot both be specified.
| BadTD3 |
python | huggingface__transformers | tests/generation/test_utils.py | {
"start": 239269,
"end": 247278
} | class ____(unittest.TestCase):
def setUp(self):
checkpoint = "EleutherAI/pythia-160m-deduped"
self.assistant_model = AutoModelForCausalLM.from_pretrained(checkpoint)
self.assistant_model.generation_config.assistant_confidence_threshold = 0.4
self.model_kwargs = {}
self.input_ids = torch.randint(1, 10, (1, 9))
self.candidate_generator = AssistedCandidateGenerator(
input_ids=self.input_ids,
assistant_model=self.assistant_model,
generation_config=self.assistant_model.generation_config,
model_kwargs=self.model_kwargs,
)
self.candidate_generator.probs = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
self.original_probs = self.candidate_generator.probs
self.original_threshold = self.assistant_model.generation_config.assistant_confidence_threshold
def assert_no_sklearn(self):
with patch("transformers.generation.candidate_generator.is_sklearn_available", lambda: False):
self.candidate_generator.update_candidate_strategy(self.input_ids, None, self.num_matches)
self.assertEqual(self.candidate_generator.matches, self.original_matches)
self.assertEqual(self.candidate_generator.probs, self.original_probs)
self.assertEqual(
self.assistant_model.generation_config.assistant_confidence_threshold, self.original_threshold
)
@parameterized.expand([(is_sklearn_available(),), (False,)])
def test_update_candidate_strategy_no_matches_short(self, sklearn_available):
self.original_matches = []
self.candidate_generator.matches = self.original_matches
self.num_matches = 0
if sklearn_available:
self.candidate_generator.update_candidate_strategy(self.input_ids, None, self.num_matches)
self.assertEqual(self.candidate_generator.matches, [0])
self.assertEqual(self.candidate_generator.probs, [0.9])
self.assertEqual(self.assistant_model.generation_config.assistant_confidence_threshold, 0.4)
else:
self.assert_no_sklearn()
@parameterized.expand([(is_sklearn_available(),), (False,)])
def test_update_candidate_strategy_with_mix_matches_3(self, sklearn_available):
self.original_matches = [1, 0, 1, 0, 1]
self.candidate_generator.matches = self.original_matches
self.num_matches = 3
if sklearn_available:
self.candidate_generator.update_candidate_strategy(self.input_ids, None, self.num_matches)
self.assertEqual(self.candidate_generator.matches, [1, 0, 1, 0, 1, 1, 1, 1, 0])
self.assertEqual(self.candidate_generator.probs, [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
self.assertEqual(self.assistant_model.generation_config.assistant_confidence_threshold, 0.2)
else:
self.assert_no_sklearn()
@parameterized.expand([(is_sklearn_available(),), (False,)])
def test_update_candidate_strategy_with_matches_4(self, sklearn_available):
self.original_matches = [1, 1, 1, 1, 1]
self.candidate_generator.matches = self.original_matches
self.num_matches = 4
if sklearn_available:
self.candidate_generator.update_candidate_strategy(self.input_ids, None, self.num_matches)
self.assertEqual(self.candidate_generator.matches, [1, 1, 1, 1, 1, 1, 1, 1, 1])
self.assertEqual(self.candidate_generator.probs, [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
self.assertEqual(self.assistant_model.generation_config.assistant_confidence_threshold, 0.4)
else:
self.assert_no_sklearn()
@parameterized.expand([(is_sklearn_available(),), (False,)])
def test_update_candidate_strategy_with_matches_3(self, sklearn_available):
self.original_matches = [1, 1, 1, 1, 1]
self.candidate_generator.matches = self.original_matches
self.num_matches = 3
if sklearn_available:
self.candidate_generator.update_candidate_strategy(self.input_ids, None, self.num_matches)
self.assertEqual(self.candidate_generator.matches, [1, 1, 1, 1, 1, 1, 1, 1, 0])
self.assertEqual(self.candidate_generator.probs, [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
self.assertEqual(self.assistant_model.generation_config.assistant_confidence_threshold, 0.2)
else:
self.assert_no_sklearn()
@parameterized.expand([(is_sklearn_available(),), (False,)])
def test_update_candidate_strategy_with_matches_2(self, sklearn_available):
self.original_matches = [1, 1, 1, 1, 1]
self.candidate_generator.matches = self.original_matches
self.num_matches = 2
if sklearn_available:
self.candidate_generator.update_candidate_strategy(self.input_ids, None, self.num_matches)
self.assertEqual(self.candidate_generator.matches, [1, 1, 1, 1, 1, 1, 1, 0])
self.assertEqual(self.candidate_generator.probs, [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2])
self.assertEqual(self.assistant_model.generation_config.assistant_confidence_threshold, 0.3)
else:
self.assert_no_sklearn()
@parameterized.expand([(is_sklearn_available(),), (False,)])
def test_update_candidate_strategy_with_matches_1(self, sklearn_available):
self.original_matches = [1, 1, 1, 1, 1]
self.candidate_generator.matches = self.original_matches
self.num_matches = 1
if sklearn_available:
self.candidate_generator.update_candidate_strategy(self.input_ids, None, self.num_matches)
self.assertEqual(self.candidate_generator.matches, [1, 1, 1, 1, 1, 1, 0])
self.assertEqual(self.candidate_generator.probs, [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3])
self.assertEqual(self.assistant_model.generation_config.assistant_confidence_threshold, 0.4)
else:
self.assert_no_sklearn()
def has_similar_generate_outputs(output_1, output_2, atol=1e-5, rtol=1e-5) -> bool:
"""
Returns a boolean indicating whether a pair of generate outputs are similar. Two `generate` call outputs are
considered similar in the following situations:
1. The sequences are the same
2. The sequences are different, but the scores up to (and including) the first mismatch are nearly identical
Args:
output_1 (`GenerateOutput`): The first `generate` call output.
output_2 (`GenerateOutput`): The second `generate` call output.
atol (`float`, *optional*, defaults to 1e-5): The absolute tolerance for the scores.
rtol (`float`, *optional*, defaults to 1e-5): The relative tolerance for the scores.
Returns:
A boolean indicating whether the two generate outputs are similar.
"""
# scores doesn't include data regarding decoder input tokens
decoder_input_length = output_1.sequences.shape[1] - len(output_1.scores)
output_matches = output_1.sequences == output_2.sequences
has_matching_outputs = output_matches.all()
has_matching_scores = None
if not has_matching_outputs:
for batch_idx in range(output_1.sequences.shape[0]):
batch_matches = output_matches[batch_idx]
if batch_matches.all():
continue
first_mismatch_idx = batch_matches.int().argmin() # gets the index of the first False
first_mismatch_idx -= decoder_input_length
output_1_first_mismatch_scores = output_1.scores[first_mismatch_idx][batch_idx]
output_2_first_mismatch_scores = output_2.scores[first_mismatch_idx][batch_idx]
has_matching_scores = torch.allclose(
output_1_first_mismatch_scores, output_2_first_mismatch_scores, atol=atol, rtol=rtol
)
if not has_matching_scores:
break
return has_matching_outputs or has_matching_scores
| TestAssistedCandidateGeneratorUpdateStrategy |
python | sympy__sympy | sympy/physics/quantum/state.py | {
"start": 14222,
"end": 16818
} | class ____(StateBase):
"""Base class for a general time-dependent quantum state.
This class is used as a base class for any time-dependent state. The main
difference between this class and the time-independent state is that this
class takes a second argument that is the time in addition to the usual
label argument.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
"""
#-------------------------------------------------------------------------
# Initialization
#-------------------------------------------------------------------------
@classmethod
def default_args(self):
return ("psi", "t")
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def label(self):
"""The label of the state."""
return self.args[:-1]
@property
def time(self):
"""The time of the state."""
return self.args[-1]
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _print_time(self, printer, *args):
return printer._print(self.time, *args)
_print_time_repr = _print_time
_print_time_latex = _print_time
def _print_time_pretty(self, printer, *args):
pform = printer._print(self.time, *args)
return pform
def _print_contents(self, printer, *args):
label = self._print_label(printer, *args)
time = self._print_time(printer, *args)
return '%s;%s' % (label, time)
def _print_label_repr(self, printer, *args):
label = self._print_sequence(self.label, ',', printer, *args)
time = self._print_time_repr(printer, *args)
return '%s,%s' % (label, time)
def _print_contents_pretty(self, printer, *args):
label = self._print_label_pretty(printer, *args)
time = self._print_time_pretty(printer, *args)
return printer._print_seq((label, time), delimiter=';')
def _print_contents_latex(self, printer, *args):
label = self._print_sequence(
self.label, self._label_separator, printer, *args)
time = self._print_time_latex(printer, *args)
return '%s;%s' % (label, time)
| TimeDepState |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-seller-partner/unit_tests/integration/test_report_based_streams.py | {
"start": 5808,
"end": 20800
} | class ____:
@staticmethod
def _read(stream_name: str, config_: ConfigBuilder, expecting_exception: bool = False) -> EntrypointOutput:
return read_output(
config_builder=config_,
stream_name=stream_name,
sync_mode=SyncMode.full_refresh,
expecting_exception=expecting_exception,
)
@pytest.mark.parametrize(("stream_name", "data_format"), STREAMS)
@HttpMocker()
def test_given_report_when_read_then_return_records(self, stream_name: str, data_format: str, http_mocker: HttpMocker) -> None:
http_mocker.clear_all_matchers()
mock_auth(http_mocker)
http_mocker.post(_create_report_request(stream_name).build(), _create_report_response(_REPORT_ID))
http_mocker.get(
_check_report_status_request(_REPORT_ID).build(),
_check_report_status_response(stream_name, report_document_id=_REPORT_DOCUMENT_ID),
)
http_mocker.get(
_get_document_download_url_request(_REPORT_DOCUMENT_ID).build(),
_get_document_download_url_response(_DOCUMENT_DOWNLOAD_URL, _REPORT_DOCUMENT_ID),
)
http_mocker.get(
_download_document_request(_DOCUMENT_DOWNLOAD_URL).build(),
_download_document_response(stream_name, data_format=data_format),
)
output = self._read(stream_name, config())
assert len(output.records) == DEFAULT_EXPECTED_NUMBER_OF_RECORDS
@pytest.mark.parametrize(("stream_name", "data_format"), STREAMS)
@HttpMocker()
def test_given_compressed_report_when_read_then_return_records(
self, stream_name: str, data_format: str, http_mocker: HttpMocker
) -> None:
http_mocker.clear_all_matchers()
mock_auth(http_mocker)
http_mocker.post(_create_report_request(stream_name).build(), _create_report_response(_REPORT_ID))
http_mocker.get(
_check_report_status_request(_REPORT_ID).build(),
_check_report_status_response(stream_name, report_document_id=_REPORT_DOCUMENT_ID),
)
http_mocker.get(
_get_document_download_url_request(_REPORT_DOCUMENT_ID).build(),
_get_document_download_url_response(_DOCUMENT_DOWNLOAD_URL, _REPORT_DOCUMENT_ID, compressed=True),
)
# a workaround to pass compressed document to the mocked response
document_request = _download_document_request(_DOCUMENT_DOWNLOAD_URL).build()
document_response = _download_document_response(stream_name, data_format=data_format, compressed=True)
document_request_matcher = HttpRequestMatcher(document_request, minimum_number_of_expected_match=1)
# http_mocker._matchers.append(document_request_matcher)
http_mocker._mocker.get(
requests_mock.ANY,
additional_matcher=http_mocker._matches_wrapper(document_request_matcher),
response_list=[{"content": document_response.body, "status_code": document_response.status_code}],
)
output = self._read(stream_name, config())
assert len(output.records) == DEFAULT_EXPECTED_NUMBER_OF_RECORDS
@pytest.mark.parametrize(("stream_name", "data_format"), STREAMS)
@HttpMocker()
def test_given_http_status_500_then_200_when_create_report_then_retry_and_return_records(
self, stream_name: str, data_format: str, http_mocker: HttpMocker
) -> None:
http_mocker.clear_all_matchers()
mock_auth(http_mocker)
http_mocker.post(
_create_report_request(stream_name).build(),
[response_with_status(status_code=HTTPStatus.INTERNAL_SERVER_ERROR), _create_report_response(_REPORT_ID)],
)
http_mocker.get(
_check_report_status_request(_REPORT_ID).build(),
_check_report_status_response(stream_name, report_document_id=_REPORT_DOCUMENT_ID),
)
http_mocker.get(
_get_document_download_url_request(_REPORT_DOCUMENT_ID).build(),
_get_document_download_url_response(_DOCUMENT_DOWNLOAD_URL, _REPORT_DOCUMENT_ID),
)
http_mocker.get(
_download_document_request(_DOCUMENT_DOWNLOAD_URL).build(),
_download_document_response(stream_name, data_format=data_format),
)
output = self._read(stream_name, config())
assert len(output.records) == DEFAULT_EXPECTED_NUMBER_OF_RECORDS
@pytest.mark.parametrize(("stream_name", "data_format"), STREAMS)
@HttpMocker()
def test_given_http_status_500_then_200_when_retrieve_report_then_retry_and_return_records(
self, stream_name: str, data_format: str, http_mocker: HttpMocker
) -> None:
http_mocker.clear_all_matchers()
mock_auth(http_mocker)
http_mocker.post(_create_report_request(stream_name).build(), _create_report_response(_REPORT_ID))
http_mocker.get(
_check_report_status_request(_REPORT_ID).build(),
[
response_with_status(status_code=HTTPStatus.INTERNAL_SERVER_ERROR),
_check_report_status_response(stream_name, report_document_id=_REPORT_DOCUMENT_ID),
],
)
http_mocker.get(
_get_document_download_url_request(_REPORT_DOCUMENT_ID).build(),
_get_document_download_url_response(_DOCUMENT_DOWNLOAD_URL, _REPORT_DOCUMENT_ID),
)
http_mocker.get(
_download_document_request(_DOCUMENT_DOWNLOAD_URL).build(),
_download_document_response(stream_name, data_format=data_format),
)
output = self._read(stream_name, config())
assert len(output.records) == DEFAULT_EXPECTED_NUMBER_OF_RECORDS
@pytest.mark.parametrize(("stream_name", "data_format"), STREAMS)
@HttpMocker()
def test_given_http_status_500_then_200_when_get_document_url_then_retry_and_return_records(
self, stream_name: str, data_format: str, http_mocker: HttpMocker
) -> None:
http_mocker.clear_all_matchers()
mock_auth(http_mocker)
http_mocker.post(_create_report_request(stream_name).build(), _create_report_response(_REPORT_ID))
http_mocker.get(
_check_report_status_request(_REPORT_ID).build(),
_check_report_status_response(stream_name, report_document_id=_REPORT_DOCUMENT_ID),
)
http_mocker.get(
_get_document_download_url_request(_REPORT_DOCUMENT_ID).build(),
[
response_with_status(status_code=HTTPStatus.INTERNAL_SERVER_ERROR),
_get_document_download_url_response(_DOCUMENT_DOWNLOAD_URL, _REPORT_DOCUMENT_ID),
],
)
http_mocker.get(
_download_document_request(_DOCUMENT_DOWNLOAD_URL).build(),
_download_document_response(stream_name, data_format=data_format),
)
output = self._read(stream_name, config())
assert len(output.records) == DEFAULT_EXPECTED_NUMBER_OF_RECORDS
@pytest.mark.parametrize(("stream_name", "data_format"), STREAMS)
@HttpMocker()
def test_given_http_status_500_then_200_when_download_document_then_retry_and_return_records(
self, stream_name: str, data_format: str, http_mocker: HttpMocker
) -> None:
http_mocker.clear_all_matchers()
mock_auth(http_mocker)
http_mocker.post(_create_report_request(stream_name).build(), _create_report_response(_REPORT_ID))
http_mocker.get(
_check_report_status_request(_REPORT_ID).build(),
_check_report_status_response(stream_name, report_document_id=_REPORT_DOCUMENT_ID),
)
http_mocker.get(
_get_document_download_url_request(_REPORT_DOCUMENT_ID).build(),
_get_document_download_url_response(_DOCUMENT_DOWNLOAD_URL, _REPORT_DOCUMENT_ID),
)
http_mocker.get(
_download_document_request(_DOCUMENT_DOWNLOAD_URL).build(),
[
response_with_status(status_code=HTTPStatus.INTERNAL_SERVER_ERROR),
_download_document_response(stream_name, data_format=data_format),
],
)
output = self._read(stream_name, config())
assert len(output.records) == DEFAULT_EXPECTED_NUMBER_OF_RECORDS
@pytest.mark.parametrize(("stream_name", "data_format"), STREAMS)
@HttpMocker()
def test_given_report_access_forbidden_when_read_then_no_records_and_error_logged(
self, stream_name: str, data_format: str, http_mocker: HttpMocker
) -> None:
http_mocker.clear_all_matchers()
mock_auth(http_mocker)
http_mocker.post(_create_report_request(stream_name).build(), response_with_status(status_code=HTTPStatus.FORBIDDEN))
output = self._read(stream_name, config())
message_on_access_forbidden = "Forbidden. You don't have permission to access this resource."
assert output.errors[0].trace.error.failure_type == FailureType.config_error
assert message_on_access_forbidden in output.errors[0].trace.error.message
@pytest.mark.parametrize(("stream_name", "data_format"), STREAMS)
@HttpMocker()
def test_given_report_status_cancelled_when_read_then_stream_completed_successfully_and_warn_about_cancellation(
self, stream_name: str, data_format: str, http_mocker: HttpMocker
) -> None:
http_mocker.clear_all_matchers()
mock_auth(http_mocker)
http_mocker.post(_create_report_request(stream_name).build(), _create_report_response(_REPORT_ID))
http_mocker.get(
_check_report_status_request(_REPORT_ID).build(),
_check_report_status_response(stream_name, processing_status=ReportProcessingStatus.CANCELLED),
)
message_on_report_cancelled = f"Exception while syncing stream {stream_name}"
output = self._read(stream_name, config())
assert_message_in_log_output(message=message_on_report_cancelled, entrypoint_output=output, log_level=Level.ERROR)
assert len(output.records) == 0
@pytest.mark.parametrize(("stream_name", "data_format"), STREAMS)
@HttpMocker()
def test_given_report_status_fatal_when_read_then_exception_raised(
self, stream_name: str, data_format: str, http_mocker: HttpMocker
) -> None:
http_mocker.clear_all_matchers()
mock_auth(http_mocker)
http_mocker.post(_create_report_request(stream_name).build(), _create_report_response(_REPORT_ID))
http_mocker.get(
_check_report_status_request(_REPORT_ID).build(),
_check_report_status_response(
stream_name, processing_status=ReportProcessingStatus.FATAL, report_document_id=_REPORT_DOCUMENT_ID
),
)
output = self._read(stream_name, config(), expecting_exception=True)
assert output.errors[-1].trace.error.failure_type == FailureType.config_error
config_end_date = CONFIG_END_DATE
assert (
f"At least one job could not be completed for slice {{\\'start_time\\': \\'{CONFIG_START_DATE}\\', \\'end_time\\': \\'{config_end_date}\\'}}"
) in output.errors[-1].trace.error.message
@pytest.mark.parametrize(
("stream_name", "date_field", "expected_date_value"),
(
("GET_SELLER_FEEDBACK_DATA", "date", "2023-10-20"),
("GET_LEDGER_DETAIL_VIEW_DATA", "Date", "2023-11-21"),
("GET_LEDGER_SUMMARY_VIEW_DATA", "Date", "2023-12-22"),
),
)
@HttpMocker()
def test_given_report_with_incorrect_date_format_when_read_then_formatted(
self, stream_name: str, date_field: str, expected_date_value: str, http_mocker: HttpMocker
) -> None:
http_mocker.clear_all_matchers()
mock_auth(http_mocker)
http_mocker.post(_create_report_request(stream_name).build(), _create_report_response(_REPORT_ID))
http_mocker.get(
_check_report_status_request(_REPORT_ID).build(),
_check_report_status_response(stream_name, report_document_id=_REPORT_DOCUMENT_ID),
)
http_mocker.get(
_get_document_download_url_request(_REPORT_DOCUMENT_ID).build(),
_get_document_download_url_response(_DOCUMENT_DOWNLOAD_URL, _REPORT_DOCUMENT_ID),
)
http_mocker.get(_download_document_request(_DOCUMENT_DOWNLOAD_URL).build(), _download_document_response(stream_name))
output = self._read(stream_name, config())
assert len(output.records) == DEFAULT_EXPECTED_NUMBER_OF_RECORDS
assert output.records[0].record.data.get(date_field) == expected_date_value
@pytest.mark.parametrize(("stream_name", "data_format"), STREAMS)
@HttpMocker()
def test_given_http_error_500_on_create_report_when_read_then_no_records_and_error_logged(
self, stream_name: str, data_format: str, http_mocker: HttpMocker
) -> None:
http_mocker.clear_all_matchers()
mock_auth(http_mocker)
http_mocker.post(
_create_report_request(stream_name).build(),
response_with_status(status_code=HTTPStatus.INTERNAL_SERVER_ERROR),
)
message_on_backoff_exception = "Giving up _send(...) after 6 tries"
output = self._read(stream_name, config())
assert list(filter(lambda error: error.trace.error.failure_type == FailureType.config_error, output.errors))
assert_message_in_log_output(message=message_on_backoff_exception, entrypoint_output=output, log_level=Level.ERROR)
@pytest.mark.parametrize(("stream_name", "data_format"), STREAMS)
@HttpMocker()
def test_given_http_error_not_support_account_id_of_type_vendor_when_read_then_no_records_and_error_logged(
self, stream_name: str, data_format: str, http_mocker: HttpMocker
):
http_mocker.clear_all_matchers()
mock_auth(http_mocker)
response_body = {
"errors": [
{
"code": "InvalidInput",
"message": "Report type 301 does not support account ID of type class com.amazon.partner.account.id.VendorGroupId.",
"details": "",
}
]
}
http_mocker.post(
_create_report_request(stream_name).build(),
response_with_status(status_code=HTTPStatus.BAD_REQUEST, body=response_body),
)
warning_message = (
"'POST' request to 'https://sellingpartnerapi-na.amazon.com/reports/2021-06-30/reports' failed with status code '400' and"
" error message: 'Report type 301 does not support account ID of type class com.amazon.partner.account.id.VendorGroupId.'."
)
output = self._read(stream_name, config())
assert list(filter(lambda error: error.trace.error.failure_type == FailureType.config_error, output.errors))
assert_message_in_log_output(message=warning_message, entrypoint_output=output, log_level=Level.ERROR)
@freezegun.freeze_time(NOW.isoformat())
| TestFullRefresh |
python | getsentry__sentry | src/sentry/monitors/endpoints/project_processing_errors_details.py | {
"start": 920,
"end": 2018
} | class ____(ProjectEndpoint):
permission_classes: tuple[type[BasePermission], ...] = (ProjectAlertRulePermission,)
publish_status = {
"DELETE": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.CRONS
@extend_schema(
operation_id="Delete a processing error for a Monitor",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
GlobalParams.PROJECT_ID_OR_SLUG,
MonitorParams.PROCESSING_ERROR_ID,
],
responses={
204: RESPONSE_NO_CONTENT,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def delete(self, request: Request, project: Project, uuid: str) -> Response:
try:
parsed_uuid = UUID(uuid)
except ValueError:
raise ValidationError("Invalid UUID")
try:
delete_error(project, parsed_uuid)
except InvalidProjectError:
raise ValidationError("Invalid uuid for project")
return self.respond(status=204)
| ProjectProcessingErrorsDetailsEndpoint |
python | apache__airflow | providers/http/src/airflow/providers/http/triggers/http.py | {
"start": 2102,
"end": 6225
} | class ____(BaseTrigger):
"""
HttpTrigger run on the trigger worker.
:param http_conn_id: http connection id that has the base
API url i.e https://www.google.com/ and optional authentication credentials. Default
headers can also be specified in the Extra field in json format.
:param auth_type: The auth type for the service
:param method: the API method to be called
:param endpoint: Endpoint to be called, i.e. ``resource/v1/query?``.
:param headers: Additional headers to be passed through as a dict.
:param data: Payload to be uploaded or request parameters.
:param extra_options: Additional kwargs to pass when creating a request.
For example, ``run(json=obj)`` is passed as
``aiohttp.ClientSession().get(json=obj)``.
2XX or 3XX status codes
"""
def __init__(
self,
http_conn_id: str = "http_default",
auth_type: str | None = None,
method: str = "POST",
endpoint: str | None = None,
headers: dict[str, str] | None = None,
data: dict[str, Any] | str | None = None,
extra_options: dict[str, Any] | None = None,
):
super().__init__()
self.http_conn_id = http_conn_id
self.method = method
self.auth_type = deserialize_auth_type(auth_type)
self.endpoint = endpoint
self.headers = headers
self.data = data
self.extra_options = extra_options
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize HttpTrigger arguments and classpath."""
return (
"airflow.providers.http.triggers.http.HttpTrigger",
{
"http_conn_id": self.http_conn_id,
"method": self.method,
"auth_type": serialize_auth_type(self.auth_type),
"endpoint": self.endpoint,
"headers": self.headers,
"data": self.data,
"extra_options": self.extra_options,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Make a series of asynchronous http calls via a http hook."""
hook = self._get_async_hook()
try:
response = await self._get_response(hook)
yield TriggerEvent(
{
"status": "success",
"response": base64.standard_b64encode(pickle.dumps(response)).decode("ascii"),
}
)
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e)})
def _get_async_hook(self) -> HttpAsyncHook:
return HttpAsyncHook(
method=self.method,
http_conn_id=self.http_conn_id,
auth_type=self.auth_type,
)
async def _get_response(self, hook):
async with aiohttp.ClientSession() as session:
client_response = await hook.run(
session=session,
endpoint=self.endpoint,
data=self.data,
headers=self.headers,
extra_options=self.extra_options,
)
response = await self._convert_response(client_response)
return response
@staticmethod
async def _convert_response(client_response: ClientResponse) -> requests.Response:
"""Convert aiohttp.client_reqrep.ClientResponse to requests.Response."""
response = requests.Response()
response._content = await client_response.read()
response.status_code = client_response.status
response.headers = CaseInsensitiveDict(client_response.headers)
response.url = str(client_response.url)
response.history = [await HttpTrigger._convert_response(h) for h in client_response.history]
response.encoding = client_response.get_encoding()
response.reason = str(client_response.reason)
cookies = RequestsCookieJar()
for k, v in client_response.cookies.items():
cookies.set(k, str(v)) # Convert Morsel to string
response.cookies = cookies
return response
| HttpTrigger |
python | getsentry__sentry | src/sentry/sentry_apps/api/serializers/sentry_app.py | {
"start": 1383,
"end": 2118
} | class ____(TypedDict):
allowedOrigins: list[str]
avatars: list[SentryAppAvatarSerializerResponse]
events: set[str]
featureData: list[str]
isAlertable: bool
metadata: str
name: str
schema: str
scopes: list[str]
slug: str
status: str
uuid: str
verifyInstall: bool
# Optional fields
author: NotRequired[str | None]
overview: NotRequired[str | None]
popularity: NotRequired[int | None]
redirectUrl: NotRequired[str | None]
webhookUrl: NotRequired[str | None]
clientSecret: NotRequired[str | None]
datePublished: NotRequired[datetime]
clientId: NotRequired[str]
owner: NotRequired[OwnerResponseField]
@register(SentryApp)
| SentryAppSerializerResponse |
python | pytorch__pytorch | test/quantization/core/test_quantized_op.py | {
"start": 6877,
"end": 156033
} | class ____(TestCase):
"""Helper function to test quantized activation functions."""
def _test_activation_function(self, X, fn_name, test_configs):
r"""
When writing a unit test for the activation function,
instead of specifying the test routines only applicable to the activation function itself,
you utilize the _test_activation_function that provides general testing.
To utilize the helper function, a test config must be provided.
A test config is a list that contains metadata about the quantized activation
functions that will be tested and how the tests need to be set up; it allows simpler and
more concise unit tests to be written by specifying the configurations needed
and calling the provided helper function _test_activation_function.
Inside the list, each config (as a dictionary) represents a suite of tests that assert the
correctness of various quantization functions.
You can check out the test_qrelu, test_qrelu6, test_qsigmoid, and test_qhardsigmoid for
how their test configs are specified.
Here's a list of the fields that can be included in a test config:
quantized_fn: a list of the quantized functions to be tested
reference_fn: the original reference function to be called on the
the dequantized X
extra_kwargs: the additional keyword arguments
for each test entry in ops_under_test, it must have at least the fields
for quantized_fn and reference_fn.
output_range: the output range the operator will map to. By default, if it is
no specified, the range will not be controlled and depend on Xmin and Xmax.
change_zero_point: a boolean flag indicating if the zero point parameter should
be determined based on torch_type during quantization (see sigmoid/hardsigmoid for
examples). By default, if it is not specified, change_zero_point is assumed to be
False and zero point will just take on the default value from X.
`output_is_observed`: if specified and is True, we'll append extra
output_scale/output_zero_point keyword argument when calling quantized op
"""
# Retrieves the default parameters from X.
X, (scale, zero_point, torch_type) = X
if not isinstance(X, torch.Tensor):
X = torch.from_numpy(X)
if (X.device.type == 'cuda') and (torch.backends.quantized.engine == 'qnnpack'):
return
# Quantizes the reference to account for max error.
# q_min and q_max only depend on the initial torch_type.
q_min, q_max = torch.iinfo(torch_type).min, torch.iinfo(torch_type).max
for op_group in test_configs:
ref_op = op_group['reference_fn']
for q_op in op_group['quantized_fn']:
for memory_format in (torch.channels_last, torch.contiguous_format):
if memory_format == torch.channels_last and len(X.shape) != 4:
continue
X = X.to(memory_format=memory_format)
# Retrieves the inplace keyword arguments
# some functions require inplace=True to test in-place.
# copy.copy is needed because these are modified in place
extra_kwargs = \
copy.copy(op_group.get('extra_kwargs', {}))
output_is_observed = \
copy.copy(op_group.get('output_is_observed', False))
# Quantizes and dequantizes to account for max error.
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
dqX = qX.dequantize()
dqY_hat = ref_op(dqX.clone(), **extra_kwargs)
# Adjusts output_scale if needed.
# The output_scale determines the quantization scale for functions that
# have a constrained output range. e.x. sigmoid ranges from 0 to 1.
output_scale = scale
if 'output_range' in op_group:
(f_min, f_max) = op_group['output_range']
output_scale = (f_max - f_min) / (q_max - q_min + 1.0)
# Adjusts output_zero_point if needed (see explanation for the
# change_zero_point parameter above).
# output_zero_point determines the additional offset that will be
# added to a scaled value during quantization.
if op_group.get('change_zero_point', False):
output_zero_point = 0 if torch_type == torch.qint32 else q_min
else:
output_zero_point = zero_point
# Quantizes the dequantized version of Y_hat.
qY_hat = torch.quantize_per_tensor(dqY_hat, scale=output_scale,
zero_point=output_zero_point,
dtype=torch_type)
if output_is_observed:
extra_kwargs.update({'output_scale': output_scale, 'output_zero_point': output_zero_point})
# Finds qY using in-place or non-in-place quantized operators.
qY = q_op(qX, **extra_kwargs)
self.assertEqual(qY, qY_hat, msg=f'{fn_name} - {q_op} failed: ({qY} vs. {qY_hat})')
"""Tests the correctness of the quantized::relu op."""
@override_qengines
def test_qrelu(self):
relu_test_configs = [
{
'quantized_fn': [
torch.relu,
torch.relu_,
torch.nn.functional.relu,
torch.nn.functional.relu,
],
'reference_fn': torch.nn.functional.relu
},
{
'quantized_fn': [
torch.nn.functional.relu,
torch.nn.functional.relu,
],
'reference_fn': torch.nn.functional.relu,
'extra_kwargs': {
'inplace': True
}
}
]
devices = ["cpu", "cuda"] if TEST_CUDA else ["cpu"]
for device in devices:
shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4))
dtypes = (torch.quint8, torch.qint8)
scales = (0.05, 0.1)
zero_points = (0, 5)
test_cases = itertools.product(shapes, dtypes, scales, zero_points)
for shape, dtype, scale, zero_point in test_cases:
X = torch.randn(*shape, device=device)
X = (X, (scale, zero_point, dtype))
self._test_activation_function(X, 'relu', relu_test_configs)
"""Tests the correctness of the quantized::relu6 op."""
def test_qrelu6(self):
relu6_test_configs = [
{
'quantized_fn': [
torch.ops.quantized.relu6,
torch.ao.nn.quantized.ReLU6(inplace=False),
torch.ao.nn.quantized.ReLU6(inplace=True)
],
'reference_fn': torch.nn.functional.relu6
}
]
shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4))
dtypes = (torch.quint8, torch.qint8)
scales = (0.05, 0.1)
zero_points = (0, 5)
test_cases = itertools.product(shapes, dtypes, scales, zero_points)
for shape, dtype, scale, zero_point in test_cases:
X = torch.randn(*shape) * 10
X = (X, (scale, zero_point, dtype))
self._test_activation_function(X, 'relu6', relu6_test_configs)
"""Tests the correctness of the quantized::sigmoid op."""
    @override_qengines
    @given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
                       qparams=hu.qparams()))
    def test_sigmoid_non_observed(self, X):
        """Tests the correctness of the quantized::sigmoid op when the output
        qparams are derived by the op itself (not observed by the caller)."""
        sigmoid_test_configs = [
            {
                'quantized_fn': [
                    torch.sigmoid
                ],
                'reference_fn': torch.sigmoid,
                # sigmoid maps into [0, 1], so the helper recomputes the
                # output scale/zero_point rather than reusing the input's.
                'output_range': (0.0, 1.0),
                'change_zero_point': True
            }
        ]
        self._test_activation_function(X, 'sigmoid', sigmoid_test_configs)
"""Tests the correctness of the quantized::sigmoid op."""
    # TODO: enable after observed output is supported in qnnpack
    # @override_qengines
    @skipIfNoFBGEMM
    @given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
                       qparams=hu.qparams()))
    def test_sigmoid(self, X):
        """Tests the correctness of the quantized::sigmoid op with
        caller-supplied (observed) output scale/zero_point."""
        sigmoid_test_configs = [
            {
                'quantized_fn': [
                    torch.ops.quantized.sigmoid
                ],
                'reference_fn': torch.sigmoid,
                'output_range': (0.0, 1.0),
                'change_zero_point': True,
                # pass explicit output_scale/output_zero_point to the op
                'output_is_observed': True,
            }
        ]
        self._test_activation_function(X, 'sigmoid', sigmoid_test_configs)
    @skipIfNoFBGEMM
    def test_sigmoid_dequantize_rounding_error(self):
        """Regression test for a dequantize rounding error in
        quantized::sigmoid (issue #107030) using a constant input whose
        scale places values on a bin boundary."""
        # issue #107030
        sigmoid_test_configs = [
            {
                'quantized_fn': [
                    torch.ops.quantized.sigmoid
                ],
                'reference_fn': torch.sigmoid,
                'output_range': (0.0, 1.0),
                'change_zero_point': True,
                'output_is_observed': True,
            }
        ]
        X = (np.full(64, 514., dtype=np.float32), (1028.02, 255, torch.quint8))
        self._test_activation_function(X, 'sigmoid', sigmoid_test_configs)
"""Tests the correctness of the quantized::hardsigmoid op."""
    @override_qengines
    def test_qhardsigmoid(self):
        """Tests the correctness of the quantized::hardsigmoid op, both
        out-of-place and in-place, over a sweep of shapes and dtypes."""
        hardsigmoid_test_configs = [
            {
                'quantized_fn': [
                    torch.ao.nn.quantized.functional.hardsigmoid,
                ],
                'reference_fn': torch.nn.functional.hardsigmoid,
                # hardsigmoid maps into [0, 1]; output qparams are recomputed
                'output_range': (0.0, 1.0),
                'change_zero_point': True,
            },
            {
                'quantized_fn': [
                    torch.ao.nn.quantized.functional.hardsigmoid,
                ],
                'reference_fn': torch.nn.functional.hardsigmoid,
                'output_range': (0.0, 1.0),
                'change_zero_point': True,
                'extra_kwargs': {
                    'inplace': True,
                },
            },
        ]
        shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4))
        dtypes = (torch.quint8, torch.qint8)
        test_cases = itertools.product(shapes, dtypes)
        for shape, dtype in test_cases:
            X = (np.random.rand(*shape).astype(np.float32), (1.0, 0, dtype))
            self._test_activation_function(X, 'hardsigmoid', hardsigmoid_test_configs)
    @override_qengines
    @given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
                       qparams=hu.qparams()))
    def test_leaky_relu_observed_output(self, X):
        """Tests quantized::leaky_relu when the caller supplies observed
        output scale/zero_point."""
        leaky_relu_test_configs = [
            {
                'quantized_fn': [
                    torch.ops.quantized.leaky_relu
                ],
                'reference_fn': torch.nn.functional.leaky_relu,
                'extra_kwargs': {
                    'negative_slope': 0.1,
                    'inplace': False,
                },
                'output_is_observed': True,
            }
        ]
        self._test_activation_function(X, 'leaky_relu', leaky_relu_test_configs)
"""Tests the correctness of the quantized::relu op."""
def test_leaky_relu(self):
shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4))
dtypes = (torch.quint8, torch.qint8)
memory_formats = (torch.channels_last, torch.contiguous_format)
test_cases = itertools.product(shapes, dtypes, memory_formats)
for shape, dtype, memory_format in test_cases:
if memory_format == torch.channels_last and len(shape) != 4:
continue
X, scale, zero_point, torch_type, alpha = \
torch.randn(*shape), 0.1, 0, dtype, 0.01
X = X.to(memory_format=memory_format)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
dqX = qX.dequantize()
# torch.nn.functional
op = torch.nn.functional.leaky_relu
dqY = op(dqX, negative_slope=alpha)
qY = torch.quantize_per_tensor(dqY, scale=scale, zero_point=zero_point,
dtype=torch_type)
qY_hat = op(qX, negative_slope=alpha)
self.assertEqual(qY.dequantize(), qY_hat.dequantize(),
msg=f"F.leaky_relu failed ({qY} vs {qY_hat})")
"""Tests the correctness of the quantized::elu op."""
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams()),
alpha=st.floats(0.01, 10.0, allow_nan=False, allow_infinity=False))
def test_qelu(self, X, alpha):
X, (scale, zero_point, torch_type) = X
output_scale = 0.5
output_zero_point = 1
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
# calculate ELU(dqX) and quantize
dqX = qX.dequantize()
dqY_hat = dqX.clone()
dqY_hat = torch.nn.functional.elu(dqX, alpha)
qY_hat = torch.quantize_per_tensor(dqY_hat, scale=output_scale, zero_point=output_zero_point,
dtype=torch_type)
qY = torch.ao.nn.quantized.functional.elu(qX, output_scale, output_zero_point, alpha=alpha)
self.assertEqual(qY, qY_hat,
msg=f"F.elu failed ({qY} vs {qY_hat})")
"""Tests the correctness of the quantized::celu op."""
    @given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
                       elements=hu.floats(-1e2, 1e2, allow_nan=False, allow_infinity=False),
                       qparams=hu.qparams(scale_max=9.999999747378752e-06)),
           alpha=st.floats(0.01, 100.0, allow_nan=False, allow_infinity=False))
    def test_qcelu(self, X, alpha):
        """Tests the correctness of the quantized::celu op against a
        dequantize -> F.celu -> requantize reference."""
        X, (scale, zero_point, torch_type) = X
        output_scale = 0.5
        output_zero_point = 1
        X = torch.from_numpy(X)
        qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
                                       dtype=torch_type)
        # calculate CELU(dqX) and quantize
        dqX = qX.dequantize()
        dqY_hat = torch.nn.functional.celu(dqX, alpha)
        qY_hat = torch.quantize_per_tensor(dqY_hat, scale=output_scale, zero_point=output_zero_point,
                                           dtype=torch_type)
        # test regular
        qY = torch.ops.quantized.celu(qX, output_scale, output_zero_point, alpha=alpha)
        self.assertEqual(qY, qY_hat,
                         msg=f"F.celu failed ({qY} vs {qY_hat})")
"""Tests the correctness of the quantized::gelu op."""
    def test_qgelu(self):
        """Tests the correctness of the quantized::gelu op across shapes,
        dtypes, memory formats, approximation modes and devices."""
        shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4))
        dtypes = (torch.quint8, torch.qint8)
        memory_formats = (torch.channels_last, torch.contiguous_format)
        approximation = ['none', 'tanh']
        test_cases = itertools.product(shapes, dtypes, memory_formats, approximation)
        devices = ["cpu", "cuda"] if TEST_CUDA else ["cpu"]
        for shape, dtype, memory_format, approximate in test_cases:
            # channels_last is only defined for 4d tensors
            if memory_format == torch.channels_last and len(shape) != 4:
                continue
            X, scale, zero_point, torch_type = \
                torch.randn(*shape), 0.1, 0, dtype
            X = X.to(memory_format=memory_format)
            for device in devices:
                X = X.to(device=device)
                qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
                                               dtype=torch_type)
                dqX = qX.dequantize()
                op = torch.nn.functional.gelu
                dqY = op(dqX, approximate=approximate)
                qY = torch.quantize_per_tensor(dqY, scale=scale, zero_point=zero_point,
                                               dtype=torch_type)
                # NOTE(review): `approximate` is not forwarded to the quantized
                # call, so for approximate='tanh' the reference and the op use
                # different formulas -- presumably the difference falls within
                # quantization error; confirm this is intentional.
                qY_hat = op(qX)
                self.assertEqual(qY.dequantize(), qY_hat.dequantize(),
                                 msg=f"F.gelu failed ({qY} vs {qY_hat})")
"""Tests the correctness of the quantized::prelu op."""
def test_qprelu(self):
shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4))
num_params = (0, 1) # 0: num_parameter = num_channels
dtypes = (torch.quint8, torch.qint8)
memory_formats = (torch.channels_last, torch.contiguous_format)
test_cases = itertools.product(shapes, num_params, dtypes, memory_formats)
for shape, num_param, dtype, memory_format in test_cases:
if memory_format == torch.channels_last and len(shape) != 4:
continue
X, scale, zero_point, torch_type = \
torch.randn(*shape), 0.1, 0, dtype
X = X.to(memory_format=memory_format)
num_parameter = 1 if num_param == 1 or len(shape) == 1 else shape[1]
W = torch.randn(num_parameter)
W, w_scale, w_zero_point = \
torch.randn(num_parameter), 0.2, 0
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
dqX = qX.dequantize()
qW = torch.quantize_per_tensor(W, scale=w_scale, zero_point=w_zero_point,
dtype=torch_type)
dqW = qW.dequantize()
op = torch.nn.functional.prelu
qop = torch.ops.quantized.prelu
dqY = op(dqX, dqW)
qY = torch.quantize_per_tensor(dqY, scale=scale, zero_point=zero_point,
dtype=torch_type)
qY_hat = qop(qX, qW, scale, zero_point)
self.assertEqual(qY.dequantize(), qY_hat.dequantize(),
msg=f"F.prelu failed ({qY} vs {qY_hat})")
"""Tests the correctness of the quantized::qlayer_norm op."""
    @skipIfNoFBGEMM
    def test_qlayer_norm(self):
        """Tests the correctness of the quantized::qlayer_norm op against
        F.layer_norm on the dequantized input, tolerating a small fraction
        of off-by-one (one output bin) differences."""
        # hypothesis is flaky for this test, create test cases manually
        side_lens = (1, 8, 11)
        torch_types = (torch.qint8, torch.quint8)
        y_scales = (0.1, 4.23)
        y_zero_points = (0, 1)
        channels_last_list = (True, False)
        affine_list = (True, False)
        combined = [side_lens, torch_types, y_scales, y_zero_points,
                    channels_last_list, affine_list]
        test_cases = itertools.product(*combined)
        with override_quantized_engine("fbgemm"):
            for test_case in test_cases:
                side_len, torch_type, Y_scale, Y_zero_point, channels_last, \
                    affine = test_case
                shapes = [side_len] * 4
                # In the FP kernel, mean and variance are calculated in floating point.
                # In the quantized kernel, they are calculated in integer arithmetic.
                # Because of this, the numerics do not always match exactly which is
                # expected and acceptable. We do two things to allow this failure
                # in this test:
                # 1. do not use Hypothesis to generate the input tensor.  Hypothesis
                #    favors homogeneous inputs in its search strategies which isn't
                #    representative of the inputs we care about, and tends to maximize
                #    this particular numerics difference.
                # 2. allow a small % of off by Y_scale errors.  Even when the
                #    variance of the input is high, there can be off by one errors
                #    in the result if the input value happens to fall exactly on
                #    the bin boundary of the output scale.
                #
                # If we want the numerics to match we could switch to calculating
                # mean+var in floating point in the future, at the cost of speed.
                X, X_scale, X_zero_point = \
                    _get_random_tensor_and_q_params(shapes, 1.0, torch_type)
                qX = torch.quantize_per_tensor(X, scale=X_scale,
                                               zero_point=X_zero_point,
                                               dtype=torch_type)
                if channels_last:
                    qX = qX.contiguous(memory_format=torch.channels_last)
                dqX = qX.dequantize()
                # Enforce non-homogeneous inputs
                enough_unique_vals_in_each_layer = sum(
                    1 if (
                        dqX[i].shape[0] < 5 or
                        float(torch.unique(dqX[i]).shape[0]) / dqX[i].shape[0] > 0.01
                    ) else 0
                    for i in range(dqX.shape[0])
                ) == dqX.shape[0]
                assume(enough_unique_vals_in_each_layer)
                # Initialize the weights non-randomly for reproducibility, to avoid
                # flaky tests
                if affine:
                    weight = torch.ones(*qX.size()[1:], dtype=torch.float) * 0.5
                    bias = torch.ones(*qX.size()[1:], dtype=torch.float) * 1
                else:
                    weight = None
                    bias = None
                epsilon = 1e-5
                qY = torch.ops.quantized.layer_norm(
                    qX, qX.size()[1:], weight=weight, bias=bias, eps=epsilon,
                    output_scale=Y_scale, output_zero_point=Y_zero_point)
                Y_hat = F.layer_norm(
                    dqX, dqX.size()[1:], weight=weight, bias=bias, eps=epsilon)
                qY_hat = torch.quantize_per_tensor(
                    Y_hat, scale=Y_scale, zero_point=Y_zero_point, dtype=torch_type)
                # Due to the numerics difference mentioned above between calculating
                # the variance in float vs int, the results can still be slightly
                # different.
                dqY = qY.dequantize()
                dqY_hat = qY_hat.dequantize()
                diff = dqY - dqY_hat
                # off-by-one errors are magnitude of Y_scale
                num_diff = torch.sum(diff > Y_scale * 1.0001)
                pct_diff = float(num_diff) / (diff.numel() + 1e-5)
                num_diff_off_by_one = torch.sum((diff > 0) * (diff <= Y_scale))
                pct_diff_off_by_one = float(num_diff_off_by_one) / (diff.numel() + 1e-5)
                self.assertTrue(pct_diff < 1e-6)
                self.assertTrue(pct_diff_off_by_one < 0.01)
"""Tests the correctness of the quantized::qnnpack_tanh op."""
    @given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
                       qparams=hu.qparams()))
    @unittest.skip(
        "this is broken without changes to any relevant code, "
        "we need to remove hypothesis testing in CI")
    def test_qtanh(self, X):
        """Tests the correctness of the quantized tanh op (currently skipped;
        see the decorator)."""
        # Note: QNNPACK is tested separately in TestQNNPackOps
        X, (scale, zero_point, torch_type) = X
        X = torch.from_numpy(X)
        Y = torch.tanh(X)
        qX = torch.quantize_per_tensor(X, scale=scale,
                                       zero_point=zero_point,
                                       dtype=torch_type)
        # Quantize the reference to account for max error.
        # Note that the output scale has +1, because we use scale of 2.0/2^BITS
        # in the implementations.
        f_min, f_max = -1.0, 1.0
        q_min, q_max = torch.iinfo(torch_type).min, torch.iinfo(torch_type).max
        output_scale = (f_max - f_min) / (q_max - q_min + 1.0)
        output_zero_point = int(round((q_max + q_min) / 2.0))
        qY = torch.quantize_per_tensor(Y, scale=output_scale,
                                       zero_point=output_zero_point,
                                       dtype=torch_type)
        qY_hat = torch.tanh(qX)
        self.assertEqual(qY, qY_hat,
                         msg=f"TanH failed: {qY} vs. {qY_hat}")
"""Tests the correctness of the quantized::threshold op."""
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams()),
threshold=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
value=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False))
def test_qthreshold(self, X, threshold, value):
X, (scale, zero_point, torch_type) = X
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
# calculate threshold(dqX) and quantize
dqX = qX.dequantize()
dqY_hat = dqX.clone()
dqY_hat = torch.nn.functional.threshold(dqY_hat, threshold, value)
qY_hat = torch.quantize_per_tensor(dqY_hat, scale=scale, zero_point=zero_point,
dtype=torch_type)
ops_under_test = {
'native': torch.threshold,
'nn.functional': torch.nn.functional.threshold,
'ao.nn.quantized.functional': torch.ao.nn.quantized.functional.threshold,
}
for name, op in ops_under_test.items():
qY = op(qX, threshold, value)
self.assertEqual(qY, qY_hat, msg=f"{name} qthreshold failed")
"""Tests the correctness of the quantized::clamp op."""
    @given(X=hu.tensor(shapes=hu.array_shapes(1, 8, 1, 8, max_numel=10**5),
                       elements=hu.floats(-1e6, 1e6, allow_nan=False),
                       qparams=hu.qparams()),
           min_val=hu.floats(-1e6, 1e6, allow_nan=False),
           max_val=hu.floats(-1e6, 1e6, allow_nan=False))
    def test_qclamp(self, X, min_val, max_val):
        """Tests the correctness of the quantized::clamp op, including the
        one-sided (min-only / max-only) variants on the fbgemm engine."""
        X, (scale, zero_point, torch_type) = X
        assume(min_val <= max_val)
        # Reference: fp clamp on the raw data, requantized with input qparams.
        Y_clamp = torch.clamp(torch.from_numpy(X), min=min_val, max=max_val)
        qY_clamp = torch.quantize_per_tensor(Y_clamp, scale=scale,
                                             zero_point=zero_point, dtype=torch_type)
        X = torch.from_numpy(X)
        qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
                                       dtype=torch_type)
        ops_under_test = {
            'ops.quantized': torch.ops.quantized.clamp,
        }
        for name, op in ops_under_test.items():
            qY_clamp_hat = op(qX, min=min_val, max=max_val)
            self.assertEqual(qY_clamp, qY_clamp_hat, msg=f"{name} qclamp failed")
        # One-sided clamp is only exercised on the fbgemm engine.
        if torch.backends.quantized.engine == 'fbgemm':
            with override_quantized_engine('fbgemm'):
                Y_min_clamp = torch.clamp(X, min=min_val)
                Y_max_clamp = torch.clamp(X, max=max_val)
                qY_min_clamp = torch.quantize_per_tensor(Y_min_clamp, scale=scale,
                                                         zero_point=zero_point, dtype=torch_type)
                qY_max_clamp = torch.quantize_per_tensor(Y_max_clamp, scale=scale,
                                                         zero_point=zero_point, dtype=torch_type)
                for name, op in ops_under_test.items():
                    qY_min_clamp_hat = op(qX, min=min_val)
                    self.assertEqual(qY_min_clamp, qY_min_clamp_hat, msg=f"{name} qclamp failed")
                    qY_max_clamp_hat = op(qX, max=max_val)
                    self.assertEqual(qY_max_clamp, qY_max_clamp_hat, msg=f"{name} qclamp failed")
"""Tests the correctness of the quantized::hardtanh op."""
    @skipIfNoFBGEMM
    @given(X=hu.tensor(shapes=hu.array_shapes(1, 8, 1, 8, max_numel=10**5),
                       elements=hu.floats(-1e6, 1e6, allow_nan=False, allow_infinity=False),
                       qparams=hu.qparams()),
           min_val=hu.floats(-1e6, 1e6, allow_nan=False, allow_infinity=False),
           max_val=hu.floats(-1e6, 1e6, allow_nan=False, allow_infinity=False))
    def test_hardtanh(self, X, min_val, max_val):
        """Tests the correctness of the quantized::hardtanh op, out-of-place
        and in-place, against a numpy clamp reference."""
        with override_quantized_engine('fbgemm'):
            X, (scale, zero_point, torch_type) = X
            assume(min_val <= max_val)
            # numpy reference: clamp the raw data into [min_val, max_val]
            Y = X.copy()
            Y[Y < min_val] = min_val
            Y[Y > max_val] = max_val
            qY = torch.quantize_per_tensor(torch.from_numpy(Y), scale=scale,
                                           zero_point=zero_point, dtype=torch_type)
            X = torch.from_numpy(X)
            qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
                                           dtype=torch_type)
            ops_under_test = {
                'ao.nn.quantized.functional.hardtanh':
                    torch.ao.nn.quantized.functional.hardtanh,
            }
            for name, op in ops_under_test.items():
                qY_hat = op(qX, min_val, max_val)
                self.assertEqual(qY, qY_hat, msg=f"{name} hardtanh failed")
            ops_under_test_inplace = {
                'inplace ao.nn.quantized.functional.hardtanh':
                    torch.ao.nn.quantized.functional.hardtanh,
            }
            # In-place variant: operate on a clone so qX is preserved.
            for name, op_ in ops_under_test_inplace.items():
                qY_hat = qX.clone()
                op_(qY_hat, min_val, max_val, inplace=True)
                self.assertEqual(qY, qY_hat, msg=f"{name} hardtanh failed")
"""Tests the correctness of the quantized::hardswish op."""
    @override_qengines
    def test_hardswish(self):
        """Tests the correctness of the quantized::hardswish op against a
        dequantize -> F.hardswish -> requantize reference."""
        max_sides = (3, 4)
        side_lens = (1, 7)
        torch_types = (torch.quint8, torch.qint8)
        y_scales = (0.1, )
        y_zero_points = (1,)
        combined = [max_sides, side_lens, torch_types, y_scales, y_zero_points]
        test_cases = itertools.product(*combined)
        for test_case in test_cases:
            max_side, side_len, torch_type, Y_scale, Y_zero_point = test_case
            # qnnpack only supports quint8 here
            if torch.backends.quantized.engine == 'qnnpack' and torch_type != torch.quint8:
                continue
            shapes = [side_len] * max_side
            X, X_scale, X_zero_point = \
                _get_random_tensor_and_q_params(shapes, 2.0, torch_type)
            for memory_format in torch.channels_last, torch.contiguous_format:
                # NOTE(review): only the layout conversion is conditional; for
                # non-4d inputs (and, once X has been converted to
                # channels_last, for the following contiguous_format pass) the
                # same layout is re-tested -- confirm this is intentional.
                if memory_format == torch.channels_last and len(shapes) == 4:
                    X = X.to(memory_format=memory_format)
                qX = torch.quantize_per_tensor(X, scale=X_scale, zero_point=X_zero_point,
                                               dtype=torch_type)
                dqX = qX.dequantize()
                dqY_hat = F.hardswish(dqX)
                qY_hat = torch.quantize_per_tensor(dqY_hat, scale=Y_scale,
                                                   zero_point=Y_zero_point,
                                                   dtype=torch_type)
                qY = torch.ao.nn.quantized.functional.hardswish(
                    qX, scale=Y_scale, zero_point=Y_zero_point)
                self.assertEqual(
                    qY, qY_hat,
                    msg=f"Hardswish failed: {qY} vs {qY_hat}, {torch.backends.quantized.engine}")
"""Tests the correctness of the binary op + scalar."""
def _test_binary_op_scalar_relu(self, A, b, binary_op_name, binary_op, quantized_op, quantized_op_relu):
import copy
op_scalar = quantized_op
op_scalar_relu = quantized_op_relu
A, (scale, zero_point, dtype) = A
A = A.astype(np.float32)
qA = torch.quantize_per_tensor(torch.from_numpy(A), scale, zero_point, dtype)
if binary_op_name == 'add':
C = binary_op(qA.dequantize(), round(b / scale) * scale)
else:
C = binary_op(qA.dequantize(), b)
C_relu = copy.deepcopy(C)
C_relu[C_relu < 0] = 0
C_hat = op_scalar(qA, b)
C_ref = torch.quantize_per_tensor(C, C_hat.q_scale(), C_hat.q_zero_point(), dtype)
C_relu_hat = op_scalar_relu(qA, b)
C_relu_ref = torch.quantize_per_tensor(
C_relu, C_relu_hat.q_scale(), C_relu_hat.q_zero_point(), dtype)
self.assertEqual(C_ref.dequantize(), C_hat.dequantize(),
msg=f"{binary_op_name}_scalar results don't match: "
f"{C_ref.dequantize()} vs {C_hat.dequantize()}")
self.assertEqual(C_relu_ref.dequantize(), C_relu_hat.dequantize(),
msg=f"{binary_op_name}_scalar_relu results don't match: "
f"{C_relu_ref.dequantize()} vs {C_relu_hat.dequantize()}")
    @unittest.skipIf(IS_MACOS, "skipping macos test")
    @given(A=hu.tensor(shapes=hu.array_shapes(1, 4, 1, 5),
                       elements=hu.floats(-1e6, 1e6, allow_nan=False),
                       qparams=hu.qparams()),
           b=hu.floats(-1e6, 1e6, allow_nan=False, allow_infinity=False))
    def test_add_scalar_relu(self, A, b):
        """Tests quantized::add and quantized::add_relu with a scalar operand."""
        self._test_binary_op_scalar_relu(A, b, "add", operator.add, torch.ops.quantized.add, torch.ops.quantized.add_relu)
    @unittest.skipIf(IS_MACOS, "skipping macos test")
    @given(A=hu.tensor(shapes=hu.array_shapes(1, 4, 1, 5),
                       elements=hu.floats(-1e6, 1e6, allow_nan=False),
                       qparams=hu.qparams()),
           b=hu.floats(-1e6, 1e6, allow_nan=False, allow_infinity=False))
    def test_mul_scalar_relu(self, A, b):
        """Tests quantized::mul and quantized::mul_relu with a scalar operand."""
        self._test_binary_op_scalar_relu(A, b, "mul", operator.mul, torch.ops.quantized.mul, torch.ops.quantized.mul_relu)
"""Tests the correctness of the add and add_relu op."""
    def test_qadd_relu_same_qparams(self):
        """Tests the correctness of the add and add_relu ops (including their
        out= variants) when inputs and output share qparams."""
        for dtype in [torch.quint8, torch.qint8, torch.qint32]:
            add_relu = torch.ops.quantized.add_relu
            add = torch.ops.quantized.add
            add_out = torch.ops.quantized.add
            add_relu_out = torch.ops.quantized.add_relu
            # NB: This is a strange size so that we exercise both the vectorized
            # implementation (64-element chunks at a time) as well as the scalar
            # implementation
            A = torch.arange(-128, 130, dtype=torch.float)
            B = torch.arange(-128, 130, dtype=torch.float)
            scale = 2.0
            zero_point = 127
            qA = torch.quantize_per_tensor(A, scale=scale, zero_point=zero_point,
                                           dtype=dtype)
            qB = torch.quantize_per_tensor(B, scale=scale, zero_point=zero_point,
                                           dtype=dtype)
            # Add ReLU ground truth
            C = (qA.dequantize() + qB.dequantize()).numpy()
            qC = _quantize(C, scale, zero_point, dtype=np_dtype[dtype])
            qC_hat = add(qA, qB, scale=scale, zero_point=zero_point)
            np.testing.assert_equal(qC, qC_hat.int_repr(),
                                    "Quantized addition failed.")
            qC_out_hat = torch._empty_affine_quantized(qC.shape,
                                                       scale=scale,
                                                       zero_point=zero_point,
                                                       dtype=dtype)
            add_out(qA, qB, out=qC_out_hat)
            self.assertEqual(qC_hat, qC_out_hat, msg="Add.out failed")
            # Add + ReLU ground truth
            Crelu = C.copy()
            Crelu[C < 0] = 0
            qCrelu = _quantize(Crelu, scale, zero_point, dtype=np_dtype[dtype])
            qCrelu_hat = add_relu(qA, qB, scale=scale, zero_point=zero_point)
            np.testing.assert_equal(qCrelu, qCrelu_hat.int_repr(),
                                    "Quantized addition with ReLU failed.")
            qCrelu_out_hat = torch._empty_affine_quantized(qCrelu.shape,
                                                           scale=scale,
                                                           zero_point=zero_point,
                                                           dtype=dtype)
            add_relu_out(qA, qB, out=qCrelu_out_hat)
            self.assertEqual(qCrelu_hat, qCrelu_out_hat,
                             msg="AddReLU.out failed")
"""Tests the correctness of the cudnn add and add_relu op
(Similar to test_qadd_relu_different_qparams, will probably merge in the future)"""
    @unittest.skipIf(not TEST_CUDNN, "cudnn is not enabled.")
    @unittest.skipIf(not SM80OrLater, "requires sm80 or later.")
    @unittest.skipIf(TEST_ROCM, "not supported on rocm.")
    @unittest.skip("not currently working and feature isn't used")
    def test_qadd_relu_cudnn(self):
        """Tests the cudnn add and add_relu ops with per-input scales
        (currently skipped; feature unused)."""
        dtype = torch.qint8
        add_relu = torch.ops.quantized.add_relu
        add = torch.ops.quantized.add
        A = torch.arange(-128, 130, dtype=torch.float).to(torch.device("cuda"))
        B = torch.arange(-128, 130, dtype=torch.float).to(torch.device("cuda"))
        scale_A = 2.5
        scale_B = 6.3
        scale_C = 12.9
        zero_point = 0
        qA = torch.quantize_per_tensor(A, scale=scale_A, zero_point=zero_point,
                                       dtype=dtype)
        qB = torch.quantize_per_tensor(B, scale=scale_B, zero_point=zero_point,
                                       dtype=dtype)
        # Add ground truth
        C = (qA.dequantize() + qB.dequantize()).to(device="cpu").numpy()
        qC = _quantize(C, scale_C, zero_point, dtype=np_dtype[dtype])
        qC_hat = add(qA, qB, scale=scale_C, zero_point=zero_point).to(device="cpu")
        np.testing.assert_equal(qC, qC_hat.int_repr(),
                                "Quantized addition failed.")
        # Add + ReLU ground truth
        Crelu = C.copy()
        Crelu[C < 0] = 0
        qCrelu = _quantize(Crelu, scale_C, zero_point, dtype=np_dtype[dtype])
        qCrelu_hat = add_relu(qA, qB, scale=scale_C, zero_point=zero_point).to(device="cpu")
        np.testing.assert_equal(qCrelu, qCrelu_hat.int_repr(),
                                "Quantized addition with ReLU failed.")
"""Tests the correctness of the cudnn add and add_relu op for nhwc format"""
    @unittest.skipIf(not TEST_CUDNN, "cudnn is not enabled.")
    @unittest.skipIf(not SM80OrLater, "requires sm80 or later.")
    @unittest.skipIf(TEST_ROCM, "not supported on rocm.")
    @unittest.skip("not currently working and feature isn't used")
    def test_qadd_relu_cudnn_nhwc(self):
        """Tests the cudnn add and add_relu ops on 4d (nhwc-shaped) inputs
        (currently skipped; feature unused)."""
        dtype = torch.qint8
        add_relu = torch.ops.quantized.add_relu
        add = torch.ops.quantized.add
        A = torch.rand(16, 8, 4, 12).to(device="cuda")
        B = torch.rand(16, 8, 4, 12).to(device="cuda")
        scale_A = 2.5
        scale_B = 6.3
        scale_C = 12.9
        zero_point = 0
        qA = torch.quantize_per_tensor(A, scale=scale_A, zero_point=zero_point,
                                       dtype=dtype)
        qB = torch.quantize_per_tensor(B, scale=scale_B, zero_point=zero_point,
                                       dtype=dtype)
        # Add ground truth
        C = (qA.dequantize() + qB.dequantize()).to(device="cpu").numpy()
        qC = _quantize(C, scale_C, zero_point, dtype=np_dtype[dtype])
        qC_hat = add(qA, qB, scale=scale_C, zero_point=zero_point).to(device="cpu")
        np.testing.assert_equal(qC, qC_hat.int_repr(),
                                "Quantized addition failed.")
        # Add + ReLU ground truth
        Crelu = C.copy()
        Crelu[C < 0] = 0
        qCrelu = _quantize(Crelu, scale_C, zero_point, dtype=np_dtype[dtype])
        qCrelu_hat = add_relu(qA, qB, scale=scale_C, zero_point=zero_point).to(device="cpu")
        np.testing.assert_equal(qCrelu, qCrelu_hat.int_repr(),
                                "Quantized addition with ReLU failed.")
"""Tests the correctness of the add and add_relu op."""
    def test_qadd_relu_different_qparams(self):
        """Tests the correctness of the add and add_relu ops (including their
        out= variants) when each input and the output have distinct qparams."""
        for dtype in [torch.quint8, torch.qint8, torch.qint32]:
            add_relu = torch.ops.quantized.add_relu
            add = torch.ops.quantized.add
            add_out = torch.ops.quantized.add
            add_relu_out = torch.ops.quantized.add_relu
            # NB: This is a strange size so that we exercise both the vectorized
            # implementation (64-element chunks at a time) as well as the scalar
            # implementation
            A = torch.arange(-128, 130, dtype=torch.float)
            B = torch.arange(-128, 130, dtype=torch.float)
            scale_A = 3.0
            zero_point_A = 7
            scale_B = 5.0
            zero_point_B = 127
            scale_C = 0.5
            zero_point_C = 5
            qA = torch.quantize_per_tensor(A, scale=scale_A, zero_point=zero_point_A,
                                           dtype=dtype)
            qB = torch.quantize_per_tensor(B, scale=scale_B, zero_point=zero_point_B,
                                           dtype=dtype)
            # Add ground truth
            C = (qA.dequantize() + qB.dequantize()).numpy()
            qC = _quantize(C, scale_C, zero_point_C, dtype=np_dtype[dtype])
            qC_hat = add(qA, qB, scale=scale_C, zero_point=zero_point_C)
            np.testing.assert_equal(qC, qC_hat.int_repr(),
                                    "Quantized addition failed.")
            qC_out_hat = torch._empty_affine_quantized(qC.shape,
                                                       scale=scale_C,
                                                       zero_point=zero_point_C,
                                                       dtype=dtype)
            add_out(qA, qB, out=qC_out_hat)
            self.assertEqual(qC_hat, qC_out_hat, msg="Add.out failed")
            # Add + ReLU ground truth
            Crelu = C.copy()
            Crelu[C < 0] = 0
            qCrelu = _quantize(Crelu, scale_C, zero_point_C, dtype=np_dtype[dtype])
            qCrelu_hat = add_relu(qA, qB, scale=scale_C, zero_point=zero_point_C)
            np.testing.assert_equal(qCrelu, qCrelu_hat.int_repr(),
                                    "Quantized addition with ReLU failed.")
            qCrelu_out_hat = torch._empty_affine_quantized(qCrelu.shape,
                                                           scale=scale_C,
                                                           zero_point=zero_point_C,
                                                           dtype=dtype)
            add_relu_out(qA, qB, out=qCrelu_out_hat)
            self.assertEqual(qCrelu_hat, qCrelu_out_hat,
                             msg="AddReLU.out failed")
"""Tests the correctness of the mul and mul_relu op."""
def test_qmul_relu_same_qparams(self):
for dtype in [torch.quint8, torch.qint8, torch.qint32]:
mul_relu = torch.ops.quantized.mul_relu
mul = torch.ops.quantized.mul
mul_out = torch.ops.quantized.mul
mul_relu_out = torch.ops.quantized.mul_relu
A = torch.arange(-100, 100, dtype=torch.float)
B = torch.arange(-100, 100, dtype=torch.float)
scale = 2
zero_point = 127
qA = torch.quantize_per_tensor(A, scale=scale, zero_point=zero_point,
dtype=dtype)
qB = torch.quantize_per_tensor(B, scale=scale, zero_point=zero_point,
dtype=dtype)
# mul ReLU ground truth
C = (qA.dequantize() * qB.dequantize()).numpy()
qC = _quantize(C, scale, zero_point, dtype=np_dtype[dtype])
qC_hat = mul(qA, qB, scale=scale, zero_point=zero_point)
np.testing.assert_equal(qC, qC_hat.int_repr(),
"Quantized mulition failed.")
qC_out_hat = torch._empty_affine_quantized(qC.shape,
scale=scale,
zero_point=zero_point,
dtype=dtype)
mul_out(qA, qB, out=qC_out_hat)
self.assertEqual(qC_hat, qC_out_hat, msg="mul.out failed")
# mul + ReLU ground truth
Crelu = C.copy()
Crelu[C < 0] = 0
qCrelu = _quantize(Crelu, scale, zero_point, dtype=np_dtype[dtype])
qCrelu_hat = mul_relu(qA, qB, scale=scale, zero_point=zero_point)
np.testing.assert_equal(qCrelu, qCrelu_hat.int_repr(),
"Quantized mulition with ReLU failed.")
qCrelu_out_hat = torch._empty_affine_quantized(qCrelu.shape,
scale=scale,
zero_point=zero_point,
dtype=dtype)
mul_relu_out(qA, qB, out=qCrelu_out_hat)
self.assertEqual(qCrelu_hat, qCrelu_out_hat,
msg="mulReLU.out failed")
# Scalar multiplication
for b in B:
C_ref = qA.dequantize().numpy() * b.item()
qC_hat = torch.ops.quantized.mul(qA, b.item())
self.assertEqual(C_ref, qC_hat.dequantize())
# Scalar multiplication + relu
for b in B:
C_ref = qA.dequantize().numpy() * b.item()
C_ref[C_ref < 0] = 0
qC_hat = torch.ops.quantized.mul_relu(qA, b.item())
self.assertEqual(C_ref, qC_hat.dequantize())
"""Tests the correctness of the mul and mul_relu op."""
    def test_qmul_relu_different_qparams(self):
        """Tests the correctness of the mul and mul_relu ops (including their
        out= variants) when each input and the output have distinct qparams."""
        for dtype in [torch.quint8, torch.qint8, torch.qint32]:
            mul_relu = torch.ops.quantized.mul_relu
            mul = torch.ops.quantized.mul
            mul_out = torch.ops.quantized.mul
            mul_relu_out = torch.ops.quantized.mul_relu
            A = torch.arange(-100, 100, dtype=torch.float)
            B = torch.arange(-100, 100, dtype=torch.float)
            scale_A = 3.0
            zero_point_A = 7
            scale_B = 5.0
            zero_point_B = 127
            scale_C = 0.5
            zero_point_C = 5
            qA = torch.quantize_per_tensor(A, scale=scale_A, zero_point=zero_point_A,
                                           dtype=dtype)
            qB = torch.quantize_per_tensor(B, scale=scale_B, zero_point=zero_point_B,
                                           dtype=dtype)
            # mul ground truth
            C = (qA.dequantize() * qB.dequantize()).numpy()
            qC = _quantize(C, scale_C, zero_point_C, dtype=np_dtype[dtype])
            qC_hat = mul(qA, qB, scale=scale_C, zero_point=zero_point_C)
            np.testing.assert_equal(qC, qC_hat.int_repr(),
                                    "Quantized multiplication failed.")
            qC_out_hat = torch._empty_affine_quantized(qC.shape,
                                                       scale=scale_C,
                                                       zero_point=zero_point_C,
                                                       dtype=dtype)
            mul_out(qA, qB, out=qC_out_hat)
            self.assertEqual(qC_hat, qC_out_hat, msg="mul.out failed")
            # mul + ReLU ground truth
            Crelu = C.copy()
            Crelu[C < 0] = 0
            qCrelu = _quantize(Crelu, scale_C, zero_point_C, dtype=np_dtype[dtype])
            qCrelu_hat = mul_relu(qA, qB, scale=scale_C, zero_point=zero_point_C)
            np.testing.assert_equal(qCrelu, qCrelu_hat.int_repr(),
                                    "Quantized multiplication with ReLU failed.")
            qCrelu_out_hat = torch._empty_affine_quantized(qCrelu.shape,
                                                           scale=scale_C,
                                                           zero_point=zero_point_C,
                                                           dtype=dtype)
            mul_relu_out(qA, qB, out=qCrelu_out_hat)
            self.assertEqual(qCrelu_hat, qCrelu_out_hat,
                             msg="mulReLU.out failed")
"""Tests the correctness of the matmul op."""
    @given(num_dims=st.integers(2, 5),
           outer_dims=st.lists(st.integers(2, 6), min_size=3, max_size=3),
           m=st.integers(2, 6),
           k=st.integers(2, 6),
           n=st.integers(2, 6),
           dtypes=st.sampled_from(((torch.qint8, np.int8),
                                   (torch.quint8, np.uint8))))
    def test_qmatmul(self, num_dims, outer_dims, m, k, n, dtypes):
        """Tests the correctness of quantized::matmul, and that it rejects
        per-channel quantized inputs."""
        # NOTE: this local np_dtype shadows the module-level np_dtype mapping.
        (torch_dtype, np_dtype) = dtypes
        size_a = outer_dims[:num_dims - 2] + [m, k]
        size_b = outer_dims[:num_dims - 2] + [k, n]
        A = torch.randn(size=size_a, dtype=torch.float32) * 3
        B = torch.randn(size=size_b, dtype=torch.float32) * 3
        scale_A = 3.1
        zero_point_A = 7
        scale_B = 5.3
        zero_point_B = 127
        scale_C = 1.3
        zero_point_C = 5
        qA = torch.quantize_per_tensor(A,
                                       scale=scale_A,
                                       zero_point=zero_point_A,
                                       dtype=torch_dtype)
        qB = torch.quantize_per_tensor(B,
                                       scale=scale_B,
                                       zero_point=zero_point_B,
                                       dtype=torch_dtype)
        # matmul ground truth
        C = torch.matmul(qA.dequantize(), qB.dequantize()).numpy()
        qC = _quantize(C, scale_C, zero_point_C, dtype=(np_dtype))
        qC_hat = torch.ops.quantized.matmul(qA,
                                            qB,
                                            scale=scale_C,
                                            zero_point=zero_point_C)
        np.testing.assert_equal(qC, qC_hat.int_repr(),
                                "Quantized multiplication failed.")
        # Using per channel quantization fails
        axis = 0
        scales_A = torch.rand(size=(A.shape[axis],))
        zero_points_A = torch.randint(low=0, high=5, size=(A.shape[axis],))
        scales_B = torch.rand(size=(B.shape[axis],))
        zero_points_B = torch.randint(low=0, high=5, size=(B.shape[axis],))
        qA = torch.quantize_per_channel(A,
                                        scales=scales_A,
                                        zero_points=zero_points_A,
                                        axis=axis,
                                        dtype=torch.qint8)
        qB = torch.quantize_per_channel(B,
                                        scales=scales_B,
                                        zero_points=zero_points_B,
                                        axis=axis,
                                        dtype=torch.qint8)
        np.testing.assert_raises_regex(RuntimeError,
                                       ".*per-tensor.*",
                                       torch.ops.quantized.matmul,
                                       qA,
                                       qB,
                                       scale_C,
                                       zero_point_C)
"""Tests the correctness of the quantized softmax op."""
    @given(dims=st.lists(st.integers(2, 5), min_size=5, max_size=5))
    def test_qsoftmax(self, dims):
        """Tests the correctness of the quantized softmax op across ranks,
        softmax dims and memory formats."""
        for (num_dims, dim, memory_format) in [
                (2, 1, torch.contiguous_format),  # 2d softmax over last dim
                (4, 3, torch.contiguous_format),  # >2 dims, softmax along last dim
                (5, 2, torch.contiguous_format),  # >2 dims, softmax along not last dim (requires permute)
                (4, 3, torch.channels_last),  # >2 dims, softmax along last dim, but not contiguous
                (4, 1, torch.channels_last),  # Channels Last, doesn't require permute
                (5, 1, torch.channels_last_3d),  # Channels Last 3D, doesn't require permute
        ]:
            size = dims[:num_dims]
            torch_dtype = torch.quint8
            np_dtype = np.uint8
            scale_X = 1.3
            zero_point_X = 5
            X = torch.rand(size=size, dtype=torch.float32) * 8 + zero_point_X
            X = X.to(memory_format=memory_format)
            # Output of softmax lies in [0, 1], so 1/256 covers the full range.
            scale_Y = 1 / 256
            zero_point_Y = 0
            qX = torch.quantize_per_tensor(X,
                                           scale=scale_X,
                                           zero_point=zero_point_X,
                                           dtype=torch_dtype)
            # softmax ground truth
            Y = torch.softmax(qX.dequantize(), dim=dim).numpy()
            qY = _quantize(Y, scale_Y, zero_point_Y, dtype=np_dtype)
            qY_hat = torch.ops.quantized.softmax(qX,
                                                 dim=dim,
                                                 output_scale=scale_Y,
                                                 output_zero_point=zero_point_Y)
            np.testing.assert_equal(qY, qY_hat.int_repr(),
                                    "Quantized softmax failed.")
"""Tests the correctness of the quantized softmax op using qnnpack."""
@skipIfNoQNNPACK
def test_qsoftmax_qnnpack(self):
with override_quantized_engine('qnnpack'):
self.test_qsoftmax()
"""Tests the correctness of the mul and mul_relu op."""
def test_qmul_broadcast(self):
mul_relu = torch.ops.quantized.mul_relu
mul = torch.ops.quantized.mul
mul_out = torch.ops.quantized.mul
mul_relu_out = torch.ops.quantized.mul_relu
# A = torch.arange(-25, 25, dtype=torch.float)
# B = torch.arange(-25, 25, dtype=torch.float)
A = torch.randn(8, 1, 6, 1)
B = torch.randn(7, 1, 5)
scale_A = 3.0
zero_point_A = 7
scale_B = 5.0
zero_point_B = 127
scale_C = 0.5
zero_point_C = 5
qA = torch.quantize_per_tensor(A, scale=scale_A, zero_point=zero_point_A,
dtype=torch.quint8)
qB = torch.quantize_per_tensor(B, scale=scale_B, zero_point=zero_point_B,
dtype=torch.quint8)
# mul ground truth
C = (qA.dequantize() * qB.dequantize()).numpy()
qC = _quantize(C, scale_C, zero_point_C)
qC_hat = mul(qA, qB, scale=scale_C, zero_point=zero_point_C)
np.testing.assert_equal(qC, qC_hat.int_repr(),
"Quantized multiplication failed.")
"""Tests that quantized add works with broadcasting"""
def test_qadd_broadcast(self):
A = torch.randn(1, 1, 4, 4)
B = torch.randn(2, 1, 4, 4)
qA = torch.quantize_per_tensor(A, 0.02, 0, torch.quint8)
qB = torch.quantize_per_tensor(B, 0.04, 2, torch.quint8)
output_scale = 0.01
output_zp = 1
# ground truth
C = qA.dequantize() + qB.dequantize()
qC = torch.quantize_per_tensor(C, output_scale, output_zp, torch.quint8)
# quantized
qC_hat_1 = torch.ops.quantized.add(qA, qB, output_scale, output_zp)
qC_hat_2 = torch.ops.quantized.add(qB, qA, output_scale, output_zp)
self.assertTrue(torch.allclose(qC.dequantize(), qC_hat_1.dequantize()))
self.assertTrue(torch.allclose(qC.dequantize(), qC_hat_2.dequantize()))
"""Tests channel shuffle operation on quantized tensors."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
min_side=2, max_side=32, max_numel=10**5),
qparams=hu.qparams(dtypes=[torch.quint8])),
groups=st.integers(2, 6))
def test_channel_shuffle(self, X, groups):
X, (scale, zero_point, torch_type) = X
channels = X.shape[-3]
iH, iW = X.shape[-2:]
assume(channels % groups == 0)
a = torch.from_numpy(X)
a = torch.rand(a.shape)
a_out = torch.nn.functional.channel_shuffle(a, groups)
a_ref = torch.quantize_per_tensor(a_out, scale=scale,
zero_point=zero_point, dtype=torch_type)
a_ref = a_ref.dequantize()
qa = torch.quantize_per_tensor(a, scale=scale, zero_point=zero_point,
dtype=torch_type)
a_hat = torch.nn.functional.channel_shuffle(qa, groups)
self.assertEqual(a_ref, a_hat.dequantize(),
msg="torch.nn.functional.channel_shuffle results are off")
"""Tests 1D max pool operation on quantized tensors."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=2, max_dims=3,
min_side=1, max_side=10),
qparams=hu.qparams()),
kernel=st.sampled_from((3, 5, 7)),
stride=st.sampled_from((None, 1, 2)),
dilation=st.integers(1, 2),
padding=st.integers(0, 2),
ceil_mode=st.booleans())
def test_max_pool1d(self, X, kernel, stride, dilation, padding, ceil_mode):
X, (scale, zero_point, torch_type) = X
# Check constraints
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iW = X.shape[-1]
oW = pool_output_shape(iW, kernel, padding, stride, dilation, ceil_mode)
assume(oW > 0)
a = torch.from_numpy(X)
a_pool = torch.nn.functional.max_pool1d(a, kernel_size=kernel,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode)
a_ref = torch.quantize_per_tensor(a_pool, scale=scale,
zero_point=zero_point, dtype=torch_type)
a_ref = a_ref.dequantize()
qa = torch.quantize_per_tensor(a, scale=scale, zero_point=zero_point,
dtype=torch_type)
ops_under_test = {
"torch": torch.max_pool1d,
"nn.functional": torch.nn.functional.max_pool1d,
"ao.nn.quantized.functional": torch.ao.nn.quantized.functional.max_pool1d,
}
for name, op in ops_under_test.items():
a_hat = op(qa, kernel_size=kernel, stride=stride, padding=padding,
dilation=dilation, ceil_mode=ceil_mode)
self.assertEqual(a_ref, a_hat.dequantize(),
msg=f"{name} results are off")
# Test the ops.quantized separately, because None is not treated.
a_hat = torch.ops.quantized.max_pool1d(
qa, kernel_size=_single(kernel),
stride=_single(kernel if stride is None else stride),
padding=_single(padding), dilation=_single(dilation),
ceil_mode=ceil_mode)
self.assertEqual(a_ref, a_hat.dequantize(),
msg="ops.quantized.max_pool1d results are off")
    # TODO: merge this test with test_max_pool2d
    """Tests 2D cudnn max pool operation on quantized tensors."""
    @given(X=hu.tensor(shapes=hu.array_shapes(min_dims=3, max_dims=4,
                                              min_side=1, max_side=10),
                       # cudnn's support for quantized pooling is limited to
                       # int8 currently
                       qparams=hu.qparams(dtypes=[torch.qint8])),
           kernel=st.sampled_from((3, 5, 7)),
           stride=st.sampled_from((None, 1, 2)),
           # currently there is no support for dilation for cudnn
           # pooling
           dilation=st.integers(1, 1),
           padding=st.integers(0, 2),
           ceil_mode=st.booleans())
    @unittest.skipIf(not TEST_CUDNN, "cudnn is not enabled.")
    @unittest.skipIf(TEST_CUDNN_VERSION <= 90100, "cuDNN maxpool2d mishandles -128 before v90100")
    @unittest.skipIf(TEST_ROCM, "not supported on rocm.")
    def test_max_pool2d_cudnn(self, X, kernel, stride, dilation, padding, ceil_mode):
        """Quantized max_pool2d on the CUDA/cudnn path must match
        pool-then-quantize of the float input."""
        X, (scale, zero_point, torch_type) = X
        assume(kernel // 2 >= padding)  # Kernel cannot be overhanging!
        iH, iW = X.shape[-2:]
        # Discard shapes that would yield an empty output in either dim.
        oH = pool_output_shape(iH, kernel, padding, stride, dilation, ceil_mode)
        assume(oH > 0)
        oW = pool_output_shape(iW, kernel, padding, stride, dilation, ceil_mode)
        assume(oW > 0)

        a = torch.from_numpy(X).to(device="cuda")
        a_pool = torch.nn.functional.max_pool2d(a, kernel_size=kernel,
                                                stride=stride,
                                                padding=padding, dilation=dilation,
                                                ceil_mode=ceil_mode)
        a_ref = torch.quantize_per_tensor(a_pool, scale=scale,
                                          zero_point=zero_point, dtype=torch_type)
        a_ref = a_ref.dequantize()
        qa = torch.quantize_per_tensor(a, scale=scale, zero_point=zero_point,
                                       dtype=torch_type)

        # Test the ops.quantized separately, because None is not treated.
        a_hat = torch.ops.quantized.max_pool2d(
            qa, kernel_size=_pair(kernel),
            stride=_pair(kernel if stride is None else stride),
            padding=_pair(padding), dilation=_pair(dilation), ceil_mode=ceil_mode)
        self.assertEqual(a_ref, a_hat.dequantize(),
                         msg="ops.quantized.max_pool2d results are off")
"""Tests 2D max pool operation on quantized tensors."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=3, max_dims=4,
min_side=1, max_side=10),
qparams=hu.qparams()),
kernel=st.sampled_from((3, 5, 7)),
stride=st.sampled_from((None, 1, 2)),
dilation=st.integers(1, 2),
padding=st.integers(0, 2),
ceil_mode=st.booleans())
def test_max_pool2d(self, X, kernel, stride, dilation, padding, ceil_mode):
X, (scale, zero_point, torch_type) = X
# Check constraints
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iH, iW = X.shape[-2:]
oH = pool_output_shape(iH, kernel, padding, stride, dilation, ceil_mode)
assume(oH > 0)
oW = pool_output_shape(iW, kernel, padding, stride, dilation, ceil_mode)
assume(oW > 0)
a = torch.from_numpy(X)
a_pool = torch.nn.functional.max_pool2d(a, kernel_size=kernel,
stride=stride,
padding=padding, dilation=dilation,
ceil_mode=ceil_mode)
a_ref = torch.quantize_per_tensor(a_pool, scale=scale,
zero_point=zero_point, dtype=torch_type)
a_ref = a_ref.dequantize()
qa = torch.quantize_per_tensor(a, scale=scale, zero_point=zero_point,
dtype=torch_type)
ops_under_test = {
"torch": torch.max_pool2d,
"nn.functional": torch.nn.functional.max_pool2d,
"ao.nn.quantized.functional": torch.ao.nn.quantized.functional.max_pool2d,
}
for name, op in ops_under_test.items():
a_hat = op(qa, kernel_size=kernel, stride=stride, padding=padding,
dilation=dilation, ceil_mode=ceil_mode)
self.assertEqual(a_ref, a_hat.dequantize(),
msg=f"{name} results are off")
# Test the ops.quantized separately, because None is not treated.
a_hat = torch.ops.quantized.max_pool2d(
qa, kernel_size=_pair(kernel),
stride=_pair(kernel if stride is None else stride),
padding=_pair(padding), dilation=_pair(dilation), ceil_mode=ceil_mode)
self.assertEqual(a_ref, a_hat.dequantize(),
msg="ops.quantized.max_pool2d results are off")
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
def test_max_pool2d_pt2e(self):
kernel_list = [2, 3]
stride_list = [1, 2]
padding_list = [0, 2]
dilation_list = [1, 2]
ceil_mode_list = [False, True]
channels_last_input = [False, True]
options = itertools.product(kernel_list, stride_list, padding_list, dilation_list, ceil_mode_list, channels_last_input)
for kernel, stride, padding, dilation, ceil_mode, channels_last in options:
if padding >= (kernel // 2):
# Continue with invalid input
continue
input = torch.randint(0, 8, (1, 3, 8, 8), dtype=torch.uint8)
if channels_last:
input = input.contiguous(memory_format=torch.channels_last)
a_pool = torch.nn.functional.max_pool2d(input.to(torch.float32), kernel_size=kernel,
stride=stride, padding=padding, dilation=dilation,
ceil_mode=ceil_mode).to(torch.uint8)
a_hat = torch.ops.quantized.max_pool2d(input, kernel_size=_pair(kernel),
stride=_pair(stride), padding=_pair(padding),
dilation=_pair(dilation), ceil_mode=ceil_mode)
self.assertEqual(input.is_contiguous(), a_hat.is_contiguous(),
msg="ops.quantized.max_pool2d input output diff memory format")
self.assertEqual(a_pool, a_hat,
msg="ops.quantized.max_pool2d results are off")
"""Tests 3D max pool operation on quantized tensors."""
def test_max_pool3d(self):
torch_types = [torch.qint8, torch.quint8]
kernels = [1, 3]
strides = [1, 3]
dilations = [1, 3]
paddings = [1, 3]
ceil_modes = [True, False]
options = itertools.product(torch_types, kernels, strides, dilations, paddings, ceil_modes)
for torch_type, kernel, stride, dilation, padding, ceil_mode in options:
X = torch.randint(20, 40, (2, 3, 16, 10, 10)).to(torch.float)
scale = 15
zero_point = 20
# Check constraints for invalid input
if not (kernel // 2 >= padding):
continue
iT, iH, iW = X.shape[-3:]
oT = pool_output_shape(iT, kernel, padding, stride, dilation, ceil_mode)
if not (oT > 0):
continue
oH = pool_output_shape(iH, kernel, padding, stride, dilation, ceil_mode)
if not (oH > 0):
continue
oW = pool_output_shape(iW, kernel, padding, stride, dilation, ceil_mode)
if not (oW > 0):
continue
a_pool = torch.nn.functional.max_pool3d(X, kernel_size=kernel,
stride=stride,
padding=padding, dilation=dilation,
ceil_mode=ceil_mode)
a_ref = torch.quantize_per_tensor(a_pool, scale=scale,
zero_point=zero_point, dtype=torch_type)
a_ref = a_ref.dequantize()
qa = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
ops_under_test = {
"torch": torch.max_pool3d,
"nn.functional": torch.nn.functional.max_pool3d,
}
for name, op in ops_under_test.items():
a_hat = op(qa, kernel_size=kernel, stride=stride, padding=padding,
dilation=dilation, ceil_mode=ceil_mode)
self.assertEqual(a_ref, a_hat.dequantize(),
msg=f"{name} results are off")
"""Tests max pool operation on NHWC quantized tensors."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
min_side=1, max_side=10),
qparams=hu.qparams()),
kernel=st.sampled_from((3, 5, 7)),
stride=st.sampled_from((None, 1, 2)),
dilation=st.integers(1, 2),
padding=st.integers(0, 2),
ceil_mode=st.booleans())
def test_max_pool2d_nhwc(self, X, kernel, stride, dilation, padding, ceil_mode):
X, (scale, zero_point, torch_type) = X
# Ensure we hit the vectorized paths
# 176 = 128 + 32 + 16
# 128 hits the interleaved path
# 32 hits the non-interleaved path
# 16 hits the scalar path
if X.shape[1] < 176:
X = np.repeat(X, 176 / X.shape[1], 1)
# Check constraints
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iH, iW = X.shape[-2:]
oH = pool_output_shape(iH, kernel, padding, stride, dilation, ceil_mode)
assume(oH > 0)
oW = pool_output_shape(iW, kernel, padding, stride, dilation, ceil_mode)
assume(oW > 0)
X_nchw = np.ascontiguousarray(X.transpose([0, 2, 3, 1]))
a = torch.from_numpy(X_nchw).permute([0, 3, 1, 2])
a_pool = torch.nn.functional.max_pool2d(a, kernel_size=kernel,
stride=stride,
padding=padding, dilation=dilation,
ceil_mode=ceil_mode)
a_ref = torch.quantize_per_tensor(a_pool, scale=scale,
zero_point=zero_point, dtype=torch_type)
a_ref = a_ref.dequantize()
qa = torch.quantize_per_tensor(torch.from_numpy(X_nchw), scale=scale, zero_point=zero_point,
dtype=torch_type).permute([0, 3, 1, 2])
self.assertTrue(qa.stride() != sorted(qa.stride()))
ops_under_test = {
"torch": torch.max_pool2d,
"nn.functional": torch.nn.functional.max_pool2d,
"ao.nn.quantized.functional": torch.ao.nn.quantized.functional.max_pool2d,
}
for name, op in ops_under_test.items():
a_hat = op(qa, kernel_size=kernel, stride=stride, padding=padding,
dilation=dilation, ceil_mode=ceil_mode)
self.assertTrue(a_hat.stride() != sorted(a_hat.stride()))
self.assertEqual(a_ref, a_hat.dequantize(),
msg=f"{name} results are off")
# Test the ops.quantized separately, because None is not treated.
a_hat = torch.ops.quantized.max_pool2d(
qa, kernel_size=_pair(kernel),
stride=_pair(kernel if stride is None else stride),
padding=_pair(padding), dilation=_pair(dilation), ceil_mode=ceil_mode)
self.assertEqual(a_ref, a_hat.dequantize(),
msg="ops.quantized.max_pool2d results are off")
"""Tests 3D max pool operation on quantized channel_last tensors."""
def test_max_pool3d_nhwc(self):
torch_types = [torch.qint8, torch.quint8]
kernels = [1, 3]
strides = [1, 3]
dilations = [1, 3]
paddings = [1, 3]
ceil_modes = [True, False]
options = itertools.product(torch_types, kernels, strides, dilations, paddings, ceil_modes)
for torch_type, kernel, stride, dilation, padding, ceil_mode in options:
X = torch.randint(20, 40, (2, 67, 16, 10, 10)).to(torch.float)
X_copy = copy.deepcopy(X)
X = X.contiguous(memory_format=torch.channels_last_3d)
scale = 15
zero_point = 20
# Check constraints for invalid input
if not (kernel // 2 >= padding):
continue
iT, iH, iW = X.shape[-3:]
oT = pool_output_shape(iT, kernel, padding, stride, dilation, ceil_mode)
if not (oT > 0):
continue
oH = pool_output_shape(iH, kernel, padding, stride, dilation, ceil_mode)
if not (oH > 0):
continue
oW = pool_output_shape(iW, kernel, padding, stride, dilation, ceil_mode)
if not (oW > 0):
continue
a_pool = torch.nn.functional.max_pool3d(X, kernel_size=kernel,
stride=stride,
padding=padding, dilation=dilation,
ceil_mode=ceil_mode)
a_ref = torch.quantize_per_tensor(a_pool, scale=scale,
zero_point=zero_point, dtype=torch_type)
a_ref = a_ref.dequantize()
qa = torch.quantize_per_tensor(X_copy, scale=scale, zero_point=zero_point,
dtype=torch_type)
qa = qa.contiguous(memory_format=torch.channels_last_3d)
ops_under_test = {
"torch": torch.max_pool3d,
"nn.functional": torch.nn.functional.max_pool3d,
}
for name, op in ops_under_test.items():
a_hat = op(qa, kernel_size=kernel, stride=stride, padding=padding,
dilation=dilation, ceil_mode=ceil_mode)
self.assertEqual(a_ref, a_hat.dequantize(),
msg=f"{name} results are off")
    @given(X=hu.tensor(shapes=hu.array_shapes(min_dims=3, max_dims=4,
                                              min_side=5, max_side=10),
                       qparams=hu.qparams(dtypes=torch.quint8)),
           kernel=st.sampled_from((3, 5)),
           stride=st.sampled_from((None, 1, 2)),
           padding=st.integers(0, 2),
           ceil_mode=st.sampled_from((True, False)),
           count_include_pad=st.sampled_from((True, False)),
           divisor_override=st.sampled_from((None, None)))
    def test_avg_pool2d(self, X, kernel, stride, padding, ceil_mode, count_include_pad, divisor_override):
        """
        Note: we currently cannot test the divisor_override, because quantized op will clamp the result
        within range. However, the float op will not.
        """
        X, (scale, zero_point, torch_type) = X

        assume(kernel // 2 >= padding)  # Kernel cannot be overhanging!
        iH, iW = X.shape[-2:]
        # Discard shapes that would yield an empty output in either dim.
        oH = pool_output_shape(iH, kernel, padding, stride, dilation=1)
        assume(oH > 0)
        oW = pool_output_shape(iW, kernel, padding, stride, dilation=1)
        assume(oW > 0)
        X = torch.from_numpy(X)
        qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
                                       dtype=torch_type)
        # Use the dequantized values so the reference sees the same
        # quantization error as the op under test.
        X = qX.dequantize()
        # Run reference on float tensor and then quantize the result for comparison
        X_ref = torch.nn.functional.avg_pool2d(
            X, kernel_size=kernel, stride=stride, padding=padding,
            ceil_mode=ceil_mode, count_include_pad=count_include_pad, divisor_override=divisor_override)
        ops_under_test = {
            "nn.functional": torch.nn.functional.avg_pool2d,
            "ao.nn.quantized.functional": torch.ao.nn.quantized.functional.avg_pool2d,
        }
        error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
        for name, op in ops_under_test.items():
            qX_hat = op(qX, kernel_size=kernel, stride=stride, padding=padding, ceil_mode=ceil_mode,
                        count_include_pad=count_include_pad, divisor_override=divisor_override)
            qX_ref = torch.quantize_per_tensor(X_ref, scale=qX_hat.q_scale(), zero_point=qX_hat.q_zero_point(),
                                               dtype=torch_type)

            # atol=1.0 allows for one unit of rounding difference between
            # the float reference and the integer averaging kernel.
            self.assertEqual(qX_ref.int_repr().to(torch.double), qX_hat.int_repr().to(torch.double), atol=1.0, rtol=0,
                             msg=error_message.format(name, qX_ref.int_repr(), qX_hat.int_repr()))
            self.assertEqual(scale, qX_hat.q_scale(),
                             msg=error_message.format(name + '.scale', scale, qX_hat.q_scale()))
            self.assertEqual(zero_point, qX_hat.q_zero_point(),
                             msg=error_message.format(name + '.zero_point', scale,
                                                      qX_hat.q_zero_point()))
    @given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
                                              min_side=5, max_side=10),
                       qparams=hu.qparams(dtypes=torch.qint8)),
           kernel=st.sampled_from((4, 5)),
           stride=st.sampled_from((None, 1, 2)),
           padding=st.integers(0, 2),
           ceil_mode=st.sampled_from((True, False)),
           count_include_pad=st.sampled_from((True, False)),
           divisor_override=st.sampled_from((None, None)))
    def test_avg_pool2d_nhwc(self, X, kernel, stride, padding, ceil_mode, count_include_pad, divisor_override):
        """
        Note: 1) we currently cannot test the divisor_override, because quantized op will clamp the result
        within range. However, the float op will not.
        2) we cannot test the qint32, since the float point precision is much lower than int32 for big number,
        which will make the test be very flaky.
        """
        X, (scale, zero_point, torch_type) = X
        H, W = X.shape[-2:]

        # Pad channels to >= 176 to exercise the vectorized NHWC kernels.
        if X.shape[1] < 176:
            X = np.repeat(X, 176 / X.shape[1], 1)

        assume(kernel // 2 >= padding)  # Kernel cannot be overhanging!
        iH, iW = X.shape[-2:]
        oH = pool_output_shape(iH, kernel, padding, stride, dilation=1)
        assume(oH > 0)
        oW = pool_output_shape(iW, kernel, padding, stride, dilation=1)
        assume(oW > 0)

        # NCHW-shaped tensor with NHWC memory layout (permute of an
        # NHWC-contiguous array).
        X_nchw = np.ascontiguousarray(X.transpose([0, 2, 3, 1]))

        qX = torch.quantize_per_tensor(torch.from_numpy(X_nchw), scale=scale,
                                       zero_point=zero_point, dtype=torch_type).permute([0, 3, 1, 2])
        X = qX.dequantize()

        # Float reference on the dequantized values; the result is quantized
        # with the op's output qparams below before comparison.
        X_ref = torch.nn.functional.avg_pool2d(
            X, kernel_size=kernel, stride=stride, padding=padding,
            ceil_mode=ceil_mode, count_include_pad=count_include_pad, divisor_override=divisor_override)

        # Sanity check: the input really is non-contiguous (NHWC strides).
        self.assertTrue(qX.stride() != sorted(qX.stride()))
        ops_under_test = {
            "nn.functional": torch.nn.functional.avg_pool2d,
            "ao.nn.quantized.functional": torch.ao.nn.quantized.functional.avg_pool2d,
        }
        error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
        for name, op in ops_under_test.items():
            X_hat = op(qX, kernel_size=kernel, stride=stride, padding=padding, ceil_mode=ceil_mode,
                       count_include_pad=count_include_pad, divisor_override=divisor_override)
            # Output must preserve the channels-last layout.
            self.assertTrue(X_hat.stride() != sorted(X_hat.stride()))
            qX_ref = torch.quantize_per_tensor(X_ref, scale=X_hat.q_scale(), zero_point=X_hat.q_zero_point(),
                                               dtype=torch_type)

            # atol=1.0 allows for one unit of rounding difference.
            self.assertEqual(qX_ref.int_repr().to(torch.double), X_hat.int_repr().to(torch.double), atol=1.0, rtol=0,
                             msg=error_message.format(name, qX_ref.int_repr(), X_hat.int_repr()))
            self.assertEqual(scale, X_hat.q_scale(),
                             msg=error_message.format(name + '.scale', scale, X_hat.q_scale()))
            self.assertEqual(zero_point, X_hat.q_zero_point(),
                             msg=error_message.format(name + '.zero_point', scale,
                                                      X_hat.q_zero_point()))
    @given(X=hu.tensor(shapes=hu.array_shapes(min_dims=5, max_dims=5,
                                              min_side=5, max_side=10),
                       qparams=hu.qparams(dtypes=torch.quint8)),
           kernel=st.sampled_from((3, 5)),
           stride=st.sampled_from((None, 1, 2)),
           padding=st.integers(0, 2),
           ceil_mode=st.sampled_from((True, False)),
           count_include_pad=st.sampled_from((True, False)),
           divisor_override=st.sampled_from((None, None)))
    def test_avg_pool3d(self, X, kernel, stride, padding, ceil_mode, count_include_pad, divisor_override):
        """
        Note: we currently cannot test the divisor_override, because quantized op will clamp the result
        within range. However, the float op will not.
        """
        X, (scale, zero_point, torch_type) = X

        assume(kernel // 2 >= padding)  # Kernel cannot be overhanging!
        iD, iH, iW = X.shape[-3:]
        # Discard shapes that would yield an empty output in any dim.
        oD = pool_output_shape(iD, kernel, padding, stride, dilation=1)
        assume(oD > 0)
        oH = pool_output_shape(iH, kernel, padding, stride, dilation=1)
        assume(oH > 0)
        oW = pool_output_shape(iW, kernel, padding, stride, dilation=1)
        assume(oW > 0)

        X = torch.from_numpy(X)
        qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
                                       dtype=torch_type)
        # Use the dequantized values so the reference sees the same
        # quantization error as the op under test.
        X = qX.dequantize()
        # Run reference on float tensor and then quantize the result for comparison
        X_ref = torch.nn.functional.avg_pool3d(
            X, kernel_size=kernel, stride=stride, padding=padding,
            ceil_mode=ceil_mode, count_include_pad=count_include_pad, divisor_override=divisor_override)

        ops_under_test = {
            "nn.functional": torch.nn.functional.avg_pool3d,
            "ao.nn.quantized.functional": torch.ao.nn.quantized.functional.avg_pool3d,
        }
        error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
        for name, op in ops_under_test.items():
            qX_hat = op(qX, kernel_size=kernel, stride=stride, padding=padding, ceil_mode=ceil_mode,
                        count_include_pad=count_include_pad, divisor_override=divisor_override)
            qX_ref = torch.quantize_per_tensor(X_ref, scale=qX_hat.q_scale(), zero_point=qX_hat.q_zero_point(),
                                               dtype=torch_type)
            # atol=1.0 allows for one unit of rounding difference.
            self.assertEqual(qX_ref.int_repr().to(torch.double), qX_hat.int_repr().to(torch.double), atol=1.0, rtol=0,
                             msg=error_message.format(name, qX_ref.int_repr(), qX_hat.int_repr()))
            self.assertEqual(scale, qX_hat.q_scale(),
                             msg=error_message.format(name + '.scale', scale, qX_hat.q_scale()))
            self.assertEqual(zero_point, qX_hat.q_zero_point(),
                             msg=error_message.format(name + '.zero_point', scale,
                                                      qX_hat.q_zero_point()))
    @given(X=hu.tensor(shapes=hu.array_shapes(min_dims=5, max_dims=5,
                                              min_side=5, max_side=10),
                       qparams=hu.qparams(dtypes=torch.qint8)),
           kernel=st.sampled_from((4, 5)),
           stride=st.sampled_from((None, 1, 2)),
           padding=st.integers(0, 2),
           ceil_mode=st.sampled_from((True, False)),
           count_include_pad=st.sampled_from((True, False)),
           divisor_override=st.sampled_from((None, None)))
    def test_avg_pool3d_nhwc(self, X, kernel, stride, padding, ceil_mode, count_include_pad, divisor_override):
        """
        Note: 1) we currently cannot test the divisor_override, because quantized op will clamp the result
        within range. However, the float op will not.
        2) we cannot test the qint32, since the float point precision is much lower than int32 for big number,
        which will make the test be very flaky.
        """
        X, (scale, zero_point, torch_type) = X
        D, H, W = X.shape[-3:]

        # Pad channels to >= 176 to exercise the vectorized NDHWC kernels.
        if X.shape[1] < 176:
            X = np.repeat(X, 176 / X.shape[1], 1)

        assume(kernel // 2 >= padding)  # Kernel cannot be overhanging!
        iD, iH, iW = X.shape[-3:]
        oD = pool_output_shape(iD, kernel, padding, stride, dilation=1)
        assume(oD > 0)
        oH = pool_output_shape(iH, kernel, padding, stride, dilation=1)
        assume(oH > 0)
        oW = pool_output_shape(iW, kernel, padding, stride, dilation=1)
        assume(oW > 0)

        # NCDHW-shaped tensor with channels-last-3d memory layout.
        X_nchw = np.ascontiguousarray(X.transpose([0, 2, 3, 4, 1]))

        qX = torch.quantize_per_tensor(torch.from_numpy(X_nchw), scale=scale,
                                       zero_point=zero_point, dtype=torch_type).permute([0, 4, 1, 2, 3])
        X = qX.dequantize()

        # Float reference on the dequantized values; the result is quantized
        # with the op's output qparams below before comparison.
        X_ref = torch.nn.functional.avg_pool3d(
            X, kernel_size=kernel, stride=stride, padding=padding,
            ceil_mode=ceil_mode, count_include_pad=count_include_pad, divisor_override=divisor_override)

        # Sanity check: the input really is non-contiguous.
        self.assertTrue(qX.stride() != sorted(qX.stride()))
        ops_under_test = {
            "nn.functional": torch.nn.functional.avg_pool3d,
            "ao.nn.quantized.functional": torch.ao.nn.quantized.functional.avg_pool3d,
        }
        error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
        for name, op in ops_under_test.items():
            X_hat = op(qX, kernel_size=kernel, stride=stride, padding=padding, ceil_mode=ceil_mode,
                       count_include_pad=count_include_pad, divisor_override=divisor_override)
            # Output must preserve the channels-last layout.
            self.assertTrue(X_hat.stride() != sorted(X_hat.stride()))
            qX_ref = torch.quantize_per_tensor(X_ref, scale=X_hat.q_scale(), zero_point=X_hat.q_zero_point(),
                                               dtype=torch_type)

            # atol=1.0 allows for one unit of rounding difference.
            self.assertEqual(qX_ref.int_repr().to(torch.double), X_hat.int_repr().to(torch.double), atol=1.0, rtol=0,
                             msg=error_message.format(name, qX_ref.int_repr(), X_hat.int_repr()))
            self.assertEqual(scale, X_hat.q_scale(),
                             msg=error_message.format(name + '.scale', scale, X_hat.q_scale()))
            self.assertEqual(zero_point, X_hat.q_zero_point(),
                             msg=error_message.format(name + '.zero_point', scale,
                                                      X_hat.q_zero_point()))
"""Tests adaptive average pool operation on NHWC quantized tensors."""
def test_adaptive_avg_pool2d_nhwc(self):
side_lens = (range(1, 10))
dim_lens = (range(3, 4))
torch_type = torch.qint8
zero_points = (0, 1)
combined = [side_lens, dim_lens, zero_points]
test_cases = itertools.product(*combined)
for test_case in test_cases:
output_size_h = random.randint(1, 10)
output_size_w = random.randint(1, 10)
side_len, dim_len, zero_point = test_case
shapes = [side_len] * dim_len
X, X_scale, X_zero_point = \
_get_random_tensor_and_q_params(shapes, 1.0, zero_point)
X = np.array(X)
scale = 1
H, W = X.shape[-2:]
output_size_h = min(output_size_h, H)
output_size_w = min(output_size_w, W)
if output_size_h == output_size_w:
output_size = output_size_h
else:
output_size = (output_size_h, output_size_w)
if X.shape[1] < 176:
X = np.repeat(X, 176 / X.shape[1], 1)
if X.ndim == 4:
X_nchw = np.ascontiguousarray(X.transpose([0, 2, 3, 1]))
X = torch.from_numpy(X_nchw).permute([0, 3, 1, 2])
qX = torch.quantize_per_tensor(torch.from_numpy(X_nchw),
scale=scale,
zero_point=zero_point,
dtype=torch_type).permute([0, 3, 1, 2])
else: # ndim == 3
X_nchw = np.ascontiguousarray(X.transpose([1, 2, 0]))
X = torch.from_numpy(X_nchw).permute([2, 0, 1])
qX = torch.quantize_per_tensor(torch.from_numpy(X_nchw),
scale=scale,
zero_point=zero_point,
dtype=torch_type).permute([2, 0, 1])
# Run reference on int_repr + round to avoid double rounding error.
X_ref = torch.nn.functional.adaptive_avg_pool2d(qX.int_repr().to(torch.double), output_size).round()
self.assertTrue(qX.stride() != sorted(qX.stride()))
ops_under_test = {
"nn.functional": torch.nn.functional.adaptive_avg_pool2d,
"ao.nn.quantized.functional":
torch.ao.nn.quantized.functional.adaptive_avg_pool2d,
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
X_hat = op(qX, output_size=output_size)
self.assertTrue(X_hat.stride() != sorted(X_hat.stride()))
self.assertEqual(X_ref, X_hat.int_repr(), atol=1.0, rtol=0,
msg=error_message.format(name, X_ref, X_hat.int_repr()),
exact_dtype=False)
self.assertEqual(scale, X_hat.q_scale(),
msg=error_message.format(name + '.scale', scale, X_hat.q_scale()))
self.assertEqual(zero_point, X_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', scale,
X_hat.q_zero_point()))
@unittest.skip("not currently working and feature isn't used")
def test_adaptive_avg_pool(self):
side_lens = (range(1, 10))
dim_lens = (range(3, 5))
torch_type = torch.qint8
zero_points = (0, 1)
combined = [side_lens, dim_lens, zero_points]
test_cases = itertools.product(*combined)
for test_case in test_cases:
output_size_d = random.randint(1, 10)
output_size_h = random.randint(1, 10)
output_size_w = random.randint(1, 10)
side_len, dim_len, zero_point = test_case
shapes = [side_len] * dim_len
X, X_scale, X_zero_point = \
_get_random_tensor_and_q_params(shapes, 1.0, zero_point)
X = np.array(X)
scale = 1
ndim = X.ndim
dim_to_check = []
if ndim <= 4:
dim_to_check.append(2)
if ndim >= 4:
dim_to_check.append(3)
D, H, W = X.shape[-3:]
output_size_d = min(output_size_d, D)
output_size_h = min(output_size_h, H)
output_size_w = min(output_size_w, W)
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
for dim in dim_to_check:
if dim == 2:
if output_size_h == output_size_w:
output_size = output_size_h
else:
output_size = (output_size_h, output_size_w)
elif dim == 3:
if output_size_d == output_size_h == output_size_w:
output_size = output_size_h
else:
output_size = (output_size_d, output_size_h, output_size_w)
# Run reference on int_repr + round to avoid double rounding error.
ref_op = getattr(torch.nn.functional, f'adaptive_avg_pool{dim}d')
X_ref = ref_op(qX.int_repr().to(torch.float), output_size).round()
ops_under_test = {
"nn.functional":
getattr(torch.nn.functional, f'adaptive_avg_pool{dim}d'),
"nn.quantized.functional":
getattr(torch.ao.nn.quantized.functional, f'adaptive_avg_pool{dim}d'),
"ao.nn.quantized.functional":
getattr(torch.ao.nn.quantized.functional, f'adaptive_avg_pool{dim}d')
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
# TODO: torch.cuda.is_available() should be swapped for a flag that checks if cudnn
# is enabled in the build when cudnn supports adaptive average pooling
devices = ["cpu", "cuda"] if (dim == 2 and torch.cuda.is_available()) else ["cpu"]
for device in devices:
qX_hat = op(qX.to(device=device), output_size=output_size)
self.assertEqual(
X_ref, qX_hat.int_repr(), atol=1.0,
rtol=0, msg=error_message.format(name, X_ref, qX_hat), exact_dtype=False)
self.assertEqual(
scale, qX_hat.q_scale(),
msg=error_message.format(name + '.scale', scale,
qX_hat.q_scale()))
self.assertEqual(
zero_point, qX_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', scale,
qX_hat.q_zero_point()))
"""Tests adaptive average pool operation on NHWC quantized tensors."""
def test_adaptive_avg_pool3d_ndhwc(self):
side_lens = (range(1, 10))
dim_lens = (range(4, 5))
torch_type = torch.qint8
zero_point = 0
combined = [side_lens, dim_lens]
test_cases = itertools.product(*combined)
for test_case in test_cases:
output_size_d = random.randint(1, 10)
output_size_h = random.randint(1, 10)
output_size_w = random.randint(1, 10)
side_len, dim_len = test_case
shapes = [side_len] * dim_len
X, X_scale, X_zero_point = \
_get_random_tensor_and_q_params(shapes, 1.0, zero_point)
X = np.array(X)
scale = 1
D, H, W = X.shape[-3:]
output_size_d = min(output_size_d, D)
output_size_h = min(output_size_h, H)
output_size_w = min(output_size_w, W)
if output_size_d == output_size_h == output_size_w:
output_size = output_size_h
else:
output_size = (output_size_d, output_size_h, output_size_w)
if X.shape[1] < 176:
X = np.repeat(X, 176 / X.shape[1], 1)
if X.ndim == 5:
X_ncdhw = np.ascontiguousarray(X.transpose([0, 2, 3, 4, 1]))
X = torch.from_numpy(X_ncdhw).permute([0, 4, 1, 2, 3])
qX = torch.quantize_per_tensor(torch.from_numpy(X_ncdhw),
scale=scale,
zero_point=zero_point,
dtype=torch_type).permute([0, 4, 1, 2, 3])
else: # ndim == 4
X_ncdhw = np.ascontiguousarray(X.transpose([1, 2, 3, 0]))
X = torch.from_numpy(X_ncdhw).permute([3, 0, 1, 2])
qX = torch.quantize_per_tensor(torch.from_numpy(X_ncdhw),
scale=scale,
zero_point=zero_point,
dtype=torch_type).permute([3, 0, 1, 2])
# Run reference on int_repr + round to avoid double rounding error.
X_ref = torch.nn.functional.adaptive_avg_pool3d(
qX.int_repr().to(torch.double), output_size).round()
self.assertTrue(qX.stride() != sorted(qX.stride()))
ops_under_test = {
"nn.functional": torch.nn.functional.adaptive_avg_pool3d,
"ao.nn.quantized.functional":
torch.ao.nn.quantized.functional.adaptive_avg_pool3d,
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
X_hat = op(qX, output_size=output_size)
self.assertTrue(X_hat.stride() != sorted(X_hat.stride()))
self.assertEqual(X_ref, X_hat.int_repr(), atol=1.0, rtol=0,
msg=error_message.format(name, X_ref, X_hat.int_repr()),
exact_dtype=False)
self.assertEqual(scale, X_hat.q_scale(),
msg=error_message.format(name + '.scale', scale, X_hat.q_scale()))
self.assertEqual(zero_point, X_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', scale,
X_hat.q_zero_point()))
def test_qtopk(self):
    """Check quantized ``torch.topk`` against topk on the dequantized input.

    Sweeps tensor rank, side length, reduction dimension, largest/sorted
    flags, quantized dtype, and (for 4-D inputs) NHWC memory layout.  The
    quantized op's values must dequantize to the reference values and its
    index tensor must match the reference indices exactly.
    """
    x_dims = [3, 4]  # Num elements in the shape
    sides = [3, 5]  # Side of the tensor generated
    dims = [0, 1, 2, 3]  # dimension over which to perform topk
    largest = [False, True]  # Return largest or smallest element
    sorted_opts = [False, True]  # Return sorted or not
    dtypes = [torch.qint8, torch.quint8]
    is_nhwc = [False, True]  # Is input in the NHWC format?

    test_cases = itertools.product(x_dims, sides, dims, largest, sorted_opts, dtypes, is_nhwc)
    k = 2
    for x_dim, side, dim, large, sort, dtype, nhwc in test_cases:
        if nhwc and x_dim != 4:  # NHWC requires 4 dimensions
            continue
        if dim >= x_dim:  # Dimension to find top-k for should exist
            continue
        shape = [side] * x_dim
        X, scale, zp = _get_random_tensor_and_q_params(shape, 1.0, dtype)
        qX = torch.quantize_per_tensor(X, scale, zp, dtype)

        if nhwc:
            # Same logical NCHW shape, channels-last (NHWC) strides.
            qX = qX.permute([0, 3, 1, 2])

        # Reference: topk on the dequantized tensor.
        unquantized_out = torch.topk(qX.dequantize(), k, dim=dim, largest=large, sorted=sort)
        quantized_out = torch.topk(qX, k, dim=dim, largest=large, sorted=sort)

        assert len(unquantized_out) == len(quantized_out)
        torch.testing.assert_close(quantized_out[0].dequantize(), unquantized_out[0])
        torch.testing.assert_close(quantized_out[1], unquantized_out[1])
"""Tests quantize concatenation (both fused and not)."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=3, max_dims=4,
                                          min_side=1, max_side=10),
                   qparams=hu.qparams()),
       num=st.integers(1, 4),
       dim=st.integers(1, 4),
       relu=st.booleans())
def test_cat(self, X, num, dim, relu):
    """Compare quantized.cat / cat_relu (and their out= variants) with a
    float reference, then check that per-channel inputs are rejected.

    The reference is `num` float copies of X concatenated along `dim`,
    re-quantized with X's qparams (relu applied first when testing the
    fused op).
    """
    tensors_q = []
    tensors_ref = []
    X, (scale, zero_point, torch_type) = X
    assume(dim < X.ndim)  # hypothesis may draw a dim beyond X's rank
    X = torch.from_numpy(X)
    new_shape = np.array(X.shape)
    new_shape[dim] = 0  # accumulated below to get the concatenated size
    for _ in range(num):
        tensors_q.append(torch.quantize_per_tensor(X, scale, zero_point,
                                                   torch_type))
        tensors_ref.append(X)
        new_shape[dim] += tensors_ref[-1].shape[dim]

    cat_ref = torch.cat(tensors_ref, dim=dim)
    cat_ref = torch.quantize_per_tensor(cat_ref, scale, zero_point, torch_type)
    cat_ref = cat_ref.dequantize()

    if relu:
        cat_ref = F.relu(cat_ref)
        q_cat_op = torch.ops.quantized.cat_relu
        q_cat_out_op = torch.ops.quantized.cat_relu_out
    else:
        q_cat_op = torch.ops.quantized.cat
        q_cat_out_op = torch.ops.quantized.cat_out

    cat_q = q_cat_op(tensors_q, dim=dim, scale=scale,
                     zero_point=zero_point)
    cat_q = cat_q.dequantize()
    np.testing.assert_equal(cat_ref.numpy(), cat_q.numpy())

    # Same check through the out= variant, writing into a preallocated
    # quantized tensor of the expected concatenated shape.
    cat_q_out = torch._empty_affine_quantized(
        list(new_shape), scale=scale,
        zero_point=zero_point, dtype=torch_type)
    q_cat_out_op(tensors_q, dim=dim, out=cat_q_out)
    cat_q_out = cat_q_out.dequantize()
    np.testing.assert_equal(cat_ref.numpy(), cat_q_out.numpy())

    # Test the cat on per-channel quantized tensor.
    ch_axis = 1
    scales = torch.from_numpy(np.array([1.0] * X.shape[ch_axis]))
    scales = scales.to(torch.float64)
    zero_points = torch.from_numpy(np.array([0] * X.shape[ch_axis]))
    zero_points = zero_points.to(torch.long)
    tensors_q[0] = torch.quantize_per_channel(
        X, scales, zero_points, axis=ch_axis, dtype=torch_type)
    with self.assertRaisesRegex(RuntimeError, "supported.*cat"):
        cat_q = q_cat_op(tensors_q, dim=ch_axis, scale=scale,
                         zero_point=zero_point)
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
                                          min_side=5, max_side=10),
                   qparams=hu.qparams()),
       size=st.sampled_from((1, 3, 5, 10)),
       mode=st.sampled_from(("bilinear", "nearest", "nearest-exact")),
       scale_factor=st.sampled_from((None, 1.5, 2.0)),
       align_corners=st.sampled_from((True, False)),
       nhwc_layout=st.sampled_from((True, False)))
def test_interpolate(self, X, size, mode, scale_factor, align_corners, nhwc_layout):
    """
    This test cover upsample_nearest2d and upsample_bilinear2d
    """
    X, (scale, zero_point, torch_type) = X

    if scale_factor is not None:
        size = None  # interpolate() accepts size OR scale_factor, not both
    if mode in ("nearest", "nearest-exact"):
        align_corners = None  # align_corners is only valid for bilinear

    if nhwc_layout:
        if X.shape[1] < 176:
            # Tile channels up so the channels-last fast path is exercised.
            X = np.repeat(X, 176 / X.shape[1], 1)

        X_nchw = np.ascontiguousarray(X.transpose([0, 2, 3, 1]))
        X = torch.from_numpy(X_nchw).permute([0, 3, 1, 2])

        # NOTE(review): X was already permuted on the line above, so this
        # permutes the quantized tensor a second time — compare with the
        # quantize-from-numpy pattern used elsewhere in this file; the
        # test stays self-consistent, but verify this matches the intent.
        qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
                                       dtype=torch_type).permute([0, 3, 1, 2])
    else:
        X = torch.from_numpy(X)
        qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
                                       dtype=torch_type)

    # Reference computed in float on the integer representation; the
    # quantized kernels may differ by at most one level (atol=1 below).
    X_ref = torch.nn.functional.interpolate(
        qX.int_repr().to(torch.float), size=size, scale_factor=scale_factor,
        mode=mode, align_corners=align_corners)

    ops_under_test = {
        "nn.functional": torch.nn.functional.interpolate,
        "ao.nn.quantized.functional": torch.ao.nn.quantized.functional.interpolate,
    }
    error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
    for name, op in ops_under_test.items():
        qX_hat = op(qX, size=size, scale_factor=scale_factor,
                    mode=mode, align_corners=align_corners)
        self.assertEqual(X_ref, qX_hat.int_repr(), atol=1.0, rtol=0,
                         msg=f"{name} results are off: qX_hat={qX_hat.int_repr()} X_ref={X_ref}",
                         exact_dtype=False)
        # The output must carry the input's qparams unchanged.
        self.assertEqual(scale, qX_hat.q_scale(),
                         msg=error_message.format(name + '.scale', scale, qX_hat.q_scale()))
        self.assertEqual(zero_point, qX_hat.q_zero_point(),
                         msg=error_message.format(name + '.zero_point', scale,
                                                  qX_hat.q_zero_point()))
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=5, max_dims=5,
                                          min_side=5, max_side=10),
                   qparams=hu.qparams()),
       size=st.sampled_from((1, 3, 5, 5, 10)),
       mode=st.sampled_from(("nearest", "nearest-exact")),
       scale_factor=st.sampled_from((None, 1.5, 2.0)),
       align_corners=st.sampled_from((True, False)),
       nhwc_layout=st.sampled_from((True, False)))
def test_interpolate3d(self, X, size, mode, scale_factor, align_corners, nhwc_layout):
    """
    This test cover upsample_nearest3d
    """
    X, (scale, zero_point, torch_type) = X
    if scale_factor is not None:
        size = None  # interpolate() accepts size OR scale_factor, not both

    align_corners = None  # not valid for the nearest modes tested here

    if nhwc_layout:
        if X.shape[1] < 176:
            # Tile channels up so the channels-last fast path is exercised.
            X = np.repeat(X, 176 / X.shape[1], 1)

        # 5-D channels-last (NDHWC) layout; variable name kept from the 2-D case.
        X_nchw = np.ascontiguousarray(X.transpose([0, 2, 3, 4, 1]))
        X = torch.from_numpy(X_nchw).permute([0, 4, 1, 2, 3])

        # NOTE(review): X was already permuted on the line above, so this
        # permutes the quantized tensor a second time — self-consistent,
        # but verify against the quantize-from-numpy pattern used elsewhere.
        qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
                                       dtype=torch_type).permute([0, 4, 1, 2, 3])
    else:
        X = torch.from_numpy(X)
        qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
                                       dtype=torch_type)

    # Reference in float on the integer representation; atol=1 below
    # tolerates off-by-one quantization-level differences.
    X_ref = torch.nn.functional.interpolate(
        qX.int_repr().to(torch.float), size=size, scale_factor=scale_factor,
        mode=mode, align_corners=align_corners)

    ops_under_test = {
        "nn.functional": torch.nn.functional.interpolate,
        "ao.nn.quantized.functional": torch.ao.nn.quantized.functional.interpolate,
    }
    error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
    for name, op in ops_under_test.items():
        qX_hat = op(qX, size=size, scale_factor=scale_factor,
                    mode=mode, align_corners=align_corners)
        self.assertEqual(X_ref, qX_hat.int_repr(), atol=1.0, rtol=0,
                         msg=f"{name} results are off: qX_hat={qX_hat.int_repr()}, X_ref={X_ref}", exact_dtype=False)
        # The output must carry the input's qparams unchanged.
        self.assertEqual(scale, qX_hat.q_scale(),
                         msg=error_message.format(name + '.scale', scale, qX_hat.q_scale()))
        self.assertEqual(zero_point, qX_hat.q_zero_point(),
                         msg=error_message.format(name + '.zero_point', scale,
                                                  qX_hat.q_zero_point()))
"""Tests quantize concatenation (both fused and not)."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
                                          min_side=1, max_side=10),
                   qparams=hu.qparams()),
       relu=st.booleans())
def test_cat_nhwc(self, X, relu):
    """Check quantized cat / cat_relu along channels for NHWC-laid-out
    inputs, covering both the equal-qparams memcpy fast path
    (scaleX == scaleY) and the general dequant-cat-quant path."""
    # X is NHWC
    X, (scale, zero_point, torch_type) = X

    # Tile out X so # channels is > 64
    X = np.repeat(X, 70 / X.shape[3], 3)
    X = torch.from_numpy(np.ascontiguousarray(X))
    Y = X.clone()
    Y = torch.from_numpy(np.ascontiguousarray(Y))
    # We add a fast path in qcat: when inputs share the same scale and zero_point,
    # it will go direct memcpy instead of dequant-cat-quant.
    for scaleX, scaleY in ((scale, scale), (scale, scale * 1.1)):
        # Here, we quantize and get quantized tensors in NHWC for both dims and strides. The
        # permute switches it so that the tensor looks like NCHW but it laid out in memory as
        # NHWC.
        qX = torch.quantize_per_tensor(X, scaleX, zero_point, torch_type).permute([0, 3, 1, 2])
        qY = torch.quantize_per_tensor(Y, scaleY, zero_point, torch_type).permute([0, 3, 1, 2])

        # Reference: dequantize, concatenate (and relu) in float, then
        # re-quantize with the requested output qparams.
        ref = torch.cat([qX.dequantize(), qY.dequantize()], dim=1)
        if relu:
            ref[ref < 0] = 0.0
        ref = torch.quantize_per_tensor(ref, scale=scale, zero_point=zero_point, dtype=torch_type)

        if relu:
            out = torch.ops.quantized.cat_relu(
                [qX, qY], dim=1, scale=scale, zero_point=zero_point)
        else:
            out = torch.ops.quantized.cat([qX, qY], dim=1, scale=scale, zero_point=zero_point)

        torch.testing.assert_close(out.dequantize(), ref.dequantize())
        # Output must preserve the NHWC (channels-last) strides.
        self.assertNotEqual(out.stride(), sorted(out.stride()))
@override_qengines
def test_mean(self):
    """torch.mean on a quantized tensor matches mean of the dequantized
    tensor, re-quantized/dequantized with the input's qparams."""
    all_scales = (1, 0.25)
    all_zero_points = (0, 2)
    all_shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4), (4, 4, 4, 4, 4))
    all_dtypes = (torch.quint8, torch.qint8)
    all_dims = ((), (-1,), (0,), (1,), (2,), (3,), (0, 1), (1, 2), (3, 4))
    for s, zp, shape, qdtype, dim in itertools.product(
            all_scales, all_zero_points, all_shapes, all_dtypes, all_dims):
        # Skip reduction dims that do not exist for this rank.
        if any(d >= len(shape) for d in dim):
            continue
        inp = torch.randn(*shape) * 10
        q_inp = torch.quantize_per_tensor(inp, s, zp, qdtype)
        expected = torch.mean(q_inp.dequantize(), dim)
        expected = torch.quantize_per_tensor(expected, s, zp, qdtype).dequantize()
        actual = torch.mean(q_inp, dim)
        self.assertEqual(expected, actual.dequantize())
@skipIfNoQNNPACK
@given(keep=st.booleans())
def test_quantized_mean_qnnpack(self, keep):
    """mean over (H, W) on the QNNPACK engine keeps an all-ones input
    all-ones, with and without keepdim."""
    with override_quantized_engine("qnnpack"):
        # using multiple of 4 sizes to satisfy pytorch_q8gavgpool_ukernel_up8xm__sse2() 4-byte alignment demand under ASAN
        input_shape = (4, 4, 4, 4)
        expected_shape = (4, 4, 1, 1) if keep else (4, 4)
        q_input = torch.quantize_per_tensor(
            torch.ones(input_shape), scale=0.2, zero_point=0, dtype=torch.quint8)
        q_expected = torch.quantize_per_tensor(
            torch.ones(expected_shape), scale=0.2, zero_point=0, dtype=torch.quint8)
        q_result = q_input.mean((2, 3), keepdim=keep)
        self.assertTrue(torch.equal(q_result, q_expected))
@override_qengines
def test_std(self):
    """torch.std on a quantized tensor matches std of the dequantized
    tensor, re-quantized/dequantized with the input's qparams."""
    all_scales = (1, 0.25)
    all_zero_points = (0, 2)
    all_shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4), (4, 4, 4, 4, 4))
    all_dtypes = (torch.quint8, torch.qint8)
    all_dims = ((), (-1,), (0,), (1,), (2,), (3,), (0, 1), (1, 2), (3, 4))
    flag_choices = (True, False)  # used for both unbiased and keepdim
    for s, zp, shape, qdtype, dim, unbiased, keepdim in itertools.product(
            all_scales, all_zero_points, all_shapes, all_dtypes, all_dims,
            flag_choices, flag_choices):
        # Skip reduction dims that do not exist for this rank.
        if any(d >= len(shape) for d in dim):
            continue
        inp = torch.randn(*shape) * 10
        q_inp = torch.quantize_per_tensor(inp, s, zp, qdtype)
        expected = torch.std(q_inp.dequantize(), dim, unbiased, keepdim)
        expected = torch.quantize_per_tensor(expected, s, zp, qdtype).dequantize()
        actual = torch.std(q_inp, dim, unbiased, keepdim)
        self.assertEqual(expected, actual.dequantize())
"""Tests the correctness of the quantized equal op."""
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
                   qparams=hu.qparams()),
       X2=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
                    qparams=hu.qparams()),
       X_per_channel=st.booleans(),
       X2_per_channel=st.booleans())
def test_equal(self, X, X2, X_per_channel, X2_per_channel):
    """Compare quantized Tensor.equal against a Python reference that
    checks qscheme, shape, dtype, qparams, and the integer values."""
    X, X_params = X
    (scale, zero_point, torch_type) = X_params
    X2, X2_params = X2
    (scale2, zero_point2, torch_type2) = X2_params

    X = torch.from_numpy(X)
    if X_per_channel:
        X_scheme = 'per_channel'
        channels = X.shape[-1]
        # Per-channel along the last axis, identical qparams per channel.
        qX = torch.quantize_per_channel(
            X,
            scales=torch.tensor([scale] * channels),
            zero_points=torch.tensor([zero_point] * channels),
            dtype=torch_type,
            axis=X.ndim - 1)
    else:
        X_scheme = 'per_tensor'
        qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
                                       dtype=torch_type)
    X2 = torch.from_numpy(X2)
    if X2_per_channel:
        X2_scheme = 'per_channel'
        channels = X2.shape[-1]
        qX2 = torch.quantize_per_channel(
            X2,
            scales=torch.tensor([scale2] * channels),
            zero_points=torch.tensor([zero_point2] * channels),
            dtype=torch_type2,
            axis=X2.ndim - 1)
    else:
        X2_scheme = 'per_tensor'
        qX2 = torch.quantize_per_tensor(X2, scale=scale2, zero_point=zero_point2,
                                        dtype=torch_type2)

    def equal_ref(qX, qX2):
        """Reference equality: scheme, shape, dtype, qparams, then values."""
        if qX.qscheme() != qX2.qscheme():
            return False
        if qX.shape != qX2.shape:
            return False
        if qX.dtype != qX2.dtype:
            return False
        if qX.qscheme() == torch.per_tensor_affine:
            if qX.q_scale() != qX2.q_scale():
                return False
            if qX.q_zero_point() != qX2.q_zero_point():
                return False
        elif qX.qscheme() == torch.per_channel_affine:
            if (qX.q_per_channel_scales() !=
                    qX2.q_per_channel_scales()).any():
                return False
            if (qX.q_per_channel_zero_points() !=
                    qX2.q_per_channel_zero_points()).any():
                return False
        else:
            raise NotImplementedError("Don't know what to do with",
                                      qX.qscheme())
        if (qX.int_repr().to(float) != qX2.int_repr().to(float)).any():
            return False
        return True

    # Reflexive case and the general case must both match the reference.
    self.assertEqual(qX.equal(qX), equal_ref(qX, qX))
    self.assertEqual(qX.equal(qX2), equal_ref(qX, qX2))
"""Tests quantized equal op with input of non-quantized tensor."""
def test_quantized_equal(self,):
x = torch.rand(1)
y = torch.quantize_per_tensor(x, scale=0.5, zero_point=0, dtype=torch.qint8)
self.assertTrue(not torch.equal(x, y))
self.assertTrue(not torch.equal(y, x))
@skipIfNoFBGEMM
def test_group_norm(self):
    """Compare quantized.group_norm against F.group_norm on the
    dequantized input, re-quantized with the requested output qparams.

    Sweeps batch size, group count, channels per group, elements per
    channel, dtype, output qparams, channels-last layout, and affine vs.
    plain normalization.  Off-by-one-bin differences are tolerated up to
    1% of elements (see the long comment below for why).
    """
    # hypothesis is flaky for this test, create test cases manually
    batches_list = (1, 7)
    num_groups_list = (1, 4)
    channels_per_groups = (1, 36, 72)
    elements_per_channels = (8, 128, 1024)
    torch_types = (torch.qint8, torch.quint8)
    y_scales = (0.1, 4.23)
    y_zero_points = (0, 1)
    channels_last_list = [True, False]
    affine_list = [True, False]
    combined = [batches_list, num_groups_list, channels_per_groups, elements_per_channels,
                torch_types, y_scales, y_zero_points, channels_last_list, affine_list]
    test_cases = itertools.product(*combined)

    with override_quantized_engine("fbgemm"):
        for test_case in test_cases:
            batches, num_groups, channels_per_group, elements_per_channel, \
                torch_type, Y_scale, Y_zero_point, channels_last, \
                affine = test_case
            num_channels = num_groups * channels_per_group
            # minimum rank for channels_last
            shapes = (batches, num_channels, elements_per_channel, 1)

            # In the FP kernel, sums and sums of squares are calculated in floating point.
            # In the int8 and uint8 versions of the quantized kernel, they are
            # calculated in integer arithmetic (which is exact).
            # Because of this, the numerics do not always match exactly which is
            # expected and acceptable. We do the following to allow this failure
            # in this test:
            # 1. do not use Hypothesis to generate the input tensor. Hypothesis
            #    favors homogeneous inputs in its search strategies which isn't
            #    representative of the inputs we care about, and tends to maximize
            #    this particular numerics difference.
            # 2. allow a small % of off by Y_scale errors. Even when the
            #    variance of the input is high, there can be off by one errors
            #    in the result if the input value happens to fall exactly on
            #    the bin boundary of the output scale.
            #
            # If we want the numerics to match we could switch to calculating
            # mean+var in floating point in the future, at the cost of speed.
            X, X_scale, X_zero_point = \
                _get_random_tensor_and_q_params(shapes, 1.0, torch_type)

            # Initialize the weights non-randomly for reproducibility
            if affine:
                weight = torch.ones(num_channels).float() * 0.5
                bias = torch.ones(num_channels).float()
                for i in range(num_channels):
                    weight[i] *= i
                    bias[i] *= i
            else:
                weight = None
                bias = None
            eps = 0.001

            qX = torch.quantize_per_tensor(X, X_scale, X_zero_point, torch_type)
            if channels_last:
                qX = qX.contiguous(memory_format=torch.channels_last)
            dqX = qX.dequantize()

            # Enforce non-homogeneous inputs
            for batch_idx in range(batches):
                for group_idx in range(num_groups):
                    ch_start = group_idx * channels_per_group
                    ch_end = ch_start + channels_per_group
                    group_vals = dqX[batch_idx][ch_start:ch_end]
                    assume(
                        float(torch.unique(group_vals).shape[0]) / group_vals.numel() > 0.001
                        or group_vals.numel() < 5)

            qY = torch.ops.quantized.group_norm(qX, num_groups, weight, bias, eps, Y_scale, Y_zero_point)

            dqY_hat = F.group_norm(dqX, num_groups=num_groups, weight=weight, bias=bias, eps=eps)
            qY_hat = torch.quantize_per_tensor(dqY_hat, Y_scale, Y_zero_point, torch_type)

            # Due to the numerics difference mentioned above between calculating
            # the variance in float vs int, the results can still be slightly
            # different.
            dqY = qY.dequantize()
            dqY_hat = qY_hat.dequantize()
            diff = dqY - dqY_hat

            # off-by-one errors are magnitude of Y_scale
            num_diff = torch.sum(diff > Y_scale * 1.0001)
            pct_diff = float(num_diff) / (diff.numel() + 1e-5)
            num_diff_off_by_one = torch.sum((diff > 0) * (diff <= Y_scale))
            pct_diff_off_by_one = float(num_diff_off_by_one) / (diff.numel() + 1e-5)

            # Nothing may differ by more than one output bin; at most 1%
            # of elements may be off by exactly one bin.
            self.assertTrue(pct_diff < 1e-6)
            self.assertTrue(pct_diff_off_by_one < 0.01)
@skipIfNoFBGEMM
def test_instance_norm(self):
    """Compare quantized.instance_norm against F.instance_norm on the
    dequantized input, re-quantized with the requested output qparams.

    Sweeps shape, dtype, output qparams, channels-last layout, and
    affine vs. plain normalization.  Because the quantized kernel
    accumulates sums in integer arithmetic while the reference uses
    float, off-by-one-bin differences are tolerated up to 1% of
    elements; larger differences are not (asserts at the bottom).
    """
    # NOTE: the unused `max_sides = (4, 5)` local from the original was
    # removed; rank coverage comes directly from shape_list below.
    shape_list = ([2, 2, 2, 2], [8, 8, 8, 8], [11, 11, 11, 11])
    torch_types = (torch.qint8, torch.quint8)
    y_scales = (0.1, 4.23)
    y_zero_points = (0, 1)
    channels_last_list = (True, False)
    affine_list = (True, False)
    combined = [shape_list, torch_types, y_scales, y_zero_points, channels_last_list, affine_list]
    test_cases_product = itertools.product(*combined)
    test_cases = list(test_cases_product)
    # NB: Add just one test case to test overflow, but this case is too slow to run
    # internally in @fbcode//mode/dev, the long pole is the 4x calls to torch.sort
    # inside torch.unique current implementation
    if not IS_SANDCASTLE:
        test_cases.append([
            [1, 4, 224, 224, 160],  # shape,
            torch.qint8,  # torch_type
            0.1,  # scale
            0,  # zero_point
            False,   # channels_last
            True,  # affine
        ])
    with override_quantized_engine("fbgemm"):
        for test_case in test_cases:

            shapes, torch_type, Y_scale, Y_zero_point, channels_last, affine = test_case
            if channels_last and len(shapes) >= 5:
                # required rank 4 tensor to use channels_last format
                continue

            # In the FP kernel, sums and sums of squares are calculated in floating point.
            # In the int8 and uint8 versions of the quantized kernel, they are
            # calculated in integer arithmetic (which is exact).
            # Because of this, the numerics do not always match exactly which is
            # expected and acceptable. We do the following to allow this failure
            # in this test:
            # 1. do not use Hypothesis to generate the input tensor. Hypothesis
            #    favors homogeneous inputs in its search strategies which isn't
            #    representative of the inputs we care about, and tends to maximize
            #    this particular numerics difference.
            # 2. allow a small % of off by Y_scale errors. Even when the
            #    variance of the input is high, there can be off by one errors
            #    in the result if the input value happens to fall exactly on
            #    the bin boundary of the output scale.
            #
            # If we want the numerics to match we could switch to calculating
            # mean+var in floating point in the future, at the cost of speed.
            X, X_scale, X_zero_point = \
                _get_random_tensor_and_q_params(shapes, 1.0, torch_type)
            num_channels = shapes[1]
            if affine:
                weight = torch.rand(num_channels).float() * 0.5
                bias = torch.rand(num_channels).float()
                for i in range(num_channels):
                    weight[i] *= i
                    bias[i] *= i
            else:
                weight = None
                bias = None
            eps = 0.001

            qX = torch.quantize_per_tensor(X, X_scale, X_zero_point, torch_type)
            if channels_last:
                qX = qX.contiguous(memory_format=torch.channels_last)
            dqX = qX.dequantize()

            # Enforce non-homogeneous inputs
            batches = shapes[0]
            for batch_idx in range(batches):
                for ch_idx in range(num_channels):
                    ch_vals = dqX[batch_idx][ch_idx]
                    assume(
                        float(torch.unique(ch_vals).shape[0]) / ch_vals.numel() > 0.01
                        or ch_vals.numel() < 5 or ch_vals.numel() > 25600)

            qY = torch.ops.quantized.instance_norm(qX, weight, bias, eps, Y_scale, Y_zero_point)

            dqY_hat = F.instance_norm(dqX, weight=weight, bias=bias, eps=eps)
            qY_hat = torch.quantize_per_tensor(dqY_hat, Y_scale, Y_zero_point, torch_type)

            # Due to the numerics difference mentioned above between calculating
            # the variance in float vs int, the results can still be slightly
            # different.
            dqY = qY.dequantize()
            dqY_hat = qY_hat.dequantize()
            diff = dqY - dqY_hat

            # off-by-one errors are magnitude of Y_scale
            num_diff = torch.sum(diff > Y_scale * 1.0001)
            pct_diff = float(num_diff) / (diff.numel() + 1e-5)
            num_diff_off_by_one = torch.sum((diff > 0) * (diff <= Y_scale))
            pct_diff_off_by_one = float(num_diff_off_by_one) / (diff.numel() + 1e-5)

            self.assertTrue(pct_diff < 1e-6)
            self.assertTrue(pct_diff_off_by_one < 0.01)
@skipIfNoFBGEMM
def test_batch_norm_relu(self):
    """Compare the fused quantized batch_norm{1,2,3}d_relu ops against
    F.batch_norm (eval mode) followed by relu in float, re-quantized.

    The 1d/2d/3d variant is chosen from the input rank (2-3 -> 1d,
    4 -> 2d, otherwise 3d).
    """
    # hypothesis too slow for this test, create test cases manually
    max_sides = (2, 3, 4, 5)
    side_lens = (1, 8, 11)
    torch_types = (torch.qint8, torch.quint8)
    combined = [max_sides, side_lens, torch_types]
    test_cases = itertools.product(*combined)

    with override_quantized_engine("fbgemm"):
        for test_case in test_cases:
            max_side, side_len, torch_type = test_case
            Y_zero_point = 1
            Y_scale = 0.5

            shapes = [side_len] * max_side
            X, scale_x, zero_point_x = \
                _get_random_tensor_and_q_params(shapes, 1.0, torch_type)
            dtype_x = torch_type

            c = X.shape[1]  # channel count for the BN statistics
            mean = torch.rand(c).float()
            var = torch.rand(c).float()
            weight = torch.rand(c).float()
            bias = torch.rand(c).float()
            eps = 0.001
            qx = torch.quantize_per_tensor(X, scale_x, zero_point_x, dtype_x)
            if len(X.shape) == 2 or len(X.shape) == 3:
                qy = torch.ops.quantized.batch_norm1d_relu(
                    qx, weight, bias, mean, var, eps, Y_scale, Y_zero_point)
            elif len(X.shape) == 4:
                qy = torch.ops.quantized.batch_norm2d_relu(
                    qx, weight, bias, mean, var, eps, Y_scale, Y_zero_point)
            else:
                qy = torch.ops.quantized.batch_norm3d_relu(
                    qx, weight, bias, mean, var, eps, Y_scale, Y_zero_point)

            # Reference: eval-mode batch_norm in float (fixed stats), relu,
            # then quantize with the requested output qparams.
            float_ref = F.batch_norm(qx.dequantize(), weight=weight, bias=bias,
                                     running_mean=mean, running_var=var,
                                     training=False, momentum=0, eps=eps).numpy()

            float_ref_relu = float_ref.copy()
            float_ref_relu[float_ref < 0] = 0
            quantize_ref = torch.quantize_per_tensor(
                torch.from_numpy(float_ref_relu), Y_scale, Y_zero_point, dtype_x)
            self.assertEqual(
                qy.int_repr().numpy(),
                quantize_ref.int_repr().numpy(),
                msg=f"{qy} vs {quantize_ref}")
@skipIfNoFBGEMM
def test_batch_norm(self):
    """Compare quantized batch_norm{1,2,3}d against F.batch_norm (eval
    mode, fixed running stats) in float, re-quantized with the requested
    output qparams.  Variant chosen from input rank (2-3 / 4 / 5).
    """
    # hypothesis too slow for this test, create test cases manually
    max_sides = (2, 3, 4, 5)
    side_lens = (1, 8, 11)
    torch_types = (torch.qint8, torch.quint8)
    combined = [max_sides, side_lens, torch_types]
    test_cases = itertools.product(*combined)

    with override_quantized_engine("fbgemm"):
        for test_case in test_cases:
            max_side, side_len, torch_type = test_case
            Y_zero_point = 1
            Y_scale = 0.5

            shapes = [side_len] * max_side
            X, scale_x, zero_point_x = \
                _get_random_tensor_and_q_params(shapes, 1.0, torch_type)
            dtype_x = torch_type

            c = X.shape[1]  # channel count for the BN statistics
            mean = torch.rand(c).float()
            var = torch.rand(c).float()
            weight = torch.rand(c).float()
            bias = torch.rand(c).float()
            eps = 0.001
            qx = torch.quantize_per_tensor(X, scale_x, zero_point_x, dtype_x)
            if len(X.shape) == 2 or len(X.shape) == 3:
                qy = torch.ops.quantized.batch_norm1d(
                    qx, weight, bias, mean, var, eps, Y_scale, Y_zero_point)
            elif len(X.shape) == 4:
                qy = torch.ops.quantized.batch_norm2d(
                    qx, weight, bias, mean, var, eps, Y_scale, Y_zero_point)
            elif len(X.shape) == 5:
                qy = torch.ops.quantized.batch_norm3d(
                    qx, weight, bias, mean, var, eps, Y_scale, Y_zero_point)

            float_ref = F.batch_norm(qx.dequantize(), weight=weight, bias=bias,
                                     running_mean=mean, running_var=var, training=False,
                                     momentum=0, eps=eps)
            quantize_ref = torch.quantize_per_tensor(float_ref, Y_scale, Y_zero_point, dtype_x)
            self.assertEqual(
                qy.int_repr().numpy(), quantize_ref.int_repr().numpy(),
                msg=f"{qy} vs {quantize_ref}")
@override_qengines
def test_empty_batch(self):
    """Smoke-test a broad set of quantized ops on zero-batch inputs
    (leading dimension 0): each op must run and produce the expected
    output shape rather than erroring on the empty batch.
    """
    scale = 1.0
    zero_point = 0
    X = torch.ones((0, 2, 4, 4), dtype=torch.float32)
    qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
                                   dtype=torch.quint8)

    # upsample_nearest2d
    # NOTE(review): upsample_nearest is a deprecated alias of
    # interpolate(mode="nearest"); kept here as it is the op under test.
    qY = torch.nn.functional.upsample_nearest(qX, scale_factor=2)
    np.testing.assert_equal(qY.size(), (0, 2, 8, 8),
                            "Quantized upsample_nearsest2d with batch size 0 failed.")

    # relu
    qY = torch.nn.functional.relu(qX)
    np.testing.assert_equal(qY.size(), qX.size(),
                            "Quantized relu with batch size 0 failed.")

    # tanh
    qY = torch.tanh(qX)
    np.testing.assert_equal(qY.size(), qX.size(),
                            "Quantized tanh with batch size 0 failed.")
    # sigmoid
    qY = torch.sigmoid(qX)
    np.testing.assert_equal(qY.size(), qX.size(),
                            "Quantized sigmoid with batch size 0 failed.")

    # interpolate
    op = torch.ao.nn.quantized.functional.interpolate
    for mode in ["nearest", "bilinear", "nearest-exact"]:
        qY = op(qX, scale_factor=2, mode=mode)
        np.testing.assert_equal(qY.size(), (0, 2, 8, 8),
                                "Quantized interpolate with batch size 0 failed.")

    # avg_pool
    kernel = (2, 2)
    stride = (1, 1)
    padding = (0, 0)
    op = torch.ao.nn.quantized.functional.avg_pool2d
    qY = op(qX, kernel, stride, padding)
    np.testing.assert_equal(qY.size(), (0, 2, 3, 3),
                            "Quantized avg_pool2d with batch size 0 failed.")

    # adaptive_avg_pool
    op = torch.ao.nn.quantized.functional.adaptive_avg_pool2d
    qY = op(qX, (3, 3))
    np.testing.assert_equal(qY.size(), (0, 2, 3, 3),
                            "Quantized adaptive_avg_pool2d with batch size 0 failed.")

    # max_pool
    dilation = (1, 1)
    qY = torch.ops.quantized.max_pool2d(qX, kernel, stride, padding, dilation, ceil_mode=False)
    oH = pool_output_shape(4, 2, 0, 1, 1)
    oW = pool_output_shape(4, 2, 0, 1, 1)
    np.testing.assert_equal(qY.size(), (0, 2, oH, oW),
                            "Quantized maxpool2d with batch size 0 failed.")

    # hardtanh
    qY = torch.ao.nn.quantized.functional.hardtanh(qX, -1, 6)
    np.testing.assert_equal(qY.size(), qX.size(),
                            "Quantized hardtanh with batch size 0 failed.")

    # mul
    qY = torch.ops.quantized.mul(qX, qX, 1.0, 0)
    np.testing.assert_equal(qY.size(), qX.size(),
                            "Quantized mul with batch size 0 failed.")
    # add
    qY = torch.ops.quantized.add(qX, qX, 1.0, 0)
    np.testing.assert_equal(qY.size(), qX.size(),
                            "Quantized addition with batch size 0 failed.")

    # conv
    w = torch.randn((2, 2, 2, 2), dtype=torch.float)
    qw = torch.quantize_per_tensor(w, scale=1.0, zero_point=0, dtype=torch.qint8)
    bias_float = torch.ones(2, dtype=torch.float)
    strides = [1, 1]
    pads = [0, 0]
    dilations = [1, 1]

    w_packed = torch.ops.quantized.conv2d_prepack(qw, bias_float, strides, pads, dilations, 1)
    result = torch.ops.quantized.conv2d(qX, w_packed, 1.0, 0)
    self.assertEqual(result.shape, (0, 2, 3, 3))

    # linear
    X = torch.ones((0, 2), dtype=torch.float32)
    qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
                                   dtype=torch.quint8)
    w = torch.randn((2, 2), dtype=torch.float)
    qw = torch.quantize_per_tensor(w, scale=1.0, zero_point=0, dtype=torch.qint8)
    w_packed = torch.ops.quantized.linear_prepack(qw, bias_float)
    result = torch.ops.quantized.linear(qX, w_packed, 1.0, 0)
    self.assertEqual(result.shape, (0, 2))

    # dynamic linear
    result = torch.ops.quantized.linear_dynamic(X, w_packed)
    self.assertEqual(result.shape, (0, 2))
@override_qengines
def test_linear_bias_unpack(self):
    """
    Verifies the correctness of bias() and unpack() API for LinearPackedParamBase.
    """
    fp_bias = torch.ones(2, dtype=torch.float)
    q_weight = torch.quantize_per_tensor(
        torch.randn((2, 2), dtype=torch.float),
        scale=1.0, zero_point=0, dtype=torch.qint8)
    packed = torch.ops.quantized.linear_prepack(q_weight, fp_bias)
    # bias() must round-trip the float bias that was packed.
    self.assertEqual(packed.bias(), fp_bias)
    # unpack() must round-trip the quantized weight that was packed.
    self.assertEqual(packed.unpack()[0], q_weight)
def test_advanced_indexing(self):
"""
Verifies that the x[:, [0], :, :] syntax works for quantized tensors.
"""
for dtype in (torch.qint8, torch.quint8, torch.qint32):
scale = 0.1
zp = 0
x_q = torch.quantize_per_tensor(
torch.randn(1, 4, 4, 4), scale, zp, dtype)
# reference
x_fp32 = x_q.dequantize()
# single dim, single index
x_q_s1 = x_q[:, [0], :, :]
x_fp32_s1 = x_fp32[:, [0], :, :]
x_fp32_s1_ref = \
torch.quantize_per_tensor(x_fp32_s1, scale, zp, dtype)
self.assertEqual(x_q_s1, x_fp32_s1_ref)
# multiple dim, single index
x_q_s2 = x_q[:, [0], [2], :]
x_fp32_s2 = x_fp32[:, [0], [2], :]
x_fp32_s2_ref = \
torch.quantize_per_tensor(x_fp32_s2, scale, zp, dtype)
self.assertEqual(x_q_s2, x_fp32_s2_ref)
# single dim, multiple indices
x_q_s3 = x_q[:, [2, 0, 1], :, :]
x_fp32_s3 = x_fp32[:, [2, 0, 1], :, :]
x_fp32_s3_ref = \
torch.quantize_per_tensor(x_fp32_s3, scale, zp, dtype)
self.assertEqual(x_q_s3, x_fp32_s3_ref)
# multiple dim, multiple indices
x_q_s4 = x_q[:, [2, 0, 1], :, [1]]
x_fp32_s4 = x_fp32[:, [2, 0, 1], :, [1]]
x_fp32_s4_ref = \
torch.quantize_per_tensor(x_fp32_s4, scale, zp, dtype)
self.assertEqual(x_q_s4, x_fp32_s4_ref)
@override_qengines
def test_custom_module_lstm(self):
    """End-to-end eager-mode quantization of nn.LSTM via the custom-module
    path (quantizable -> quantized LSTM), with and without split gates.

    For every bias/batch_first/bidirectional/split_gates combination:
    prepare, calibrate, convert, then compare the quantized output to the
    float output via an SNR/MSE threshold, and finally check the result
    can be traced and scripted with TorchScript.
    """
    class QuantizableLSTMSplitGates(torch.ao.nn.quantizable.LSTM):
        # Thin wrapper that forces split_gates=True during from_float,
        # used via the custom-module config below.
        @classmethod
        def from_float(cls, other, qconfig=None):
            return super().from_float(other, qconfig, split_gates=True)

    qengine = torch.backends.quantized.engine

    batch_size = 4
    seq_len = 8
    input_size = 12

    hidden_size = 8
    num_layers = 2

    dropout = 0  # This is not supported

    Bias = [False, True]
    Batch_first = [False, True]
    Bidirectional = [False, True]
    Split_gates = [False, True]

    dtype = np.uint8
    qtype = torch.quint8

    x = np.random.randn(seq_len, batch_size, input_size)
    scale, zero_point = _calculate_dynamic_qparams(x, dtype=dtype)
    x = torch.from_numpy(x).to(torch.float)
    qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point,
                                   dtype=qtype)
    # Calibrate against the dequantized input so float and quantized
    # paths see the same values.
    x = qx.dequantize()

    with torch.no_grad():
        for bias, batch_first, bidirectional, split_gates in itertools.product(
                Bias, Batch_first, Bidirectional, Split_gates):
            # Assume 12dB is sufficient for functional equivalence
            # Without the bias, linear performs poorly
            min_power = 10 if bias else 5
            max_mse = 5e-6 if bias else 5e-1

            if batch_first:
                x = x.reshape(batch_size, seq_len, input_size)
                qx = qx.reshape(batch_size, seq_len, input_size)
            else:
                x = x.reshape(seq_len, batch_size, input_size)
                qx = qx.reshape(seq_len, batch_size, input_size)

            lstm = torch.nn.Sequential(
                torch.nn.LSTM(input_size, hidden_size,
                              num_layers=num_layers,
                              bias=bias, batch_first=batch_first,
                              dropout=dropout,
                              bidirectional=bidirectional))
            lstm.eval()
            y_ref = lstm(x)

            # Prepare
            lstm.qconfig = torch.ao.quantization.get_default_qconfig(qengine)
            custom_config_dict = (
                None
                if not split_gates
                else {  # switch to class with split_gates True via from_float
                    "float_to_observed_custom_module_class": {
                        torch.nn.LSTM: QuantizableLSTMSplitGates
                    },
                    "observed_to_quantized_custom_module_class": {
                        QuantizableLSTMSplitGates: torch.ao.nn.quantized.LSTM,
                    },
                }
            )
            lstm_prepared = torch.ao.quantization.prepare(
                lstm, prepare_custom_config_dict=custom_config_dict
            )
            self.assertTrue(hasattr(lstm_prepared[0], 'layers'))
            self.assertEqual(num_layers, len(lstm_prepared[0].layers))
            self.assertEqual(
                lstm_prepared[0].layers[0].layer_fw.cell.split_gates, split_gates
            )
            assert isinstance(lstm_prepared[0], torch.ao.nn.quantizable.LSTM)

            # Calibrate
            y = lstm_prepared(x)
            self.assertEqual(y_ref, y)

            # Quantize
            lstm_quantized = torch.ao.quantization.convert(
                lstm_prepared, convert_custom_config_dict=custom_config_dict
            )
            assert type(lstm_quantized[0]) is torch.ao.nn.quantized.LSTM
            qy = lstm_quantized(qx)

            snr = _snr(y, qy)
            snr = [snr[0]] + snr[1]

            for signal, mse, power in snr:
                self.assertTrue(
                    power > min_power or mse < max_mse,
                    msg=(f"Error is too high: SNR(dB): {power}, "
                         f"Signal: {signal}, MSE: {mse}"))

            # Trace
            jit_qmodule = torch.jit.trace(lstm_quantized, qx)

            # Script
            jit_qmodule = torch.jit.script(lstm_quantized)
    @override_qengines
    def test_custom_module_multi_head_attention(self):
        """Quantize ``torch.nn.MultiheadAttention`` via the eager-mode custom-module
        path and check the quantized output stays close to the float reference.

        Exercises every combination of bias / add_bias_kv / add_zero_attn, both with
        separate key/value embedding dims and with kdim=vdim=None, under the qengine
        installed by ``@override_qengines``.  Closeness is judged per-output by the
        ``_snr`` helper: either SNR above ``min_power`` dB or MSE below ``max_mse``.
        Finally verifies the converted module is TorchScript-scriptable.
        """
        # Thin wrapper so prepare/convert see MultiheadAttention as a submodule.
        class MultiheadAttentionModel(torch.nn.Module):
            def __init__(self, *args, **kwargs):
                super().__init__()
                self.layer = torch.nn.MultiheadAttention(*args, **kwargs)

            def forward(
                self,
                query,
                key,
                value,
                key_padding_mask: Optional[torch.Tensor] = None,
                need_weights: bool = True,
                attn_mask: Optional[torch.Tensor] = None,
            ):
                return self.layer(query, key, value, key_padding_mask, need_weights, attn_mask)

        qengine = torch.backends.quantized.engine

        # Accuracy thresholds: pass if SNR(dB) > min_power OR MSE < max_mse.
        min_power = 30
        max_mse = 2

        num_heads = 16
        batch_size = 4
        target_seq_length = 128
        source_seq_length = 64
        qembed_dim = 512  # Must be divisible by the number of heads
        kembed_dim = 128
        vembed_dim = 256

        dropout = 0.0  # This is not supported

        Bias = [False, True]
        Add_bias_kv = [False, True]
        Add_zero_attn = [False, True]

        dtype = np.uint8
        qtype = torch.quint8

        # Two configurations: distinct k/v embedding dims, and the default
        # (kdim=vdim=None means they fall back to qembed_dim).
        for kdim, vdim in ((kembed_dim, vembed_dim), (None, None)):
            fp_data = [
                torch.randn(target_seq_length, batch_size, qembed_dim),  # Q
                torch.randn(source_seq_length, batch_size,
                            qembed_dim if kdim is None else kembed_dim),  # K
                torch.randn(source_seq_length, batch_size,
                            qembed_dim if vdim is None else vembed_dim)  # V
            ]

            # Quantize each input with its own dynamically-computed qparams;
            # these engines need reduce_range=True to avoid overflow on VNNI-less CPUs.
            q_data = []
            reduce_range = (qengine in ('x86', 'fbgemm', 'onednn'))
            for idx, x in enumerate(fp_data):
                scale, zero_point = _calculate_dynamic_qparams(
                    x, dtype=dtype, reduce_range=reduce_range)
                x = x.to(torch.float)
                qx = torch.quantize_per_tensor(x, scale=scale,
                                               zero_point=zero_point, dtype=qtype)
                q_data.append(qx)

                # Dequantize the data back for reference, so the float baseline
                # sees exactly the same (quantization-rounded) values.
                fp_data[idx] = qx.dequantize()

            with torch.no_grad():
                for bias, add_bias_kv, add_zero_attn in itertools.product(
                        Bias, Add_bias_kv, Add_zero_attn):
                    mha = MultiheadAttentionModel(qembed_dim, num_heads, dropout,
                                                  bias, add_bias_kv, add_zero_attn,
                                                  kdim=kdim, vdim=vdim)
                    mha.eval()

                    # Prepare (insert observers; MultiheadAttention is swapped for
                    # its observed custom-module counterpart).
                    if qengine_is_onednn():
                        # `reduce_range` is False by default for ONEDNN backend
                        # but the test fails on earlier CPUs without VNNI.
                        # So we use a default qconfig with `reduce_range=True` here
                        mha.qconfig = torch.ao.quantization.get_default_qconfig()
                    else:
                        mha.qconfig = torch.ao.quantization.get_default_qconfig(qengine)
                    mha_prepared = torch.ao.quantization.prepare(
                        mha)

                    # Calibrate (a single pass records activation ranges).
                    y = mha_prepared(*fp_data)
                    y_ref = mha(*fp_data)
                    # Check the result of the prepare: observation must not
                    # change the numerics.
                    self.assertEqual(y_ref[0], y[0])  # Attention
                    self.assertEqual(y_ref[1], y[1])  # Weight

                    # Quantize
                    mha_quantized = torch.ao.quantization.convert(mha_prepared)

                    # The packed projection weight must have been decomposed —
                    # no float `in_proj_weight` should survive conversion.
                    for name, _param in mha_quantized.named_parameters():
                        self.assertTrue("in_proj_weight" not in name)
                    qy = mha_quantized(*q_data)

                    # Reference result: run the float wrapper over the
                    # dequantized converted layer so weights match exactly.
                    mha.layer = mha_quantized.layer.dequantize()
                    y_ref = mha(*fp_data)

                    snr = _snr(y, qy)
                    for signal, mse, power in snr:
                        self.assertTrue(
                            power > min_power or mse < max_mse,
                            msg=(f"Error is too high: SNR(dB): {power}, "
                                 f"Signal: {signal}, MSE: {mse}; "
                                 f"Run with bias={bias}, "
                                 f"add_bias_kv={add_bias_kv}, "
                                 f"add_zero_attn={add_zero_attn}"))

                    # Verify the result is scriptable
                    mha_quantized_scripted = torch.jit.script(mha_quantized)
@skipIfNoONEDNN
def test_int8_mul_onednn(self):
output_dtype_list = [torch.uint8, torch.float, torch.bfloat16, torch.half]
shape_list = [(16, 64), (15, 63)]
cases = itertools.product(shape_list, output_dtype_list)
for shape, output_dtype in cases:
a = torch.randn(shape)
b = torch.randn(shape)
s_a, z_a = 0.1, 1
s_b, z_b = 0.2, 2
if output_dtype == torch.uint8:
s_c, z_c = 0.3, 3
else:
s_c, z_c = 1, 0
qa = torch.quantize_per_tensor(a, s_a, z_a, torch.quint8)
qb = torch.quantize_per_tensor(b, s_b, z_b, torch.quint8)
dqa = qa.dequantize()
dqb = qb.dequantize()
c_ref = dqa * dqb
if output_dtype == torch.uint8:
c_ref = torch.ops.quantized_decomposed.quantize_per_tensor.default(c_ref, s_c, z_c, 0, 255, torch.uint8)
c_ref = c_ref.to(output_dtype)
a_int8 = qa.int_repr()
b_int8 = qb.int_repr()
c = torch.ops.onednn.qmul.tensor(a_int8, s_a, z_a, b_int8, s_b, z_b, s_c, z_c, output_dtype)
self.assertEqual(c, c_ref)
@skipIfNoONEDNN
@given(relu_fused=st.booleans())
def test_int8_add_onednn(self, relu_fused):
output_dtype_list = [torch.uint8, torch.float, torch.bfloat16, torch.half]
shape_list = [(16, 64), (15, 63)]
cases = itertools.product(shape_list, output_dtype_list)
for shape, output_dtype in cases:
a = torch.randn(shape)
b = torch.randn(shape)
s_a, z_a = 0.1, 1
s_b, z_b = 0.2, 2
if output_dtype == torch.uint8:
s_c, z_c = 0.3, 3
else:
s_c, z_c = 1, 0
qa = torch.quantize_per_tensor(a, s_a, z_a, torch.quint8)
qb = torch.quantize_per_tensor(b, s_b, z_b, torch.quint8)
dqa = qa.dequantize()
dqb = qb.dequantize()
c_ref = dqa + dqb
if relu_fused:
c_ref = torch.nn.functional.relu(c_ref)
if output_dtype == torch.uint8:
c_ref = torch.ops.quantized_decomposed.quantize_per_tensor.default(c_ref, s_c, z_c, 0, 255, torch.uint8)
c_ref = c_ref.to(output_dtype)
a_int8 = qa.int_repr()
b_int8 = qb.int_repr()
if relu_fused:
c = torch.ops.onednn.qadd_relu.tensor(a_int8, s_a, z_a, b_int8, s_b, z_b, s_c, z_c, output_dtype)
else:
c = torch.ops.onednn.qadd.tensor(a_int8, s_a, z_a, b_int8, s_b, z_b, s_c, z_c, output_dtype)
self.assertEqual(c, c_ref)
@skipIfNoONEDNN
def test_int8_batch_norm_onednn(self):
# hypothesis too slow for this test, create test cases manually
channel_len_list = (8, 64, 100, 120, 128)
output_dtype_list = [torch.uint8, torch.float, torch.bfloat16, torch.half]
x_scale, x_zero_point = 0.1, 1
cases = itertools.product(channel_len_list, output_dtype_list)
for channels, out_dtype in cases:
shapes = [8, channels, 8, 8]
y_scale, y_zero_point = (0.2, 2) if out_dtype == torch.uint8 else (1, 0)
x = torch.randn(shapes, dtype=torch.float32)
mean = torch.rand(channels).float()
var = torch.rand(channels).float()
weight = torch.rand(channels).float()
bias = torch.rand(channels).float()
eps = 0.001
qx = torch.ops.quantized_decomposed.quantize_per_tensor.default(
x, x_scale, x_zero_point, 0, 255, torch.uint8
)
y = torch.ops.onednn.qbatch_norm2d(
qx, x_scale, x_zero_point, weight, bias, mean, var, eps, y_scale, y_zero_point, out_dtype
)
dqx = torch.ops.quantized_decomposed.dequantize_per_tensor.default(
qx, x_scale, x_zero_point, 0, 255, torch.uint8
)
y_ref = F.batch_norm(dqx, weight=weight, bias=bias,
running_mean=mean, running_var=var, training=False,
momentum=0, eps=eps)
if out_dtype == torch.uint8:
y_ref = torch.ops.quantized_decomposed.quantize_per_tensor.default(
y_ref, y_scale, y_zero_point, 0, 255, torch.uint8
)
y_ref = y_ref.to(out_dtype)
self.assertEqual(y, y_ref, msg=f"{y} vs {y_ref}")
| TestQuantizedOps |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.