language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/kubernetes_engine.py | {
"start": 15883,
"end": 18041
} | class ____(GoogleBaseHook, KubernetesHook):
"""
GKE authenticated hook for standard Kubernetes API.
This hook provides full set of the standard Kubernetes API provided by the KubernetesHook,
and at the same time it provides a GKE authentication, so it makes it possible to KubernetesHook
functionality against GKE clusters.
"""
def __init__(
self,
cluster_url: str,
ssl_ca_cert: str,
enable_tcp_keepalive: bool = False,
use_dns_endpoint: bool = False,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self._cluster_url = cluster_url
self._ssl_ca_cert = ssl_ca_cert
self.enable_tcp_keepalive = enable_tcp_keepalive
self.use_dns_endpoint = use_dns_endpoint
def get_conn(self) -> client.ApiClient:
return GKEClusterConnection(
cluster_url=self._cluster_url,
ssl_ca_cert=self._ssl_ca_cert,
credentials=self.get_credentials(),
enable_tcp_keepalive=self.enable_tcp_keepalive,
use_dns_endpoint=self.use_dns_endpoint,
).get_conn()
def apply_from_yaml_file(
self,
api_client: Any = None,
yaml_file: str | None = None,
yaml_objects: list[dict] | None = None,
verbose: bool = False,
namespace: str = "default",
):
"""
Perform an action from a yaml file.
:param api_client: A Kubernetes client application.
:param yaml_file: Contains the path to yaml file.
:param yaml_objects: List of YAML objects; used instead of reading the yaml_file.
:param verbose: If True, print confirmation from create action. Default is False.
:param namespace: Contains the namespace to create all resources inside. The namespace must
preexist otherwise the resource creation will fail.
"""
super().apply_from_yaml_file(
api_client=api_client or self.get_conn(),
yaml_file=yaml_file,
yaml_objects=yaml_objects,
verbose=verbose,
namespace=namespace,
)
| GKEKubernetesHook |
python | apache__airflow | airflow-core/src/airflow/timetables/trigger.py | {
"start": 2351,
"end": 4776
} | class ____(Timetable):
_interval: datetime.timedelta | relativedelta
def infer_manual_data_interval(self, *, run_after: DateTime) -> DataInterval:
return DataInterval(
coerce_datetime(run_after - self._interval),
run_after,
)
def _calc_first_run(self) -> DateTime:
"""
If no start_time is set, determine the start.
If True, always prefer past run, if False, never. If None, if within 10% of next run,
if timedelta, if within that timedelta from past run.
"""
raise NotImplementedError()
def _align_to_next(self, current: DateTime) -> DateTime:
raise NotImplementedError()
def _align_to_prev(self, current: DateTime) -> DateTime:
raise NotImplementedError()
def _get_next(self, current: DateTime) -> DateTime:
raise NotImplementedError()
def _get_prev(self, current: DateTime) -> DateTime:
raise NotImplementedError()
def next_dagrun_info(
self,
*,
last_automated_data_interval: DataInterval | None,
restriction: TimeRestriction,
) -> DagRunInfo | None:
if restriction.catchup:
if last_automated_data_interval is not None:
next_start_time = self._get_next(last_automated_data_interval.end)
elif restriction.earliest is None:
next_start_time = self._calc_first_run()
else:
next_start_time = self._align_to_next(restriction.earliest)
else:
start_time_candidates = [self._align_to_prev(coerce_datetime(utcnow()))]
if last_automated_data_interval is not None:
start_time_candidates.append(self._get_next(last_automated_data_interval.end))
elif restriction.earliest is None:
# Run immediately has no effect if there is restriction on earliest
start_time_candidates.append(self._calc_first_run())
if restriction.earliest is not None:
start_time_candidates.append(self._align_to_next(restriction.earliest))
next_start_time = max(start_time_candidates)
if restriction.latest is not None and restriction.latest < next_start_time:
return None
return DagRunInfo.interval(
coerce_datetime(next_start_time - self._interval),
next_start_time,
)
| _TriggerTimetable |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_polymorphic_rel.py | {
"start": 64394,
"end": 69337
} | class ____(_PolymorphicTestBase, _Polymorphic):
def test_joined_aliasing_unrelated_subuqery(self):
"""test #8456"""
inner = select(Engineer).where(Engineer.name == "vlad").subquery()
crit = select(inner.c.person_id)
outer = select(Engineer).where(Engineer.person_id.in_(crit))
# this query will not work at all for any "polymorphic" case
# as it will adapt the inner query as well. for those cases,
# aliased() has to be used for the inner entity to disambiguate it.
self.assert_compile(
outer,
"SELECT engineers.person_id, people.person_id AS person_id_1, "
"people.company_id, people.name, people.type, engineers.status, "
"engineers.engineer_name, engineers.primary_language "
"FROM people JOIN engineers "
"ON people.person_id = engineers.person_id "
"WHERE engineers.person_id IN "
"(SELECT anon_1.person_id FROM "
"(SELECT engineers.person_id AS person_id, "
"people.person_id AS person_id_1, "
"people.company_id AS company_id, people.name AS name, "
"people.type AS type, engineers.status AS status, "
"engineers.engineer_name AS engineer_name, "
"engineers.primary_language AS primary_language FROM people "
"JOIN engineers ON people.person_id = engineers.person_id "
"WHERE people.name = :name_1) "
"AS anon_1)",
)
sess = fixture_session()
eq_(sess.scalars(outer).all(), [Engineer(name="vlad")])
def test_primary_eager_aliasing_three_dont_reset_selectable(self):
"""test now related to #7262
See test_primary_eager_aliasing_three_reset_selectable for
the reset selectable version.
"""
# assert the JOINs don't over JOIN
sess = fixture_session()
# selectable default is False
wp = with_polymorphic(Person, "*")
def go():
eq_(
sess.query(wp)
.order_by(wp.person_id)
.options(joinedload(wp.Engineer.machines))[1:3],
all_employees[1:3],
)
self.assert_sql_count(testing.db, go, 3)
eq_(
sess.scalar(
select(func.count("*")).select_from(
sess.query(wp)
.options(joinedload(wp.Engineer.machines))
.order_by(wp.person_id)
.limit(2)
.offset(1)
.subquery()
)
),
2,
)
def test_with_polymorphic_two_future_default_wp(self):
"""test #7262
compare to
test_with_polymorphic_two_future_adhoc_wp
"""
sess = fixture_session()
def go():
wp = with_polymorphic(Person, "*")
eq_(
sess.query(wp).order_by(wp.person_id).all(),
self._emps_wo_relationships_fixture(),
)
self.assert_sql_count(testing.db, go, 1)
def test_join_to_subclass_four(self):
sess = fixture_session()
eq_(
sess.query(Person)
.select_from(people.join(engineers))
.join(Engineer.machines)
.all(),
[e1, e2, e3],
)
def test_join_to_subclass_five(self):
sess = fixture_session()
eq_(
sess.query(Person)
.select_from(people.join(engineers))
.join(Engineer.machines)
.filter(Machine.name.ilike("%ibm%"))
.all(),
[e1, e3],
)
def test_correlation_w_polymorphic(self):
sess = fixture_session()
p_poly = with_polymorphic(Person, "*")
eq_(
sess.query(p_poly.name)
.filter(
sess.query(Company.name)
.filter(Company.company_id == p_poly.company_id)
.correlate(p_poly)
.scalar_subquery()
== "Elbonia, Inc."
)
.all(),
[(e3.name,)],
)
def test_correlation_w_polymorphic_flat(self):
sess = fixture_session()
p_poly = with_polymorphic(Person, "*", flat=True)
eq_(
sess.query(p_poly.name)
.filter(
sess.query(Company.name)
.filter(Company.company_id == p_poly.company_id)
.correlate(p_poly)
.scalar_subquery()
== "Elbonia, Inc."
)
.all(),
[(e3.name,)],
)
def test_join_to_subclass_ten(self):
pass
def test_mixed_entities_one(self):
pass
def test_mixed_entities_two(self):
pass
def test_mixed_entities_eight(self):
pass
def test_polymorphic_any_eight(self):
pass
| PolymorphicTest |
python | django__django | tests/utils_tests/test_csp.py | {
"start": 967,
"end": 4902
} | class ____(SimpleTestCase):
def assertPolicyEqual(self, a, b):
parts_a = sorted(a.split("; ")) if a is not None else None
parts_b = sorted(b.split("; ")) if b is not None else None
self.assertEqual(parts_a, parts_b, f"Policies not equal: {a!r} != {b!r}")
def test_config_empty(self):
self.assertPolicyEqual(build_policy({}), "")
def test_config_basic(self):
self.assertPolicyEqual(build_policy(basic_config), basic_policy)
def test_config_multiple_directives(self):
policy = {
"default-src": [CSP.SELF],
"script-src": [CSP.NONE],
}
self.assertPolicyEqual(
build_policy(policy), "default-src 'self'; script-src 'none'"
)
def test_config_value_as_string(self):
"""
Test that a single value can be passed as a string.
"""
policy = {"default-src": CSP.SELF}
self.assertPolicyEqual(build_policy(policy), "default-src 'self'")
def test_config_value_as_tuple(self):
"""
Test that a tuple can be passed as a value.
"""
policy = {"default-src": (CSP.SELF, "foo.com")}
self.assertPolicyEqual(build_policy(policy), "default-src 'self' foo.com")
def test_config_value_as_set(self):
"""
Test that a set can be passed as a value.
Sets are often used in Django settings to ensure uniqueness, however,
sets are unordered. The middleware ensures consistency via sorting if a
set is passed.
"""
policy = {"default-src": {CSP.SELF, "foo.com", "bar.com"}}
self.assertPolicyEqual(
build_policy(policy), "default-src 'self' bar.com foo.com"
)
def test_config_value_none(self):
"""
Test that `None` removes the directive from the policy.
Useful in cases where the CSP config is scripted in some way or
explicitly not wanting to set a directive.
"""
policy = {"default-src": [CSP.SELF], "script-src": None}
self.assertPolicyEqual(build_policy(policy), basic_policy)
def test_config_value_boolean_true(self):
policy = {"default-src": [CSP.SELF], "block-all-mixed-content": True}
self.assertPolicyEqual(
build_policy(policy), "default-src 'self'; block-all-mixed-content"
)
def test_config_value_boolean_false(self):
policy = {"default-src": [CSP.SELF], "block-all-mixed-content": False}
self.assertPolicyEqual(build_policy(policy), basic_policy)
def test_config_value_multiple_boolean(self):
policy = {
"default-src": [CSP.SELF],
"block-all-mixed-content": True,
"upgrade-insecure-requests": True,
}
self.assertPolicyEqual(
build_policy(policy),
"default-src 'self'; block-all-mixed-content; upgrade-insecure-requests",
)
def test_config_with_nonce_arg(self):
"""
Test when the `CSP.NONCE` is not in the defined policy, the nonce
argument has no effect.
"""
self.assertPolicyEqual(build_policy(basic_config, nonce="abc123"), basic_policy)
def test_config_with_nonce(self):
policy = {"default-src": [CSP.SELF, CSP.NONCE]}
self.assertPolicyEqual(
build_policy(policy, nonce="abc123"),
"default-src 'self' 'nonce-abc123'",
)
def test_config_with_multiple_nonces(self):
policy = {
"default-src": [CSP.SELF, CSP.NONCE],
"script-src": [CSP.SELF, CSP.NONCE],
}
self.assertPolicyEqual(
build_policy(policy, nonce="abc123"),
"default-src 'self' 'nonce-abc123'; script-src 'self' 'nonce-abc123'",
)
def test_config_with_empty_directive(self):
policy = {"default-src": []}
self.assertPolicyEqual(build_policy(policy), "")
| CSPBuildPolicyTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1587386,
"end": 1587558
} | class ____(sgqlc.types.Union):
"""Types that can own an IP allow list."""
__schema__ = github_schema
__types__ = (App, Enterprise, Organization)
| IpAllowListOwner |
python | tiangolo__fastapi | docs_src/schema_extra_example/tutorial002.py | {
"start": 111,
"end": 517
} | class ____(BaseModel):
name: str = Field(examples=["Foo"])
description: Union[str, None] = Field(default=None, examples=["A very nice Item"])
price: float = Field(examples=[35.4])
tax: Union[float, None] = Field(default=None, examples=[3.2])
@app.put("/items/{item_id}")
async def update_item(item_id: int, item: Item):
results = {"item_id": item_id, "item": item}
return results
| Item |
python | numba__numba | numba/core/datamodel/models.py | {
"start": 5067,
"end": 6312
} | class ____(DataModel):
_bit_type = ir.IntType(1)
_byte_type = ir.IntType(8)
def get_value_type(self):
return self._bit_type
def get_data_type(self):
return self._byte_type
def get_return_type(self):
return self.get_data_type()
def get_argument_type(self):
return self.get_data_type()
def as_data(self, builder, value):
return builder.zext(value, self.get_data_type())
def as_argument(self, builder, value):
return self.as_data(builder, value)
def as_return(self, builder, value):
return self.as_data(builder, value)
def from_data(self, builder, value):
ty = self.get_value_type()
resalloca = cgutils.alloca_once(builder, ty)
cond = builder.icmp_unsigned('==', value, value.type(0))
with builder.if_else(cond) as (then, otherwise):
with then:
builder.store(ty(0), resalloca)
with otherwise:
builder.store(ty(1), resalloca)
return builder.load(resalloca)
def from_argument(self, builder, value):
return self.from_data(builder, value)
def from_return(self, builder, value):
return self.from_data(builder, value)
| BooleanModel |
python | pytorch__pytorch | test/test_linalg.py | {
"start": 4324,
"end": 472841
} | class ____(TestCase):
def setUp(self):
super().setUp()
torch.backends.cuda.matmul.allow_tf32 = False
def tearDown(self):
torch.backends.cuda.matmul.allow_tf32 = True
super().tearDown()
@contextlib.contextmanager
def _tunableop_ctx(self):
# Initialize and then tear down TunableOp
import glob
import os
self._set_tunableop_defaults()
torch.cuda.tunable.enable(True)
try:
yield
finally:
# disables TunableOp
torch.cuda.tunable.enable(False)
# clean up, remove any files that were generated
results_filename = torch.cuda.tunable.get_filename()
results_filename_pattern, _, _ = results_filename.rpartition('.')
untuned_filename = get_tunableop_untuned_filename()
untuned_filename_pattern, _, _ = untuned_filename.rpartition('.')
patterns = [f"{results_filename_pattern[:-1]}*.csv", f"{untuned_filename_pattern[:-1]}*.csv"]
files = [f for pattern in patterns for f in glob.glob(pattern)]
for file in files:
try:
os.remove(file)
# NB: The file is locked on Windows
except (FileNotFoundError, PermissionError):
pass
# undo all the environment variables set
# loop through a list of potentially used
# environment variables.
env_list = ["PYTORCH_TUNABLEOP_BLAS_LOG",
"PYTORCH_TUNABLEOP_UNTUNED_FILENAME"]
for env in env_list:
try:
del os.environ[env]
except KeyError:
pass
def _set_tunableop_defaults(self):
if not torch.cuda.is_available():
# TunableOp not supported on CPU at this time.
return
# disable TunableOp and restore to default values
torch.cuda.tunable.enable(False)
torch.cuda.tunable.record_untuned_enable(False)
torch.cuda.tunable.tuning_enable(True)
torch.cuda.tunable.set_max_tuning_duration(30)
torch.cuda.tunable.set_max_tuning_iterations(100)
torch.cuda.tunable.set_rotating_buffer_size(-1)
torch.cuda.tunable.set_numerical_check_tolerances(False)
ordinal = torch.cuda.current_device()
# Set filenames to be unique on a per test basis
import os
unique_id = self.id().split(".")[-1]
torch.cuda.tunable.set_filename(f"tunableop_results_{unique_id}_{ordinal}.csv")
# ordinal gets automatically appended
os.environ["PYTORCH_TUNABLEOP_UNTUNED_FILENAME"] = f"tunableop_untuned_{unique_id}_.csv"
def _compare_untuned_tuned_entries(self, untuned_filename=None, tuned_filename=None):
# Compare the entries of untuned and tuned Tunableop results
# file. Verify that for each Op+Param Signature in the untuned file
# there is a matching one in the tuned results file.
import csv
ok = False
ordinal = torch.cuda.current_device()
if untuned_filename is None:
untuned_filename = get_tunableop_untuned_filename()
if tuned_filename is None:
tuned_filename = torch.cuda.tunable.get_filename()
with open(untuned_filename) as file1:
with open(tuned_filename) as file2:
untuned_reader = csv.reader(file1)
untuned_csv_entries = {(row[0], row[1]) for row in untuned_reader}
tuned_reader = csv.reader(file2)
for _ in range(5): # Skip the first 5 lines for the validator
next(tuned_reader, None)
result_csv_entries = {(row[0], row[1]) for row in tuned_reader}
missing = untuned_csv_entries - result_csv_entries
if missing:
ok = False
else:
ok = True
return ok
exact_dtype = True
@dtypes(torch.float, torch.cfloat)
@precisionOverride({torch.float: 1e-06, torch.cfloat: 1e-06})
@tf32_on_and_off(5e-3)
@reduced_f32_on_and_off(5e-3)
def test_inner(self, device, dtype):
def check(a_sizes_, b_sizes_):
for a_sizes, b_sizes in ((a_sizes_, b_sizes_), (b_sizes_, a_sizes_)):
a = torch.randn(a_sizes, dtype=dtype, device=device)
b = torch.randn(b_sizes, dtype=dtype, device=device)
res = torch.inner(a, b)
ref = np.inner(a.cpu().numpy(), b.cpu().numpy())
self.assertEqual(res.cpu(), torch.from_numpy(np.array(ref)))
out = torch.zeros_like(res)
torch.inner(a, b, out=out)
self.assertEqual(res, out)
check([], []) # scalar x scalar
check([], [0]) # scalar x empty
check([], [3]) # scalar x 1D
check([], [2, 3, 4]) # scalar x 3D
check([0], [0]) # empty x empty
check([0], [2, 0]) # empty x 2D
check([2], [2]) # 1D x 1D
check([2], [3, 1, 2]) # 1D x 3D
check([2], [3, 0, 2]) # 1D x 3D empty
check([1, 2], [3, 2]) # 2D x 2D
check([1, 2], [3, 4, 2]) # 2D x 3D
check([2, 1, 3, 2], [1, 3, 2, 2]) # 4D x 4D
# Test error message
with self.assertRaisesRegex(RuntimeError,
r"inner\(\) the last dimension must match on both "
r"input tensors but got shapes \[2, 3\] and \[2, 2\]"):
torch.randn(2, 3, device=device, dtype=dtype).inner(torch.randn(2, 2, device=device, dtype=dtype))
# Tests torch.outer, and its alias, torch.ger, vs. NumPy
@precisionOverride({torch.bfloat16: 1e-1})
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
def test_outer(self, device, dtype):
def run_test_case(a, b):
if dtype == torch.bfloat16:
a_np = a.to(torch.double).cpu().numpy()
b_np = b.to(torch.double).cpu().numpy()
exact_dtype = False
else:
a_np = a.cpu().numpy()
b_np = b.cpu().numpy()
exact_dtype = True
expected = np.outer(a_np, b_np)
self.assertEqual(torch.outer(a, b), expected, exact_dtype=False)
self.assertEqual(torch.Tensor.outer(a, b), expected, exact_dtype=False)
self.assertEqual(torch.ger(a, b), expected, exact_dtype=False)
self.assertEqual(torch.Tensor.ger(a, b), expected, exact_dtype=False)
# test out variant
out = torch.empty(a.size(0), b.size(0), device=device, dtype=dtype)
torch.outer(a, b, out=out)
self.assertEqual(out, expected, exact_dtype=False)
out = torch.empty(a.size(0), b.size(0), device=device, dtype=dtype)
torch.ger(a, b, out=out)
self.assertEqual(out, expected, exact_dtype=False)
a = torch.randn(50).to(device=device, dtype=dtype)
b = torch.randn(50).to(device=device, dtype=dtype)
run_test_case(a, b)
# test 0 strided tensor
zero_strided = torch.randn(1).to(device=device, dtype=dtype).expand(50)
run_test_case(zero_strided, b)
run_test_case(a, zero_strided)
def test_matrix_rank_removed_error(self, device):
a = make_tensor(5, 5, device=device, dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
torch.matrix_rank(a)
def test_solve_removed_error(self, device):
a = make_tensor(5, 5, device=device, dtype=torch.float32)
b = make_tensor(5, 1, device=device, dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
torch.solve(b, a)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
b.solve(a)
def test_eig_removed_error(self, device):
a = make_tensor(5, 5, device=device, dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
torch.eig(a)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
a.eig()
def test_symeig_removed_error(self, device):
a = make_tensor(5, 5, device=device, dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
torch.symeig(a)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
a.symeig()
def test_lstsq_removed_error(self, device):
a = make_tensor(5, 5, device=device, dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
torch.lstsq(a, a)
with self.assertRaisesRegex(RuntimeError, "This function was deprecated since version 1.9 and is now removed"):
a.lstsq(a)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@skipIfTorchDynamo("flaky, needs investigation")
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_linalg_lstsq(self, device, dtype):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
if self.device_type == 'cpu':
drivers = ('gels', 'gelsy', 'gelsd', 'gelss', None)
else:
drivers = ('gels', None)
def check_solution_correctness(a, b, sol):
sol2 = a.pinverse() @ b
self.assertEqual(sol, sol2, atol=1e-5, rtol=1e-5)
def check_correctness_ref(a, b, res, ref, driver="default"):
def apply_if_not_empty(t, f):
if t.numel():
return f(t)
else:
return t
def select_if_not_empty(t, i):
selected = apply_if_not_empty(t, lambda x: x.select(0, i))
return selected
m = a.size(-2)
n = a.size(-1)
nrhs = b.size(-1)
batch_size = int(np.prod(a.shape[:-2]))
if batch_size == 0:
batch_size = 1
a_3d = a.view(batch_size, m, n)
b_3d = b.view(batch_size, m, nrhs)
solution_3d = res.solution.view(batch_size, n, nrhs)
residuals_2d = apply_if_not_empty(res.residuals, lambda t: t.view(-1, nrhs))
rank_1d = apply_if_not_empty(res.rank, lambda t: t.view(-1))
singular_values_2d = res.singular_values.view(batch_size, res.singular_values.shape[-1])
if a.numel() > 0:
for i in range(batch_size):
sol, residuals, rank, singular_values = ref(
a_3d.select(0, i).numpy(),
b_3d.select(0, i).numpy()
)
# Singular values are None when lapack_driver='gelsy' in SciPy
if singular_values is None:
singular_values = []
self.assertEqual(sol, solution_3d.select(0, i), atol=1e-5, rtol=1e-5)
self.assertEqual(rank, select_if_not_empty(rank_1d, i), atol=1e-5, rtol=1e-5)
self.assertEqual(singular_values, singular_values_2d.select(0, i), atol=1e-5, rtol=1e-5)
# SciPy and NumPy operate only on non-batched input and
# return an empty array with shape (0,) if rank(a) != n
# in PyTorch the batched inputs are supported and
# matrices in the batched input can have different ranks
# we compute residuals only if all matrices have rank == n
# see https://github.com/pytorch/pytorch/issues/56483
if m > n:
if torch.all(rank_1d == n):
self.assertEqual(
residuals, select_if_not_empty(residuals_2d, i), atol=1e-5, rtol=1e-5, exact_dtype=False
)
else:
self.assertTrue(residuals_2d.numel() == 0)
else:
self.assertEqual(res.solution.shape, (*a.shape[:-2], n, nrhs))
self.assertEqual(res.rank.shape, a.shape[:-2])
# residuals are not always computed (and have non-zero shape)
if m > n and driver != "gelsy":
self.assertEqual(res.residuals.shape, (*a.shape[:-2], 0))
else:
self.assertEqual(res.residuals.shape, (0, ))
# singular_values are not always computed (and have non-zero shape)
if driver == "default" or driver == "gelsd" or driver == "gelss":
self.assertEqual(res.singular_values.shape, (*a.shape[:-2], min(m, n)))
else:
self.assertEqual(res.singular_values.shape, (0, ))
def check_correctness_scipy(a, b, res, driver, cond):
# SciPy provides 3 driver options: gelsd, gelss, gelsy
if TEST_SCIPY and driver in ('gelsd', 'gelss', 'gelsy'):
import scipy.linalg
def scipy_ref(a, b):
return scipy.linalg.lstsq(a, b, lapack_driver=driver, cond=cond)
check_correctness_ref(a, b, res, scipy_ref, driver=driver)
def check_correctness_numpy(a, b, res, driver, rcond):
# NumPy uses only gelsd routine
if driver == 'gelsd':
def numpy_ref(a, b):
return np.linalg.lstsq(a, b, rcond=rcond)
check_correctness_ref(a, b, res, numpy_ref)
ms = [2 ** i for i in range(5)]
m_ge_n_sizes = [(m, m // 2) for m in ms] + [(m, m) for m in ms]
# cases m < n are only supported on CPU and for cuSOLVER path on CUDA
m_l_n_sizes = [(m // 2, m) for m in ms]
include_m_l_n_case = (has_cusolver() or device == 'cpu')
matrix_sizes = m_ge_n_sizes + (m_l_n_sizes if include_m_l_n_case else [])
batches = [(), (2,), (2, 2), (2, 2, 2)]
# we generate matrices with singular values sampled from a normal distribution,
# that is why we use `cond=1.0`, the mean to cut roughly half of all
# the singular values and compare whether torch.linalg.lstsq agrees with
# SciPy and NumPy.
# if rcond is True then set value for it based on the used algorithm
# rcond == -1 or any other negative value forces LAPACK to use machine precision tolerance
rconds = (None, True, -1)
for batch, matrix_size, driver, rcond in itertools.product(batches, matrix_sizes, drivers, rconds):
# keep the rcond value if it is None or -1, set the driver specific value if it is True
if rcond and rcond != -1:
if driver in ('gelss', 'gelsd'):
# SVD based algorithm; set to zero roughly half of all the singular values
rcond = 1.0
else:
# driver == 'gelsy'
# QR based algorithm; setting the value too high might lead to non-unique solutions and flaky tests
# so we skip this case
continue
# specifying rcond value has no effect for gels driver so no need to run the tests again
if driver == 'gels' and rcond is not None:
continue
shape = batch + matrix_size
a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)
b = torch.rand(*shape, dtype=dtype, device=device)
m = a.size(-2)
n = a.size(-1)
res = torch.linalg.lstsq(a, b, rcond=rcond, driver=driver)
sol = res.solution
# Only checks gelsd, gelss, gelsy drivers
check_correctness_scipy(a, b, res, driver, rcond)
# Only checks gelsd driver
check_correctness_numpy(a, b, res, driver, rcond)
# gels driver is not checked by comparing to NumPy or SciPy implementation
# because NumPy and SciPy do not implement this driver
if driver == 'gels' and rcond is None:
check_solution_correctness(a, b, sol)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_linalg_lstsq_batch_broadcasting(self, device, dtype):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
def check_correctness(a, b):
sol = torch.linalg.lstsq(a, b).solution
sol2 = a.pinverse() @ b
self.assertEqual(sol, sol2, rtol=1e-5, atol=1e-5)
ms = [2 ** i for i in range(5)]
batches = [(), (0,), (2,), (2, 2), (2, 2, 2)]
# the case when a single matrix is batch-broadcasted over the rhs
for m, batch in itertools.product(ms, batches):
a = random_well_conditioned_matrix(m, m, dtype=dtype, device=device).view(*([1] * len(batch)), m, m)
b = torch.rand(*(batch + (m, m)), dtype=dtype, device=device)
check_correctness(a, b)
# cases with broadcastable shapes
for m in ms:
a = random_well_conditioned_matrix(1, 3, 1, 3, m, m, dtype=dtype, device=device)
b = torch.rand(3, 1, 3, 1, m, m // 2, dtype=dtype, device=device)
check_correctness(a, b)
# rhs are vectors, not matrices in this test
b = torch.rand(3, 1, 3, 1, m, dtype=dtype, device=device)
# unsqueeze for b because `check_correctness` checks against
# a.pinverse() @ b, which requires b to be a matrix
check_correctness(a, b.unsqueeze(-1))
a = random_well_conditioned_matrix(3, 1, 3, 1, m, m, dtype=dtype, device=device)
b = torch.rand(1, 3, 1, 3, m, m // 2, dtype=dtype, device=device)
check_correctness(a, b)
# rhs are vectors, not matrices in this test
b = torch.rand(1, 3, 1, 3, m, dtype=dtype, device=device)
check_correctness(a, b.unsqueeze(-1))
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_linalg_lstsq_input_checks(self, device, dtype):
# check empty inputs
# empty batches
a = torch.rand(0, 0, 3, 3, dtype=dtype, device=device)
b = torch.rand(0, 0, 3, 2, dtype=dtype, device=device)
self.assertEqual(
torch.linalg.lstsq(a, b)[0],
torch.zeros(0, 0, 3, 2, dtype=dtype, device=device)
)
# empty a and b
a = torch.rand(2, 2, 0, 0, dtype=dtype, device=device)
b = torch.rand(2, 2, 0, 0, dtype=dtype, device=device)
self.assertEqual(
torch.linalg.lstsq(a, b)[0],
torch.zeros(2, 2, 0, 0, dtype=dtype, device=device)
)
# empty a and b
a = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)
b = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)
self.assertEqual(
torch.linalg.lstsq(a, b)[0],
torch.zeros(2, 2, 0, 0, dtype=dtype, device=device)
)
# empty a but not b
a = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)
b = torch.rand(2, 2, 3, 2, dtype=dtype, device=device)
self.assertEqual(
torch.linalg.lstsq(a, b)[0],
torch.zeros(2, 2, 0, 2, dtype=dtype, device=device)
)
# empty a and b
if torch.device(device).type == 'cpu':
# only CPU since CUDA does not support overdetermined systems
a = torch.rand(2, 2, 0, 3, dtype=dtype, device=device)
b = torch.rand(2, 2, 0, 3, dtype=dtype, device=device)
self.assertEqual(
torch.linalg.lstsq(a, b)[0],
torch.zeros(2, 2, 3, 3, dtype=dtype, device=device)
)
a = torch.rand(2, 3, dtype=dtype, device=device)
b = torch.rand(3, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, 'input must have at least 2 dimensions'):
torch.linalg.lstsq(b, b)
with self.assertRaisesRegex(RuntimeError, 'other must have at least 1 dimension'):
torch.linalg.lstsq(a, torch.tensor(1, dtype=dtype, device=device))
with self.assertRaisesRegex(RuntimeError, r'input.size\(-2\) should match other.size\(-1\)'):
torch.linalg.lstsq(a, b)
with self.assertRaisesRegex(RuntimeError, r'input.size\(-2\) should match other.size\(-2\)'):
torch.linalg.lstsq(a, b.unsqueeze(-1))
a = torch.randn(1, 1, 1, dtype=dtype, device=device)
b = torch.randn(3, 1, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, r'input.size\(-2\) should match other.size\(-2\)'):
torch.linalg.lstsq(a, b)
def complement_device(device):
if device == 'cpu' and torch.cuda.is_available():
return 'cuda'
else:
return 'cpu'
a = torch.rand(2, 2, 2, 2, dtype=dtype, device=device)
b = torch.rand(2, 2, 2, dtype=dtype, device=complement_device(device))
if a.device != b.device:
with self.assertRaisesRegex(RuntimeError, 'be on the same device'):
torch.linalg.lstsq(a, b)
b = (torch.rand(2, 2, 2, dtype=dtype, device=device) * 100).long()
with self.assertRaisesRegex(RuntimeError, 'the same dtype'):
torch.linalg.lstsq(a, b)
a = torch.rand(2, 2, 2, 2, dtype=dtype, device=device)
b = torch.rand(2, 2, 2, dtype=dtype, device=device)
if device != 'cpu':
with self.assertRaisesRegex(RuntimeError, '`driver` other than `gels` is not supported on CUDA'):
torch.linalg.lstsq(a, b, driver='fictitious_driver')
# if on cpu
else:
with self.assertRaisesRegex(RuntimeError, r'parameter `driver` should be one of \(gels, gelsy, gelsd, gelss\)'):
torch.linalg.lstsq(a, b, driver='fictitious_driver')
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def run_test(shape, batch, contiguous):
A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)
if A.numel() > 0 and not contiguous:
A = A.mT
self.assertFalse(A.is_contiguous())
expected_L = np.linalg.cholesky(A.cpu().numpy())
actual_L = torch.linalg.cholesky(A)
# For fp32 individual entries in matrices can differ between PyTorch and NumPy
# Let's compare the norms of matrices instead
if A.numel() > 0 and dtype in [torch.float32, torch.complex64]:
# axis is specified to calculate matrix norm for batched input
expected_norm = np.linalg.norm(expected_L, ord=1, axis=(-2, -1))
actual_norm = torch.linalg.norm(actual_L, ord=1, axis=(-2, -1))
# Compare the norms with standard tolerances
self.assertEqual(actual_norm, expected_norm)
# and individual values with a higher tolerance
self.assertEqual(actual_L, expected_L, atol=1e-2, rtol=1e-5)
else:
self.assertEqual(actual_L, expected_L)
shapes = (0, 3, 5)
batches = ((), (3, ), (2, 2))
larger_input_case = [(100, (5, ), True)]
for shape, batch, contiguous in list(itertools.product(shapes, batches, (True, False))) + larger_input_case:
run_test(shape, batch, contiguous)
# check the out= variant
A = random_hermitian_pd_matrix(3, 3, dtype=dtype, device=device)
out = torch.empty_like(A)
ans = torch.linalg.cholesky(A, out=out)
self.assertEqual(ans, out)
expected = torch.linalg.cholesky(A)
self.assertEqual(expected, out)
# check the upper= variant
expected = torch.linalg.cholesky(A).mH
actual = torch.linalg.cholesky(A, upper=True)
self.assertEqual(expected, actual)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_cholesky_errors_and_warnings(self, device, dtype):
        """Validate torch.linalg.cholesky error messages, the out= resize warning,
        and dtype/device checks, cross-checking NumPy's errors where NumPy raises too.
        """
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix
        # cholesky requires the input to be a square matrix or batch of square matrices
        A = torch.randn(2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
            torch.linalg.cholesky(A)
        A = torch.randn(2, 2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
            torch.linalg.cholesky(A)
        with self.assertRaisesRegex(np.linalg.LinAlgError, r'Last 2 dimensions of the array must be square'):
            np.linalg.cholesky(A.cpu().numpy())
        # cholesky requires the input to be at least 2 dimensional tensor
        A = torch.randn(2, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r'must have at least 2 dimensions'):
            torch.linalg.cholesky(A)
        with self.assertRaisesRegex(np.linalg.LinAlgError,
                                    r'1-dimensional array given\. Array must be at least two-dimensional'):
            np.linalg.cholesky(A.cpu().numpy())
        # if the input matrix is not positive definite, an error should be raised
        A = torch.eye(3, 3, dtype=dtype, device=device)
        A[-1, -1] = 0 # Now A is not positive definite
        with self.assertRaisesRegex(torch.linalg.LinAlgError, r'minor of order 3 is not positive-definite'):
            torch.linalg.cholesky(A)
        with self.assertRaisesRegex(np.linalg.LinAlgError, r'Matrix is not positive definite'):
            np.linalg.cholesky(A.cpu().numpy())
        # if at least one matrix in the batch is singular, an error should be raised
        A = torch.eye(3, 3, dtype=dtype, device=device)
        A = A.reshape((1, 3, 3))
        A = A.repeat(5, 1, 1)
        A[4, -1, -1] = 0 # Now A[4] is not positive definite
        with self.assertRaisesRegex(torch.linalg.LinAlgError, r'\(Batch element 4\): The factorization could not be completed'):
            torch.linalg.cholesky(A)
        # if out tensor with wrong shape is passed a warning is given
        A = random_hermitian_pd_matrix(3, dtype=dtype, device=device)
        out = torch.empty(2, 3, dtype=dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.cholesky(A, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out = torch.empty(*A.shape, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got int instead"):
            torch.linalg.cholesky(A, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
                torch.linalg.cholesky(A, out=out)
# NOTE: old_cholesky* tests were moved here from test_torch.py and test_autograd.py
@slowTest
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_old_cholesky_batched_many_batches(self, device, dtype):
from torch.testing._internal.common_utils import random_symmetric_pd_matrix
def cholesky_test_helper(n, batchsize, device, upper):
A = random_symmetric_pd_matrix(n, batchsize, dtype=dtype, device=device)
chol_fact = torch.cholesky(A, upper=upper)
if upper:
# Correctness check
self.assertEqual(A, chol_fact.mT.matmul(chol_fact))
# Upper triangular check
self.assertEqual(chol_fact, chol_fact.triu())
else:
# Correctness check
self.assertEqual(A, chol_fact.matmul(chol_fact.mT))
# Lower triangular check
self.assertEqual(chol_fact, chol_fact.tril())
for upper, batchsize in itertools.product([True, False], [262144, 524288]):
cholesky_test_helper(2, batchsize, device, upper)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_old_cholesky_batched(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def cholesky_test_helper(n, batch_dims, upper):
A = random_hermitian_pd_matrix(n, *batch_dims, dtype=dtype, device=device)
cholesky_exp = torch.stack([m.cholesky(upper=upper) for m in A.reshape(-1, n, n)])
cholesky_exp = cholesky_exp.reshape_as(A)
self.assertEqual(cholesky_exp, torch.cholesky(A, upper=upper))
for upper, batchsize in itertools.product([True, False], [(3,), (3, 4), (2, 3, 4)]):
cholesky_test_helper(3, batchsize, upper)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
@skipIfRocmArch(MI300_ARCH)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@tf32_on_and_off(0.01)
@reduced_f32_on_and_off(0.01)
def test_old_cholesky(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
A = random_hermitian_pd_matrix(10, dtype=dtype, device=device)
# default Case
C = torch.cholesky(A)
B = torch.mm(C, C.t().conj())
self.assertEqual(A, B, atol=1e-14, rtol=0)
# test Upper Triangular
U = torch.cholesky(A, True)
B = torch.mm(U.t().conj(), U)
self.assertEqual(A, B, atol=1e-14, rtol=0, msg='cholesky (upper) did not allow rebuilding the original matrix')
# test Lower Triangular
L = torch.cholesky(A, False)
B = torch.mm(L, L.t().conj())
self.assertEqual(A, B, atol=1e-14, rtol=0, msg='cholesky (lower) did not allow rebuilding the original matrix')
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_old_cholesky_empty(self, device, dtype):
def run_test(upper):
A = torch.empty(0, 0, dtype=dtype, device=device)
chol = torch.cholesky(A, upper)
chol_A = torch.matmul(chol, chol.t().conj())
self.assertEqual(A, chol_A)
for upper in [True, False]:
run_test(upper)
# Test for issue
# https://github.com/pytorch/pytorch/issues/57032
# torch.cholesky with upper=True for batched CUDA inputs was wrong
# it was using the lower triangular part instead of the upper one
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
def test_old_cholesky_batched_upper(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
batchsize = 2
A = random_hermitian_pd_matrix(3, batchsize, dtype=dtype, device=device)
A_triu = A.triu() # fill the lower triangular part with zero
U = torch.cholesky(A_triu, upper=True)
reconstruct_A = U.mH @ U
self.assertEqual(A, reconstruct_A)
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_cholesky_ex(self, device, dtype):
        """cholesky_ex on well-conditioned input: L matches NumPy and info is all zeros."""
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix
        def run_test(n, batch):
            A = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)
            expected_L = np.linalg.cholesky(A.cpu().numpy())
            # info == 0 signals a successful factorization for each batch element
            expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
            actual_L, actual_info = torch.linalg.cholesky_ex(A)
            # For fp32 individual entries in matrices can differ between PyTorch and NumPy
            # Let's compare the norms of matrices instead
            if A.numel() > 0 and dtype in [torch.float32, torch.complex64]:
                # axis is specified to calculate matrix norm for batched input
                expected_norm = np.linalg.norm(expected_L, ord=1, axis=(-2, -1))
                actual_norm = torch.linalg.norm(actual_L, ord=1, axis=(-2, -1))
                # Compare the norms with standard tolerances
                self.assertEqual(actual_norm, expected_norm)
                # and individual values with a higher tolerance
                self.assertEqual(actual_L, expected_L, atol=1e-2, rtol=1e-5)
            else:
                self.assertEqual(actual_L, expected_L)
            self.assertEqual(actual_info, expected_info)
        ns = (0, 3, 5)
        batches = ((), (2, ), (2, 1))
        for n, batch in itertools.product(ns, batches):
            run_test(n, batch)
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_cholesky_ex_non_pd(self, device, dtype):
        """cholesky_ex on non-positive-definite input: info reports the failing minor
        order instead of raising, unless check_errors=True is passed.
        """
        # if the input matrix is not positive definite, info with positive integer is returned
        A = torch.eye(3, 3, dtype=dtype, device=device)
        A[-1, -1] = 0 # Now A is singular
        _, info = torch.linalg.cholesky_ex(A)
        self.assertEqual(info, 3)
        with self.assertRaisesRegex(torch.linalg.LinAlgError, r'minor of order 3 is not positive-definite'):
            torch.linalg.cholesky_ex(A, check_errors=True)
        # if at least one matrix in the batch is not positive definite,
        # batched info with positive integer for the corresponding matrix is returned
        A = torch.eye(3, 3, dtype=dtype, device=device)
        A = A.reshape((1, 3, 3))
        A = A.repeat(5, 1, 1)
        A[3, -2, -2] = 0 # Now A[3] is singular
        _, info = torch.linalg.cholesky_ex(A)
        expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
        # the zeroed (−2, −2) entry makes the order-2 leading minor of A[3] fail
        expected_info[3] = 2
        self.assertEqual(info, expected_info)
        with self.assertRaisesRegex(torch.linalg.LinAlgError, r'\(Batch element 3\): The factorization could not be completed'):
            torch.linalg.cholesky_ex(A, check_errors=True)
    def _test_addr_vs_numpy(self, device, dtype, beta=1, alpha=1):
        """Compare torch.addr(m, a, b, beta=, alpha=) against beta*m + alpha*np.outer(a, b).

        Covers the out= variant, a transposed m, a zero-strided vector, a scalar m,
        and (for beta == 0 with floating/complex dtypes) that nan/inf in m do not
        propagate into the result.
        """
        def check(m, a, b, beta, alpha):
            if dtype == torch.bfloat16:
                # NumPy has no bfloat16; compute the reference in double and
                # allow a dtype mismatch in the comparison.
                a_np = a.to(torch.double).cpu().numpy()
                b_np = b.to(torch.double).cpu().numpy()
                m_np = m.to(torch.double).cpu().numpy()
                exact_dtype = False
            else:
                a_np = a.cpu().numpy()
                b_np = b.cpu().numpy()
                m_np = m.cpu().numpy()
                exact_dtype = True
            if beta == 0:
                # beta == 0 ignores m entirely (so nan/inf in m must not leak through)
                expected = alpha * np.outer(a_np, b_np)
            else:
                expected = beta * m_np + alpha * np.outer(a_np, b_np)
            res = torch.addr(m, a, b, beta=beta, alpha=alpha)
            self.assertEqual(res, expected, exact_dtype=exact_dtype)
            # Test out variant
            out = torch.empty_like(res)
            torch.addr(m, a, b, beta=beta, alpha=alpha, out=out)
            self.assertEqual(out, expected, exact_dtype=exact_dtype)
        m = make_tensor((50, 50), device=device, dtype=dtype, low=-2, high=2)
        a = make_tensor((50,), device=device, dtype=dtype, low=-2, high=2)
        b = make_tensor((50,), device=device, dtype=dtype, low=-2, high=2)
        check(m, a, b, beta, alpha)
        # test transpose
        m_transpose = torch.transpose(m, 0, 1)
        check(m_transpose, a, b, beta, alpha)
        # test 0 strided tensor
        zero_strided = make_tensor((1,), device=device, dtype=dtype, low=-2, high=2).expand(50)
        check(m, zero_strided, b, beta, alpha)
        # test scalar
        m_scalar = torch.tensor(1, device=device, dtype=dtype)
        check(m_scalar, a, b, beta, alpha)
        # test nans and infs are not propagated to the output when beta == 0
        float_and_complex_dtypes = floating_and_complex_types_and(torch.half, torch.bfloat16)
        if beta == 0 and dtype in float_and_complex_dtypes:
            m[0][10] = m[10][10] = m[20][20] = float('inf')
            m[1][10] = m[11][10] = m[21][20] = float('nan')
            check(m, a, b, 0, alpha)
@dtypes(torch.bool)
def test_addr_bool(self, device, dtype):
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=False)
self._test_addr_vs_numpy(device, dtype, beta=False, alpha=True)
self._test_addr_vs_numpy(device, dtype, beta=False, alpha=False)
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=True)
@dtypes(*integral_types())
def test_addr_integral(self, device, dtype):
with self.assertRaisesRegex(RuntimeError,
'argument beta must not be a floating point number.'):
self._test_addr_vs_numpy(device, dtype, beta=2., alpha=1)
with self.assertRaisesRegex(RuntimeError,
'argument alpha must not be a floating point number.'):
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=1.)
with self.assertRaisesRegex(RuntimeError,
'Boolean beta only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=1)
with self.assertRaisesRegex(RuntimeError,
'Boolean alpha only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=True)
# when beta is zero
self._test_addr_vs_numpy(device, dtype, beta=0, alpha=2)
# when beta is not zero
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=2)
@precisionOverride({torch.bfloat16: 1e-1})
@dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
def test_addr_float_and_complex(self, device, dtype):
with self.assertRaisesRegex(RuntimeError,
'Boolean beta only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=1)
with self.assertRaisesRegex(RuntimeError,
'Boolean alpha only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=True)
# when beta is zero
self._test_addr_vs_numpy(device, dtype, beta=0., alpha=2)
# when beta is not zero
self._test_addr_vs_numpy(device, dtype, beta=0.5, alpha=2)
if dtype in complex_types():
self._test_addr_vs_numpy(device, dtype, beta=(0 + 0.1j), alpha=(0.2 - 0.2j))
@dtypes(*itertools.product(all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool)))
def test_outer_type_promotion(self, device, dtypes):
a = torch.randn(5).to(device=device, dtype=dtypes[0])
b = torch.randn(5).to(device=device, dtype=dtypes[1])
for op in (torch.outer, torch.Tensor.outer, torch.ger, torch.Tensor.ger):
result = op(a, b)
self.assertEqual(result.dtype, torch.result_type(a, b))
# don't use @dtypes decorator to avoid generating ~1700 tests per device
def test_addr_type_promotion(self, device):
for dtypes0, dtypes1, dtypes2 in product(all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool), repeat=3):
a = make_tensor((5,), device=device, dtype=dtypes0, low=-2, high=2)
b = make_tensor((5,), device=device, dtype=dtypes1, low=-2, high=2)
m = make_tensor((5, 5), device=device, dtype=dtypes2, low=-2, high=2)
desired_dtype = torch.promote_types(torch.promote_types(dtypes0, dtypes1),
dtypes2)
for op in (torch.addr, torch.Tensor.addr):
result = op(m, a, b)
self.assertEqual(result.dtype, desired_dtype)
# Tests migrated from test_torch.py
# 1) test the shape of the result tensor when there is empty input tensor
# 2) test the Runtime Exception when there is scalar input tensor
def test_outer_ger_addr_legacy_tests(self, device):
for size in ((0, 0), (0, 5), (5, 0)):
a = torch.rand(size[0], device=device)
b = torch.rand(size[1], device=device)
self.assertEqual(torch.outer(a, b).shape, size)
self.assertEqual(torch.ger(a, b).shape, size)
m = torch.empty(size, device=device)
self.assertEqual(torch.addr(m, a, b).shape, size)
m = torch.randn(5, 6, device=device)
a = torch.randn(5, device=device)
b = torch.tensor(6, device=device)
self.assertRaises(RuntimeError, lambda: torch.outer(a, b))
self.assertRaises(RuntimeError, lambda: torch.outer(b, a))
self.assertRaises(RuntimeError, lambda: torch.ger(a, b))
self.assertRaises(RuntimeError, lambda: torch.ger(b, a))
self.assertRaises(RuntimeError, lambda: torch.addr(m, a, b))
self.assertRaises(RuntimeError, lambda: torch.addr(m, b, a))
# Tests torch.det and its alias, torch.linalg.det, vs. NumPy
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double, torch.cdouble)
def test_det(self, device, dtype):
tensors = (
torch.randn((2, 2), device=device, dtype=dtype),
torch.randn((129, 129), device=device, dtype=dtype),
torch.randn((3, 52, 52), device=device, dtype=dtype),
torch.randn((4, 2, 26, 26), device=device, dtype=dtype))
ops = (torch.det, torch.Tensor.det,
torch.linalg.det)
for t in tensors:
expected = np.linalg.det(t.cpu().numpy())
for op in ops:
actual = op(t)
self.assertEqual(actual, expected)
self.compare_with_numpy(op, np.linalg.det, t)
# NOTE: det requires a 2D+ tensor
t = torch.randn(1, device=device, dtype=dtype)
with self.assertRaises(RuntimeError):
op(t)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
    def test_eigh(self, device, dtype):
        """Compare torch.linalg.eigh with np.linalg.eigh on random Hermitian input,
        accounting for the per-eigenvector phase ambiguity, and check the out= variant.
        """
        from torch.testing._internal.common_utils import random_hermitian_matrix
        def run_test(shape, batch, uplo):
            matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
            expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)
            actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)
            self.assertEqual(actual_w, expected_w)
            # sign of eigenvectors is not unique and therefore absolute values are compared
            self.assertEqual(abs(actual_v), abs(expected_v))
            # additionally we can multiply the eigenvector with a phase factor e^{i\phi} and then compare the values
            # let's choose the convention that the first element of the eigenvectors from torch and numpy be the same
            # for real inputs, this phase factor is plus or minus one
            if matrix.numel() > 0:
                phase = torch.from_numpy(expected_v[..., 0, :]).to(device=device).div(actual_v[..., 0, :])
                actual_v_rotated = actual_v * phase.unsqueeze(-2).expand_as(actual_v)
                self.assertEqual(actual_v_rotated, expected_v)
            # check the out= variant: results must be written into the out tensors and returned
            out_w = torch.empty_like(actual_w)
            out_v = torch.empty_like(actual_v)
            ans_w, ans_v = torch.linalg.eigh(matrix, UPLO=uplo, out=(out_w, out_v))
            self.assertEqual(ans_w, out_w)
            self.assertEqual(ans_v, out_v)
            self.assertEqual(ans_w, actual_w)
            self.assertEqual(abs(ans_v), abs(actual_v))
        shapes = (0, 3, 5)
        batches = ((), (3, ), (2, 2))
        uplos = ["U", "L"]
        for shape, batch, uplo in itertools.product(shapes, batches, uplos):
            run_test(shape, batch, uplo)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
    def test_eigh_lower_uplo(self, device, dtype):
        """Lowercase UPLO values ('u'/'l') must be accepted and match NumPy."""
        def run_test(shape, batch, uplo):
            # check lower case uplo
            # use non-symmetric input to check whether uplo argument is working as intended
            # NOTE(review): randn(shape, shape, *batch) makes the trailing dims the
            # matrix dims — i.e. (3, 3) batches of 2x2 matrices rather than (2, 2)
            # batches of 3x3. torch and NumPy still receive the identical array, so
            # the comparison is valid; confirm whether this shape order was intended.
            matrix = torch.randn(shape, shape, *batch, dtype=dtype, device=device)
            expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)
            actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)
            self.assertEqual(actual_w, expected_w)
            # eigenvector sign is not unique, compare absolute values
            self.assertEqual(abs(actual_v), abs(expected_v))
        uplos = ["u", "l"]
        for uplo in uplos:
            run_test(3, (2, 2), uplo)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_eigh_errors_and_warnings(self, device, dtype):
        """Validate torch.linalg.eigh input checks, the out= resize warning, and
        dtype/device mismatch errors, cross-checking NumPy's errors where applicable.
        """
        from torch.testing._internal.common_utils import random_hermitian_matrix
        # eigh requires a square matrix
        t = torch.randn(2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.linalg.eigh(t)
        # eigh requires 'uplo' parameter to be 'U' or 'L'
        t = torch.randn(3, 3, device=device, dtype=dtype)
        for uplo in ["a", "wrong"]:
            with self.assertRaisesRegex(RuntimeError, "be 'L' or 'U'"):
                torch.linalg.eigh(t, UPLO=uplo)
            with self.assertRaisesRegex(ValueError, "be 'L' or 'U'"):
                np.linalg.eigh(t.cpu().numpy(), UPLO=uplo)
        # if non-empty out tensor with wrong shape is passed a warning is given
        a = random_hermitian_matrix(3, dtype=dtype, device=device)
        # eigenvalues of a Hermitian matrix are real, hence the real dtype for out_w
        real_dtype = a.real.dtype if dtype.is_complex else dtype
        out_w = torch.empty(7, 7, dtype=real_dtype, device=device)
        out_v = torch.empty(7, 7, dtype=dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.eigh(a, out=(out_w, out_v))
            # Check warning occurs
            self.assertEqual(len(w), 2)
            self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out_w = torch.empty(0, dtype=real_dtype, device=device)
        out_v = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got int instead"):
            torch.linalg.eigh(a, out=(out_w, out_v))
        out_w = torch.empty(0, dtype=torch.int, device=device)
        out_v = torch.empty(0, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got int instead"):
            torch.linalg.eigh(a, out=(out_w, out_v))
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out_w = torch.empty(0, device=wrong_device, dtype=dtype)
            out_v = torch.empty(0, device=device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eigh(a, out=(out_w, out_v))
            out_w = torch.empty(0, device=device, dtype=dtype)
            out_v = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eigh(a, out=(out_w, out_v))
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double)
    @unittest.skipIf((not TEST_WITH_ROCM) and _get_torch_cuda_version() < (12, 1), "Test is fixed on cuda 12.1 update 1.")
    def test_eigh_svd_illcondition_matrix_input_should_not_crash(self, device, dtype):
        """eigh/svd on a severely ill-conditioned 512x512 matrix must not crash and
        must approximately recover the two dominant eigen/singular values.
        """
        # See https://github.com/pytorch/pytorch/issues/94772, https://github.com/pytorch/pytorch/issues/105359
        # This test crashes with `cusolver error: CUSOLVER_STATUS_EXECUTION_FAILED` on cuda 11.8,
        # but passes on cuda 12.1 update 1 or later.
        a = torch.ones(512, 512, dtype=dtype, device=device)
        a[0, 0] = 1.0e-5
        a[-1, -1] = 1.0e5
        eigh_out = torch.linalg.eigh(a)
        svd_out = torch.linalg.svd(a)
        # Matrix input a is too ill-conditioned.
        # We'll just compare the first two singular values/eigenvalues. They are 1.0e5 and 511.0
        # The precision override with tolerance of 1.0 makes sense since ill-conditioned inputs are hard to converge
        # to exact values.
        self.assertEqual(eigh_out.eigenvalues.sort(descending=True).values[:2], [1.0e5, 511.0], atol=1.0, rtol=1.0e-2)
        self.assertEqual(svd_out.S[:2], [1.0e5, 511.0], atol=1.0, rtol=1.0e-2)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigvalsh(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_matrix
def run_test(shape, batch, uplo):
matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
expected_w = np.linalg.eigvalsh(matrix.cpu().numpy(), UPLO=uplo)
actual_w = torch.linalg.eigvalsh(matrix, UPLO=uplo)
self.assertEqual(actual_w, expected_w)
# check the out= variant
out = torch.empty_like(actual_w)
ans = torch.linalg.eigvalsh(matrix, UPLO=uplo, out=out)
self.assertEqual(ans, out)
self.assertEqual(ans, actual_w)
shapes = (0, 3, 5)
batches = ((), (3, ), (2, 2))
uplos = ["U", "L"]
for shape, batch, uplo in itertools.product(shapes, batches, uplos):
run_test(shape, batch, uplo)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_eigvalsh_errors_and_warnings(self, device, dtype):
        """Validate torch.linalg.eigvalsh input checks, the out= resize warning,
        and dtype/device mismatch errors, cross-checking NumPy where applicable.
        """
        # eigvalsh requires a square matrix
        t = torch.randn(2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.linalg.eigvalsh(t)
        # eigvalsh requires 'uplo' parameter to be 'U' or 'L'
        t = torch.randn(3, 3, device=device, dtype=dtype)
        for uplo in ["a", "wrong"]:
            with self.assertRaisesRegex(RuntimeError, "be 'L' or 'U'"):
                torch.linalg.eigvalsh(t, UPLO=uplo)
            with self.assertRaisesRegex(ValueError, "be 'L' or 'U'"):
                np.linalg.eigvalsh(t.cpu().numpy(), UPLO=uplo)
        # if non-empty out tensor with wrong shape is passed a warning is given
        # eigenvalues of a Hermitian matrix are real, hence the real dtype for out
        real_dtype = t.real.dtype if dtype.is_complex else dtype
        out = torch.empty_like(t).to(real_dtype)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.eigvalsh(t, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got int instead"):
            torch.linalg.eigvalsh(t, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eigvalsh(t, out=out)
@onlyCPU
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_eigh_lwork_lapack(self, device, dtype):
# test that the calculated lwork does not cause a crash, see https://github.com/pytorch/pytorch/issues/145801
t = torch.rand(3000, 3000, device=device, dtype=dtype)
y = torch.linalg.eigh(t)
self.assertEqual(y.eigenvalues.shape, (3000,))
@dtypes(*floating_and_complex_types())
def test_kron(self, device, dtype):
def run_test_case(a_shape, b_shape):
a = torch.rand(a_shape, dtype=dtype, device=device)
b = torch.rand(b_shape, dtype=dtype, device=device)
expected = np.kron(a.cpu().numpy(), b.cpu().numpy())
result = torch.kron(a, b)
self.assertEqual(result, expected)
# check the out= variant
out = torch.empty_like(result)
ans = torch.kron(a, b, out=out)
self.assertEqual(ans, out)
self.assertEqual(ans, result)
shapes = [(4,), (2, 2), (1, 2, 3), (1, 2, 3, 3)]
for a_shape, b_shape in itertools.product(shapes, reversed(shapes)):
run_test_case(a_shape, b_shape)
@dtypes(*floating_and_complex_types())
def test_kron_empty(self, device, dtype):
def run_test_case(empty_shape):
a = torch.eye(3, dtype=dtype, device=device)
b = torch.empty(empty_shape, dtype=dtype, device=device)
result = torch.kron(a, b)
expected = np.kron(a.cpu().numpy(), b.cpu().numpy())
self.assertEqual(result, expected)
# NumPy doesn't work if the first argument is empty
result = torch.kron(b, a)
self.assertEqual(result.shape, expected.shape)
empty_shapes = [(0,), (2, 0), (1, 0, 3)]
for empty_shape in empty_shapes:
run_test_case(empty_shape)
@dtypes(*floating_and_complex_types())
def test_kron_errors_and_warnings(self, device, dtype):
# if non-empty out tensor with wrong shape is passed a warning is given
a = torch.eye(3, dtype=dtype, device=device)
b = torch.ones((2, 2), dtype=dtype, device=device)
out = torch.empty_like(a)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.kron(a, b, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# dtypes should match
out = torch.empty_like(a).to(torch.int)
with self.assertRaisesRegex(RuntimeError, "can't be cast to the desired output type"):
torch.kron(a, b, out=out)
    # This test confirms that torch.linalg.norm's dtype argument works
    # as expected, according to the function's documentation
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble, torch.bfloat16, torch.float16)
    def test_norm_dtype(self, device, dtype):
        """Check torch.linalg.norm's dtype= argument: without it the result carries
        the input's real dtype, and with it the result matches casting the input
        first; both forms are also exercised through out=.
        """
        make_arg = partial(make_tensor, dtype=dtype, device=device)
        def run_test_case(input_size, ord, keepdim, to_dtype):
            msg = (
                f'input_size={input_size}, ord={ord}, keepdim={keepdim}, '
                f'dtype={dtype}, to_dtype={to_dtype}')
            input = make_arg(input_size)
            result = torch.linalg.norm(input, ord, keepdim=keepdim)
            # without dtype= the result has the input's real dtype
            self.assertEqual(result.dtype, input.real.dtype, msg=msg)
            result_out = torch.empty((0), dtype=result.dtype, device=device)
            torch.linalg.norm(input, ord, keepdim=keepdim, out=result_out)
            self.assertEqual(result, result_out, msg=msg)
            # dtype=to_dtype must match casting the input to to_dtype up front
            result = torch.linalg.norm(input.to(to_dtype), ord, keepdim=keepdim)
            result_with_dtype = torch.linalg.norm(input, ord, keepdim=keepdim, dtype=to_dtype)
            self.assertEqual(result, result_with_dtype, msg=msg)
            result_out_with_dtype = torch.empty_like(result_with_dtype)
            torch.linalg.norm(input, ord, keepdim=keepdim, dtype=to_dtype, out=result_out_with_dtype)
            self.assertEqual(result_with_dtype, result_out_with_dtype, msg=msg)
        ord_vector = [0, 1, -1, 2, -2, 3, -3, 4.5, -4.5, inf, -inf, None]
        # In these orders we are computing the 10-th power and 10-th root of numbers.
        # We avoid them for half-precision types as it makes the tests above too badly conditioned
        if dtype != torch.float16 and dtype != torch.bfloat16:
            ord_vector.extend([0.1, -0.1])
        ord_matrix = ['fro', 'nuc', 1, -1, 2, -2, inf, -inf, None]
        S = 10
        # dtype= may only widen within the same real/complex family
        if dtype == torch.cfloat:
            norm_dtypes = (torch.cfloat, torch.cdouble)
        elif dtype == torch.cdouble:
            norm_dtypes = (torch.cdouble,)
        elif dtype in (torch.float16, torch.bfloat16, torch.float):
            norm_dtypes = (torch.float, torch.double)
        elif dtype == torch.double:
            norm_dtypes = (torch.double,)
        else:
            raise RuntimeError("Unsupported dtype")
        for ord, keepdim, norm_dtype in product(ord_vector, (True, False), norm_dtypes):
            run_test_case((S,) , ord, keepdim, norm_dtype)
        for ord, keepdim, norm_dtype in product(ord_matrix, (True, False), norm_dtypes):
            if ord in [2, -2, 'nuc']:
                # We need torch.svdvals
                if dtype == torch.float16 or dtype == torch.bfloat16:
                    continue
                # We need LAPACK or equivalent
                if ((torch.device(device).type == 'cuda' and not torch.cuda.has_magma and not has_cusolver()) or
                   (torch.device(device).type == 'cpu' and not torch._C.has_lapack)):
                    continue
            run_test_case((S, S) , ord, keepdim, norm_dtype)
# This test confirms torch.linalg.norm bfloat16 and half get right result.
@dtypes(torch.bfloat16, torch.float16)
def test_norm_bfloat16_and_half(self, device, dtype):
make_arg = partial(make_tensor, dtype=dtype, device=device)
def run_test_case(input_size, ord, keepdim):
msg = (
f'input_size={input_size}, ord={ord}, keepdim={keepdim}, '
f'dtype={dtype}')
input = make_arg(input_size).fill_(1)
result_ref = torch.linalg.norm(input.float(), ord, keepdim=keepdim).to(dtype=dtype)
result = torch.linalg.norm(input, ord, keepdim=keepdim)
self.assertEqual(result_ref, result, msg=msg)
ord_vector = [0, 1, -1, 2, -2, 3, -3, 4.5, -4.5, inf, -inf, None]
for S, ord, keepdim in product((10, 2049), ord_vector, (True, False)):
run_test_case((S,) , ord, keepdim, )
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble, torch.bfloat16, torch.float16)
    def test_vector_norm(self, device, dtype):
        """Check torch.linalg.vector_norm against torch.linalg.norm on the flattened
        input across dim/keepdim/dtype combinations, including complex-ord and
        empty-input error cases, with and without autocast.
        """
        if IS_ARM64 and device == 'cpu' and dtype in [torch.float16, torch.bfloat16, torch.float32]:
            raise unittest.SkipTest("Fails on ARM, see https://github.com/pytorch/pytorch/issues/125438")
        # This test compares torch.linalg.vector_norm's output with
        # torch.linalg.norm given a flattened tensor
        ord_vector = [0, 0.9, 1, 2, 3, inf, -0.5, -1, -2, -3, -inf, 1 + 2j]
        input_sizes = [
            (1, ),
            (10, ),
            (4, 5),
            (3, 4, 5),
            (0, ),
            (0, 10),
            (0, 0),
            (10, 0, 10),
        ]
        def vector_norm_reference(input, ord, dim=None, keepdim=False, dtype=None):
            # vector_norm flattens implicitly when dim is None; norm needs it done by hand
            if dim is None:
                input_maybe_flat = input.flatten(0, -1)
            else:
                input_maybe_flat = input
            result = torch.linalg.norm(input_maybe_flat, ord, dim=dim, keepdim=keepdim, dtype=dtype)
            if keepdim and dim is None:
                result = result.reshape([1] * input.dim())
            return result
        def run_test_case(input, ord, dim, keepdim, norm_dtype):
            if isinstance(ord, complex):
                # complex ord values are rejected outright
                error_msg = "Expected a non-complex scalar"
                with self.assertRaisesRegex(RuntimeError, error_msg):
                    torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
            elif (input.numel() == 0 and
                  (ord < 0. or ord == inf) and
                  (dim is None or input.shape[dim] == 0)):
                # The operation does not have an identity.
                error_msg = "linalg.vector_norm cannot compute"
                with self.assertRaisesRegex(RuntimeError, error_msg):
                    torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim)
            else:
                msg = (f'input.size()={input.size()}, ord={ord}, dim={dim}, '
                       f'keepdim={keepdim}, dtype={dtype}, norm_dtype={norm_dtype}')
                result_dtype_reference = vector_norm_reference(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
                result_dtype = torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
                if dtype.is_complex:
                    result_dtype_reference = result_dtype_reference.real
                self.assertEqual(result_dtype, result_dtype_reference, msg=msg)
                if norm_dtype is not None:
                    # dtype= must match casting the input up front
                    ref = torch.linalg.vector_norm(input.to(norm_dtype), ord, dim=dim, keepdim=keepdim)
                    actual = torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
                    self.assertEqual(ref, actual, msg=msg)
        # dtype= may only stay within the same real/complex family, non-narrowing
        if dtype == torch.cfloat:
            norm_dtypes = (None, torch.cfloat, torch.cdouble)
        elif dtype == torch.cdouble:
            norm_dtypes = (None, torch.cdouble)
        elif dtype in (torch.float16, torch.bfloat16, torch.float):
            norm_dtypes = (None, torch.float, torch.double)
        elif dtype == torch.double:
            norm_dtypes = (None, torch.double)
        else:
            raise RuntimeError("Unsupported dtype")
        for amp in [False, True]:
            with torch.autocast(device_type=device, enabled=amp):
                for input_size, ord, keepdim, norm_dtype in product(input_sizes, ord_vector, [True, False], norm_dtypes):
                    input = make_tensor(input_size, dtype=dtype, device=device, low=-9, high=9)
                    for dim in [None, random.randint(0, len(input_size) - 1)]:
                        run_test_case(
                            input,
                            ord,
                            dim,
                            keepdim,
                            norm_dtype)
    def test_vector_norm_decom_unbacked_checks(self):
        """Check the runtime asserts _check_vector_norm_args emits for unbacked
        (data-dependent) sizes when traced through torch.export.
        """
        from torch._refs.linalg import _check_vector_norm_args
        class Mod(torch.nn.Module):
            def __init__(self, ord, dim):
                super().__init__()
                self.ord = ord
                self.dim = dim
            def forward(self, a):
                # a.item() creates an unbacked symbol u0 that sizes the tensor below
                x = a.item()
                tensor_unbacked_size = torch.ones(x, x + 1, x + 2)
                _check_vector_norm_args(tensor_unbacked_size, self.ord, self.dim)
                return tensor_unbacked_size
        def test(
            ord: Union[float, int],
            dim: Optional[DimsType],
            expect_numel_runtime_check: bool,
            expect_index_0_check: bool = False,
        ) -> None:
            m = Mod(ord, dim)
            # NOTE(review): tuple(torch.tensor([1])) iterates the 1-D tensor and
            # yields a 1-tuple of 0-dim tensors; .item() works on those, but
            # (torch.tensor([1]),) may have been the intent — confirm.
            exported_program: torch.export.ExportedProgram = torch.export.export(
                m, args=tuple(torch.tensor([1]))
            )
            self.assertEqual(
                "Runtime assertion failed for expression Ne(u0*(u0 + 1)*(u0 + 2), 0)"
                in exported_program.graph_module.code,
                expect_numel_runtime_check,
            )
            self.assertEqual(
                "Runtime assertion failed for expression Ne(u0, 0) | Ne(u0*(u0 + 1)*(u0 + 2), 0)"
                in exported_program.graph_module.code,
                expect_index_0_check,
            )
        # dim is int
        test(-1, 1, True)
        # dim is None
        test(-1, None, True)
        # len(dim) == 0
        test(-1, [], True)
        # shape[d] == 0
        test(-1, [0], False, True)
        # u0 + 1 == 0 is False we do not see a runtime assert in the generated graph.
        test(-1, [1], False, False)
        test(-1, [0, 1], False, True)
        test(-1, [0, 0], False, True)
def test_vector_norm_dim_tuple_arg(self, device):
    """vector_norm must accept ``dim`` given as a tuple or a list of ints,
    and reject invalid entries with the documented error types."""
    cases = [
        # input size, dim, error, error message
        ((4, ), (0, ), None, None),
        ((4, ), (1, ), IndexError, r'Dimension out of range'),
        ((4, ), (-2, ), IndexError, r'Dimension out of range'),
        ((4, 3), (0, -1), None, None),
        ((4, 3), (0, 0), RuntimeError, r'dim 0 appears multiple times in the list of dims'),
        ((4, 3), (0, -2), RuntimeError, r'dim 0 appears multiple times in the list of dims'),
        ((4, 3), (0, 1.0), TypeError, r"argument 'dim' must be tuple of ints"),
        ((4, 3), (None, ), TypeError, r"argument 'dim' must be tuple of ints"),
    ]
    for size, dims, err, err_msg in cases:
        t = torch.randn(size, device=device)
        # both the tuple form and the list form of dim must behave identically
        for dim_arg in (dims, list(dims)):
            if err is None:
                torch.linalg.vector_norm(t, dim=dim_arg)
                continue
            with self.assertRaises(err, msg=err_msg):
                torch.linalg.vector_norm(t, dim=dim_arg)
# This test compares torch.linalg.norm and numpy.linalg.norm to ensure that
# their vector norm results match
@dtypes(torch.float, torch.double)
def test_norm_vector(self, device, dtype):
    """Compare torch.linalg.norm with numpy.linalg.norm for vector norms.

    Covers many orders, reduction dims and input sizes, and checks that the
    out= overload returns the same values as the functional form.
    """
    def run_test_case(input, ord, dim, keepdim):
        # FIX: the parameter was previously named `p` while the body read the
        # enclosing loop variable `ord`; the call only worked because the
        # caller happened to pass that same variable. Naming the parameter
        # `ord` makes the helper genuinely use its argument.
        result = torch.linalg.norm(input, ord, dim, keepdim)
        input_numpy = input.cpu().numpy()
        result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
        msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
        self.assertEqual(result, result_numpy, msg=msg)

        # the out= variant must produce the same values
        result_out = torch.empty_like(result)
        torch.linalg.norm(input, ord, dim, keepdim, out=result_out)
        self.assertEqual(result, result_out, msg=msg)

    ord_vector = [0, 1, -1, 2, -2, 3, -3, 4.5, -4.5, inf, -inf]
    S = 10
    test_cases = [
        # input size, p settings, dim
        ((S, ), ord_vector, None),
        ((S, ), ord_vector, 0),
        ((S, S, S), ord_vector, 0),
        ((S, S, S), ord_vector, 1),
        ((S, S, S), ord_vector, 2),
        ((S, S, S), ord_vector, -1),
        ((S, S, S), ord_vector, -2),
    ]
    L = 1_000_000
    if dtype == torch.double:
        # very large 1D input (double only, to keep runtime reasonable)
        test_cases.append(((L, ), ord_vector, None))
    for keepdim in [True, False]:
        for input_size, ord_settings, dim in test_cases:
            input = torch.randn(*input_size, dtype=dtype, device=device)
            for ord in ord_settings:
                run_test_case(input, ord, dim, keepdim)
# This test compares torch.linalg.norm, torch.linalg.matrix_norm and numpy.linalg.norm to
# ensure that their matrix norm results match.
@skipMeta  # https://github.com/pytorch/pytorch/issues/54082
@skipCUDAIfNoMagma
@dtypes(torch.float, torch.double)
@precisionOverride({torch.float32: 2e-4})
def test_norm_matrix(self, device, dtype):
    """Compare torch.linalg.norm (and torch.linalg.matrix_norm when ord and
    dim are explicit) against numpy.linalg.norm for matrix norms over a
    variety of dim pairs and orders."""
    make_arg = partial(make_tensor, dtype=dtype, device=device)

    def run_test_case(input, ord, dim, keepdim):
        msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
        input_numpy = input.cpu().numpy()
        result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
        # FIX: the torch result was previously computed twice (identical
        # calls before and after the numpy reference); once is enough.
        result = torch.linalg.norm(input, ord, dim, keepdim)
        self.assertEqual(result, result_numpy, msg=msg)
        if ord is not None and dim is not None:
            # matrix_norm requires explicit ord and dim arguments
            result = torch.linalg.matrix_norm(input, ord, dim, keepdim)
            self.assertEqual(result, result_numpy, msg=msg)

    ord_matrix = [1, -1, 2, -2, inf, -inf, 'nuc', 'fro']
    S = 10
    test_cases = [
        # input size, dim
        ((S, S), None),
        ((S, S), (0, 1)),
        ((S, S), (1, 0)),
        ((S, S, S, S), (2, 0)),
        ((S, S, S, S), (-1, -2)),
        ((S, S, S, S), (-1, -3)),
        ((S, S, S, S), (-3, 2)),
    ]
    for (shape, dim), keepdim, ord in product(test_cases, [True, False], ord_matrix):
        if ord in [2, -2, 'nuc']:
            # We need torch.svdvals
            if dtype == torch.float16 or dtype == torch.bfloat16:
                continue
            # We need LAPACK or equivalent
            if ((torch.device(device).type == 'cuda' and not torch.cuda.has_magma and not has_cusolver()) or
                    (torch.device(device).type == 'cpu' and not torch._C.has_lapack)):
                continue
        run_test_case(make_arg(shape), ord, dim, keepdim)
@onlyCUDA
@dtypes(torch.bfloat16, torch.float16)
def test_norm_fused_type_promotion(self, device, dtype):
    """Computing a norm of a reduced-precision input with dtype=torch.float
    must not emit a separate aten::to copy — the promotion is fused."""
    x = torch.randn(10, device=device, dtype=dtype)

    def profile_and_check(fn, x, kwargs):
        with torch.profiler.profile(activities=(torch.profiler.ProfilerActivity.CPU,)) as p:
            fn(x, **kwargs, dtype=torch.float)
        event_names = [e.name for e in p.events()]
        # smoke check that profiler returned some events
        self.assertTrue("aten::linalg_vector_norm" in event_names)
        # test that there was no explicit copy
        self.assertFalse("aten::to" in event_names)

    for f, kwargs in ((torch.linalg.vector_norm, {}), (torch.norm, {"p": 2})):
        profile_and_check(f, x, kwargs)
@skipMeta  # https://github.com/pytorch/pytorch/issues/53739
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3})
def test_cond(self, device, dtype):
    """Compare torch.linalg.cond against numpy.linalg.cond for all norm
    types, over square, batched, empty-batch, non-square, singular and
    0x0 inputs (NumPy cannot handle 0x0 input; torch returns zeros)."""
    def run_test_case(input, p):
        result = torch.linalg.cond(input, p)
        result_numpy = np.linalg.cond(input.cpu().numpy(), p)
        self.assertEqual(result, result_numpy, rtol=1e-2, atol=self.precision, exact_dtype=False)
        self.assertEqual(result.shape, result_numpy.shape)

        # test out= variant
        out = torch.empty_like(result)
        ans = torch.linalg.cond(input, p, out=out)
        self.assertEqual(ans, out)
        self.assertEqual(ans, result)

    norm_types = [1, -1, 2, -2, inf, -inf, 'fro', 'nuc', None]
    input_sizes = [(32, 32), (2, 3, 3, 3)]
    for input_size in input_sizes:
        input = torch.randn(*input_size, dtype=dtype, device=device)
        for p in norm_types:
            run_test_case(input, p)

    # test empty batch sizes
    input_sizes = [(0, 3, 3), (0, 2, 5, 5)]
    for input_size in input_sizes:
        input = torch.randn(*input_size, dtype=dtype, device=device)
        for p in norm_types:
            run_test_case(input, p)

    # test non-square input (only the norms defined for non-square matrices)
    input_sizes = [(16, 32), (32, 16), (2, 3, 5, 3), (2, 3, 3, 5)]
    for input_size in input_sizes:
        input = torch.randn(*input_size, dtype=dtype, device=device)
        for p in [2, -2, None]:
            run_test_case(input, p)

    # test for singular input
    a = torch.eye(3, dtype=dtype, device=device)
    a[-1, -1] = 0  # make 'a' singular
    for p in norm_types:
        try:
            run_test_case(a, p)
        except np.linalg.LinAlgError:
            # Numpy may fail to converge for some BLAS backends (although this is very rare)
            # See the discussion in https://github.com/pytorch/pytorch/issues/67675
            pass

    # test for 0x0 matrices. NumPy doesn't work for such input, we return 0
    input_sizes = [(0, 0), (2, 5, 0, 0)]
    for input_size in input_sizes:
        input = torch.randn(*input_size, dtype=dtype, device=device)
        for p in ['fro', 2]:
            # NOTE(review): `a` here is the singular matrix left over from
            # the section above; it only supplies the dtype, which matches
            # `input`'s dtype, so the reuse is harmless (if confusing).
            expected_dtype = a.real.dtype if dtype.is_complex else dtype
            expected = torch.zeros(input_size[:-2], dtype=expected_dtype, device=device)
            actual = torch.linalg.cond(input, p)
            self.assertEqual(actual, expected)
@skipMeta  # https://github.com/pytorch/pytorch/issues/53739
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3})
def test_cond_errors_and_warnings(self, device, dtype):
    """Error, warning and edge-case behavior of torch.linalg.cond:
    dimensionality/squareness checks, out= dtype/shape/device validation,
    singular entries in a batch (cond == inf), and invalid norm types."""
    norm_types = [1, -1, 2, -2, inf, -inf, 'fro', 'nuc', None]

    # cond expects the input to be at least 2-dimensional
    a = torch.ones(3, dtype=dtype, device=device)
    for p in norm_types:
        with self.assertRaisesRegex(RuntimeError, r'at least 2 dimensions'):
            torch.linalg.cond(a, p)

    # for some norm types cond expects the input to be square
    a = torch.ones(3, 2, dtype=dtype, device=device)
    norm_types = [1, -1, inf, -inf, 'fro', 'nuc']
    for p in norm_types:
        with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
            torch.linalg.cond(a, p)

    # if non-empty out tensor with wrong shape is passed a warning is given
    a = torch.ones((2, 2), dtype=dtype, device=device)
    for p in ['fro', 2]:
        real_dtype = a.real.dtype if dtype.is_complex else dtype
        out = torch.empty(a.shape, dtype=real_dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.cond(a, p, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

    # dtypes should be safely castable
    out = torch.empty(0, dtype=torch.int, device=device)
    for p in ['fro', 2]:
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.linalg.cond(a, p, out=out)

    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out = torch.empty(0, dtype=dtype, device=wrong_device)
        for p in ['fro', 2]:
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.cond(a, p, out=out)

    # for batched input if at least one matrix in the batch is not invertible,
    # we can't get the result for all other (possibly) invertible matrices in the batch without an explicit for loop.
    # this should change when at::inverse works with silent errors
    # NumPy works fine in this case because it's possible to silence the error and get the inverse matrix results
    # possibly filled with NANs
    batch_dim = 3
    a = torch.eye(3, 3, dtype=dtype, device=device)
    a = a.reshape((1, 3, 3))
    a = a.repeat(batch_dim, 1, 1)
    a[1, -1, -1] = 0  # now a[1] is singular
    for p in [1, -1, inf, -inf, 'fro', 'nuc']:
        result = torch.linalg.cond(a, p)
        self.assertEqual(result[1], float('inf'))

    # check invalid norm type
    a = torch.ones(3, 3, dtype=dtype, device=device)
    for p in ['wrong_norm', 5]:
        with self.assertRaisesRegex(RuntimeError, f"linalg.cond got an invalid norm type: {p}"):
            torch.linalg.cond(a, p)
# This test calls torch.linalg.norm and numpy.linalg.norm with illegal arguments
# to ensure that they both throw errors
@dtypes(torch.float, torch.double)
def test_norm_errors(self, device, dtype):
    """torch.linalg.norm must raise the documented error type/message for
    each illegal argument combination, and numpy must reject the same call
    (with any exception type)."""
    def run_error_test_case(input, ord, dim, keepdim, error_type, error_regex):
        test_case_info = (
            f'test case input.size()={input.size()}, ord={ord}, dim={dim}, '
            f'keepdim={keepdim}, dtype={dtype}')
        with self.assertRaisesRegex(error_type, error_regex, msg=test_case_info):
            torch.linalg.norm(input, ord, dim, keepdim)
        input_numpy = input.cpu().numpy()
        # NOTE(review): `msg` below is never used — the assertRaises is
        # passed test_case_info instead; it was presumably intended as the
        # failure message of the numpy check.
        msg = f'numpy does not raise error but pytorch does, for case "{test_case_info}"'
        with self.assertRaises(Exception, msg=test_case_info):
            np.linalg.norm(input_numpy, ord, dim, keepdim)

    S = 10
    error_test_cases = [
        # input size, p settings, dim, error type, error regex
        ((S, ), ['fro', 'nuc'], None, RuntimeError, r'A must have at least 2 dimensions'),
        ((S, S), [3.5], None, RuntimeError, r'matrix_norm: Order 3.5 not supported'),
        ((S, S), [0], None, RuntimeError, r'matrix_norm: Order 0 not supported'),
        ((S, S), ['fail'], None, RuntimeError, r'matrix_norm: Order fail not supported'),
        ((S, S), ['fro', 'nuc'], 0, RuntimeError, r'matrix_norm: dim must be a 2-tuple'),
        ((S, S), ['fro', 'nuc', 2], (0, 0), RuntimeError, r'dims must be different'),
        ((S, S), ['fro', 'nuc', 2], (-1, 1), RuntimeError, r'dims must be different'),
        ((S, S), ['fro', 'nuc', 2], (0, 4), IndexError, r'Dimension out of range'),
        ((S, ), [0], (4, ), IndexError, r'Dimension out of range'),
        ((S, ), [None], (0, 0), RuntimeError, r'dim 0 appears multiple times'),
        ((S, S, S), [1], (0, 1, 2), RuntimeError, r"If dim is specified, it must be of length 1 or 2."),
        ((S, S, S), [1], None, RuntimeError, r"If dim is not specified but ord is, the input must be 1D or 2D"),
    ]
    for keepdim in [True, False]:
        for input_size, ord_settings, dim, error_type, error_regex in error_test_cases:
            input = torch.randn(*input_size, dtype=dtype, device=device)
            for ord in ord_settings:
                run_error_test_case(input, ord, dim, keepdim, error_type, error_regex)
# Test complex number inputs for linalg.norm
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.cfloat, torch.cdouble)
@precisionOverride({torch.cfloat: 5e-4})
def test_norm_complex(self, device, dtype):
    """torch.linalg.norm on complex inputs matches numpy for all supported
    vector and matrix orders, including the out= overload."""
    def gen_error_message(input_size, ord, keepdim, dim=None):
        return f"complex norm failed for input size {input_size}, ord={ord}, keepdim={keepdim}, dim={dim}"

    vector_ords = [None, 0, 1, 2, 3, inf, -1, -2, -3, -inf]
    matrix_ords = [None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf]

    def check(x, ord, keepdim):
        # shared body for the vector and matrix cases below
        xn = x.cpu().numpy()
        res = torch.linalg.norm(x, ord, keepdim=keepdim).cpu()
        expected = np.linalg.norm(xn, ord, keepdims=keepdim)
        msg = gen_error_message(x.size(), ord, keepdim)
        self.assertEqual(res.shape, expected.shape, msg=msg)
        self.assertEqual(res, expected, msg=msg, exact_dtype=False)

        # the out= overload must agree with the functional result
        res_out = torch.tensor([], device=device, dtype=res.dtype)
        torch.linalg.norm(x, ord, keepdim=keepdim, out=res_out)
        self.assertEqual(res_out.shape, expected.shape, msg=msg)
        self.assertEqual(res_out, expected, msg=msg)

    # Test supported ords
    for keepdim in [False, True]:
        # vector norm
        x_vec = torch.randn(25, device=device, dtype=dtype)
        for ord in vector_ords:
            check(x_vec, ord, keepdim)
        # matrix norm
        x_mat = torch.randn(25, 25, device=device, dtype=dtype)
        for ord in matrix_ords:
            check(x_mat, ord, keepdim)
@onlyCPU
def test_norm_complexhalf(self, device):
    """linalg.norm of a chalf input returns torch.half and matches the same
    computation carried out in cfloat."""
    def gen_error_message(input_size, ord, keepdim, dim=None):
        return f"complex norm failed for input size {input_size}, ord={ord}, keepdim={keepdim}, dim={dim}"

    vector_ords = [None, 0, 1, 2, 3, inf, -1, -2, -3, -inf]
    # Test supported ords
    for keepdim in [False, True]:
        # vector norm
        x = torch.randn(25, device=device, dtype=torch.chalf)
        x_cfloat = x.to(torch.cfloat)
        for ord in vector_ords:
            msg = gen_error_message(x.size(), ord, keepdim)
            res = torch.linalg.norm(x, ord, keepdim=keepdim)
            reference = torch.linalg.norm(x_cfloat, ord, keepdim=keepdim)
            self.assertEqual(res.shape, reference.shape, msg=msg)
            self.assertEqual(res.dtype, torch.half, msg=msg)
            self.assertEqual(res, reference, msg=msg, exact_dtype=False)

            # the out= overload must behave the same way
            out = torch.tensor([], device=device, dtype=res.dtype)
            torch.linalg.norm(x, ord, keepdim=keepdim, out=out)
            self.assertEqual(out.shape, reference.shape, msg=msg)
            self.assertEqual(out.dtype, torch.half, msg=msg)
            self.assertEqual(out, reference, msg=msg, exact_dtype=False)
# Test that linalg.vector_norm gives the same result as numpy when inputs
# contain extreme values (inf, -inf, nan)
def test_vector_norm_extreme_values(self, device):
    """vector_norm agrees with numpy on 2-element inputs built from every
    ordered pair drawn from {inf, -inf, 0, nan, 1}."""
    vector_ords = [0, 1, 2, 3, inf, -1, -2, -3, -inf]
    vectors = [list(pair) for pair in itertools.product([inf, -inf, 0.0, nan, 1.0], repeat=2)]
    for vector in vectors:
        x = torch.tensor(vector, device=device)
        x_n = x.cpu().numpy()
        for ord in vector_ords:
            msg = f'ord={ord}, vector={vector}'
            self.assertEqual(
                torch.linalg.vector_norm(x, ord=ord),
                np.linalg.norm(x_n, ord=ord),
                msg=msg,
            )
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_vector_norm_reduce_over_1D_vector(self, device, dtype):
    """Reducing over size-1 dims (or a 1-element tensor) matches numpy."""
    cases = [
        # input size, dim
        ((6, 1), -1),
        ((3, 1, 2, 1), (1, 3)),
        ((1,), None),
    ]
    orders = [float('inf'), -float('inf'), 0, 1, -1, 2, -2]
    for (size, dim), ord, keepdim in product(cases, orders, [True, False]):
        if isinstance(dim, tuple) and ord == 0:
            # skip because np.linalg.norm raises 'ValueError: Invalid norm order for matrices.'
            continue
        t = make_tensor(size, dtype=dtype, device=device, low=-9, high=9)
        actual = torch.linalg.vector_norm(t, ord, dim, keepdim)
        expected = np.linalg.norm(t.cpu().numpy(), ord, dim, keepdim)
        msg = f'input.size()={t.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
        self.assertEqual(actual, expected, msg=msg)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double)
@precisionOverride({torch.float32: 2e-5})
def test_matrix_norm(self, device, dtype):
    """Argument validation and dim=None defaulting of torch.linalg.matrix_norm."""
    # Test only inputs for which torch.linalg.matrix_norm diverges from torch.linalg.norm
    A = make_tensor((2, 2, 2), dtype=dtype, device=device)
    with self.assertRaisesRegex(RuntimeError, r'linalg.matrix_norm:.*must have at least 2 dimensions.*'):
        torch.linalg.matrix_norm(make_tensor((2,), dtype=dtype, device=device))
    with self.assertRaisesRegex(RuntimeError, r'linalg.matrix_norm:.*must be a 2-tuple.*'):
        torch.linalg.matrix_norm(A, dim=(0,))
    with self.assertRaisesRegex(RuntimeError, r'.*not supported.*'):
        torch.linalg.matrix_norm(A, ord=0)
    with self.assertRaisesRegex(RuntimeError, r'.*not supported.*'):
        torch.linalg.matrix_norm(A, ord=3.0)
    with self.assertRaisesRegex(RuntimeError, "Expected a non-complex scalar"):
        torch.linalg.matrix_norm(A, ord=1 + 2j)

    # Test dim=None behavior: defaults to reducing over the last two dims
    ref = torch.linalg.norm(A, dim=(-2, -1))
    res = torch.linalg.matrix_norm(A)
    self.assertEqual(ref, res)
# Test that linalg.norm gives the same result as numpy when inputs
# contain extreme values (inf, -inf, nan)
@unittest.skipIf(IS_WINDOWS, "Skipped on Windows!")
@unittest.skipIf(IS_MACOS, "Skipped on MacOS!")
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
def test_norm_extreme_values(self, device):
    """linalg.norm matches numpy on 2-element vectors and 1x2/2x1 matrices
    built from every pair of {inf, -inf, 0, nan, 1}, skipping matrix cases
    known to be broken on CUDA (svd issues)."""
    vector_ords = [0, 1, 2, 3, inf, -1, -2, -3, -inf]
    # matrix_ords 'nuc', 2, -2 are skipped currently
    # See issue https://github.com/pytorch/pytorch/issues/71911
    matrix_ords = ['fro', 1, inf, -1, -inf]
    vectors = []
    matrices = []
    for pair in itertools.product([inf, -inf, 0.0, nan, 1.0], repeat=2):
        vectors.append(list(pair))
        matrices.append([[pair[0], pair[1]]])
        matrices.append([[pair[0]], [pair[1]]])

    for vector in vectors:
        x = torch.tensor(vector).to(device)
        x_n = x.cpu().numpy()
        for ord in vector_ords:
            msg = f'ord={ord}, vector={vector}'
            result = torch.linalg.norm(x, ord=ord)
            result_n = np.linalg.norm(x_n, ord=ord)
            self.assertEqual(result, result_n, msg=msg)

    # TODO: Remove this function once the broken cases are fixed
    def is_broken_matrix_norm_case(ord, x):
        if self.device_type == 'cuda':
            if x.size() == torch.Size([1, 2]):
                if ord in ['nuc', 2, -2] and isnan(x[0][0]) and x[0][1] == 1:
                    # These cases are broken because of an issue with svd
                    # https://github.com/pytorch/pytorch/issues/43567
                    return True
            if ord in ['nuc', 2, -2]:
                # These cases are broken because of another issue with svd
                # https://github.com/pytorch/pytorch/issues/52633
                return True
        return False

    for matrix in matrices:
        x = torch.tensor(matrix).to(device)
        x_n = x.cpu().numpy()
        for ord in matrix_ords:
            msg = f'ord={ord}, matrix={matrix}'
            if is_broken_matrix_norm_case(ord, x):
                continue
            else:
                result_n = np.linalg.norm(x_n, ord=ord)
                result = torch.linalg.norm(x, ord=ord)
                self.assertEqual(result, result_n, msg=msg)
# Test degenerate shape results match numpy for linalg.norm vector norms
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_norm_vector_degenerate_shapes(self, device, dtype):
    """Vector norms on empty inputs: orders with no identity element
    (ord < 0 or ord == inf, reducing over an empty dim) must raise;
    every other case must match numpy."""
    def run_test_case(input, ord, dim, keepdim):
        msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
        if (input.numel() == 0 and
            (ord < 0. or ord == inf) and
            (dim is None or input.shape[dim] == 0)):
            # the reduction has no identity element for these orders
            with self.assertRaises(RuntimeError):
                torch.linalg.norm(input, ord, dim, keepdim)
        else:
            input_numpy = input.cpu().numpy()
            result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
            result = torch.linalg.norm(input, ord, dim, keepdim)
            self.assertEqual(result, result_numpy, msg=msg)

    ord_vector = [0, 0.5, 1, 2, 3, inf, -0.5, -1, -2, -3, -inf]
    S = 10
    test_cases = [
        # input size, dim
        ((0, ), None),
        ((0, S), 0),
        ((0, S), 1),
        ((S, 0), 0),
        ((S, 0), 1),
    ]
    for keepdim in [True, False]:
        for input_size, dim in test_cases:
            input = torch.randn(*input_size, dtype=dtype, device=device)
            for ord in ord_vector:
                run_test_case(input, ord, dim, keepdim)
# Test degenerate shape results match numpy for linalg.norm matrix norms
@skipIf(np.lib.NumpyVersion(np.__version__) < '2.3.0', 'Numpy changed handling of degenerate inputs in 2.3.0')
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_norm_matrix_degenerate_shapes(self, device, dtype):
    """Matrix norms on inputs with zero-sized dims (numpy >= 2.3 semantics):
    orders listed per test case as erroring must raise in both numpy
    (ValueError) and torch (IndexError); all others must match numpy."""
    def run_test_case(input, ord, dim, keepdim, should_error):
        msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
        input_numpy = input.cpu().numpy()
        ops = [torch.linalg.norm]
        # matrix_norm is only applicable when ord and dim are explicit
        if ord is not None and dim is not None:
            ops.append(torch.linalg.matrix_norm)
        if should_error:
            with self.assertRaises(ValueError):
                np.linalg.norm(input_numpy, ord, dim, keepdim)
            for op in ops:
                with self.assertRaises(IndexError):
                    op(input, ord, dim, keepdim)
        else:
            result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
            for op in ops:
                result = op(input, ord, dim, keepdim)
                self.assertEqual(result, result_numpy, msg=msg)

    ord_matrix = ['fro', 'nuc', 1, 2, inf, -1, -2, -inf, None]
    S = 10
    test_cases = [
        # input size, p settings that cause error, dim
        ((0, 0), [-1, -2, -inf], None),
        ((0, S), [-2, -inf], None),
        ((S, 0), [-1, -2], None),
        ((S, S, 0), [], (0, 1)),
        ((1, S, 0), [], (0, 1)),
        ((0, 0, S), [-1, -2, -inf], (0, 1)),
        ((0, 0, S), [-1, -2, -inf], (1, 0)),
    ]
    for keepdim in [True, False]:
        for input_size, error_ords, dim in test_cases:
            input = torch.randn(*input_size, dtype=dtype, device=device)
            for ord in ord_matrix:
                run_test_case(input, ord, dim, keepdim, ord in error_ords)
# TODO this is redundant with test_norm_matrix_degenerate_shapes above,
# remove when old numpy versions are dropped
@skipIf(np.lib.NumpyVersion(np.__version__) >= '2.3.0', 'Numpy changed handling of degenerate inputs in 2.3.0')
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_norm_matrix_degenerate_shapes_old_numpy(self, device, dtype):
    """Same as test_norm_matrix_degenerate_shapes, but for numpy < 2.3 where
    some orders raise in numpy while torch succeeds and returns zeros."""
    def run_test_case(input, ord, dim, keepdim, should_error):
        # should_error is one of 'both', 'np_only', or anything else for
        # "neither errors"
        msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
        input_numpy = input.cpu().numpy()
        ops = [torch.linalg.norm]
        # matrix_norm is only applicable when ord and dim are explicit
        if ord is not None and dim is not None:
            ops.append(torch.linalg.matrix_norm)
        if should_error == 'both':
            with self.assertRaises(ValueError):
                np.linalg.norm(input_numpy, ord, dim, keepdim)
            for op in ops:
                with self.assertRaises(IndexError):
                    op(input, ord, dim, keepdim)
        elif should_error == 'np_only':
            with self.assertRaises(ValueError):
                np.linalg.norm(input_numpy, ord, dim, keepdim)
            for op in ops:
                # torch succeeds here and returns zeros of the reduced shape
                result = op(input, ord, dim, keepdim)
                dim_ = dim
                if dim_ is None:
                    dim_ = (0, 1)
                expected_shape = list(input.shape)
                if keepdim:
                    expected_shape[dim_[0]] = 1
                    expected_shape[dim_[1]] = 1
                else:
                    # delete the larger index first so the smaller stays valid
                    del expected_shape[max(dim_)]
                    del expected_shape[min(dim_)]
                expected = torch.zeros(expected_shape, dtype=dtype.to_real())
                self.assertEqual(expected, result, msg=msg)
        else:
            result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
            for op in ops:
                result = op(input, ord, dim, keepdim)
                self.assertEqual(result, result_numpy, msg=msg)

    ord_matrix = ['fro', 'nuc', 1, 2, inf, -1, -2, -inf, None]
    S = 10
    test_cases = [
        # input size, p settings that cause error,
        # p settings that error numpy but not torch, dim
        ((0, 0), [-1, -2, -inf], [inf, 1, 2], None),
        ((0, S), [-2, -inf], [inf, 2], None),
        ((S, 0), [-1, -2], [1, 2], None),
        ((S, S, 0), [], [], (0, 1)),
        ((1, S, 0), [], [], (0, 1)),
        ((0, 0, S), [-1, -2, -inf], [inf, 1, 2], (0, 1)),
        ((0, 0, S), [-1, -2, -inf], [inf, 1, 2], (1, 0)),
    ]
    for keepdim in [True, False]:
        for input_size, error_ords, np_error_ords, dim in test_cases:
            input = torch.randn(*input_size, dtype=dtype, device=device)
            for ord in ord_matrix:
                if ord in error_ords:
                    should_error = 'both'
                elif ord in np_error_ords:
                    should_error = 'np_only'
                else:
                    should_error = 'no'
                run_test_case(input, ord, dim, keepdim, should_error)
def test_norm_fastpaths(self, device):
    """Check the specialized code paths for ord in {0, 1, 2, 3} and the
    generic slow path against explicit reference formulas."""
    x = torch.randn(3, 5, device=device)
    # (ord, reference computation reducing over dim 1)
    references = [
        # slow path
        (4.5, lambda t: torch.pow(t.abs().pow(4.5).sum(1), 1.0 / 4.5)),
        # fast 0-norm
        (0, lambda t: (t != 0).type_as(t).sum(1)),
        # fast 1-norm
        (1, lambda t: t.abs().sum(1)),
        # fast 2-norm
        (2, lambda t: torch.sqrt(t.pow(2).sum(1))),
        # fast 3-norm
        (3, lambda t: torch.pow(t.pow(3).abs().sum(1), 1.0 / 3.0)),
    ]
    for ord, reference in references:
        self.assertEqual(torch.linalg.norm(x, ord, 1), reference(x))
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
# NumPy computes only in float64 and complex128 precisions
# for float32 or complex64 results might be very different from float64 or complex128
@dtypes(torch.float64, torch.complex128)
def test_eig_numpy(self, device, dtype):
    """Compare torch.linalg.eig against numpy.linalg.eig.

    Neither library guarantees an eigenvalue ordering, so both outputs are
    sorted (descending, via numpy argsort) before comparison, and the
    eigenvectors are compared through abs() since they are only determined
    up to a phase factor.
    """
    def run_test(shape, *, symmetric=False):
        from torch.testing._internal.common_utils import random_symmetric_matrix

        if not dtype.is_complex and symmetric:
            # for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
            # unlike NumPy the result is not cast to float32 or float64 dtype in this case
            a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
        else:
            a = make_tensor(shape, dtype=dtype, device=device)

        actual = torch.linalg.eig(a)

        # compare with NumPy
        # the eigenvalues are not necessarily ordered
        # so order of NumPy and PyTorch can be different
        expected = np.linalg.eig(a.cpu().numpy())

        # sort NumPy output
        ind = np.argsort(expected[0], axis=-1)[::-1]
        expected = (np.take_along_axis(expected[0], ind, axis=-1), np.take_along_axis(expected[1], ind[:, None], axis=-1))

        # sort PyTorch output
        # torch.argsort doesn't work with complex inputs, NumPy sorting on CPU is used instead
        # RuntimeError: _th_sort not supported on CUDAType for ComplexDouble
        # RuntimeError: "sorting_kernel_method_name" not implemented for 'ComplexDouble'
        ind = np.argsort(actual[0].cpu().numpy(), axis=-1)[::-1]
        actual_np = [x.cpu().numpy() for x in actual]
        sorted_actual = (
            np.take_along_axis(actual_np[0], ind, axis=-1),
            np.take_along_axis(actual_np[1], ind[:, None], axis=-1))

        self.assertEqual(expected[0], sorted_actual[0], exact_dtype=False)
        # eigenvectors are compared up to phase, hence abs()
        self.assertEqual(abs(expected[1]), abs(sorted_actual[1]), exact_dtype=False)

    shapes = [(0, 0),  # Empty matrix
              (5, 5),  # Single matrix
              (0, 0, 0), (0, 5, 5),  # Zero batch dimension tensors
              (2, 5, 5),  # 3-dim tensors
              (2, 1, 5, 5)]  # 4-dim tensors
    for shape in shapes:
        run_test(shape)
        run_test(shape, symmetric=True)
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
def test_eig_compare_backends(self, device, dtype):
    """Compare CUDA torch.linalg.eig eigenvalues against the CPU backend and
    verify the eigendecomposition identity A @ V == V * w."""
    def run_test(shape, *, symmetric=False):
        from torch.testing._internal.common_utils import random_symmetric_matrix

        if not dtype.is_complex and symmetric:
            # for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
            a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
        else:
            a = make_tensor(shape, dtype=dtype, device=device)

        actual = torch.linalg.eig(a)

        complementary_device = 'cpu'

        # compare eigenvalues with CPU
        expected = torch.linalg.eig(a.to(complementary_device))
        self.assertEqual(expected[0], actual[0])

        # set tolerance for correctness check
        if dtype in [torch.float32, torch.complex64]:
            # CuSolver is less accurate in single precision (roughly 1-2
            # orders of magnitude worse than NumPy)
            atol = 1e-3
        else:
            # same order of magnitude as NumPy
            atol = 1e-13

        # check correctness using eigendecomposition identity
        w, v = actual
        a = a.to(v.dtype)
        if a.numel() == 0 and v.numel() == 0 and w.numel() == 0:
            # a fully empty decomposition is valid; nothing to check
            pass
        elif a.numel() == 0 or v.numel() == 0 or w.numel() == 0:
            raise RuntimeError("eig returned empty tensors unexpectedly")
        self.assertEqual(a @ v, v * w.unsqueeze(-2), atol=atol, rtol=0)

    shapes = [(0, 0),  # Empty matrix
              (5, 5),  # Single matrix
              (0, 0, 0), (0, 5, 5),  # Zero batch dimension tensors
              (2, 5, 5),  # 3-dim tensors
              (2, 1, 5, 5)]  # 4-dim tensors
    for shape in shapes:
        run_test(shape)
        run_test(shape, symmetric=True)
@slowTest
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(torch.float32)
def test_eig_check_magma(self, device, dtype):
    """Run linalg.eig on an input large enough to take the MAGMA code path
    and verify the eigendecomposition identity."""
    # For CUDA inputs only matrices of size larger than 2048x2048 actually call MAGMA library
    a = make_tensor((2049, 2049), dtype=dtype, device=device)
    w, v = torch.linalg.eig(a)
    # check correctness using eigendecomposition identity
    lhs = a.to(v.dtype) @ v
    rhs = w * v
    self.assertEqual(lhs, rhs, atol=1e-3, rtol=1e-3)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_eig_errors_and_warnings(self, device, dtype):
    """Input validation and out= checking of torch.linalg.eig:
    dimensionality and squareness errors, cast errors when real out
    tensors are given for a complex result, resize warnings, and
    device-mismatch errors."""
    # eig requires the input to be at least 2 dimensional tensor
    a = make_tensor(2, dtype=dtype, device=device)
    with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
        torch.linalg.eig(a)

    # eig requires a square matrix
    a = make_tensor((2, 3), dtype=dtype, device=device)
    with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
        torch.linalg.eig(a)

    # if out tensor with floating dtype is passed for complex output an error is thrown
    if not dtype.is_complex:
        # The characteristic equation is p(lambda) = lambda^2 - 2lambda + 5 = 0, with roots lambda = 1[+-]2i
        a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)
        out0 = torch.empty(0, device=device, dtype=dtype)
        out1 = torch.empty(0, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "Expected eigenvalues to be safely castable"):
            torch.linalg.eig(a, out=(out0, out1))

        out0 = torch.empty(0, device=device, dtype=torch.complex128)
        with self.assertRaisesRegex(RuntimeError, "Expected eigenvectors to be safely castable"):
            torch.linalg.eig(a, out=(out0, out1))

    # dtypes should be safely castable
    a = make_tensor((3, 3), dtype=dtype, device=device)
    out0 = torch.empty(0, dtype=torch.int, device=device)
    out1 = torch.empty(0, dtype=torch.int, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got eigenvalues with dtype Int"):
        torch.linalg.eig(a, out=(out0, out1))

    out0 = torch.empty(0, dtype=torch.complex128, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got eigenvectors with dtype Int"):
        torch.linalg.eig(a, out=(out0, out1))

    # if non-empty out tensor with wrong shape is passed a warning is given
    a = make_tensor((3, 3), dtype=dtype, device=device)
    out0 = torch.empty(1, device=device, dtype=torch.complex128)
    out1 = torch.empty(1, device=device, dtype=torch.complex128)
    with warnings.catch_warnings(record=True) as w:
        # Trigger warning
        torch.linalg.eig(a, out=(out0, out1))
        # Check warning occurs (one warning per resized out tensor)
        self.assertEqual(len(w), 2)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))

    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out_w = torch.empty(0, device=wrong_device, dtype=torch.complex128)
        out_v = torch.empty(0, device=device, dtype=torch.complex128)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.linalg.eig(a, out=(out_w, out_v))
        out_w = torch.empty(0, device=device, dtype=torch.complex128)
        out_v = torch.empty(0, device=wrong_device, dtype=torch.complex128)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.linalg.eig(a, out=(out_w, out_v))
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
def test_eig_with_nan(self, device, dtype):
    """linalg.eig must reject inputs that contain inf or nan entries."""
    for val, batch_dim in product([np.inf, np.nan], [(), (10,)]):
        a = make_tensor((*batch_dim, 5, 5), device=device, dtype=dtype)
        a[..., -1, -1] = val  # poison a single entry
        with self.assertRaisesRegex(RuntimeError, "torch.linalg.eig: input tensor should not"):
            torch.linalg.eig(a)
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
# NumPy computes only in float64 and complex128 precisions
# for float32 or complex64 results might be very different from float64 or complex128
@dtypes(torch.float64, torch.complex128)
def test_eigvals_numpy(self, device, dtype):
    """Compare torch.linalg.eigvals against numpy.linalg.eigvals.

    Eigenvalues come back in no particular order, so both results are
    sorted (descending, along the last axis) before being compared.
    """
    def run_test(shape, *, symmetric=False):
        from torch.testing._internal.common_utils import random_symmetric_matrix

        if not dtype.is_complex and symmetric:
            # for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
            # unlike NumPy the result is not cast to float32 or float64 dtype in this case
            a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
        else:
            a = make_tensor(shape, dtype=dtype, device=device)

        actual = torch.linalg.eigvals(a)

        # compare with NumPy
        # the eigenvalues are not necessarily ordered
        # so order of NumPy and PyTorch can be different
        expected = np.linalg.eigvals(a.cpu().numpy())

        # sort NumPy output
        # BUG FIX: the reversal must happen along the sort (last) axis.
        # A plain `[::-1]` reversed the *first* axis, which for batched
        # inputs paired batch b with the sort order computed for batch
        # B-1-b, scrambling both operands of the comparison.
        ind = np.argsort(expected, axis=-1)[..., ::-1]
        expected = np.take_along_axis(expected, ind, axis=-1)

        # sort PyTorch output
        # torch.argsort doesn't work with complex inputs, NumPy sorting on CPU is used instead
        # RuntimeError: _th_sort not supported on CUDAType for ComplexDouble
        # RuntimeError: "sorting_kernel_method_name" not implemented for 'ComplexDouble'
        actual_np = actual.cpu().numpy()
        ind = np.argsort(actual_np, axis=-1)[..., ::-1]
        sorted_actual = np.take_along_axis(actual_np, ind, axis=-1)

        # exact_dtype=False: the symmetric real path keeps a complex dtype on the torch side
        self.assertEqual(expected, sorted_actual, exact_dtype=False)

    shapes = [(0, 0),  # Empty matrix
              (5, 5),  # Single matrix
              (0, 0, 0), (0, 5, 5),  # Zero batch dimension tensors
              (2, 5, 5),  # 3-dim tensors
              (2, 1, 5, 5)]  # 4-dim tensors
    for shape in shapes:
        run_test(shape)
        run_test(shape, symmetric=True)
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
def test_eigvals_compare_backends(self, device, dtype):
    """Cross-check torch.linalg.eigvals on the CUDA device against the CPU
    backend, including the out= variant with contiguous and strided outputs.
    (Runs under @onlyCUDA, so `device` is a CUDA device.)"""
    def run_test(shape, *, symmetric=False):
        from torch.testing._internal.common_utils import random_symmetric_matrix

        if not dtype.is_complex and symmetric:
            # for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
            a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
        else:
            a = make_tensor(shape, dtype=dtype, device=device)

        actual = torch.linalg.eigvals(a)

        complementary_device = 'cpu'

        # compare with CPU
        expected = torch.linalg.eigvals(a.to(complementary_device))
        self.assertEqual(expected, actual)

        # check out= variant
        # eigenvalues of a real matrix are complex in general, so the out
        # tensor must use the complex counterpart of a real input dtype
        complex_dtype = dtype
        if not dtype.is_complex:
            complex_dtype = torch.complex128 if dtype == torch.float64 else torch.complex64
        out = torch.empty(0, dtype=complex_dtype, device=device)
        ans = torch.linalg.eigvals(a, out=out)
        self.assertEqual(ans, out)
        self.assertEqual(expected.to(complex_dtype), out)

        # check non-contiguous out
        if a.numel() > 0:
            # strided view: every other row of a double-sized buffer
            out = torch.empty(2 * shape[0], *shape[1:-1], dtype=complex_dtype, device=device)[::2]
            self.assertFalse(out.is_contiguous())
            ans = torch.linalg.eigvals(a, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(expected.to(complex_dtype), out)

    shapes = [(0, 0),  # Empty matrix
              (5, 5),  # Single matrix
              (0, 0, 0), (0, 5, 5),  # Zero batch dimension tensors
              (2, 5, 5),  # 3-dim tensors
              (2, 1, 5, 5)]  # 4-dim tensors
    for shape in shapes:
        run_test(shape)
        run_test(shape, symmetric=True)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_eigvals_errors_and_warnings(self, device, dtype):
    """Exercise the error/warning paths of torch.linalg.eigvals: input
    rank/squareness validation, out= dtype castability, the resize warning
    for wrongly-shaped outs, and out= device mismatch."""
    # eig requires the input to be at least 2 dimensional tensor
    a = make_tensor(2, dtype=dtype, device=device)
    with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
        torch.linalg.eigvals(a)

    # eig requires a square matrix
    a = make_tensor((2, 3), dtype=dtype, device=device)
    with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
        torch.linalg.eigvals(a)

    # if out tensor with floating dtype is passed for complex output an error is thrown
    if not dtype.is_complex:
        # The characteristic equation is p(lambda) = lambda^2 - 2lambda + 5 = 0, with roots lambda = 1[+-]2i
        a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)
        out = torch.empty(0, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "Expected eigenvalues to be safely castable"):
            torch.linalg.eigvals(a, out=out)

    # dtypes should be safely castable
    a = make_tensor((3, 3), dtype=dtype, device=device)
    out = torch.empty(0, dtype=torch.int, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got eigenvalues with dtype Int"):
        torch.linalg.eigvals(a, out=out)

    # if non-empty out tensor with wrong shape is passed a warning is given
    out = torch.empty(1, device=device, dtype=torch.complex128)
    with warnings.catch_warnings(record=True) as w:
        # Trigger warning
        torch.linalg.eigvals(a, out=out)
        # Check warning occurs (exactly one: the single out tensor was resized)
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out_w = torch.empty(0, device=wrong_device, dtype=torch.complex128)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.linalg.eigvals(a, out=out_w)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
def test_norm_old(self, device):
    """Check the legacy torch.norm entry point against numpy.linalg.norm.

    Covers full reductions, single-dim reductions, matrix norms
    ('fro'/'nuc'), 0-dim inputs, a large-tensor sanity check, and >2-D
    inputs with every ordered pair of reduction dims — for both values of
    keepdim.
    """
    def gen_error_message(input_size, p, keepdim, dim=None):
        return f"norm failed for input size {input_size}, p={p}, keepdim={keepdim}, dim={dim}"

    # 'nuc' norm uses SVD, and thus its precision is much lower than other norms.
    # test_svd takes @precisionOverride({torch.float: 1e-4, torch.cfloat: 2e-4}),
    # and here we are doing the same thing for nuc norm.
    class PrecisionContext:
        def __init__(self, test, norm):
            self.norm = norm
            self.saved_overrides = getattr(test, 'precision_overrides', None)
            self.target_test = test

        def __enter__(self):
            if 'nuc' != self.norm:
                return None
            self.target_test.precision_overrides = {torch.float: 1e-4, torch.cfloat: 2e-4}
            return self.target_test.precision_overrides

        def __exit__(self, exc_type, exc_value, tb) -> bool:
            if 'nuc' == self.norm:
                # Restore (or remove) the overrides installed by __enter__.
                if self.saved_overrides is None:
                    delattr(self.target_test, 'precision_overrides')
                else:
                    self.target_test.precision_overrides = self.saved_overrides
            # BUG FIX: this used to return True unconditionally, which
            # suppressed every exception raised inside the with-block --
            # including the assertion failures of the checks it wraps, so
            # those checks could never fail the test. Return False so
            # exceptions propagate.
            return False

    for keepdim in [False, True]:
        # full reduction
        x = torch.randn(25, device=device)
        xn = x.cpu().numpy()
        for p in [0, 1, 2, 3, 4, inf, -inf, -1, -2, -3, 1.5]:
            res = x.norm(p, keepdim=keepdim).cpu()
            expected = np.linalg.norm(xn, p, keepdims=keepdim)
            self.assertEqual(res, expected, atol=1e-5, rtol=0, msg=gen_error_message(x.size(), p, keepdim))

        # one dimension
        x = torch.randn(25, 25, device=device)
        xn = x.cpu().numpy()
        for p in [0, 1, 2, 3, 4, inf, -inf, -1, -2, -3]:
            dim = 1
            res = x.norm(p, dim, keepdim=keepdim).cpu()
            expected = np.linalg.norm(xn, p, dim, keepdims=keepdim)
            msg = gen_error_message(x.size(), p, keepdim, dim)
            self.assertEqual(res.shape, expected.shape, msg=msg)
            self.assertEqual(res, expected, msg=msg)

        # matrix norm
        for p in ['fro', 'nuc']:
            res = x.norm(p, keepdim=keepdim).cpu()
            expected = np.linalg.norm(xn, p, keepdims=keepdim)
            msg = gen_error_message(x.size(), p, keepdim)
            with PrecisionContext(self, p):
                self.assertEqual(res.shape, expected.shape, msg=msg)
                self.assertEqual(res, expected, msg=msg)

        # zero dimensions
        x = torch.randn((), device=device)
        xn = x.cpu().numpy()
        res = x.norm(keepdim=keepdim).cpu()
        expected = np.linalg.norm(xn, keepdims=keepdim)
        msg = gen_error_message(x.size(), None, keepdim)
        self.assertEqual(res.shape, expected.shape, msg=msg)
        self.assertEqual(res, expected, msg=msg)

        # larger tensor sanity check: ||ones(4n)|| == 2 * ||ones(n)|| for the 2-norm
        self.assertEqual(
            2 * torch.norm(torch.ones(10000), keepdim=keepdim),
            torch.norm(torch.ones(40000), keepdim=keepdim))

        # matrix norm with non-square >2-D tensors, all combinations of reduction dims
        x = torch.randn(5, 6, 7, 8, device=device)
        xn = x.cpu().numpy()
        for p in ['fro', 'nuc']:
            for dim in itertools.product(*[list(range(4))] * 2):
                if dim[0] == dim[1]:
                    continue
                res = x.norm(p=p, dim=dim, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, ord=p, axis=dim, keepdims=keepdim)
                msg = gen_error_message(x.size(), p, keepdim, dim)
                with PrecisionContext(self, p):
                    self.assertEqual(res.shape, expected.shape, msg=msg)
                    self.assertEqual(res, expected, msg=msg)
# Test that torch.norm with p=+/-inf propagates NaN
def test_norm_old_nan_propagation(self, device):
    """torch.norm with p=+/-inf must propagate NaN exactly like torch.linalg.norm."""
    values = (0.0, nan, 1.0)
    for a, b in itertools.product(values, values):
        vec = torch.tensor([a, b], device=device)
        for order in (inf, -inf):
            legacy = torch.norm(vec, p=order)
            reference = torch.linalg.norm(vec, ord=order)
            self.assertEqual(legacy, reference)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
def test_norm_complex_old(self, device):
    """Legacy torch.norm on complex inputs must agree with numpy.linalg.norm
    for vector norms and for the 'fro'/'nuc' matrix norms."""
    def make_msg(input_size, p, keepdim, dim=None):
        return f"complex norm failed for input size {input_size}, p={p}, keepdim={keepdim}, dim={dim}"

    vector_orders = (0, 1, 2, 3, inf, -1, -2, -3, -inf)
    matrix_orders = ('nuc', 'fro')
    for keepdim in (False, True):
        # Vector norms on a complex 25-element vector.
        vec = torch.randn(25, device=device) + 1j * torch.randn(25, device=device)
        vec_np = vec.cpu().numpy()
        for p in vector_orders:
            got = vec.norm(p, keepdim=keepdim).cpu()
            want = np.linalg.norm(vec_np, p, keepdims=keepdim)
            msg = make_msg(vec.size(), p, keepdim)
            self.assertEqual(got.shape, want.shape, msg=msg)
            self.assertEqual(got, want, msg=msg)

        # Matrix norms on a complex 25x25 matrix. 'nuc' goes through SVD,
        # hence the looser tolerances on the value comparison.
        mat = torch.randn(25, 25, device=device) + 1j * torch.randn(25, 25, device=device)
        mat_np = mat.cpu().numpy()
        for p in matrix_orders:
            got = mat.norm(p, keepdim=keepdim).cpu()
            want = np.linalg.norm(mat_np, p, keepdims=keepdim)
            msg = make_msg(mat.size(), p, keepdim)
            self.assertEqual(got.shape, want.shape, msg=msg)
            self.assertEqual(got, want, msg=msg, rtol=4e-6, atol=6e-4)
# Ensure torch.norm with p='fro' and p=2 give the same results for mutually supported input combinations
@dtypes(torch.float)
def test_norm_fro_2_equivalence_old(self, device, dtype):
    """torch.norm must give identical results for p='fro' and p=2 on every
    input/dim/keepdim combination that both orders accept."""
    input_sizes = [
        (0,),
        (10,),
        (0, 0),
        (4, 30),
        (0, 45),
        (100, 0),
        (45, 10, 23),
        (0, 23, 59),
        (23, 0, 37),
        (34, 58, 0),
        (0, 0, 348),
        (0, 3434, 0),
        (0, 0, 0),
        (5, 3, 8, 1, 3, 5)]

    def canonical(dim, ndims):
        # Map a possibly-negative dim index to its non-negative form.
        assert (dim < ndims) and (dim >= -ndims)
        return dim if dim >= 0 else dim + ndims

    for input_size in input_sizes:
        a = make_tensor(input_size, dtype=dtype, device=device, low=-9, high=9)
        ndim = a.dim()

        # None = full reduction; ints = every 1-D reduction; pairs = every
        # 2-D reduction over two distinct (canonicalized) dimensions.
        dim_settings = [None]
        dim_settings.extend(range(-ndim, ndim))
        dim_settings.extend(
            (d0, d1)
            for d0, d1 in itertools.combinations(range(-ndim, ndim), 2)
            if canonical(d0, ndim) != canonical(d1, ndim))

        for dim in dim_settings:
            for keepdim in (True, False):
                self.assertEqual(
                    torch.norm(a, p='fro', dim=dim, keepdim=keepdim),
                    torch.norm(a, p=2, dim=dim, keepdim=keepdim))
@skipIfTorchDynamo("Not a TorchDynamo suitable test")
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
def test_nuclear_norm_axes_small_brute_force_old(self, device):
    """Brute-force torch.norm(p='nuc', dim=axes) against numpy.linalg.norm
    for all small 2-D/3-D/4-D shapes, several contiguity patterns, and every
    ordered pair of reduction axes. Also checks the out= variant."""
    def check_single_nuclear_norm(x, axes):
        if self.device_type != 'cpu' and randrange(100) < 95:
            return  # too many cpu <==> device copies

        a = np.asarray(x.cpu())
        expected = np.linalg.norm(a, "nuc", axis=axes)

        ans = torch.norm(x, "nuc", dim=axes)
        self.assertTrue(ans.is_contiguous())
        self.assertEqual(ans.shape, expected.shape)
        self.assertEqual(ans.cpu(), expected, rtol=1e-02, atol=1e-03, equal_nan=True)

        # out= variant must return the provided tensor, populated
        out = torch.zeros(expected.shape, dtype=x.dtype, device=x.device)
        ans = torch.norm(x, "nuc", dim=axes, out=out)
        self.assertIs(ans, out)
        self.assertTrue(ans.is_contiguous())
        self.assertEqual(ans.shape, expected.shape)
        self.assertEqual(ans.cpu(), expected, rtol=1e-02, atol=1e-03, equal_nan=True)

    for n in range(1, 3):
        for m in range(1, 3):
            for axes in itertools.permutations([0, 1], 2):
                # 2d, inner dimensions C
                x = torch.randn(n, m, device=device)
                check_single_nuclear_norm(x, axes)

                # 2d, inner dimensions Fortran
                x = torch.randn(m, n, device=device).mT
                check_single_nuclear_norm(x, axes)

                # 2d, inner dimensions non-contiguous
                x = torch.randn(n, 2 * m, device=device)[:, ::2]
                check_single_nuclear_norm(x, axes)

                # 2d, all dimensions non-contiguous
                x = torch.randn(7 * n, 2 * m, device=device)[::7, ::2]
                check_single_nuclear_norm(x, axes)

            for o in range(1, 3):
                for axes in itertools.permutations([0, 1, 2], 2):
                    # 3d, inner dimensions C
                    x = torch.randn(o, n, m, device=device)
                    check_single_nuclear_norm(x, axes)

                    # 3d, inner dimensions Fortran
                    x = torch.randn(o, m, n, device=device).mT
                    check_single_nuclear_norm(x, axes)

                    # 3d, inner dimensions non-contiguous
                    x = torch.randn(o, n, 2 * m, device=device)[:, :, ::2]
                    check_single_nuclear_norm(x, axes)

                    # 3d, all dimensions non-contiguous
                    x = torch.randn(7 * o, 5 * n, 2 * m, device=device)[::7, ::5, ::2]
                    check_single_nuclear_norm(x, axes)

                for r in range(1, 3):
                    for axes in itertools.permutations([0, 1, 2, 3], 2):
                        # 4d, inner dimensions C
                        x = torch.randn(r, o, n, m, device=device)
                        check_single_nuclear_norm(x, axes)

                        # 4d, inner dimensions Fortran
                        x = torch.randn(r, o, n, m, device=device).mT
                        check_single_nuclear_norm(x, axes)

                        # 4d, inner dimensions non-contiguous
                        x = torch.randn(r, o, n, 2 * m, device=device)[:, :, :, ::2]
                        check_single_nuclear_norm(x, axes)

                        # 4d, all dimensions non-contiguous
                        x = torch.randn(7 * r, 5 * o, 11 * n, 2 * m, device=device)[::7, ::5, ::11, ::2]
                        check_single_nuclear_norm(x, axes)
@skipCUDAIfNoMagma
def test_nuclear_norm_exceptions_old(self, device):
    """Nuclear norm must reject sub-2-D inputs and invalid dim pairs."""
    # 0-d and 1-d inputs: every dim specification is invalid for 'nuc'.
    for data in ([], [1], [1, 2]):
        t = torch.tensor(data, dtype=torch.double, device=device)
        for axes in ((), (0,), (0, 1)):
            self.assertRaises(RuntimeError, torch.norm, t, "nuc", axes)

    mat = torch.tensor([[0, 1, 2], [3, 4, 5]], dtype=torch.double, device=device)
    # Duplicate reduction dimensions are rejected.
    self.assertRaisesRegex(RuntimeError, "must be different", torch.norm, mat, "nuc", (0, 0))
    # Out-of-range reduction dimensions are rejected.
    self.assertRaisesRegex(IndexError, "Dimension out of range", torch.norm, mat, "nuc", (0, 2))
@skipCUDAIfNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.double, torch.cdouble)
def test_svd_lowrank(self, device, dtype):
    """Validate torch.svd_lowrank on dense (known-rank) and sparse inputs.

    Checks that (u, s, v) reconstructs the input, that the singular values
    match torch.linalg.svd, and — for dense inputs of known rank — that the
    computed singular subspaces span the reference ones. Also checks that
    torch.svd_lowrank survives torch.jit.script.
    """
    from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix

    def run_subtest(actual_rank, matrix_size, batches, device, svd_lowrank, **options):
        density = options.pop('density', 1)
        if isinstance(matrix_size, int):
            rows = columns = matrix_size
        else:
            rows, columns = matrix_size
        if density == 1:
            a_input = random_lowrank_matrix(actual_rank, rows, columns, *batches, device=device, dtype=dtype)
            a = a_input
        else:
            # sparse inputs are only generated unbatched
            assert batches == ()
            a_input = random_sparse_matrix(rows, columns, density, device=device, dtype=dtype)
            a = a_input.to_dense()

        # BUG FIX: this previously read `q = min(*size)`, silently capturing
        # the loop variable `size` from the enclosing scope instead of using
        # this call's own matrix size (they coincide at every current call
        # site, but the capture is fragile and wrong for standalone use).
        q = min(rows, columns)
        u, s, v = svd_lowrank(a_input, q=q, niter=3, **options)

        # check if u, s, v is a SVD
        u, s, v = u[..., :q], s[..., :q], v[..., :q]
        A = (u * s.unsqueeze(-2)).matmul(v.mH)
        self.assertEqual(A, a, rtol=1e-7, atol=2e-7)

        # check if svd_lowrank produces same singular values as torch.linalg.svd
        U, S, Vh = torch.linalg.svd(a, full_matrices=False)
        V = Vh.mH
        self.assertEqual(s, S, rtol=5e-7, atol=1e-7)

        if density == 1:
            # actual_rank is known only for dense inputs
            #
            # check if pairs (u, U) and (v, V) span the same
            # subspaces, respectively
            u, v = u[..., :actual_rank], v[..., :actual_rank]
            U, V = U[..., :actual_rank], V[..., :actual_rank]
            expected_ones = u.mH.matmul(U).det().abs()
            self.assertEqual(expected_ones, torch.ones_like(expected_ones))
            self.assertEqual(v.mH.matmul(V).det().abs(), torch.ones_like(expected_ones))

    all_batches = [(), (1,), (3,), (2, 3)]
    for actual_rank, size, all_batches in [  # noqa: B020
        (2, (17, 4), all_batches),
        (4, (17, 4), all_batches),
        (4, (17, 17), all_batches),
        (10, (100, 40), all_batches),
        (7, (1000, 1000), [()]),
    ]:
        # dense input
        for batches in all_batches:
            run_subtest(actual_rank, size, batches, device, torch.svd_lowrank)
            if size != size[::-1]:
                # also exercise the transposed (columns > rows) shape
                run_subtest(actual_rank, size[::-1], batches, device, torch.svd_lowrank)

    # sparse input
    for size in [(17, 4), (4, 17), (17, 17), (100, 40), (40, 100), (1000, 1000)]:
        for density in [0.005, 0.1]:
            run_subtest(None, size, (), device, torch.svd_lowrank, density=density)

    # jitting support
    jitted = torch.jit.script(torch.svd_lowrank)
    actual_rank, size, batches = 2, (17, 4), ()
    run_subtest(actual_rank, size, batches, device, jitted)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@precisionOverride({torch.float: 1e-4, torch.cfloat: 2e-4})
@setLinalgBackendsToDefaultFinally
@dtypes(*floating_and_complex_types())
@serialTest()
def test_svd(self, device, dtype):
    """Smoke-test linalg.svd, legacy torch.svd, and linalg.svdvals.

    For every combination of batch shape, m, n and driver — under each
    available linalg backend — checks that the factors reconstruct the
    input and that all entry points agree on the singular values.
    """
    # tests linalg.svd, svd, linalg.svdvals
    make_arg = partial(make_tensor, dtype=dtype, device=device)

    backends = ["default"]

    if torch.device(device).type == 'cuda':
        if torch.cuda.has_magma:
            backends.append("magma")
        if has_cusolver() or has_hipsolver():
            backends.append("cusolver")

    ns = (12, 4, 2, 0)
    batches = ((), (0,), (1,), (2,), (2, 1), (0, 2))
    drivers = (None, 'gesvd', 'gesvdj', 'gesvda')

    for backend in backends:
        torch.backends.cuda.preferred_linalg_library(backend)

        for batch, m, n, driver in product(batches, ns, ns, drivers):
            if not (backend == 'cusolver' or driver is None):
                # only test cases below and skip otherwise:
                # - backend == 'cusolver' (driver can be anything)
                # - backend != 'cusolver' (driver should only be None)
                continue

            shape = batch + (m, n)
            k = min(m, n)
            A = make_arg(shape)
            # reduced SVD must reconstruct A
            U, S, Vh = torch.linalg.svd(A, full_matrices=False, driver=driver)
            self.assertEqual((U @ S.to(A.dtype).diag_embed()) @ Vh, A)

            # full SVD: only the first k columns/rows of the factors are
            # needed to rebuild A, and the singular values must match
            U_f, S_f, Vh_f = torch.linalg.svd(A, full_matrices=True, driver=driver)
            self.assertEqual(S_f, S)
            self.assertEqual((U_f[..., :k] @ S_f.to(A.dtype).diag_embed()) @ Vh_f[..., :k, :], A)

            S_s = torch.linalg.svdvals(A, driver=driver)
            self.assertEqual(S_s, S)

            # legacy torch.svd returns V (not V^H), hence the .mH below
            U, S, V = torch.svd(A, some=True)
            self.assertEqual((U @ S.to(A.dtype).diag_embed()) @ V.mH, A)

            U_f, S_f, V_f = torch.svd(A, some=False)
            self.assertEqual(S_f, S)
            self.assertEqual((U_f[..., :k] @ S_f.to(A.dtype).diag_embed()) @ V_f[..., :k].mH, A)

            S_s = torch.svd(A, compute_uv=False).S
            self.assertEqual(S_s, S)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.complex128)
def test_invariance_error_spectral_decompositions(self, device, dtype):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=True)
A = make_arg((3, 3))
with self.assertRaisesRegex(RuntimeError, "ill-defined"):
U, _, Vh = torch.linalg.svd(A, full_matrices=False)
(U + Vh).sum().abs().backward()
A = make_arg((3, 3))
with self.assertRaisesRegex(RuntimeError, "ill-defined"):
V = torch.linalg.eig(A).eigenvectors
V.sum().abs().backward()
A = make_arg((3, 3))
A = A + A.mH
with self.assertRaisesRegex(RuntimeError, "ill-defined"):
Q = torch.linalg.eigh(A).eigenvectors
Q.sum().abs().backward()
# I don't know how much memory this test uses but on complex64 it needs at least 4GB
@largeTensorTest("4GB", device="cuda")
@serialTest(TEST_CUDA)
@skipCUDAIfNoCusolver # MAGMA backend doesn't work in this case
@precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_svd_memory_allocation(self, device, dtype):
    """Regression test for gh-61949: SVD of a very wide matrix used to
    allocate incorrectly-sized outputs and then narrow them."""
    rows, cols = 3, 2**20
    wide = make_tensor((rows, cols), dtype=dtype, device=device)
    # Both calls must simply succeed and agree on the singular values.
    singular_values = torch.linalg.svdvals(wide)
    decomposition = torch.linalg.svd(wide, full_matrices=False)
    self.assertEqual(decomposition.S, singular_values)
def cholesky_solve_test_helper(self, A_dims, b_dims, upper, device, dtype):
    """Return (b, A, L): a random right-hand side, a random Hermitian
    positive-definite matrix, and its Cholesky factor (upper or lower)."""
    from torch.testing._internal.common_utils import random_hermitian_pd_matrix

    rhs = torch.randn(*b_dims, dtype=dtype, device=device)
    hermitian_pd = random_hermitian_pd_matrix(*A_dims, dtype=dtype, device=device)
    factor = torch.cholesky(hermitian_pd, upper=upper)
    return rhs, hermitian_pd, factor
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_cholesky_solve(self, device, dtype):
    """Solve A x = b from a Cholesky factor and verify A @ x reproduces b."""
    sizes = zip([2, 3, 5], [3, 5, 7])
    for (k, n), upper in itertools.product(sizes, (True, False)):
        b, A, L = self.cholesky_solve_test_helper((n,), (n, k), upper, device, dtype)
        solution = torch.cholesky_solve(b, L, upper=upper)
        # The residual check is done in NumPy on the CPU.
        self.assertEqual(b, np.matmul(A.cpu(), solution.cpu()))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_cholesky_solve_batched(self, device, dtype):
    """Batched cholesky_solve must match per-matrix solves stacked together."""
    def check_batch(A_dims, b_dims, upper):
        b, A, L = self.cholesky_solve_test_helper(A_dims, b_dims, upper, device, dtype)
        # Reference: solve each batch element independently, then stack.
        expected = torch.stack(
            [torch.cholesky_solve(b[i], L[i], upper=upper) for i in range(b_dims[0])])
        actual = torch.cholesky_solve(b, L, upper=upper)
        self.assertEqual(actual, expected)
        # The batched solution must also satisfy A @ x == b.
        self.assertEqual(b, np.matmul(A.cpu(), actual.cpu()))

    for upper, batchsize in itertools.product([True, False], [1, 3, 4]):
        check_batch((5, batchsize), (batchsize, 5, 10), upper)
@slowTest
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_cholesky_solve_batched_many_batches(self, device, dtype):
    """Stress cholesky_solve with large batch counts on either the A or b side."""
    for A_dims, b_dims in zip([(5, 256, 256), (5,)], [(5, 10), (512, 512, 5, 10)]):
        for upper in (True, False):
            b, A, L = self.cholesky_solve_test_helper(A_dims, b_dims, upper, device, dtype)
            solution = torch.cholesky_solve(b, L, upper)
            reconstructed = torch.matmul(A, solution)
            # b broadcasts against the batch dims of A, hence expand_as.
            self.assertEqual(reconstructed, b.expand_as(reconstructed))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_cholesky_solve_batched_broadcasting(self, device, dtype):
    """Check that cholesky_solve broadcasts the batch dimensions of A and b,
    comparing against numpy.linalg.solve (which broadcasts the same way);
    also exercises the out= variant."""
    from numpy.linalg import solve
    from torch.testing._internal.common_utils import random_hermitian_pd_matrix

    def run_test(A_dims, b_dims, upper):
        A_matrix_size = A_dims[-1]
        A_batch_dims = A_dims[:-2]
        A = random_hermitian_pd_matrix(A_matrix_size, *A_batch_dims,
                                       dtype=dtype, device='cpu')
        b = torch.randn(*b_dims, dtype=dtype, device='cpu')
        # reference solution computed on the CPU by NumPy
        x_exp = torch.tensor(solve(A.numpy(), b.numpy()), dtype=dtype, device=device)
        A, b = A.to(dtype=dtype, device=device), b.to(dtype=dtype, device=device)
        L = torch.linalg.cholesky(A, upper=upper)
        x = torch.cholesky_solve(b, L, upper=upper)
        self.assertEqual(x, x_exp)
        # https://github.com/pytorch/pytorch/issues/42695
        x = torch.cholesky_solve(b, L, upper=upper, out=x)
        self.assertEqual(x, x_exp)

    # test against numpy.linalg.solve
    for upper in [True, False]:
        run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6), upper)  # no broadcasting
        run_test((2, 1, 3, 4, 4), (4, 6), upper)  # broadcasting b
        run_test((4, 4), (2, 1, 3, 4, 2), upper)  # broadcasting A
        run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5), upper)  # broadcasting A & b
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky_solve_out_errors_and_warnings(self, device, dtype):
    """Exercise out= validation for cholesky_solve: dtype castability,
    device matching, and the resize warning for a wrongly-shaped out."""
    # dtypes should be safely castable
    a = torch.eye(2, dtype=dtype, device=device)
    b = torch.randn(2, 1, dtype=dtype, device=device)
    out = torch.empty(0, dtype=torch.int, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
        torch.cholesky_solve(b, a, out=out)

    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out = torch.empty(0, dtype=dtype, device=wrong_device)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.cholesky_solve(b, a, out=out)

    # if out tensor with wrong shape is passed a warning is given
    with warnings.catch_warnings(record=True) as w:
        out = torch.empty(1, dtype=dtype, device=device)
        # Trigger warning
        torch.cholesky_solve(b, a, out=out)
        # Check warning occurs (exactly one resize warning)
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_cholesky_solve_backward(self, device, dtype):
b_dims = (5, 2)
L_dims = (5, 5)
for test_L_grad in (False, True):
b = torch.randn(*b_dims, dtype=dtype, device=device, requires_grad=True)
L = torch.randn(*L_dims, dtype=dtype, device=device, requires_grad=test_L_grad)
if test_L_grad:
torch.autograd.gradcheck(lambda b, L: torch.cholesky_solve(b, torch.tril(L), upper=False), (b, L))
else:
torch.autograd.gradcheck(lambda b: torch.cholesky_solve(b, L, upper=False), (b,))
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 2e-3, torch.complex64: 2e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_inverse(self, device, dtype):
    """Test torch.inverse / torch.linalg.inv / torch.linalg.inv_ex against
    NumPy, covering the out= variant, non-contiguous inputs, and batched
    inputs (including zero-sized batches and empty matrices)."""
    make_fullrank = make_fullrank_matrices_with_distinct_singular_values
    make_arg = partial(make_fullrank, device=device, dtype=dtype)

    def run_test(torch_inverse, matrix, batches, n):
        matrix_inverse = torch_inverse(matrix)

        # Compare against NumPy output
        # NumPy uses 'gesv' LAPACK routine solving the equation A A_inv = I
        # But in PyTorch 'gertf' + 'getrs' is used. As such, there may be some element-wise differences
        expected = np.linalg.inv(matrix.cpu().numpy())
        self.assertEqual(matrix_inverse, expected, atol=self.precision, rtol=self.precision)

        # Additional correctness tests, check matrix*matrix_inverse == identity
        identity = torch.eye(n, dtype=dtype, device=device)
        self.assertEqual(identity.expand_as(matrix), np.matmul(matrix.cpu(), matrix_inverse.cpu()))
        self.assertEqual(identity.expand_as(matrix), np.matmul(matrix_inverse.cpu(), matrix.cpu()))

        # check the out= variant
        # prepare the expected out tensor
        # (the double .mT gives a transposed-layout out tensor, exercising
        # the non-default memory layout path)
        matrix_inverse_out = torch.empty(*batches, n, n, dtype=dtype, device=device)
        matrix_inverse_out_t = matrix_inverse_out.mT.clone(memory_format=torch.contiguous_format)
        matrix_inverse_out = matrix_inverse_out_t.mT
        ans = torch_inverse(matrix, out=matrix_inverse_out)
        self.assertEqual(matrix_inverse_out, ans, atol=0, rtol=0)
        self.assertEqual(matrix_inverse_out, matrix_inverse, atol=0, rtol=0)

        # batched matrices: 3+ dimensional tensors, check matrix_inverse same as single-inverse for each matrix
        if matrix.ndim > 2 and batches[0] != 0:
            expected_inv_list = []
            p = int(np.prod(batches))  # use `p` instead of -1, so that the test works for empty input as well
            for mat in matrix.contiguous().view(p, n, n):
                expected_inv_list.append(torch_inverse(mat))
            expected_inv = torch.stack(expected_inv_list).view(*batches, n, n)
            if self.device_type == 'cuda' and dtype in [torch.float32, torch.complex64]:
                # single-inverse is done using cuSOLVER, while batched inverse is done using MAGMA
                # individual values can be significantly different for fp32, hence rather high rtol is used
                # the important thing is that torch_inverse passes above checks with identity
                self.assertEqual(matrix_inverse, expected_inv, atol=1e-1, rtol=1e-2)
            else:
                self.assertEqual(matrix_inverse, expected_inv)

    # helper function for testing torch.linalg.inv_ex
    def test_inv_ex(input, out=None):
        if out is not None:
            info = torch.empty(0, dtype=torch.int32, device=device)
            return torch.linalg.inv_ex(input, out=(out, info)).inverse
        return torch.linalg.inv_ex(input).inverse

    for torch_inverse in [torch.inverse, torch.linalg.inv, test_inv_ex]:
        for batches, n in itertools.product(
            [[], [0], [2], [2, 1]],
            [0, 5]
        ):
            matrices = make_arg(*batches, n, n)
            run_test(torch_inverse, matrices, batches, n)

            # test non-contiguous input
            run_test(torch_inverse, matrices.mT, batches, n)
            if n > 0:
                # strided view of a larger full-rank matrix
                run_test(
                    torch_inverse,
                    make_arg(*batches, 2 * n, 2 * n)
                    .view(-1, n * 2, n * 2)[:, ::2, ::2].view(*batches, n, n),
                    batches, n
                )
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_inv_ex_info_device(self, device, dtype):
A = torch.eye(3, 3, dtype=dtype, device=device)
info = torch.linalg.inv_ex(A).info
self.assertTrue(info.device == A.device)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_inv_ex_singular(self, device, dtype):
    """linalg.inv_ex on singular input: `info` holds the 1-based index of the
    zero pivot (per batch element), and check_errors=True escalates it to a
    torch.linalg.LinAlgError."""
    # if the input matrix is not invertible, info with positive integer is returned
    A = torch.eye(3, 3, dtype=dtype, device=device)
    A[-1, -1] = 0  # Now A is singular
    info = torch.linalg.inv_ex(A).info
    self.assertEqual(info, 3)
    with self.assertRaisesRegex(torch.linalg.LinAlgError,
                                r'diagonal element 3 is zero, the inversion could not be completed'):
        torch.linalg.inv_ex(A, check_errors=True)

    # if at least one matrix in the batch is not positive definite,
    # batched info with positive integer for the corresponding matrix is returned
    A = torch.eye(3, 3, dtype=dtype, device=device)
    A = A.reshape((1, 3, 3))
    A = A.repeat(5, 1, 1)
    A[3, -2, -2] = 0  # Now A[3] is singular
    info = torch.linalg.inv_ex(A).info

    expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
    expected_info[3] = 2  # zero pivot at 1-based diagonal position 2
    self.assertEqual(info, expected_info)
    with self.assertRaisesRegex(torch.linalg.LinAlgError, r'\(Batch element 3\): The diagonal element 2 is zero'):
        torch.linalg.inv_ex(A, check_errors=True)
@slowTest
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 2e-3, torch.complex64: 2e-3,
torch.float64: 1e-5, torch.complex128: 1e-5})
def test_inverse_many_batches(self, device, dtype):
    """Batched inverse with many/large batches must agree with NumPy."""
    make_fullrank = make_fullrank_matrices_with_distinct_singular_values
    make_arg = partial(make_fullrank, device=device, dtype=dtype)

    def check(torch_inverse, batch, n):
        mats = make_arg(batch, n, n)
        inverses = torch_inverse(mats)
        # Reference inverses computed by NumPy on the CPU.
        reference = np.linalg.inv(mats.cpu().numpy())
        self.assertEqual(inverses, reference, atol=self.precision, rtol=1e-3)

    for torch_inverse in (torch.inverse, torch.linalg.inv):
        check(torch_inverse, 5, 256)
        check(torch_inverse, 3, 512)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@onlyNativeDeviceTypes # TODO: XLA doesn't raise exception
@dtypes(*floating_and_complex_types())
@skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/129882")
def test_inverse_errors(self, device, dtype):
# inverse expects batches of square matrices as input
with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
torch.inverse(torch.randn(2, 3, 4, 3))
# if input is not invertible, RuntimeError is raised mentioning the first non-invertible batch
def run_test_singular_input(batch_dim, n):
x = torch.eye(3, 3, dtype=dtype, device=device).reshape((1, 3, 3)).repeat(batch_dim, 1, 1)
x[n, -1, -1] = 0
with self.assertRaisesRegex(torch.linalg.LinAlgError, rf'\(Batch element {n}\): The diagonal element 3 is zero'):
torch.inverse(x)
for params in [(1, 0), (2, 0), (2, 1), (4, 0), (4, 2), (10, 2)]:
run_test_singular_input(*params)
@unittest.skipIf(IS_FBCODE or IS_SANDCASTLE, "Test fails for float64 on GPU (P100, V100) on Meta infra")
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@onlyNativeDeviceTypes # TODO: XLA doesn't raise exception
@dtypes(*floating_and_complex_types())
def test_inverse_errors_large(self, device, dtype):
    """Batched inverse of large singular matrices must raise cleanly rather
    than crash (regression test for gh-51930)."""
    stack = torch.empty((8, 10, 616, 616), dtype=dtype, device=device)
    stack[:] = torch.eye(616, dtype=dtype, device=device)
    stack[..., 10, 10] = 0  # zero one diagonal entry of every matrix
    with self.assertRaisesRegex(torch.linalg.LinAlgError, r'\(Batch element 0\): The diagonal element 11 is zero'):
        torch.inverse(stack)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3, torch.float64: 1e-7, torch.complex128: 1e-7})
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_pinv(self, device, dtype):
        """Check torch.linalg.pinv against the Moore-Penrose identities and NumPy."""
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix
        def run_test_main(A, hermitian):
            # Testing against definition for pseudo-inverses:
            # A @ A+ @ A == A, A+ @ A @ A+ == A+, and A @ A+ / A+ @ A Hermitian
            A_pinv = torch.linalg.pinv(A, hermitian=hermitian)
            np_A = A.cpu().numpy()
            np_A_pinv = A_pinv.cpu().numpy()
            if A.numel() > 0:
                self.assertEqual(A, np_A @ np_A_pinv @ np_A, atol=self.precision, rtol=self.precision)
                self.assertEqual(A_pinv, np_A_pinv @ np_A @ np_A_pinv, atol=self.precision, rtol=self.precision)
                self.assertEqual(np_A @ np_A_pinv, (np_A @ np_A_pinv).conj().swapaxes(-2, -1))
                self.assertEqual(np_A_pinv @ np_A, (np_A_pinv @ np_A).conj().swapaxes(-2, -1))
            else:
                # empty input: only the (transposed) shape of the result can be checked
                self.assertEqual(A.shape, A_pinv.shape[:-2] + (A_pinv.shape[-1], A_pinv.shape[-2]))
            # Check out= variant
            out = torch.empty_like(A_pinv)
            ans = torch.linalg.pinv(A, hermitian=hermitian, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, A_pinv)
        def run_test_numpy(A, hermitian):
            # Check against NumPy output
            # Test float rcond, and specific value for each matrix
            rconds = [float(torch.rand(1)), ]
            # Test different types of rcond tensor
            for rcond_type in all_types():
                rconds.append(torch.rand(A.shape[:-2], dtype=torch.double, device=device).to(rcond_type))
            # Test broadcasting of rcond
            if A.ndim > 2:
                rconds.append(torch.rand(A.shape[-3], device=device))
            for rcond in rconds:
                actual = torch.linalg.pinv(A, rcond=rcond, hermitian=hermitian)
                # rcond and rtol spellings must produce identical results
                torch_rtol = torch.linalg.pinv(A, rtol=rcond, hermitian=hermitian)
                self.assertEqual(actual, torch_rtol)
                numpy_rcond = rcond if isinstance(rcond, float) else rcond.cpu().numpy()
                expected = np.linalg.pinv(A.cpu().numpy(), rcond=numpy_rcond, hermitian=hermitian)
                self.assertEqual(actual, expected, atol=self.precision, rtol=1e-5)
        for sizes in [(5, 5), (3, 5, 5), (3, 2, 5, 5), # square matrices
                      (3, 2), (5, 3, 2), (2, 5, 3, 2), # fat matrices
                      (2, 3), (5, 2, 3), (2, 5, 2, 3), # thin matrices
                      (0, 0), (0, 2), (2, 0), (3, 0, 0), (0, 3, 0), (0, 0, 3)]: # zero numel matrices
            A = torch.randn(*sizes, dtype=dtype, device=device)
            hermitian = False
            run_test_main(A, hermitian)
            run_test_numpy(A, hermitian)
        # Check hermitian = True
        for sizes in [(5, 5), (3, 5, 5), (3, 2, 5, 5), # square matrices
                      (0, 0), (3, 0, 0), ]: # zero numel square matrices
            A = random_hermitian_pd_matrix(sizes[-1], *sizes[:-2], dtype=dtype, device=device)
            hermitian = True
            run_test_main(A, hermitian)
            run_test_numpy(A, hermitian)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_pinv_errors_and_warnings(self, device, dtype):
        """Exercise the error and warning paths of torch.linalg.pinv."""
        # pinv requires at least 2D tensor
        a = torch.randn(1, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "expected a tensor with 2 or more dimensions"):
            torch.linalg.pinv(a)
        # if non-empty out tensor with wrong shape is passed a warning is given
        a = torch.randn(3, 3, dtype=dtype, device=device)
        out = torch.empty(7, 7, dtype=dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.pinv(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes of out and input should be safely castable
        out = torch.empty_like(a).to(torch.int)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.linalg.pinv(a, out=out)
        if torch.cuda.is_available():
            # device of out and input should match
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty_like(a).to(wrong_device)
            with self.assertRaisesRegex(RuntimeError, "Expected result and input tensors to be on the same device"):
                torch.linalg.pinv(a, out=out)
            # device of rcond and input should match
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            rcond = torch.full((), 1e-2, device=wrong_device)
            with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
                torch.linalg.pinv(a, rcond=rcond)
        # rcond can't be complex
        rcond = torch.full((), 1j, device=device)
        with self.assertRaisesRegex(RuntimeError, "rcond tensor of complex type is not supported"):
            torch.linalg.pinv(a, rcond=rcond)
        # atol can't be complex
        atol = torch.full((), 1j, device=device)
        with self.assertRaisesRegex(RuntimeError, "atol tensor of complex type is not supported"):
            torch.linalg.pinv(a, atol=atol)
        # rtol can't be complex
        rtol = torch.full((), 1j, device=device)
        with self.assertRaisesRegex(RuntimeError, "rtol tensor of complex type is not supported"):
            torch.linalg.pinv(a, rtol=rtol)
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    @skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/129882")
    def test_inv_errors_and_warnings(self, device, dtype):
        """Exercise the error and warning paths of torch.linalg.inv."""
        # inv expects batches of square matrices as input
        a = torch.randn(2, 3, 4, 3, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.linalg.inv(a)
        # inv requires the input to be at least 2 dimensional tensor
        a = torch.randn(2, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
            torch.linalg.inv(a)
        # if input is not invertible, RuntimeError is raised mentioning the first non-invertible batch
        def run_test_singular_input(batch_dim, n):
            # batch of identities with batch element `n` made singular
            a = torch.eye(3, 3, dtype=dtype, device=device).reshape((1, 3, 3)).repeat(batch_dim, 1, 1)
            a[n, -1, -1] = 0
            with self.assertRaisesRegex(torch.linalg.LinAlgError, rf"\(Batch element {n}\): The diagonal element 3 is zero"):
                torch.linalg.inv(a)
        for params in [(1, 0), (2, 0), (2, 1), (4, 0), (4, 2), (10, 2)]:
            run_test_singular_input(*params)
        # dtypes should match
        a = torch.eye(2, dtype=dtype, device=device)
        out = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got int instead"):
            torch.linalg.inv(a, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.inv(a, out=out)
        # if out tensor with wrong shape is passed a warning is given
        with warnings.catch_warnings(record=True) as w:
            a = torch.eye(2, dtype=dtype, device=device)
            out = torch.empty(1, dtype=dtype, device=device)
            # Trigger warning
            torch.linalg.inv(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # if out tensor in batched column major format but with wrong shape a warning is given
        with warnings.catch_warnings(record=True) as w:
            a = torch.eye(2, dtype=dtype, device=device)
            # build a column-major (Fortran-contiguous) out of the wrong size:
            # the .mT round-trip makes out.mT contiguous rather than out itself
            out = torch.empty(3, 3, dtype=dtype, device=device)
            out = out.mT.clone(memory_format=torch.contiguous_format)
            out = out.mT
            self.assertTrue(out.mT.is_contiguous())
            # Trigger warning
            torch.linalg.inv(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
def solve_test_helper(self, A_dims, b_dims, device, dtype):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_A = partial(make_fullrank, device=device, dtype=dtype)
b = torch.randn(*b_dims, dtype=dtype, device=device)
A = make_A(*A_dims)
return b, A
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3})
    def test_solve(self, device, dtype):
        """Check torch.linalg.solve correctness (A @ x == b) and parity with NumPy."""
        def run_test(n, batch, rhs):
            A_dims = (*batch, n, n)
            b_dims = (*batch, n, *rhs)
            b, A = self.solve_test_helper(A_dims, b_dims, device, dtype)
            # Correctness test
            x = torch.linalg.solve(A, b)
            if rhs == ():
                # vector right-hand side: matmul needs an explicit trailing dim
                Ax = np.matmul(A.cpu(), x.unsqueeze(-1).cpu())
                Ax.squeeze_(-1)
            else:
                Ax = np.matmul(A.cpu(), x.cpu())
            self.assertEqual(b.expand_as(Ax), Ax)
            # Check against NumPy
            if rhs == ():
                # In NumPy 2, "b" can no longer be a vector (i.e. rhs == ()) if has batch dimensions.
                # So, reshape it to a matrix and back. Related documentation:
                # https://numpy.org/doc/1.26/reference/generated/numpy.linalg.solve.html
                # https://numpy.org/doc/2.0/reference/generated/numpy.linalg.solve.html
                expected = np.linalg.solve(A.cpu().numpy(), b.cpu().numpy().reshape(*b.shape, 1)).reshape(b.shape)
            else:
                expected = np.linalg.solve(A.cpu().numpy(), b.cpu().numpy())
            self.assertEqual(x, expected)
        batches = [(), (0, ), (3, ), (2, 3)]
        ns = [0, 5, 32]
        nrhs = [(), (1, ), (5, )]
        for n, batch, rhs in itertools.product(ns, batches, nrhs):
            run_test(n, batch, rhs)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_solve_batched_broadcasting(self, device, dtype):
from numpy.linalg import solve
def run_test(A_dims, B_dims):
A_matrix_size = A_dims[-1]
A_batch_dims = A_dims[:-2]
B, A = self.solve_test_helper(A_batch_dims + (A_matrix_size, A_matrix_size), B_dims, device, dtype)
actual = torch.linalg.solve(A, B)
expected = solve(A.cpu().numpy(), B.cpu().numpy())
self.assertEqual(actual, expected)
# test against numpy.linalg.solve
run_test((5, 5), (2, 0, 5, 3)) # broadcasting with 0 batch dim
run_test((2, 0, 5, 5), (5, 3)) # broadcasting with 0 batch dim
run_test((2, 1, 3, 4, 4), (4, 6)) # broadcasting B
run_test((4, 4), (2, 1, 3, 4, 2)) # broadcasting A
run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5)) # broadcasting A & B
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
@precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})
def test_tensorsolve(self, device, dtype):
def run_test(a_shape, dims):
a = torch.randn(a_shape, dtype=dtype, device=device)
b = torch.randn(a_shape[:2], dtype=dtype, device=device)
result = torch.linalg.tensorsolve(a, b, dims=dims)
expected = np.linalg.tensorsolve(a.cpu().numpy(), b.cpu().numpy(), axes=dims)
self.assertEqual(result, expected)
# check the out= variant
out = torch.empty_like(result)
ans = torch.linalg.tensorsolve(a, b, dims=dims, out=out)
self.assertEqual(ans, out)
self.assertEqual(ans, result)
a_shapes = [(2, 3, 6), (3, 4, 4, 3)]
dims = [None, (0, 2)]
for a_shape, d in itertools.product(a_shapes, dims):
run_test(a_shape, d)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_tensorsolve_empty(self, device, dtype):
# Check for empty inputs. NumPy does not work for these cases.
a = torch.empty(0, 0, 1, 2, 3, 0, dtype=dtype, device=device)
b = torch.empty(a.shape[:2], dtype=dtype, device=device)
x = torch.linalg.tensorsolve(a, b)
self.assertEqual(torch.tensordot(a, x, dims=len(x.shape)), b)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float32)
    def test_tensorsolve_errors_and_warnings(self, device, dtype):
        """Exercise the error and warning paths of torch.linalg.tensorsolve."""
        # tensorsolve expects the input that can be reshaped to a square matrix
        a = torch.eye(2 * 3 * 4, dtype=dtype, device=device).reshape((2 * 3, 4, 2, 3, 4))
        b = torch.randn(8, 4, dtype=dtype, device=device)
        # sanity check that this (a, b) pair really violates the requirement
        self.assertTrue(np.prod(a.shape[2:]) != np.prod(b.shape))
        with self.assertRaisesRegex(RuntimeError, r'Expected self to satisfy the requirement'):
            torch.linalg.tensorsolve(a, b)
        # if non-empty out tensor with wrong shape is passed a warning is given
        out = torch.empty_like(a)
        b = torch.randn(6, 4, dtype=dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.tensorsolve(a, b, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out = torch.empty_like(a).to(torch.int)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.linalg.tensorsolve(a, b, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, dtype=dtype, device=wrong_device)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.tensorsolve(a, b, out=out)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float: 1e-3, torch.cfloat: 1e-3})
    def test_tensorinv(self, device, dtype):
        """Compare torch.linalg.tensorinv (including out=) with numpy.linalg.tensorinv."""
        def run_test(a_shape, ind):
            a = torch.randn(a_shape, dtype=dtype, device=device)
            a_numpy = a.cpu().numpy()
            result = torch.linalg.tensorinv(a, ind=ind)
            expected = np.linalg.tensorinv(a_numpy, ind=ind)
            self.assertEqual(result, expected)
            # check the out= variant
            out = torch.empty_like(result)
            ans = torch.linalg.tensorinv(a, ind=ind, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, result)
        # compare to NumPy output; each shape satisfies
        # prod(shape[:ind]) == prod(shape[ind:]) as tensorinv requires
        run_test((12, 3, 4), ind=1)
        run_test((3, 8, 24), ind=2)
        run_test((18, 3, 3, 2), ind=1)
        run_test((1, 4, 2, 2), ind=2)
        run_test((2, 3, 5, 30), ind=3)
        run_test((24, 2, 2, 3, 2), ind=1)
        run_test((3, 4, 2, 3, 2), ind=2)
        run_test((1, 2, 3, 2, 3), ind=3)
        run_test((3, 2, 1, 2, 12), ind=4)
@skipMeta # See https://github.com/pytorch/pytorch/issues/53739
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_tensorinv_empty(self, device, dtype):
for ind in range(1, 4):
# Check for empty inputs. NumPy does not work for these cases.
a = torch.empty(0, 0, 1, 2, 3, 0, dtype=dtype, device=device)
a_inv = torch.linalg.tensorinv(a, ind=ind)
self.assertEqual(a_inv.shape, a.shape[ind:] + a.shape[:ind])
    @skipMeta # See https://github.com/pytorch/pytorch/issues/53739
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_tensorinv_errors_and_warnings(self, device, dtype):
        """Exercise the error and warning paths of torch.linalg.tensorinv."""
        def check_shape(a_shape, ind):
            # tensorinv requires the input to satisfy
            # prod(a.shape[ind:]) == prod(a.shape[:ind])
            a = torch.randn(a_shape, dtype=dtype, device=device)
            with self.assertRaisesRegex(RuntimeError, "Expected self to satisfy the requirement"):
                torch.linalg.tensorinv(a, ind=ind)
        def check_ind(a_shape, ind):
            # ind must be a strictly positive integer
            a = torch.randn(a_shape, dtype=dtype, device=device)
            with self.assertRaisesRegex(RuntimeError, "Expected a strictly positive integer"):
                torch.linalg.tensorinv(a, ind=ind)
        def check_out(a_shape, ind):
            # if non-empty out tensor with wrong shape is passed a warning is given
            a = torch.randn(a_shape, dtype=dtype, device=device)
            out = torch.empty_like(a)
            with warnings.catch_warnings(record=True) as w:
                # Trigger warning
                torch.linalg.tensorinv(a, ind=ind, out=out)
                # Check warning occurs
                self.assertEqual(len(w), 1)
                self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
            # dtypes should be safely castable
            out = torch.empty(0, dtype=torch.int, device=device)
            with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
                torch.linalg.tensorinv(a, ind=ind, out=out)
            # device should match
            if torch.cuda.is_available():
                wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
                out = torch.empty(0, dtype=dtype, device=wrong_device)
                with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                    torch.linalg.tensorinv(a, ind=ind, out=out)
        # test for invalid shape
        check_shape((2, 3, 4), ind=1)
        check_shape((1, 2, 3, 4), ind=3)
        # test for invalid ind
        check_ind((12, 3, 4), ind=-1)
        check_ind((18, 3, 3, 2), ind=0)
        # test for invalid out tensor
        check_out((12, 3, 4), ind=1)
        check_out((3, 8, 24), ind=2)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_tensorinv_singular_input(self, device, dtype):
def check_singular_input(a_shape, ind):
prod_ind_end = np.prod(a_shape[ind:])
a = torch.eye(prod_ind_end, dtype=dtype, device=device)
a[-1, -1] = 0 # Now `a` is singular
a = a.reshape(a_shape)
with self.assertRaisesRegex(torch.linalg.LinAlgError, "The diagonal element"):
torch.linalg.tensorinv(a, ind=ind)
# test for non-invertible input
check_singular_input((12, 3, 4), ind=1)
check_singular_input((3, 6, 18), ind=2)
    def _test_dot_vdot_vs_numpy(self, device, dtype, torch_fn, np_fn):
        """Shared driver comparing a torch dot-like op against its NumPy equivalent.

        Covers empty, contiguous, 0-strided (expanded) and 2-strided inputs.
        bfloat16 is computed in float on the NumPy side since NumPy lacks bfloat16.
        """
        def check(x, y):
            # Compare with numpy
            res = torch_fn(x, y)
            if x.dtype == torch.bfloat16:
                ref = torch.from_numpy(np.array(np_fn(x.cpu().float().numpy(), y.cpu().float().numpy())))
            else:
                ref = torch.from_numpy(np.array(np_fn(x.cpu().numpy(), y.cpu().numpy())))
            if res.dtype == torch.bfloat16:
                self.assertEqual(res.cpu(), ref.bfloat16())
            else:
                self.assertEqual(res.cpu(), ref)
            # Test out variant
            out = torch.empty_like(res)
            torch_fn(x, y, out=out)
            self.assertEqual(out, res)
        # Empty
        x = torch.tensor([], dtype=dtype, device=device)
        y = torch.tensor([], dtype=dtype, device=device)
        check(x, y)
        # Contiguous
        x = 0.1 * torch.randn(5000, dtype=dtype, device=device)
        y = 0.1 * torch.randn(5000, dtype=dtype, device=device)
        check(x, y)
        # 0 strided
        y = 0.1 * torch.randn(1, dtype=dtype, device=device).expand(5000)
        check(x, y)
        # 2 strided
        check(x[::2], y[::2])
    @dtypes(torch.float, torch.cfloat, torch.bfloat16, torch.float16)
    @dtypesIfCUDA(torch.float, torch.cfloat)
    @precisionOverride({torch.cfloat: 1e-4, torch.float32: 5e-5, torch.bfloat16: 1e-0})
    def test_dot_vs_numpy(self, device, dtype):
        """Compare torch.dot against np.dot (see _test_dot_vdot_vs_numpy)."""
        self._test_dot_vdot_vs_numpy(device, dtype, torch.dot, np.dot)
    @dtypes(torch.float, torch.cfloat)
    @precisionOverride({torch.cfloat: 1e-4, torch.float32: 5e-5})
    def test_vdot_vs_numpy(self, device, dtype):
        """Compare torch.vdot against np.vdot (see _test_dot_vdot_vs_numpy)."""
        self._test_dot_vdot_vs_numpy(device, dtype, torch.vdot, np.vdot)
    def _test_dot_vdot_invalid_args(self, device, torch_fn, complex_dtypes=False):
        """Shared driver checking that dot/vdot reject mismatched dtype, rank,
        size, and device combinations with the expected error messages."""
        def check(x, y, regex):
            with self.assertRaisesRegex(RuntimeError, regex):
                torch_fn(x, y)
        if complex_dtypes:
            x = torch.randn(1, dtype=torch.cfloat, device=device)
            y = torch.randn(3, dtype=torch.cdouble, device=device)
        else:
            x = torch.randn(1, dtype=torch.float, device=device)
            y = torch.randn(3, dtype=torch.double, device=device)
        check(x, y, 'dot : expected both vectors to have same dtype')
        check(x.reshape(1, 1), y, '1D tensors expected')
        check(x.expand(9), y.to(x.dtype), 'inconsistent tensor size')
        if self.device_type != 'cpu':
            # cross-device call must be rejected
            x_cpu = x.expand(3).cpu()
            check(x_cpu, y.to(x.dtype), 'Expected all tensors to be on the same device')
    @onlyNativeDeviceTypes
    def test_vdot_invalid_args(self, device):
        """torch.vdot argument validation, for real and complex dtypes."""
        self._test_dot_vdot_invalid_args(device, torch.vdot)
        self._test_dot_vdot_invalid_args(device, torch.vdot, complex_dtypes=True)
    @onlyNativeDeviceTypes
    def test_dot_invalid_args(self, device):
        """torch.dot argument validation, for real and complex dtypes."""
        self._test_dot_vdot_invalid_args(device, torch.dot)
        self._test_dot_vdot_invalid_args(device, torch.dot, complex_dtypes=True)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_matrix_rank(self, device, dtype):
        """Check torch.linalg.matrix_rank invariants and parity with NumPy."""
        matrix_rank = torch.linalg.matrix_rank
        def run_test(shape0, shape1, batch):
            a = torch.randn(*batch, shape0, shape1, dtype=dtype, device=device)
            rank_a = matrix_rank(a)
            # rank is invariant under conjugate transpose
            self.assertEqual(rank_a, matrix_rank(a.mH))
            aaH = torch.matmul(a, a.mH)
            rank_aaH = matrix_rank(aaH)
            rank_aaH_hermitian = matrix_rank(aaH, hermitian=True)
            # a @ a.mH is Hermitian, so the hermitian fast path must agree
            self.assertEqual(rank_aaH, rank_aaH_hermitian)
            aHa = torch.matmul(a.mH, a)
            self.assertEqual(matrix_rank(aHa), matrix_rank(aHa, hermitian=True))
            # check against NumPy
            self.assertEqual(rank_a, np.linalg.matrix_rank(a.cpu().numpy()))
            self.assertEqual(matrix_rank(a, 0.01), np.linalg.matrix_rank(a.cpu().numpy(), 0.01))
            self.assertEqual(rank_aaH, np.linalg.matrix_rank(aaH.cpu().numpy()))
            self.assertEqual(matrix_rank(aaH, 0.01), np.linalg.matrix_rank(aaH.cpu().numpy(), 0.01))
            # hermitian flag for NumPy was added in 1.14.0
            if np.lib.NumpyVersion(np.__version__) >= '1.14.0':
                self.assertEqual(rank_aaH_hermitian,
                                 np.linalg.matrix_rank(aaH.cpu().numpy(), hermitian=True))
                self.assertEqual(matrix_rank(aaH, 0.01, True),
                                 np.linalg.matrix_rank(aaH.cpu().numpy(), 0.01, True))
            # check out= variant
            out = torch.empty(a.shape[:-2], dtype=torch.int64, device=device)
            ans = matrix_rank(a, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, rank_a)
        shapes = (3, 13)
        batches = ((), (0, ), (4, ), (3, 5, ))
        for (shape0, shape1), batch in zip(itertools.product(shapes, reversed(shapes)), batches):
            run_test(shape0, shape1, batch)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_matrix_rank_atol(self, device, dtype):
        """matrix_rank's atol= must match the deprecated tol= and NumPy's tol."""
        def run_test_atol(shape0, shape1, batch):
            a = make_tensor((*batch, shape0, shape1), dtype=dtype, device=device)
            # Check against NumPy output
            # Test float tol, and specific value for each matrix
            tolerances = [float(torch.rand(1)), ]
            # Test different types of tol tensor
            for tol_type in all_types():
                tolerances.append(make_tensor(a.shape[:-2], dtype=tol_type, device=device, low=0))
            # Test broadcasting of tol
            if a.ndim > 2:
                tolerances.append(make_tensor(a.shape[-3], dtype=torch.float32, device=device, low=0))
            for tol in tolerances:
                actual = torch.linalg.matrix_rank(a, atol=tol)
                # tol is the deprecated spelling of atol; both must agree
                actual_tol = torch.linalg.matrix_rank(a, tol=tol)
                self.assertEqual(actual, actual_tol)
                numpy_tol = tol if isinstance(tol, float) else tol.cpu().numpy()
                expected = np.linalg.matrix_rank(a.cpu().numpy(), tol=numpy_tol)
                self.assertEqual(actual, expected)
        shapes = (3, 13)
        batches = ((), (0, ), (4, ), (3, 5, ))
        for (shape0, shape1), batch in zip(itertools.product(shapes, reversed(shapes)), batches):
            run_test_atol(shape0, shape1, batch)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float64)
    def test_matrix_rank_atol_rtol(self, device, dtype):
        """Check the interaction of atol and rtol in torch.linalg.matrix_rank."""
        make_fullrank = make_fullrank_matrices_with_distinct_singular_values
        make_arg = partial(make_fullrank, device=device, dtype=dtype)
        # creates a full-rank n x n matrix whose singular values lie in [2/3, 3/2];
        # per the helper, the singular values are 1 + 1/2, 1 - 1/3, 1 + 1/4, 1 - 1/5, ...
        # (assumption based on the helper's name/usage — confirm in its definition)
        n = 9
        a = make_arg(n, n)
        # test float and tensor variants
        for tol_value in [0.81, torch.tensor(0.81, device=device)]:
            # using rtol (relative tolerance) takes into account the largest singular value (1.5 in this case)
            result = torch.linalg.matrix_rank(a, rtol=tol_value)
            self.assertEqual(result, 2) # there are 2 singular values above 1.5*0.81 = 1.215
            # atol is used directly to compare with singular values
            result = torch.linalg.matrix_rank(a, atol=tol_value)
            self.assertEqual(result, 7) # there are 7 singular values above 0.81
            # when both are specified the maximum tolerance is used
            result = torch.linalg.matrix_rank(a, atol=tol_value, rtol=tol_value)
            self.assertEqual(result, 2) # there are 2 singular values above max(0.81, 1.5*0.81)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_matrix_rank_empty(self, device, dtype):
        """matrix_rank of zero-numel matrices must be 0 for every batch element."""
        matrix_rank = torch.linalg.matrix_rank
        # NumPy doesn't work for input with no elements
        def run_test(shape0, shape1, batch):
            a = torch.randn(*batch, shape0, shape1, dtype=dtype, device=device)
            rank_a = matrix_rank(a)
            # expected rank is 0 for every batch element
            expected = torch.zeros(batch, dtype=torch.int64, device=device)
            self.assertEqual(rank_a, matrix_rank(a.mH))
            aaH = torch.matmul(a, a.mH)
            rank_aaH = matrix_rank(aaH)
            rank_aaH_hermitian = matrix_rank(aaH, hermitian=True)
            self.assertEqual(rank_aaH, rank_aaH_hermitian)
            aHa = torch.matmul(a.mH, a)
            self.assertEqual(matrix_rank(aHa), matrix_rank(aHa, hermitian=True))
            self.assertEqual(rank_a, expected)
            self.assertEqual(matrix_rank(a, 0.01), expected)
            self.assertEqual(rank_aaH, expected)
            self.assertEqual(matrix_rank(aaH, 0.01), expected)
            self.assertEqual(rank_aaH_hermitian, expected)
            self.assertEqual(matrix_rank(aaH, 0.01, True), expected)
        batches = ((), (4, ), (3, 5, ))
        for batch in batches:
            run_test(0, 0, batch)
            run_test(0, 3, batch)
            run_test(3, 0, batch)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_matrix_rank_out_errors_and_warnings(self, device, dtype):
# dtypes should be safely castable
a = torch.eye(2, dtype=dtype, device=device)
out = torch.empty(0, dtype=torch.bool, device=device)
with self.assertRaisesRegex(RuntimeError, "but got result with dtype Bool"):
torch.linalg.matrix_rank(a, out=out)
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out = torch.empty(0, dtype=dtype, device=wrong_device)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.linalg.matrix_rank(a, out=out)
# if out tensor with wrong shape is passed a warning is given
with warnings.catch_warnings(record=True) as w:
out = torch.empty(3, dtype=dtype, device=device)
# Trigger warning
torch.linalg.matrix_rank(a, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_matrix_rank_basic(self, device, dtype):
matrix_rank = torch.linalg.matrix_rank
a = torch.eye(10, dtype=dtype, device=device)
self.assertEqual(matrix_rank(a).item(), 10)
self.assertEqual(matrix_rank(a, hermitian=True).item(), 10)
a[5, 5] = 0
self.assertEqual(matrix_rank(a).item(), 9)
self.assertEqual(matrix_rank(a, hermitian=True).item(), 9)
@onlyNativeDeviceTypes
@dtypes(torch.double)
# This tests only the cases where torch.chain_matmul differs from torch.linalg.multi_dot which this is an "alias" for.
def test_chain_matmul(self, device, dtype):
# chain_matmul accepts a single input tensor while multi_dot does not
t = make_tensor((2, 2), dtype=dtype, device=device)
self.assertEqual(t, torch.chain_matmul(t))
with self.assertRaisesRegex(RuntimeError, r"chain_matmul\(\): Expected one or more matrices"):
torch.chain_matmul()
# chain_matmul expects all tensors to be 2D whereas multi_dot allows the first and last tensors to
# be either 1D or 2D
with self.assertRaisesRegex(RuntimeError, r"Tensor dimension is 1, expected 2 instead"):
torch.chain_matmul(make_tensor(1, dtype=dtype, device=device), make_tensor(1, dtype=dtype, device=device))
    @onlyNativeDeviceTypes
    @dtypes(torch.double, torch.cdouble)
    def test_multi_dot(self, device, dtype):
        """Compare torch.linalg.multi_dot against numpy.linalg.multi_dot."""
        def check(*shapes):
            tensors = [make_tensor(shape, dtype=dtype, device=device) for shape in shapes]
            np_arrays = [tensor.cpu().numpy() for tensor in tensors]
            res = torch.linalg.multi_dot(tensors).cpu()
            ref = torch.from_numpy(np.array(np.linalg.multi_dot(np_arrays)))
            self.assertEqual(res, ref)
        # test for inputs with empty dimensions
        check([0], [0])
        check([2], [2, 0])
        check([1, 0], [0])
        check([0, 2], [2, 1])
        check([2, 2], [2, 0])
        check([2, 0], [0, 3])
        check([0, 0], [0, 1])
        check([4, 2], [2, 0], [0, 3], [3, 2])
        # test variable output shapes (1D first/last operands are allowed)
        check([2], [2])
        check([1, 2], [2])
        check([2], [2, 1])
        check([1, 2], [2, 1])
        check([3, 2], [2, 4])
        # test multiple input tensors
        check([3], [3, 4], [4, 2], [2, 5], [5])
        check([1, 2], [2, 2], [2, 3], [3, 1])
        # test large tensors
        check([10, 100], [100, 5], [5, 50])
        check([10, 20], [20, 30], [30, 5])
    @onlyNativeDeviceTypes
    @dtypes(torch.float)
    def test_multi_dot_errors(self, device, dtype):
        """Exercise the argument-validation errors of torch.linalg.multi_dot."""
        def check(tensors, out, msg):
            with self.assertRaisesRegex(RuntimeError, msg):
                torch.linalg.multi_dot(tensors, out=out)
        a = make_tensor(2, dtype=dtype, device=device)
        # arity and rank requirements
        check([], None, "expected at least 2 tensors")
        check([a], None, "expected at least 2 tensors")
        check([torch.tensor(1, device=device, dtype=dtype), a], None, "the first tensor must be 1D or 2D")
        check([a, torch.tensor(1, device=device, dtype=dtype)], None, "the last tensor must be 1D or 2D")
        # inner tensors must be strictly 2D
        check([a, a, a], None, "tensor 1 must be 2D")
        check([a, make_tensor((2, 2, 2), dtype=dtype, device=device), a], None, "tensor 1 must be 2D")
        # dtype / device consistency, for both inputs and out=
        check([a, make_tensor(2, dtype=torch.double, device=device)], None, "all tensors must have be the same dtype")
        check([a, a], torch.empty(0, device=device, dtype=torch.double), "expected out tensor to have dtype")
        if self.device_type == 'cuda':
            check([a, make_tensor(2, dtype=dtype, device="cpu")], None, "all tensors must be on the same device")
            check([a, a], torch.empty(0, dtype=dtype), "expected out tensor to be on device")
        # adjacent shapes must be multiplicable
        check([a, make_tensor(3, dtype=dtype, device=device)], None, "cannot be multiplied")
        check([a, make_tensor((3, 2), dtype=dtype, device=device), a], None, "cannot be multiplied")
    @precisionOverride({torch.float32: 5e-6, torch.complex64: 5e-6})
    @skipCUDAIfNoCusolver
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_qr(self, device, dtype):
        """Check the legacy torch.qr: shapes, A == QR, out= variant, orthogonality."""
        def run_test(tensor_dims, some):
            A = torch.randn(*tensor_dims, dtype=dtype, device=device)
            Q, R = torch.qr(A, some=some)
            # Check0: Q[-2:] = (m, n_columns), R[-2:] = (n_columns, n)
            m, n = tensor_dims[-2:]
            n_columns = m if (not some) and m > n else min(m, n)
            self.assertEqual(Q.size(-2), m)
            self.assertEqual(R.size(-1), n)
            self.assertEqual(Q.size(-1), n_columns)
            A_ = A.cpu().numpy()
            Q_ = Q.cpu().numpy()
            R_ = R.cpu().numpy()
            # Check1: A = QR
            self.assertEqual(A_, np.matmul(Q_, R_))
            # Check2: A = QR (with out); prefill with NaN so stale values can't pass
            Q_out, R_out = torch.full_like(Q, math.nan), torch.full_like(R, math.nan)
            torch.qr(A, some=some, out=(Q_out, R_out))
            Q_out_ = Q_out.cpu().numpy()
            R_out_ = R_out.cpu().numpy()
            self.assertEqual(A_, np.matmul(Q_out_, R_out_))
            # Check3: Q == Q_out, R == R_out
            self.assertEqual(Q_, Q_out_)
            self.assertEqual(R_, R_out_)
            # Check4: Q^{T}Q = I, triu(R) = R
            eye = torch.eye(n_columns, device=device, dtype=dtype).expand(Q.shape[:-2] + (n_columns, n_columns)).cpu().numpy()
            self.assertEqual(np.matmul(Q_.swapaxes(-1, -2).conj(), Q_), eye)
            self.assertEqual(R.triu(), R)
        tensor_dims_list = [(0, 5), (0, 0), (5, 0), # Empty Tensors
                            (2, 1, 0, 5), (2, 1, 0, 0), (2, 1, 5, 0), (2, 0, 5, 5), # Batched empty Tensors
                            (3, 5), (5, 5), (5, 3), # Single matrix
                            (7, 3, 5), (7, 5, 5), (7, 5, 3), # 3-dim Tensors
                            (7, 5, 3, 5), (7, 5, 5, 5), (7, 5, 5, 3)] # 4-dim Tensors
        for tensor_dims, some in itertools.product(tensor_dims_list, [True, False]):
            run_test(tensor_dims, some)
    @skipCUDAIfNoCusolver
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_qr_vs_numpy(self, device, dtype):
        """
        Compare torch.linalg.qr with numpy.linalg.qr for every supported mode,
        including empty matrices.
        """
        sizes_to_test = [
            (7, 5),
            (5, 7),
            (5, 0),  # empty
            (0, 5),  # empty
        ]
        for size in sizes_to_test:
            t = torch.randn(size, device=device, dtype=dtype)
            np_t = t.cpu().numpy()
            for mode in ['reduced', 'complete']:
                exp_q, exp_r = np.linalg.qr(np_t, mode=mode)
                q, r = torch.linalg.qr(t, mode=mode)
                self.assertEqual(q, exp_q)
                self.assertEqual(r, exp_r)
            #
            # for mode='r' we need a special logic because numpy returns only r
            exp_r = np.linalg.qr(np_t, mode='r')
            q, r = torch.linalg.qr(t, mode='r')
            # check that q is empty (torch still returns a (q, r) pair)
            self.assertEqual(q.shape, (0,))
            self.assertEqual(q.dtype, t.dtype)
            self.assertEqual(q.device, t.device)
            # check r
            self.assertEqual(r, exp_r)
    @skipCUDAIfNoCusolver
    @skipCPUIfNoLapack
    @dtypes(torch.float)
    def test_linalg_qr_autograd(self, device, dtype):
        # Check differentiability for modes as specified in the docs.
        # Differentiability in all cases is only guaranteed if first k = min(m, n) columns are linearly independent.
        # Mode 'reduced' is always differentiable.
        # Mode 'r' is never differentiable.
        # Mode 'complete' is differentiable for m <= n.
        for mode in 'complete', 'reduced', 'r':
            for m, n in [(5, 7), (7, 5)]:
                # Random matrix inputs will effectively satisfy rank requirement of k = min(m, n) columns linearly
                # independent.
                inp = torch.randn((m, n), device=device, dtype=dtype, requires_grad=True)
                q, r = torch.linalg.qr(inp, mode=mode)
                # reduce to a scalar so .backward() can be called directly
                b = torch.sum(r)
                if mode == 'complete' and m > n:
                    with self.assertRaisesRegex(RuntimeError,
                                                "The QR decomposition is not differentiable when mode='complete' and "
                                                "nrows > ncols"):
                        b.backward()
                elif mode == 'r':
                    # torch.linalg.qr(mode='r') returns only 'r' and discards 'q', but
                    # without 'q' you cannot compute the backward pass. Check that
                    # linalg_qr_backward complains cleanly in that case.
                    self.assertEqual(q.shape, (0,))  # empty tensor
                    with self.assertRaisesRegex(RuntimeError,
                                                "The derivative of linalg.qr depends on Q"):
                        b.backward()
                else:
                    b.backward()
    @skipCUDAIfNoCusolver
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_qr_batched(self, device, dtype):
        """
        Compare torch.linalg.qr with numpy.linalg.qr on batched input. We need
        some special logic because numpy does not support batched qr.
        """
        def np_qr_batched(a, mode):
            """poor's man batched version of np.linalg.qr: apply it per matrix
            and stack the results."""
            all_q = []
            all_r = []
            for matrix in a:
                result = np.linalg.qr(matrix, mode=mode)
                if mode == 'r':
                    # mode 'r' returns only r
                    all_r.append(result)
                else:
                    q, r = result
                    all_q.append(q)
                    all_r.append(r)
            if mode == 'r':
                return np.array(all_r)
            else:
                return np.array(all_q), np.array(all_r)
        t = torch.randn((3, 7, 5), device=device, dtype=dtype)
        np_t = t.cpu().numpy()
        for mode in ['reduced', 'complete']:
            exp_q, exp_r = np_qr_batched(np_t, mode=mode)
            q, r = torch.linalg.qr(t, mode=mode)
            self.assertEqual(q, exp_q)
            self.assertEqual(r, exp_r)
        # for mode='r' we need a special logic because numpy returns only r
        exp_r = np_qr_batched(np_t, mode='r')
        q, r = torch.linalg.qr(t, mode='r')
        # check that q is empty (torch still returns a (q, r) pair)
        self.assertEqual(q.shape, (0,))
        self.assertEqual(q.dtype, t.dtype)
        self.assertEqual(q.device, t.device)
        # check r
        self.assertEqual(r, exp_r)
@skipCUDAIfNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float)
def test_qr_error_cases(self, device, dtype):
t1 = torch.randn(5, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, 'linalg.qr: The input tensor A must have at least 2 dimensions.'):
torch.linalg.qr(t1)
t2 = torch.randn((5, 7), device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "qr received unrecognized mode 'hello'"):
torch.linalg.qr(t2, mode='hello')
def _check_einsum(self, *args, np_args=None):
if np_args is None:
np_args = [arg.cpu().numpy() if isinstance(arg, torch.Tensor) else arg for arg in args]
ref = np.einsum(*np_args)
res = torch.einsum(*args)
self.assertEqual(ref, res)
# Check that the other variations for opt_einsum work too
if TEST_OPT_EINSUM:
with opt_einsum.flags(enabled=False):
res = torch.einsum(*args)
self.assertEqual(ref, res)
with opt_einsum.flags(enabled=True, strategy='greedy'):
res = torch.einsum(*args)
self.assertEqual(ref, res)
with opt_einsum.flags(enabled=True, strategy='optimal'):
res = torch.einsum(*args)
self.assertEqual(ref, res)
    @dtypes(torch.double, torch.cdouble)
    def test_einsum(self, device, dtype):
        """Smoke-test torch.einsum against NumPy over a broad set of
        vector, matrix, tensor, diagonal, and ellipsis equations."""
        # Test cases from https://gist.github.com/rockt/15ee013889d65342088e9260a377dc8f
        x = make_tensor((5,), dtype=dtype, device=device)
        y = make_tensor((7,), dtype=dtype, device=device)
        A = make_tensor((3, 5), dtype=dtype, device=device)
        B = make_tensor((2, 5), dtype=dtype, device=device)
        C = make_tensor((2, 3, 5), dtype=dtype, device=device)
        D = make_tensor((2, 5, 7), dtype=dtype, device=device)
        E = make_tensor((7, 9), dtype=dtype, device=device)
        F = make_tensor((2, 3, 3, 5), dtype=dtype, device=device)
        G = make_tensor((5, 4, 6), dtype=dtype, device=device)
        H = make_tensor((4, 4), dtype=dtype, device=device)
        I = make_tensor((2, 3, 2), dtype=dtype, device=device)
        # Vector operations
        self._check_einsum('i->', x)                     # sum
        self._check_einsum('i,i->', x, x)                # dot
        self._check_einsum('i,i->i', x, x)               # vector element-wise mul
        self._check_einsum('i,j->ij', x, y)              # outer
        # Matrix operations
        self._check_einsum("ij->ji", A)                  # transpose
        self._check_einsum("ij->j", A)                   # row sum
        self._check_einsum("ij->i", A)                   # col sum
        self._check_einsum("ij,ij->ij", A, A)            # matrix element-wise mul
        self._check_einsum("ij,j->i", A, x)              # matrix vector multiplication
        self._check_einsum("ij,kj->ik", A, B)            # matmul
        self._check_einsum("ij,ab->ijab", A, E)          # matrix outer product
        # Tensor operations
        self._check_einsum("Aij,Ajk->Aik", C, D)         # batch matmul
        self._check_einsum("ijk,jk->i", C, A)            # tensor matrix contraction
        self._check_einsum("aij,jk->aik", D, E)          # tensor matrix contraction
        self._check_einsum("abCd,dFg->abCFg", F, G)      # tensor tensor contraction
        self._check_einsum("ijk,jk->ik", C, A)           # tensor matrix contraction with double indices
        self._check_einsum("ijk,jk->ij", C, A)           # tensor matrix contraction with double indices
        self._check_einsum("ijk,ik->j", C, B)            # non contiguous
        self._check_einsum("ijk,ik->jk", C, B)           # non contiguous with double indices
        # Test diagonals
        self._check_einsum("ii", H)                      # trace
        self._check_einsum("ii->i", H)                   # diagonal
        self._check_einsum('iji->j', I)                  # non-contiguous trace
        self._check_einsum('ngrg...->nrg...', make_tensor((2, 1, 3, 1, 4), dtype=dtype, device=device))
        # Test ellipsis
        self._check_einsum("i...->...", H)
        self._check_einsum("ki,...k->i...", A.t(), B)
        self._check_einsum("k...,jk->...", A.t(), B)
        self._check_einsum('...ik, ...j -> ...ij', C, x)
        self._check_einsum('Bik,k...j->i...j', C, make_tensor((5, 3), dtype=dtype, device=device))
        self._check_einsum('i...j, ij... -> ...ij', C, make_tensor((2, 5, 2, 3), dtype=dtype, device=device))
        # torch.bilinear with noncontiguous tensors
        l = make_tensor((5, 10), dtype=dtype, device=device, noncontiguous=True)
        r = make_tensor((5, 20), dtype=dtype, device=device, noncontiguous=True)
        w = make_tensor((15, 10, 20), dtype=dtype, device=device)
        self._check_einsum("bn,anm,bm->ba", l, w, r)
        # with strided tensors
        self._check_einsum("bn,Anm,bm->bA", l[:, ::2], w[:, ::2, ::2], r[:, ::2])
        # test multiple inputs
        self._check_einsum("...,be,b...,beg,gi,bc...->bi...", A, B, C, D, E, F)
    @dtypes(torch.double, torch.cdouble)
    def test_einsum_sublist_format(self, device, dtype):
        """Test einsum's sublist calling convention (operand, subscript-list
        pairs, with integer labels and Ellipsis) against NumPy."""
        x = make_tensor((5,), dtype=dtype, device=device)
        y = make_tensor((7,), dtype=dtype, device=device)
        A = make_tensor((3, 5), dtype=dtype, device=device)
        B = make_tensor((2, 5), dtype=dtype, device=device)
        C = make_tensor((2, 1, 3, 1, 4), dtype=dtype, device=device)
        self._check_einsum(x, [0])
        self._check_einsum(x, [0], [])
        self._check_einsum(x, [0], y, [1], [0, 1])
        self._check_einsum(A, [0, 1], [1, 0])
        self._check_einsum(A, [0, 1], x, [1], [0])
        self._check_einsum(A, [0, 1], B, [2, 1])
        self._check_einsum(A, [0, 1], B, [2, 1], [0, 2])
        # Repeated labels (diagonals) and Ellipsis in sublists
        self._check_einsum(C, [0, 1, 2, 1, Ellipsis], [0, 2, 1, Ellipsis])
        self._check_einsum(A.t(), [0, 1], B, [Ellipsis, 0])
        self._check_einsum(A.t(), [0, 1], B, [Ellipsis, 0], [1, Ellipsis])
        self._check_einsum(A.t(), [0, Ellipsis], B, [1, 0], [Ellipsis])
        # torch.bilinear with noncontiguous tensors
        l = make_tensor((5, 10), dtype=dtype, device=device, noncontiguous=True)
        r = make_tensor((5, 20), dtype=dtype, device=device, noncontiguous=True)
        w = make_tensor((15, 10, 20), dtype=dtype, device=device)
        self._check_einsum(l, [40, 41], w, [2, 41, 50], r, [40, 50], [40, 2])
    @dtypes(torch.double, torch.cdouble)
    def test_einsum_random(self, device, dtype):
        """Fuzz torch.einsum against NumPy with randomly generated equations:
        random labels, diagonals, ellipses, broadcasting, and outputs."""
        def convert_label(label):
            # Integer labels 0..25 map to 'A'..'Z', 26..51 to 'a'..'z';
            # Ellipsis maps to '...'.
            if label == ...:
                return '...'
            elif label < 26:
                return chr(ord('A') + label)
            else:
                return chr(ord('a') + label - 26)
        def convert_sublist(sublist):
            return ''.join(convert_label(label) for label in sublist)
        def test(n=10,  # how many tests to generate
                 n_labels=5,  # how many labels available
                 min_ops=1, max_ops=4,  # min and max number of operands per test
                 min_dims=1, max_dims=3,  # min and max number of dimensions per operand
                 min_size=1, max_size=8,  # min and max size of each dimension
                 max_out_dim=3,  # max number of dimensions for the output
                 enable_diagonals=True,  # controls if labels can be repeated for diagonals
                 ellipsis_prob=0.5,  # probability of including ellipsis in operand
                 broadcasting_prob=0.1):  # probability of turning some dim sizes 1 for broadcasting
            all_labels = torch.arange(52)
            assert 0 <= n
            assert 0 <= n_labels < len(all_labels)
            assert 0 < min_ops <= max_ops
            assert 0 <= min_dims <= max_dims
            assert 0 <= min_size <= max_size
            assert 0 <= max_out_dim
            assert enable_diagonals or max_dims <= n_labels
            for _ in range(n):
                # Select a subset of labels for this test and give them random sizes
                possible_labels = all_labels[torch.randperm(len(all_labels))[:n_labels]]
                labels_size = torch.randint_like(all_labels, min_size, max_size + 1)
                ellipsis_shape = torch.randint(min_size, max_size + 1, (max_dims - min_dims,))
                operands = []
                sublists = []
                ell_size = 0
                valid_labels = set()
                # create random input operands
                for _ in range(random.randint(min_ops, max_ops)):
                    n_dim = random.randint(min_dims, max_dims)
                    labels_idx = torch.ones(len(possible_labels)).multinomial(n_dim, enable_diagonals)
                    labels = possible_labels[labels_idx]
                    valid_labels.update(labels.tolist())
                    shape = labels_size[labels]
                    # turn some dimensions to size 1 for testing broadcasting
                    mask = Binomial(probs=broadcasting_prob).sample((n_dim,))
                    broadcast_labels = torch.unique(labels[mask == 1])
                    shape[(labels[..., None] == broadcast_labels).any(-1)] = 1
                    labels = labels.tolist()
                    shape = shape.tolist()
                    # include ellipsis if not all dimensions were assigned a label already
                    if n_dim < max_dims and torch.rand(1) < ellipsis_prob:
                        ell_num_dim = random.randint(1, max_dims - n_dim)
                        ell_size = max(ell_size, ell_num_dim)
                        ell_shape = ellipsis_shape[-ell_num_dim:]
                        # again, turn some dimensions to size 1 for broadcasting
                        mask = Binomial(probs=broadcasting_prob).sample((ell_num_dim,))
                        ell_shape[mask == 1] = 1
                        ell_index = random.randint(0, n_dim)
                        shape[ell_index:ell_index] = ell_shape
                        labels.insert(ell_index, ...)
                    operands.append(make_tensor(shape, dtype=dtype, device=device))
                    sublists.append(labels)
                # NumPy has a bug with the sublist format so for now we compare PyTorch sublist
                # implementation against the equation format implementation of NumPy
                # see https://github.com/numpy/numpy/issues/10926
                np_operands = [op.cpu().numpy() for op in operands]
                # test equation format
                equation = ','.join(convert_sublist(l) for l in sublists)
                self._check_einsum(equation, *operands, np_args=(equation, *np_operands))
                # test sublist format
                args = list(itertools.chain.from_iterable(zip(operands, sublists)))
                self._check_einsum(*args, np_args=(equation, *np_operands))
                # generate an explicit output
                out_sublist = []
                num_out_labels = max(0, random.randint(0, min(max_out_dim, len(valid_labels))) - ell_size)
                if num_out_labels > 0:
                    out_labels_idx = torch.ones(len(valid_labels)).multinomial(num_out_labels)
                    out_sublist = torch.tensor(list(valid_labels))[out_labels_idx].tolist()
                # output always carries the ellipsis so broadcast dims survive
                out_sublist.insert(random.randint(0, num_out_labels), ...)
                # test equation format with explicit output
                equation += '->' + convert_sublist(out_sublist)
                self._check_einsum(equation, *operands, np_args=(equation, *np_operands))
                # test sublist format with explicit output
                args.append(out_sublist)
                self._check_einsum(*args, np_args=(equation, *np_operands))
        test(500)
@dtypes(torch.float)
def test_einsum_output_layout(self, device, dtype):
batch, in_dim, out_dim = 2, 3, 5
x = make_tensor((batch, in_dim), dtype=dtype, device=device)
w = make_tensor((out_dim, in_dim), dtype=dtype, device=device)
result = torch.einsum("fd,bd->bf", w, x)
expected = x.matmul(w.t())
self.assertEqual(result, expected)
self.assertTrue(result.is_contiguous())
self.assertEqual(result.stride(), expected.stride())
    def test_einsum_corner_cases(self, device):
        """Exercise einsum edge cases: scalar equations, whitespace handling,
        zero-size dimensions, broadcasting, and ellipsis variants."""
        def check(equation, *operands, expected_output):
            # Tuples describe a shape for make_tensor; everything else is
            # literal tensor data.
            tensors = [torch.tensor(operand, device=device, dtype=torch.float32) if not isinstance(operand, tuple)
                       else make_tensor(operand, dtype=torch.float32, device=device) for operand in operands]
            output = torch.einsum(equation, tensors)
            self.assertEqual(output, torch.tensor(expected_output, dtype=torch.float32, device=device))
        # Test equation variations
        check(' ', 1, expected_output=1)
        check(' -> ', 1, expected_output=1)
        check(' , ', 2, 2, expected_output=4)
        check(' , , ', 2, 2, 2, expected_output=8)
        check(' , -> ', 2, 2, expected_output=4)
        check(' i ', [1], expected_output=[1])
        check(' i -> ', [1], expected_output=1)
        check(' i -> i ', [1], expected_output=[1])
        check(' i , i ', [2], [2], expected_output=4)
        check(' i , i -> i ', [2], [2], expected_output=[4])
        # Test tensors with 0 size dimensions
        check('i', [], expected_output=[])
        check(' i j -> j', [[], []], expected_output=[])
        check('ij->i', [[], []], expected_output=[0., 0.])
        check(' i j k , k -> i j ', (3, 0, 6), (6,), expected_output=[[], [], []])
        # Test broadcasting
        check('i,j', [2], [1, 2], expected_output=[[2, 4]])
        check('i,ij->ij', [1, 2], [[1, 2, 3], [2, 3, 4]], expected_output=[[1, 2, 3], [4, 6, 8]])
        # Test ellipsis broadcasting
        check('...', 1, expected_output=1)
        check('...->', 1, expected_output=1)
        check('...->...', 1, expected_output=1)
        check('...', [1], expected_output=[1])
        check('...->', [1], expected_output=1)
        check('z...->z', [1], expected_output=[1])
        check('Z...->...Z', [1], expected_output=[1])
        check('...a->', [[2], [4]], expected_output=6)
        check('a...b->ab', [[[1], [2]], [[3], [4]]], expected_output=[[3], [7]])
    def test_einsum_error_cases(self, device):
        """Verify einsum raises the documented errors for malformed equations,
        operand-count mismatches, and incompatible shapes."""
        def check(*args, regex, exception=RuntimeError):
            # All einsum errors are prefixed with 'einsum():'.
            with self.assertRaisesRegex(exception, r'einsum\(\):.*' + regex):
                torch.einsum(*args)
        x = make_tensor((2,), dtype=torch.float32, device=device)
        y = make_tensor((2, 3), dtype=torch.float32, device=device)
        check('', [], regex=r'at least one operand', exception=ValueError)
        check('. ..', [x], regex=r'found \'.\' for operand 0 that is not part of any ellipsis')
        check('... ...', [x], regex=r'found \'.\' for operand 0 for which an ellipsis was already found')
        check('1', [x], regex=r'invalid subscript given at index 0')
        check(',', [x], regex=r'fewer operands were provided than specified in the equation')
        check('', [x, x], regex=r'more operands were provided than specified in the equation')
        check('', [x], regex=r'the number of subscripts in the equation \(0\) does not match the number '
                             r'of dimensions \(1\) for operand 0 and no ellipsis was given')
        check('ai', [x], regex=r'the number of subscripts in the equation \(2\) does not match the number '
                               r'of dimensions \(1\) for operand 0 and no ellipsis was given')
        check('ai...', [x], regex=r'the number of subscripts in the equation \(2\) is more than the number '
                                  r'of dimensions \(1\) for operand 0')
        check('a->... .', [x], regex=r'found \'.\' for output but an ellipsis \(...\) was already found')
        check('a->..', [x], regex=r'found \'.\' for output that is not part of any ellipsis \(...\)')
        check('a->1', [x], regex=r'invalid subscript given at index 3')
        check('a->aa', [x], regex=r'output subscript a appears more than once in the output')
        check('a->i', [x], regex=r'output subscript i does not appear in the equation for any input operand')
        check('aa', [y], regex=r'subscript a is repeated for operand 0 but the sizes don\'t match, 3 != 2')
        check('...,...', [x, y], regex=r'does not broadcast')
        check('a,a', [x, make_tensor((3,), dtype=torch.float32, device=device)], regex=r'does not broadcast')
        check('a, ba', [x, y], regex=r'subscript a has size 3 for operand 1 which does not broadcast with previously'
                                     r' seen size 2')
        # Sublist-format labels must be in [0, 52)
        check(x, [-1], regex=r'not within the valid range \[0, 52\)', exception=ValueError)
        check(x, [52], regex=r'not within the valid range \[0, 52\)', exception=ValueError)
    def _gen_shape_inputs_linalg_triangular_solve(self, shape, dtype, device, well_conditioned=False):
        """Yield (A, B, left, upper, unitriangular) inputs for solve_triangular.

        Iterates over all combinations of side (left/right), unit diagonal,
        batch-expansion, transposition, and conjugation for A and B.
        shape is (batch, n, k); when well_conditioned is True, A is taken
        from an LU factorization of a full-rank matrix.
        """
        make_arg = partial(make_tensor, dtype=dtype, device=device)
        make_fullrank = partial(make_fullrank_matrices_with_distinct_singular_values, dtype=dtype, device=device)
        b, n, k = shape
        for left, uni, expand_a, tr_a, conj_a, expand_b, tr_b, conj_b in product((True, False), repeat=8):
            # expand means that we generate a batch of matrices with a stride of zero in the batch dimension
            if (conj_a or conj_b) and not dtype.is_complex:
                continue
            # We just expand on the batch size
            if (expand_a or expand_b) and b == 1:
                continue
            size_a = (b, n, n) if left else (b, k, k)
            size_b = (b, n, k) if not tr_b else (b, k, n)
            # If expand_a or expand_b, we'll expand them to the correct size later
            if b == 1 or expand_a:
                size_a = size_a[1:]
            if b == 1 or expand_b:
                size_b = size_b[1:]
            if well_conditioned:
                PLU = torch.linalg.lu(make_fullrank(*size_a))
                if uni:
                    # A = L from PLU
                    A = PLU[1].transpose(-2, -1).contiguous()
                else:
                    # A = U from PLU
                    A = PLU[2].contiguous()
            else:
                # Random upper-triangular A; keep the diagonal away from zero
                # so the system stays solvable.
                A = make_arg(size_a)
                A.triu_()
                diag = A.diagonal(0, -2, -1)
                if uni:
                    diag.fill_(1.)
                else:
                    diag[diag.abs() < 1e-6] = 1.
            B = make_arg(size_b)
            if tr_a:
                A.transpose_(-2, -1)
            if tr_b:
                B.transpose_(-2, -1)
            if conj_a:
                A = A.conj()
            if conj_b:
                B = B.conj()
            if expand_a:
                A = A.expand(b, *size_a)
            if expand_b:
                B = B.expand(b, n, k)
            # A starts upper-triangular, so after a transpose it is lower:
            # upper = not tr_a.
            yield A, B, left, not tr_a, uni
def _test_linalg_solve_triangular(self, A, B, upper, left, uni):
X = torch.linalg.solve_triangular(A, B, upper=upper, left=left, unitriangular=uni)
if left:
self.assertEqual(A @ X, B)
else:
self.assertEqual(X @ A, B)
out = B
# B may be expanded
if not B.is_contiguous() and not B.transpose(-2, -1).is_contiguous():
out = B.clone()
torch.linalg.solve_triangular(A, B, upper=upper, left=left, unitriangular=uni, out=out)
self.assertEqual(X, out)
    # Tolerances dictated by widest acceptable range on CPU before failure
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-3 if TEST_WITH_ROCM else 1e-1,
                        torch.float64: 1e-8,
                        torch.complex64: 1e-1,
                        torch.complex128: 1e-8})
    def test_linalg_solve_triangular(self, device, dtype):
        """Run solve_triangular over batch/row/col size combinations,
        including empty (zero-sized) matrices."""
        # This exercises the API + BLAS CPU + batched cuBLAS
        ks = (3, 1, 0)
        ns = (5, 0)
        bs = (1, 2, 0)
        gen_inputs = self._gen_shape_inputs_linalg_triangular_solve
        for b, n, k in product(bs, ns, ks):
            for A, B, left, upper, uni in gen_inputs((b, n, k), dtype, device, well_conditioned=True):
                self._test_linalg_solve_triangular(A, B, upper, left, uni)
    @slowTest
    @unittest.skipIf(IS_FBCODE or IS_SANDCASTLE, "Test fails for float64 on GPU (P100, V100) on Meta infra")
    @onlyCUDA
    @skipCUDAIfNoMagma  # Magma needed for the PLU decomposition
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-2, torch.complex64: 1e-2,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_linalg_solve_triangular_large(self, device, dtype):
        """Run solve_triangular on sizes large enough to hit the magma and
        iterative cuBLAS backends."""
        # Exercises magma and cublas
        magma = (9, 513, 1)
        iterative_cublas = (2, 64, 1)
        gen_inputs = self._gen_shape_inputs_linalg_triangular_solve
        for shape in (magma, iterative_cublas):
            for A, B, left, upper, uni in gen_inputs(shape, dtype, device, well_conditioned=True):
                self._test_linalg_solve_triangular(A, B, upper, left, uni)
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-2, torch.complex64: 1e-2,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_linalg_solve_triangular_broadcasting(self, device, dtype):
        """Check solve_triangular broadcasts batch dimensions of A and B."""
        make_arg = partial(make_tensor, dtype=dtype, device=device)
        # (size_A, size_B) pairs exercising broadcast of A, of B, and of both.
        sizes = (((2, 1, 3, 4, 4), (2, 1, 3, 4, 6)),
                 ((2, 1, 3, 4, 4), (4, 6)),
                 ((4, 4), (2, 1, 3, 4, 2)),
                 ((1, 3, 1, 4, 4), (2, 1, 3, 4, 5)))
        for size_A, size_B in sizes:
            for left, upper, uni in itertools.product([True, False], repeat=3):
                A = make_arg(size_A)
                if upper:
                    A.triu_()
                else:
                    A.tril_()
                # Keep the diagonal away from zero so the system is solvable.
                diag = A.diagonal(0, -2, -1)
                if uni:
                    diag.fill_(1.)
                else:
                    diag[diag.abs() < 1e-6] = 1.
                B = make_arg(size_B)
                if not left:
                    B.transpose_(-2, -1)
                X = torch.linalg.solve_triangular(A, B, upper=upper, left=left, unitriangular=uni)
                if left:
                    B_other = A @ X
                else:
                    B_other = X @ A
                # Residual must equal B up to broadcasting.
                self.assertEqual(*torch.broadcast_tensors(B, B_other))
def triangular_solve_test_helper(self, A_dims, b_dims, upper, unitriangular,
device, dtype):
triangle_function = torch.triu if upper else torch.tril
b = torch.randn(*b_dims, dtype=dtype, device=device)
A = torch.randn(*A_dims, dtype=dtype, device=device)
# create positive definite matrix
A = torch.matmul(A, A.mT)
A_triangular = triangle_function(A)
if unitriangular:
A_triangular.diagonal(dim1=-2, dim2=-1).fill_(1.)
return b, A_triangular
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @skipIfTorchDynamo("flaky, needs investigation")
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_triangular_solve(self, device, dtype):
        """Check torch.triangular_solve residuals against NumPy matmul for
        all (upper, unitriangular, transpose) combinations."""
        ks = [0, 1, 3]
        ns = [0, 5]
        for k, n, (upper, unitriangular, transpose) in itertools.product(ks, ns,
                                                                         itertools.product([True, False], repeat=3)):
            b, A = self.triangular_solve_test_helper((n, n), (n, k), upper,
                                                     unitriangular, device, dtype)
            x = torch.triangular_solve(b, A, upper=upper, unitriangular=unitriangular, transpose=transpose)[0]
            # transpose=True solves A^T x = b instead of A x = b.
            if transpose:
                self.assertEqual(b, np.matmul(A.t().cpu(), x.cpu()))
            else:
                self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_triangular_solve_batched(self, device, dtype):
        """Check the batched triangular_solve path against per-matrix solves,
        including empty matrices and a zero-sized batch."""
        def triangular_solve_batch_helper(A_dims, b_dims, upper, unitriangular, transpose):
            # Reference: solve each batch element independently and stack.
            b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,
                                                     unitriangular, device, dtype)
            x_exp_list = []
            for i in range(b_dims[0]):
                x_exp_list.append(torch.triangular_solve(b[i], A[i], upper=upper,
                                                         unitriangular=unitriangular,
                                                         transpose=transpose)[0])
            x_exp = torch.stack(x_exp_list)  # Stacked output
            x_act = torch.triangular_solve(b, A, upper=upper,
                                           unitriangular=unitriangular,
                                           transpose=transpose)[0]  # Actual output
            self.assertEqual(x_act, x_exp)  # Equality check
            if transpose:
                A = A.mT
            # Also verify the residual A x == b via NumPy matmul.
            Ax = np.matmul(A.cpu(), x_act.cpu())
            self.assertEqual(b, Ax)
        def triangular_solve_zero_batch_helper(A_dims, b_dims, upper, unitriangular, transpose):
            # A zero-sized batch should just produce an output of b's shape.
            b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,
                                                     unitriangular, device, dtype)
            x = torch.triangular_solve(b, A, upper=upper,
                                       unitriangular=unitriangular,
                                       transpose=transpose)[0]
            self.assertTrue(x.shape == b.shape)
        for upper, unitriangular, transpose in itertools.product([True, False], repeat=3):
            batchsize = 3
            triangular_solve_batch_helper((batchsize, 5, 5), (batchsize, 5, 10),
                                          upper, unitriangular, transpose)
            # test empty input
            triangular_solve_batch_helper((batchsize, 0, 0), (batchsize, 0, 10),
                                          upper, unitriangular, transpose)
            triangular_solve_batch_helper((batchsize, 0, 0), (batchsize, 0, 0),
                                          upper, unitriangular, transpose)
            # test zero batch case
            batchsize = 0
            triangular_solve_zero_batch_helper((batchsize, 5, 5), (batchsize, 5, 10),
                                               upper, unitriangular, transpose)
    @slowTest
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_triangular_solve_batched_many_batches(self, device, dtype):
        """Stress triangular_solve with very large batch dimensions on
        either A (broadcast b) or b (broadcast A)."""
        for upper, transpose, unitriangular in itertools.product([True, False], repeat=3):
            # test batched A case
            b, A = self.triangular_solve_test_helper((256, 256, 5, 5), (5, 1),
                                                     upper, unitriangular, device, dtype)
            x, _ = torch.triangular_solve(b, A,
                                          upper=upper, transpose=transpose, unitriangular=unitriangular)
            if transpose:
                A = A.mT
            Ax = torch.matmul(A, x)
            # Looser rtol for single precision given the huge batch.
            rtol = 1e-2 if dtype in [torch.float32, torch.complex64] else self.precision
            self.assertEqual(Ax, b.expand_as(Ax), atol=self.precision, rtol=rtol)
            # test batched b case
            b, A = self.triangular_solve_test_helper((3, 3), (512, 512, 3, 1),
                                                     upper, unitriangular, device, dtype)
            x, _ = torch.triangular_solve(b, A, upper=upper, transpose=transpose,
                                          unitriangular=unitriangular)
            if transpose:
                A = A.mT
            self.assertEqual(torch.matmul(A, x), b)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
    @skipIfTorchDynamo("flaky, needs investigation")
    @dtypes(*floating_and_complex_types())
    def test_triangular_solve_batched_broadcasting(self, device, dtype):
        """Compare broadcasting triangular_solve against a manually
        broadcast scipy.linalg.solve_triangular reference."""
        from scipy.linalg import solve_triangular as tri_solve
        def scipy_tri_solve_batched(A, B, upper, trans, diag):
            # Broadcast A and B to a common batch shape, flatten the batch,
            # solve each pair with SciPy, then restore the batch shape.
            batch_dims_A, batch_dims_B = A.shape[:-2], B.shape[:-2]
            single_dim_A, single_dim_B = A.shape[-2:], B.shape[-2:]
            expand_dims = tuple(torch._C._infer_size(torch.Size(batch_dims_A),
                                                     torch.Size(batch_dims_B)))
            expand_A = np.broadcast_to(A, expand_dims + single_dim_A)
            expand_B = np.broadcast_to(B, expand_dims + single_dim_B)
            flat_A = expand_A.reshape((-1,) + single_dim_A)
            flat_B = expand_B.reshape((-1,) + single_dim_B)
            flat_X = np.vstack([tri_solve(a, b, lower=(not upper), trans=int(trans), unit_diagonal=diag)
                                for a, b in zip(flat_A, flat_B)])
            return flat_X.reshape(expand_B.shape)
        def run_test(A_dims, b_dims, device, upper, transpose, unitriangular):
            b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,
                                                     unitriangular, device, dtype)
            x_exp = torch.as_tensor(scipy_tri_solve_batched(A.cpu().numpy(), b.cpu().numpy(),
                                                            upper, transpose, unitriangular))
            x = torch.triangular_solve(b, A, upper=upper, transpose=transpose, unitriangular=unitriangular)[0]
            self.assertEqual(x, x_exp.to(device))
        for upper, transpose, unitriangular in itertools.product([True, False], repeat=3):
            # test against scipy.linalg.solve_triangular
            run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6), device, upper, transpose, unitriangular)  # no broadcasting
            run_test((2, 1, 3, 4, 4), (4, 6), device, upper, transpose, unitriangular)  # broadcasting b
            run_test((4, 4), (2, 1, 3, 4, 2), device, upper, transpose, unitriangular)  # broadcasting A
            run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5), device, upper, transpose, unitriangular)  # broadcasting A & b
@onlyCUDA
@dtypes(torch.float)
def test_triangular_solve_large(self, device, dtype):
# Repro for https://github.com/pytorch/pytorch/issues/79191
A = torch.randn(1, 2, 2, device=device, dtype=dtype).tril_()
B = torch.randn(1, 2, 524281, device=device, dtype=dtype)
X = torch.linalg.solve_triangular(A, B, upper=False)
self.assertEqual(A @ X, B)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_triangular_solve_out_errors_and_warnings(self, device, dtype):
        """Check out= error paths (dtype / device mismatch) and the
        resize warnings of torch.triangular_solve."""
        # dtypes should be safely castable
        a = torch.eye(2, dtype=dtype, device=device)
        b = torch.randn(2, 1, dtype=dtype, device=device)
        out = torch.empty_like(b).to(torch.int)
        clone_a = torch.empty_like(a)
        with self.assertRaisesRegex(RuntimeError, "Expected out tensor to have dtype"):
            torch.triangular_solve(b, a, out=(out, clone_a))
        out = torch.empty_like(b)
        clone_a = clone_a.to(torch.int)
        with self.assertRaisesRegex(RuntimeError, "Expected out tensor to have dtype"):
            torch.triangular_solve(b, a, out=(out, clone_a))
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, dtype=dtype, device=wrong_device)
            clone_a = torch.empty_like(a)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.triangular_solve(b, a, out=(out, clone_a))
            out = torch.empty(0, dtype=dtype, device=device)
            clone_a = torch.empty_like(a).to(wrong_device)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.triangular_solve(b, a, out=(out, clone_a))
        # Trigger the WARN_ONCE deprecation error
        torch.triangular_solve(b, a)
        # if out tensor with wrong shape is passed a warning is given
        with warnings.catch_warnings(record=True) as w:
            out = torch.empty(1, dtype=dtype, device=device)
            clone_a = torch.empty(1, dtype=dtype, device=device)
            # Trigger warning
            torch.triangular_solve(b, a, out=(out, clone_a))
            # Check warning occurs: one resize warning per out tensor.
            self.assertEqual(len(w), 2)
            self.assertTrue("An output with one or more elements was resized" in str(w[0].message))
            self.assertTrue("An output with one or more elements was resized" in str(w[1].message))
def check_single_matmul(self, x, y):
def assertEqual(answer, expected):
if x.dtype.is_floating_point or x.dtype.is_complex:
k = max(x.shape[-1], 1) # Scale the atol with the size of the matrix
self.assertEqual(answer, expected,
msg=f"{x.shape} x {y.shape} = {answer.shape}",
atol=k * 5e-5,
rtol=1e-4)
else:
self.assertEqual(answer, expected, msg=f"{x.shape} x {y.shape} = {answer.shape}")
# test x @ y
expected = np.matmul(x.cpu(), y.cpu())
ans = torch.matmul(x, y)
self.assertTrue(ans.is_contiguous())
assertEqual(ans, expected)
# test out
out = torch.empty_like(ans)
ans = torch.matmul(x, y, out=out)
self.assertIs(ans, out)
self.assertTrue(ans.is_contiguous())
assertEqual(ans, expected)
def gen_sizes_matmul(self, x_dim, y_dim=4, matrix_size=4, batch_size=3):
"""
Generates sequences of tuples (x, y) of with size(x) = x_dim and
size(y) <= y_dim that are compatible wrt. matmul
"""
assert x_dim >= 1
assert y_dim >= 2
x = x_dim
for y in range(1, y_dim + 1):
for batch, mn in product(product(range(batch_size), repeat=max(x - 2, y - 2, 0)),
product(range(matrix_size), repeat=min(y, 2))):
if x == 1:
size_x = mn[:1]
size_y = batch + mn
yield size_x, size_y
else:
for k in range(matrix_size):
size_x = (k,) + mn[:1]
if x > 2:
size_x = batch[-(x - 2):] + size_x
size_y = mn
if y > 2:
size_y = batch[-(y - 2):] + size_y
yield size_x, size_y
@dtypesIfCUDA(torch.float, torch.complex64) # Integer matmul just supported on CPU
@dtypes(torch.int64, torch.float, torch.complex64)
@setBlasBackendsToDefaultFinally
def test_matmul_small_brute_force_1d_Nd(self, device, dtype):
for backend in ["cublas", "cublaslt"]:
if torch.device(device).type == 'cuda':
torch.backends.cuda.preferred_blas_library(backend)
make_arg = partial(make_tensor, device=device, dtype=dtype)
for (size_x, size_y), nctg_x, nctg_y in product(self.gen_sizes_matmul(1), (True, False), (True, False)):
x = make_arg(size_x, noncontiguous=nctg_x)
y = make_arg(size_y, noncontiguous=nctg_y)
self.check_single_matmul(x, y)
@dtypesIfCUDA(torch.float, torch.complex64) # Integer matmul just supported on CPU
@dtypes(torch.int64, torch.float, torch.complex64)
@setBlasBackendsToDefaultFinally
def test_matmul_small_brute_force_2d_Nd(self, device, dtype):
for backend in ["cublas", "cublaslt"]:
if torch.device(device).type == 'cuda':
torch.backends.cuda.preferred_blas_library(backend)
make_arg = partial(make_tensor, device=device, dtype=dtype)
for (size_x, size_y), nctg_x, nctg_y in product(self.gen_sizes_matmul(2), (True, False), (True, False)):
x = make_arg(size_x, noncontiguous=nctg_x)
y = make_arg(size_y, noncontiguous=nctg_y)
self.check_single_matmul(x, y)
@dtypesIfCUDA(torch.float, torch.complex64) # Integer matmul just supported on CPU
@dtypes(torch.int64, torch.float, torch.complex64)
@setBlasBackendsToDefaultFinally
def test_matmul_small_brute_force_3d_Nd(self, device, dtype):
for backend in ["cublas", "cublaslt"]:
if torch.device(device).type == 'cuda':
torch.backends.cuda.preferred_blas_library(backend)
make_arg = partial(make_tensor, device=device, dtype=dtype)
for (size_x, size_y), nctg_x, nctg_y in product(self.gen_sizes_matmul(3), (True, False), (True, False)):
x = make_arg(size_x, noncontiguous=nctg_x)
y = make_arg(size_y, noncontiguous=nctg_y)
self.check_single_matmul(x, y)
    @onlyCUDA
    @skipCUDAIfNotRocm  # Skipping due to SM89 OOM in CI, UT doesn't do much on NV anyways
    @dtypes(*floating_types_and(torch.half))
    @precisionOverride({torch.float16: 1e-1})  # TunableOp may occasionally find less precise solution
    def test_matmul_small_brute_force_tunableop(self, device, dtype):
        """Run the brute-force matmul sweep with TunableOp enabled and check
        that results are recorded to the expected per-test CSV file."""
        import os
        # disable tunableop buffer rotation for all tests everywhere, it can be slow
        # We set the TunableOp numerical check environment variable here because it is
        # possible to hit some invalid numerical solutions due to the small matrix sizes.
        with self._tunableop_ctx():
            torch.cuda.tunable.set_rotating_buffer_size(0)
            # Numerical check adds significant overhead, unsure if this is needed
            # or if there was a transient problem at the time.
            # if dtype is torch.half:
            #     os.environ["PYTORCH_TUNABLEOP_NUMERICAL_CHECK"] = "1"
            ordinal = torch.cuda.current_device()
            # set these to single iterations to keep it short but still exercise the code
            torch.cuda.tunable.set_max_tuning_duration(1)
            torch.cuda.tunable.set_max_tuning_iterations(1)
            make_arg = partial(make_tensor, device=device, dtype=dtype)
            # Using gen_sizes_matmul(2) to ensure we cover
            # 'NN', 'TN', 'NT', and 'TT' cases.
            for (size_x, size_y), nctg_x, nctg_y in product(self.gen_sizes_matmul(2, y_dim=3),
                                                            (True, False), (True, False)):
                x = make_arg(size_x, noncontiguous=nctg_x)
                y = make_arg(size_y, noncontiguous=nctg_y)
                self.check_single_matmul(x, y)
            filename1 = torch.cuda.tunable.get_filename()
            unique_id = self.id().split(".")[-1]
            ordinal = torch.cuda.current_device()
            assert filename1 == f"tunableop_results_{unique_id}_{ordinal}.csv"
            assert len(torch.cuda.tunable.get_results()) > 0
            self.assertTrue(os.path.exists(filename1))
            # We need to reset the filename to the default value so we can properly
            # clean up intermediate files
            self._set_tunableop_defaults()
@onlyCUDA
@skipCUDAIfNotRocm
@dtypes(torch.half)
def test_matmul_offline_tunableop(self, device, dtype):
import os
# Main offline tunableop test
# NOTE: The offline tuning does not support certain tensor
# shapes as noted below. Submatrics / matrix slices are
# not supported at all.
def has_any_dim_size_one(tensor: torch.Tensor):
"""Check if any dimension of a PyTorch tensor has size 1."""
return any(dim == 1 for dim in tensor.shape)
def is_mm_compatible(A, B):
"""Check if two matrices A and B are compatible for torch.mm."""
return A.dim() == 2 and B.dim() == 2 and A.shape[1] == B.shape[0]
def is_bmm_compatible(A, B):
"""Check if two 3D tensors are compatible for torch.bmm."""
return (
A.dim() == 3 and B.dim() == 3 and
A.shape[0] == B.shape[0] and # Batch size must match
A.shape[2] == B.shape[1] # Inner dimensions must align
)
with self._tunableop_ctx():
torch.cuda.tunable.set_rotating_buffer_size(0)
ordinal = torch.cuda.current_device()
# record GEMM
torch.cuda.tunable.tuning_enable(False)
torch.cuda.tunable.record_untuned_enable(True)
self.assertTrue(torch.cuda.tunable.record_untuned_is_enabled())
make_arg = partial(make_tensor, device=device, dtype=dtype)
# offline tuning only handles matmuls on two dimensional tensors
# matmul that require broadcasting are
# not supported either.
# Below we check the different transA and transB combinations.
for (size_x, size_y) in self.gen_sizes_matmul(x_dim=2, y_dim=2, matrix_size=4):
x = make_arg(size_x, noncontiguous=False)
y = make_arg(size_y, noncontiguous=False)
if is_mm_compatible(x, y):
self.check_single_matmul(x, y)
else:
continue
if is_mm_compatible(x.t(), y):
self.check_single_matmul(x.t(), y)
else:
continue
if is_mm_compatible(x, y.t()):
self.check_single_matmul(x, y.t())
else:
continue
if is_mm_compatible(x.t(), y.t()):
self.check_single_matmul(x.t(), y.t())
else:
continue
# offline tuning only handles batched matmuls on
# three dimensional tensors
# matmul that require broadcasting are
# not supported either.
# Below we check the different transA and transB combinations.
for (size_x, size_y) in self.gen_sizes_matmul(x_dim=3, y_dim=3, matrix_size=4):
x = make_arg(size_x, noncontiguous=False)
y = make_arg(size_y, noncontiguous=False)
if has_any_dim_size_one(x) or has_any_dim_size_one(y):
continue
if is_bmm_compatible(x, y):
self.check_single_matmul(x, y)
else:
continue
if is_bmm_compatible(x.transpose(1, 2), y):
self.check_single_matmul(x.transpose(1, 2), y)
else:
continue
if is_bmm_compatible(x, y.transpose(1, 2)):
self.check_single_matmul(x, y.transpose(1, 2))
else:
continue
if is_bmm_compatible(x.transpose(1, 2), y.transpose(1, 2)):
self.check_single_matmul(x.transpose(1, 2), y.transpose(1, 2))
else:
continue
self.assertTrue(torch.cuda.tunable.is_enabled())
self.assertTrue(torch.cuda.tunable.tuning_is_enabled() is False)
untuned_filename = get_tunableop_untuned_filename()
# tuning the untuned GEMMs in file
torch.cuda.tunable.tuning_enable(True)
torch.cuda.tunable.record_untuned_enable(False)
# set these to single iterations to keep it short but still exercise the code
torch.cuda.tunable.set_max_tuning_duration(1)
torch.cuda.tunable.set_max_tuning_iterations(1)
ref_results = len(torch.cuda.tunable.get_results())
torch.cuda.tunable.tune_gemm_in_file(untuned_filename)
new_results = len(torch.cuda.tunable.get_results())
self.assertGreater(new_results - ref_results, 0)
results_filename = torch.cuda.tunable.get_filename()
self.assertTrue(os.path.exists(results_filename))
# Compare Param Signature of untuned and tuned results
ok = self._compare_untuned_tuned_entries()
self.assertTrue(ok)
@onlyCUDA
@skipCUDAIfNotRocm
@runOnRocmArch(MI300_ARCH)
@dtypes(torch.torch.float8_e4m3fnuz, torch.float8_e5m2fnuz)
def test_scaled_gemm_offline_tunableop(self, device, dtype):
import os
# This test is the offline version of test_scaled_gemm_tunableop
with self._tunableop_ctx():
ordinal = torch.cuda.current_device()
torch.cuda.tunable.set_rotating_buffer_size(0)
# record GEMM
torch.cuda.tunable.tuning_enable(False)
torch.cuda.tunable.record_untuned_enable(True)
self.assertTrue(torch.cuda.tunable.record_untuned_is_enabled())
# Scaled GEMM parameters
fillA = 0.25
fillB = 0.75
n = 16
m = 32
k = 64
scaleA = torch.tensor(0.8, device=device)
scaleB = torch.tensor(0.9, device=device)
dtypeA = dtypeB = dtype
matA = torch.full((m, k), fillA, dtype=dtypeA, device=device)
matB = torch.full((n, k), fillB, dtype=dtypeB, device=device).t()
# Summary of bias types that are supported:
# - bias vector not supported when out_dtype = fp32
# - bias_dtype allowed in PyTorch are Half or BFloat16
# - bias_dtype in hipBLASLt restrictions can be found here:
# https://rocm.docs.amd.com/projects/hipBLASLt/en/develop/api-reference.html
fillbias = 0.10
biasf16 = torch.full((n,), fillbias, dtype=torch.half, device=device)
biasbf16 = torch.full((n,), fillbias, dtype=torch.bfloat16, device=device)
# out_dtype = dtype
torch._scaled_mm(matA, matB, scale_a=scaleA, scale_b=scaleB, out_dtype=dtype)
# out_dtype = dtype with bias vector
torch._scaled_mm(matA, matB, scale_a=scaleA, scale_b=scaleB, out_dtype=dtype, bias=biasf16)
# out_dtype = float32
torch._scaled_mm(matA, matB, scale_a=scaleA, scale_b=scaleB, out_dtype=torch.float32)
# out_dtype = bfloat16
torch._scaled_mm(matA, matB, scale_a=scaleA, scale_b=scaleB, out_dtype=torch.bfloat16)
# out_dtype = bfloat16 with bias vector
torch._scaled_mm(matA, matB, scale_a=scaleA, scale_b=scaleB, out_dtype=torch.bfloat16, bias=biasbf16)
# out_dtype = float16
torch._scaled_mm(matA, matB, scale_a=scaleA, scale_b=scaleB, out_dtype=torch.half)
# rowwise scaling, only supported for this dtype combination
if dtype is torch.torch.float8_e4m3fnuz:
scaleA = torch.ones((matA.shape[0], 1), device=device)
scaleB = torch.ones((1, matB.shape[1]), device=device)
torch._scaled_mm(matA, matB, scale_a=scaleA, scale_b=scaleB, out_dtype=torch.bfloat16)
self.assertTrue(torch.cuda.tunable.is_enabled())
self.assertTrue(torch.cuda.tunable.tuning_is_enabled() is False)
untuned_filename = get_tunableop_untuned_filename()
# tuning the untuned GEMMs in file
torch.cuda.tunable.tuning_enable(True)
torch.cuda.tunable.record_untuned_enable(False)
# set these to single iterations to keep it short but still exercise the code
torch.cuda.tunable.set_max_tuning_duration(1)
torch.cuda.tunable.set_max_tuning_iterations(1)
ref_results = len(torch.cuda.tunable.get_results())
torch.cuda.tunable.tune_gemm_in_file(untuned_filename)
new_results = len(torch.cuda.tunable.get_results())
# This stores total number of cumulative results
total_num_results = new_results - ref_results
# Rowwise case will have an extra solution
if dtype is torch.torch.float8_e4m3fnuz: # rowwise
count = 7
else:
count = 6
self.assertEqual(total_num_results, count)
results_filename = torch.cuda.tunable.get_filename()
self.assertTrue(os.path.exists(results_filename))
# Compare Param Signature of untuned and tuned results
ok = self._compare_untuned_tuned_entries()
self.assertTrue(ok)
    @unittest.skipIf(not TEST_MULTIGPU, "Requires at least 2 GPUs")
    @onlyCUDA
    @skipCUDAIfNotRocm
    @dtypes(torch.float)
    def test_matmul_offline_mgpu_tunableop(self, device, dtype):
        """Offline tuning with multiple GPUs.

        Records untuned GEMMs on one GPU, then distributes the tuning
        across all visible GPUs via ``mgpu_tune_gemm_in_file`` and checks
        that both the per-GPU results files and the merged "full" results
        files exist with the expected relative sizes.
        """
        import os
        with self._tunableop_ctx():
            # Use all available GPUs for this test
            total_gpus = torch.cuda.device_count()
            ordinal = torch.cuda.current_device()
            # Untuned filename has unique id, but results file
            # does not because it is executed in a subprocess
            untuned_filename = get_tunableop_untuned_filename()
            torch.cuda.tunable.set_filename(f"tunableop_results{ordinal}.csv")
            # turn on untuned GEMM recording and turn off tuning
            torch.cuda.tunable.tuning_enable(False)
            torch.cuda.tunable.record_untuned_enable(True)
            # Choose matrix sizes that have not been used before
            # elsewhere in the test suite, so each GEMM here is new.
            m = n = k = 23
            # Create at least one GEMM per GPU, so when the GEMMs
            # are distributed to the GPUs there is at least one
            # GEMM per GPU.
            for g in range(1, total_gpus + 1):
                A = torch.rand(m * g, k * g, device=device, dtype=dtype)
                B = torch.rand(k * g, n * g, device=device, dtype=dtype)
                C = torch.matmul(A, B)
            # check the untuned file was written and make sure that it is not zero
            self.assertTrue(os.path.exists(untuned_filename))
            self.assertGreater(os.path.getsize(untuned_filename), 0)
            # Perform multi-GPU tuning
            torch.cuda.tunable.mgpu_tune_gemm_in_file(untuned_filename, total_gpus)
            # Check the results files were written, one per GPU.
            # Check that each results file is not empty and store GPU 0's
            # size in a local variable for the next loop.
            for i in range(total_gpus):
                result_filename = f"tunableop_results{i}.csv"
                self.assertTrue(os.path.exists(result_filename))
                self.assertGreater(os.path.getsize(result_filename), 0)
                if i == 0: # Store for next loop
                    result_size = os.path.getsize(result_filename)
            # Check the full results files were written, one per GPU.
            # Check that the size of the full results file for
            # GPU 0 is greater than that of the individual results
            # for GPU 0.
            # Lastly, check that all tunableop_results_full{i} have
            # the same size as tunableop_results_full0.
            for i in range(total_gpus):
                result_full_filename = f"tunableop_results_full{i}.csv"
                self.assertTrue(os.path.exists(result_full_filename))
                if i == 0: # Store for next subsequent iterations
                    result_full_size = os.path.getsize(result_full_filename)
                self.assertGreater(result_full_size, result_size)
                self.assertEqual(os.path.getsize(result_full_filename), result_full_size)
@onlyCUDA
@dtypes(torch.float)
def test_rotating_buffer_tunableop(self, device, dtype):
# Test the TunableOp rotating buffer API
# Test the default value, will return the l2_cache_size
self._set_tunableop_defaults()
l2_cache_size = torch.cuda.tunable.get_rotating_buffer_size()
self.assertGreater(l2_cache_size, 0)
# Test zero
torch.cuda.tunable.set_rotating_buffer_size(0)
self.assertEqual(torch.cuda.tunable.get_rotating_buffer_size(), 0)
# Test one MB
torch.cuda.tunable.set_rotating_buffer_size(1)
self.assertEqual(torch.cuda.tunable.get_rotating_buffer_size(), 1024 * 1024)
# Test negative value, which will return the l2 cache size
torch.cuda.tunable.set_rotating_buffer_size(-1)
self.assertEqual(torch.cuda.tunable.get_rotating_buffer_size(), l2_cache_size)
    @onlyCUDA
    @skipCUDAIfNotRocm
    @dtypes(torch.float)
    def test_bmm_tunableop_rocm(self, device, dtype):
        """Regression test: buffer rotation (on by default) with strided
        batched GEMM TunableOp was causing a memory fault."""
        with self._tunableop_ctx():
            torch.cuda.tunable.set_max_tuning_iterations(10)
            # Make sure the rotating buffer is not zero, otherwise this test does nothing useful.
            rotating_buffer = torch.cuda.tunable.get_rotating_buffer_size()
            self.assertGreater(rotating_buffer, 0)
            # The following cases cover all previous failure cases and are here to catch regressions.
            B = 16
            N = M = K = 256
            # NOTE: dtype/device intentionally override the decorator-provided
            # values to reproduce the exact original failure configurations.
            dtype = torch.bfloat16
            device = torch.device("cuda:0")
            # case 1: plain contiguous batched matmul
            i1 = torch.randn((B, N, M), device=device, dtype=dtype)
            i2 = torch.randn((B, M, K), device=device, dtype=dtype)
            out = torch.bmm(i1, i2)
            # case 2: both operands permuted (non-contiguous strides)
            i1 = torch.randn((B, N, M), device=device, dtype=dtype)
            i1 = torch.permute(i1, (1, 2, 0))
            i2 = torch.randn((B, M, K), device=device, dtype=dtype)
            i2 = torch.permute(i2, (1, 0, 2))
            out = torch.bmm(i1, i2)
            # case 3: batch dimension moved out of the leading position
            i1 = torch.randn((N, B, M), device=device, dtype=dtype)
            i1 = torch.permute(i1, (1, 0, 2))
            i2 = torch.randn((M, B, K), device=device, dtype=dtype)
            i2 = torch.permute(i2, (1, 2, 0))
            out = torch.bmm(i1, i2)
            # case 4: baddbmm with explicitly overridden (as_strided) strides
            input_tensor = torch.rand((1920, 1, 100), device=device, dtype=dtype)
            input_tensor = torch.as_strided(
                input_tensor, size=(1920, 1, 100), stride=(100, 100, 1)
            )
            batch1_tensor = torch.rand((1920, 256, 512), device=device, dtype=dtype)
            batch1_tensor = torch.as_strided(
                batch1_tensor, size=(1920, 256, 512), stride=(512, 983040, 1)
            )
            batch2_tensor = torch.rand((1920, 512, 100), device=device, dtype=dtype)
            batch2_tensor = torch.as_strided(
                batch2_tensor, size=(1920, 512, 100), stride=(51200, 100, 1)
            )
            out = torch.baddbmm(input_tensor, batch1_tensor, batch2_tensor)
            # case 5: matmul on chunked (split) views of 4D tensors
            q = torch.randn([16, 16, 1024, 64], device=device, dtype=dtype)
            k = torch.randn([16, 16, 1024, 64], device=device, dtype=dtype)
            q_chunks = q.split(512, dim=-2)
            k_chunks = k.split(64, dim=-2)
            C = torch.matmul(q_chunks[0], k_chunks[0])
    @onlyCUDA
    @skipCUDAIfNotRocm
    @dtypes(torch.bfloat16)
    def test_numeric_check_leak_tunableop_rocm(self, device, dtype):
        """Tuning with the numerical check enabled must not leak CUDA memory."""
        from torch.testing._internal.common_utils import CudaMemoryLeakCheck
        # run operator first without tuning to ensure all rocm libs are loaded,
        # otherwise false positive mem leak
        B = 5
        N = M = K = 29
        device = torch.device("cuda:0")
        i1 = torch.randn((B, N, M), device=device, dtype=dtype)
        i2 = torch.randn((B, M, K), device=device, dtype=dtype)
        out = torch.bmm(i1, i2)
        with self._tunableop_ctx():
            torch.cuda.tunable.set_rotating_buffer_size(0)
            # enable tunableop numeric check via API.
            torch.cuda.tunable.set_numerical_check_tolerances(True, 0.1, 0.1)
            ordinal = torch.cuda.current_device()
            # Save the current iteration limit so it can be restored below.
            iterations = torch.cuda.tunable.get_max_tuning_iterations()
            torch.cuda.tunable.set_max_tuning_iterations(10)
            # The tuned bmm (with numerical checking active) must not trip
            # the CUDA memory leak checker.
            with CudaMemoryLeakCheck(self):
                out = torch.bmm(i1, i2)
                torch.cuda.tunable.set_max_tuning_iterations(iterations)
                torch.cuda.tunable.enable(False)
    @onlyCUDA
    @skipCUDAIfNotRocm
    @dtypes(torch.float)
    def test_validator_tunableop_rocm(self, device, dtype):
        """The validator on ROCm must have exactly 5 entries with sane versions."""
        # Format of the Validator is as follows:
        # Validator,PT_VERSION,X.Y.Z.
        # Validator,ROCBLAS_VERSION,X.Y,Z
        # Validator,HIPBLASLT_VERSION,X,Y.Z
        # Validator,ROCM_Version,X,Y.Z
        # Validator,GCN_ARCH_NAME,<architecture name>
        validator_num_lines = 5
        with self._tunableop_ctx():
            # set these to single iterations to keep it short but still exercise the code
            torch.cuda.tunable.set_max_tuning_iterations(1)
            # Tune one small GEMM so validators are populated.
            N = M = K = 4
            A = torch.randn(N, K, device=device, dtype=dtype)
            B = torch.randn(K, M, device=device, dtype=dtype)
            C = torch.matmul(A, B)
            self.assertEqual(len(torch.cuda.tunable.get_validators()), validator_num_lines)
            validators = get_tunableop_validators()
            # Check for rocBLAS and hipBLASLt
            self.assertTrue("ROCBLAS_VERSION" in validators)
            # format: [major].[minor].[patch].[tweak].[commit id]
            self.assertTrue(re.match(r'^\d+[a-z0-9.]+$', validators["ROCBLAS_VERSION"]))
            self.assertTrue("HIPBLASLT_VERSION" in validators)
            # format: presumably [version]-[commit id] -- TODO confirm against hipBLASLt versioning
            self.assertTrue(re.match(r'^\d+-[a-z0-9]+$', validators["HIPBLASLT_VERSION"]))
@onlyCUDA
@dtypes(torch.half)
def test_minimum_tuning_iteration_tunableop(self, device, dtype):
# Make sure that there is at least one tuning iteration occurs
# when the max tuning duration and max tuning iteration are set
# to zero.
with self._tunableop_ctx():
# Tune a single GEMM and verify that we get a new tuning result
torch.cuda.tunable.set_max_tuning_duration(0)
torch.cuda.tunable.set_max_tuning_iterations(0)
# Reference number of results
ref_num_results = len(torch.cuda.tunable.get_results())
N = M = K = 8
A = torch.randn(N, K, device=device, dtype=dtype)
B = torch.randn(K, M, device=device, dtype=dtype)
C = torch.matmul(A, B)
# This stores total number of cumulative results
total_num_results = len(torch.cuda.tunable.get_results())
# There must be a new tuning result
self.assertEqual((total_num_results - ref_num_results), 1)
@onlyCUDA
@dtypes(torch.half)
def test_matmul_check_entries_tunableop(self, device, dtype):
# Tune a couple of matrix multiplies
# Verify we get the correct number of results
with self._tunableop_ctx():
# set these to single iterations to keep it short but still exercise the code
torch.cuda.tunable.set_max_tuning_iterations(1)
# Reference number of results
ref_num_results = len(torch.cuda.tunable.get_results())
# Execute matrix multiplies. We intentionally throw in M list the same index
# twice. The CSV file should only get unique GEMMs
count_matmul = 4
K = 64
for M in [32, 64, 32]:
for N in [32, 64]:
A = torch.randn(N, K, device=device, dtype=dtype)
B = torch.randn(K, M, device=device, dtype=dtype)
C = torch.matmul(A, B)
# This stores total number of cumulative results
total_num_results = len(torch.cuda.tunable.get_results())
# Take the difference to calculate the number of results from
# the this test and verify that it agrees with the number of
# GEMMs.
self.assertEqual((total_num_results - ref_num_results), count_matmul)
@onlyCUDA
@dtypes(torch.float)
def test_disable_tuning_tunableop(self, device, dtype):
# Test that the Python API for disabling tuning stops
# additional tunings even when TunableOp is enabled.
# In other words, test that:
# PYTORCH_TUNABLEOP_ENABLED=1
# PYTORCH_TUNABLEOP_TUNING=0
# is no longer tuning GEMMs.
with self._tunableop_ctx():
# set these to single iterations to keep it short but still exercise the code
torch.cuda.tunable.set_max_tuning_iterations(1)
# Reference number of results
ref_num_results = len(torch.cuda.tunable.get_results())
# Tune one GEMMs to make sure TunableOp is enabled
M = 11
N = 13
K = 17
A = torch.randn(N, K, device=device, dtype=dtype)
B = torch.randn(K, M, device=device, dtype=dtype)
C = torch.matmul(A, B)
# This stores total number of cumulative results
total_num_results = len(torch.cuda.tunable.get_results())
# Take the difference to calculate the number of results from
# this test. There should be one additional tuned GEMM
self.assertEqual((total_num_results - ref_num_results), 1)
# New total number of results becomes new reference result
ref_num_results = total_num_results
# Now disable further tuning, while keeping TunableOp Enabled
torch.cuda.tunable.tuning_enable(False)
# Try to tune one more GEMM
M = 11
N = 13
K = 18
A = torch.randn(N, K, device=device, dtype=dtype)
B = torch.randn(K, M, device=device, dtype=dtype)
C = torch.matmul(A, B)
# Take the difference to calculate the number of results from
# this test. There should be no change in the number of results
# since tuning is disable.
self.assertEqual((total_num_results - ref_num_results), 0)
@onlyCUDA
@dtypes(torch.float)
def test_dump_results_on_exit_tunableop(self, device, dtype):
# Test that the TunableOp results file is created
# and is NOT empty.
# To test this we create a subprocess and then
# execute a matmul from within the subprocess
import os
import multiprocessing as mp
with self._tunableop_ctx():
filename = torch.cuda.tunable.get_filename()
# force=True needed according to:
# https://docs.python.org/3/library/multiprocessing.html#multiprocessing.set_start_method
# This is because a different test in this process could have
# already set the start method
mp.set_start_method("spawn", force=True)
p = mp.Process(target=tunableop_matmul, args=(device, dtype, filename, False))
p.start()
p.join()
# Make sure the results file exists and that it is not zero.
self.assertTrue(os.path.exists(filename))
self.assertTrue(os.path.getsize(filename) > 0)
@onlyCUDA
@dtypes(torch.bfloat16)
def test_gemm_bias_tunableop(self, device, dtype):
# Test GEMM and bias tuning
with self._tunableop_ctx():
# set these to single iterations to keep it short but still exercise the code
torch.cuda.tunable.set_max_tuning_iterations(1)
# Reference number of results
ref_num_results = len(torch.cuda.tunable.get_results())
m = 3
n = 5
k = 7
# 'TN' case
X = torch.rand(m, k, dtype=dtype, device=device)
matA = torch.rand(n, k, dtype=dtype, device=device)
bias = torch.rand(n, dtype=dtype, device=device)
torch.nn.functional.linear(X, matA, bias)
# 'NT' case
X = torch.rand(k, m, dtype=dtype, device=device).t()
matA = torch.rand(k, n, dtype=dtype, device=device).t()
bias = torch.rand(n, dtype=dtype, device=device)
torch.nn.functional.linear(X, matA, bias)
# This stores total number of cumulative results
total_num_results = len(torch.cuda.tunable.get_results())
# There must be a new tuning result
self.assertEqual((total_num_results - ref_num_results), 2)
@onlyCUDA
@skipCUDAIfNotRocm
@dtypes(torch.bfloat16)
def test_gemm_bias_offline_tunableop(self, device, dtype):
import os
# This test is the offline version of test_gemm_bias_tunableop
ordinal = torch.cuda.current_device()
with self._tunableop_ctx():
torch.cuda.tunable.set_rotating_buffer_size(0)
# record GEMM
torch.cuda.tunable.tuning_enable(False)
torch.cuda.tunable.record_untuned_enable(True)
self.assertTrue(torch.cuda.tunable.record_untuned_is_enabled())
m = 5
n = 7
k = 9
# 'TN' case
X = torch.rand(m, k, dtype=dtype, device=device)
matA = torch.rand(n, k, dtype=dtype, device=device)
bias = torch.rand(n, dtype=dtype, device=device)
torch.nn.functional.linear(X, matA, bias)
# 'NT' case
X = torch.rand(k, m, dtype=dtype, device=device).t()
matA = torch.rand(k, n, dtype=dtype, device=device).t()
bias = torch.rand(n, dtype=dtype, device=device)
torch.nn.functional.linear(X, matA, bias)
self.assertTrue(torch.cuda.tunable.is_enabled())
self.assertTrue(torch.cuda.tunable.tuning_is_enabled() is False)
untuned_filename = get_tunableop_untuned_filename()
# tuning the untuned GEMMs in file
torch.cuda.tunable.tuning_enable(True)
torch.cuda.tunable.record_untuned_enable(False)
# set these to single iterations to keep it short but still exercise the code
torch.cuda.tunable.set_max_tuning_duration(1)
torch.cuda.tunable.set_max_tuning_iterations(1)
ref_results = len(torch.cuda.tunable.get_results())
torch.cuda.tunable.tune_gemm_in_file(untuned_filename)
new_results = len(torch.cuda.tunable.get_results())
# This stores total number of cumulative results
total_num_results = new_results - ref_results
# There must be a new tuning results
self.assertEqual(total_num_results, 2)
results_filename = torch.cuda.tunable.get_filename()
self.assertTrue(os.path.exists(results_filename))
# Compare Param Signature of untuned and tuned results
ok = self._compare_untuned_tuned_entries()
self.assertTrue(ok)
@onlyCUDA
@skipCUDAIfNotRocm
@runOnRocmArch(MI300_ARCH)
@dtypes(torch.torch.float8_e4m3fnuz, torch.float8_e5m2fnuz)
def test_scaled_gemm_tunableop(self, device, dtype):
# Test Scaled GEMM tuning.
# We do not test the full set of scaled GEMM parameters, since
# hipBLASLt does not support all combinations.
# Here is a short list of extra parameters that are not tested
# - amax
# - use_fast_accum
# - bias dtype that are different than torch.half
#
# Refer to test/test_matmul_cuda for support combinations that are
# tested by PyTorch
with self._tunableop_ctx():
# set these to single iterations to keep it short but still exercise the code
torch.cuda.tunable.set_rotating_buffer_size(0)
torch.cuda.tunable.set_max_tuning_iterations(1)
# Reference number of results
ref_num_results = len(torch.cuda.tunable.get_results())
# Scaled GEMM parameters
fillA = 0.25
fillB = 0.75
n = 64
m = 16
k = 32
scaleA = torch.tensor(0.8, device=device)
scaleB = torch.tensor(0.9, device=device)
dtypeA = dtypeB = dtype
matA = torch.full((m, k), fillA, dtype=dtypeA, device=device)
matB = torch.full((n, k), fillB, dtype=dtypeB, device=device).t()
# Summary of bias types that are supported:
# - bias vector not supported when out_dtype = fp32
# - bias_dtype allowed in PyTorch are Half or BFloat16
# - bias_dtype in hipBLASLt restrictions can be found here:
# https://rocm.docs.amd.com/projects/hipBLASLt/en/develop/api-reference.html
fillbias = 0.10
biasf16 = torch.full((n,), fillbias, dtype=torch.half, device=device)
biasbf16 = torch.full((n,), fillbias, dtype=torch.bfloat16, device=device)
# out_dtype = dtype
torch._scaled_mm(matA, matB, scale_a=scaleA, scale_b=scaleB, out_dtype=dtype)
# out_dtype = dtype with bias vector
torch._scaled_mm(matA, matB, scale_a=scaleA, scale_b=scaleB, out_dtype=dtype, bias=biasf16)
# out_dtype = float32
torch._scaled_mm(matA, matB, scale_a=scaleA, scale_b=scaleB, out_dtype=torch.float32)
# out_dtype = bfloat16
torch._scaled_mm(matA, matB, scale_a=scaleA, scale_b=scaleB, out_dtype=torch.bfloat16)
# out_dtype = bfloat16 with bias vector
torch._scaled_mm(matA, matB, scale_a=scaleA, scale_b=scaleB, out_dtype=torch.bfloat16, bias=biasbf16)
# out_dtype = float16
torch._scaled_mm(matA, matB, scale_a=scaleA, scale_b=scaleB, out_dtype=torch.half)
# rowwise scaling, only supported for this dtype combination
if dtype is torch.torch.float8_e4m3fnuz:
scaleA = torch.ones((matA.shape[0], 1), device=device)
scaleB = torch.ones((1, matB.shape[1]), device=device)
torch._scaled_mm(matA, matB, scale_a=scaleA, scale_b=scaleB, out_dtype=torch.bfloat16)
# This stores total number of cumulative results
total_num_results = len(torch.cuda.tunable.get_results())
# Rowwise case will have an extra solution
if dtype is torch.torch.float8_e4m3fnuz: # rowwise
count = 7
else:
count = 6
self.assertEqual((total_num_results - ref_num_results), count)
    @onlyCUDA
    @skipCUDAIfNotRocm
    @runOnRocmArch(MI300_ARCH)
    @dtypes(torch.float)
    def test_tf32_tunableop(self, device, dtype):
        """Online tuning with TF32 enabled, then disabled.

        Tunes the same GEMM once with TF32 on (expects a tf32 op signature
        that was not serviced by rocBLAS) and once with TF32 off (expects a
        float op signature). TF32 is always restored off in ``finally``.
        """
        try:
            with self._tunableop_ctx():
                torch.backends.cuda.matmul.allow_tf32 = True
                torch.cuda.tunable.set_rotating_buffer_size(0)
                # Reference number of results
                ref_num_results = len(torch.cuda.tunable.get_results())
                N = M = K = 37
                A = torch.randn(N, K, device=device, dtype=dtype)
                B = torch.randn(K, M, device=device, dtype=dtype)
                C = torch.matmul(A, B)
                # This stores total number of cumulative results
                total_num_results = len(torch.cuda.tunable.get_results())
                # There must be a new tuning result
                self.assertEqual((total_num_results - ref_num_results), 1)
                # The results must NOT be from rocBLAS
                # result can be either Default or Hipblaslt
                # Additionally, the Op Signature must be tf32
                last_result = torch.cuda.tunable.get_results()
                found_result = find_tunableop_result(last_result,
                                                     'GemmTunableOp_tf32_NN',
                                                     'nn_37_37_37_ld_37_37_37')
                self.assertTrue(found_result is not None)
                self.assertTrue('Rocblas' not in found_result)
                # Now disable TF32
                torch.backends.cuda.matmul.allow_tf32 = False
                # Update the number of reference results
                ref_num_results = total_num_results
                # Tune the same GEMM again
                C = torch.matmul(A, B)
                # This stores total number of cumulative results
                total_num_results = len(torch.cuda.tunable.get_results())
                # There must be a new tuning result
                self.assertEqual((total_num_results - ref_num_results), 1)
                # The new tuning result must be of type float
                last_result = torch.cuda.tunable.get_results()
                found_result = find_tunableop_result(last_result,
                                                     'GemmTunableOp_float_NN',
                                                     'nn_37_37_37_ld_37_37_37')
                self.assertTrue(found_result is not None)
        finally:
            # Disable TF32
            torch.backends.cuda.matmul.allow_tf32 = False
    @onlyCUDA
    @skipCUDAIfNotRocm
    @runOnRocmArch(MI300_ARCH)
    @dtypes(torch.float)
    def test_tf32_offline_tunableop(self, device, dtype):
        """Offline version of test_tf32_tunableop.

        Records one GEMM with TF32 on and one with TF32 off, tunes both
        from the untuned file, and verifies that both a tf32 and a float
        result are present. TF32 is always restored off in ``finally``.
        """
        import os
        try:
            with self._tunableop_ctx():
                torch.backends.cuda.matmul.allow_tf32 = True
                ordinal = torch.cuda.current_device()
                torch.cuda.tunable.set_rotating_buffer_size(0)
                # record GEMM
                torch.cuda.tunable.tuning_enable(False)
                torch.cuda.tunable.record_untuned_enable(True)
                self.assertTrue(torch.cuda.tunable.record_untuned_is_enabled())
                N = M = K = 41
                A = torch.randn(N, K, device=device, dtype=dtype)
                B = torch.randn(K, M, device=device, dtype=dtype)
                C = torch.matmul(A, B)
                # Now disable TF32
                torch.backends.cuda.matmul.allow_tf32 = False
                C = torch.matmul(A, B)
                untuned_filename = get_tunableop_untuned_filename()
                self.assertTrue(os.path.exists(untuned_filename))
                # tuning the untuned GEMMs in file
                torch.cuda.tunable.tuning_enable(True)
                torch.cuda.tunable.record_untuned_enable(False)
                # set these to single iterations to keep it short but still exercise the code
                torch.cuda.tunable.set_max_tuning_duration(1)
                torch.cuda.tunable.set_max_tuning_iterations(1)
                ref_results = len(torch.cuda.tunable.get_results())
                torch.cuda.tunable.tune_gemm_in_file(untuned_filename)
                new_results = len(torch.cuda.tunable.get_results())
                # This stores total number of cumulative results
                total_num_results = new_results - ref_results
                # There must be two new tuning results: one tf32, one float.
                self.assertEqual(total_num_results, 2)
                last_result = torch.cuda.tunable.get_results()
                found_result = find_tunableop_result(last_result,
                                                     'GemmTunableOp_tf32_NN',
                                                     'nn_41_41_41_ld_41_41_41')
                self.assertTrue(found_result is not None)
                found_result = find_tunableop_result(last_result,
                                                     'GemmTunableOp_float_NN',
                                                     'nn_41_41_41_ld_41_41_41')
                self.assertTrue(found_result is not None)
                results_filename = torch.cuda.tunable.get_filename()
                self.assertTrue(os.path.exists(results_filename))
                # Compare Param Signature of untuned and tuned results
                ok = self._compare_untuned_tuned_entries()
                self.assertTrue(ok)
        finally:
            # Disable TF32
            torch.backends.cuda.matmul.allow_tf32 = False
    @onlyCUDA
    @skipCUDAIfNotRocm
    @dtypes(torch.float16)
    def test_blaslog_tunableop(self, device, dtype):
        """PYTORCH_TUNABLEOP_BLAS_LOG=1 must add a BLAS-params column.

        We record GEMMs and then check that the BLAS_PARAMS appear in both
        the tunableop_untuned CSV file and the tunableop_results CSV file.

        NOTE: This is done in subprocesses because in the main process
        PYTORCH_TUNABLEOP_BLAS_LOG has already been deactivated and its
        value is sticky.
        """
        import os
        import multiprocessing as mp
        with self._tunableop_ctx():
            # NOTE(review): os.putenv does not update os.environ, only the
            # C-level environment inherited by child processes — presumably
            # that is exactly what makes the spawned workers see it; confirm.
            os.putenv("PYTORCH_TUNABLEOP_BLAS_LOG", "1")
            # TunableOp is running in a subprocess
            # online tuning needs filename set through API
            # offline tuning needs filename set through environment variable
            result_filename = torch.cuda.tunable.get_filename()
            untuned_filename = get_tunableop_untuned_filename()
            # Offline Tuning case in a subprocess
            # force=True needed according to:
            # https://docs.python.org/3/library/multiprocessing.html#multiprocessing.set_start_method
            # This is because a different test in this process could have
            # already set the start method
            mp.set_start_method("spawn", force=True)
            p = mp.Process(target=tunableop_matmul, args=(device, dtype, None, True))
            p.start()
            p.join()
            # Make sure the results file exists and that it is not zero
            self.assertTrue(os.path.exists(untuned_filename))
            self.assertTrue(os.path.getsize(untuned_filename) > 0)
            # Check that the BLAS PARAMS are in the CSV file
            import csv
            with open(untuned_filename) as file:
                reader = csv.reader(file)
                first_row = next(reader)
                # Check for extra column
                self.assertGreater(len(first_row), 3)
                # Check for YAML entry to the right of
                # BLAS PARAMS
                self.assertTrue("{ function:" in first_row[2])
            # Online tuning case in a subprocess
            # force=True needed according to:
            # https://docs.python.org/3/library/multiprocessing.html#multiprocessing.set_start_method
            # This is because a different test in this process could have
            # already set the start method
            mp.set_start_method("spawn", force=True)
            p = mp.Process(target=tunableop_matmul, args=(device, dtype, result_filename, False))
            p.start()
            p.join()
            # Make sure the results file exists and that it is not zero
            self.assertTrue(os.path.exists(result_filename))
            self.assertGreater(os.path.getsize(result_filename), 0)
            # Check that there BLAS PARAMS are in the CSV file
            with open(result_filename) as file:
                reader = csv.reader(file)
                for _ in range(5):  # Skip the first 5 lines for the validator
                    next(reader, None)
                # Check for extra column
                first_row = next(reader)
                self.assertGreater(len(first_row), 5)
                # Check for YAML entry to the right of
                # BLAS PARAMS
                self.assertTrue("{ function:" in first_row[4])
@onlyCUDA
@skipCUDAIfNotRocm
@dtypes(torch.float)
def test_mm_submatrix_offline_tunableop(self, device, dtype):
import os
# Test offline tuning with submatrices
# Covers GEMM, ScaledGEMM, and GEMM+bias.
ordinal = torch.cuda.current_device()
with self._tunableop_ctx():
torch.cuda.tunable.set_rotating_buffer_size(0)
# set these to single iterations to keep it short but still exercise the code
torch.cuda.tunable.set_max_tuning_duration(1)
torch.cuda.tunable.set_max_tuning_iterations(1)
# record GEMM
torch.cuda.tunable.tuning_enable(False)
torch.cuda.tunable.record_untuned_enable(True)
self.assertTrue(torch.cuda.tunable.record_untuned_is_enabled())
lda = 12
ldb = 10
ldc = 14
n = 8
m = 4
k = 2
# Covers GEMM and Scaled GEMM cases
# Scaled GEMM is a subset of GEMM cases
# There might be less confusing ways create submatrices, but this works
# just fine and covers the four transA, transB combinations.
# 'TN'
matA = torch.rand(ldc, lda, dtype=dtype, device=device)
matB = torch.rand(ldc, ldb, dtype=dtype, device=device).t()
subA = matA[:m, :k]
subB = matB[:k, :n]
torch.mm(subA, subB)
# 'NN'
matA = torch.rand(lda, ldc, dtype=dtype, device=device)
matB = torch.rand(ldc, ldb, dtype=dtype, device=device)
subA = matA[:m, :k]
subB = matB[:k, :n]
torch.mm(subA, subB)
# 'NT'
matA = torch.rand(ldc, lda, dtype=dtype, device=device).t()
matB = torch.rand(ldc, ldb, dtype=dtype, device=device)
subA = matA[:m, :k]
subB = matB[:k, :n]
torch.mm(subA, subB)
# 'TT'
matA = torch.rand(k, lda, dtype=dtype, device=device).t()
matB = torch.rand(ldb, k, dtype=dtype, device=device).t()
subA = matA[:k, :m]
subB = matB[:n, :k]
torch.mm(subA, subB)
# Cover GEMM+bias case. Also mostly a subset of the regular
# GEMM case but with a implicit transpose which makes code
# path slightly different.
# 'TN'
X = torch.rand(ldc, lda, dtype=dtype, device=device)
matA = torch.rand(ldc, ldb, dtype=dtype, device=device)
subX = X[:m, :k]
subA = matA[:n, :k]
bias = torch.rand(n, dtype=dtype, device=device)
torch.nn.functional.linear(subX, subA, bias)
# 'NT'
X = torch.rand(ldc, lda, dtype=dtype, device=device).t()
matA = torch.rand(ldc, ldb, dtype=dtype, device=device).t()
subX = X[:m, :k]
subA = matA[:n, :k]
bias = torch.rand(n, dtype=dtype, device=device)
torch.nn.functional.linear(subX, subA, bias)
# Strided batch GEMM.
# 'TN'
b = 3
matA = torch.rand(b, ldc, lda, dtype=dtype, device=device)
matB = torch.rand(b, ldc, ldb, dtype=dtype, device=device).transpose(1, 2)
subA = matA[:b, :m, :k]
subB = matB[:b, :k, :n]
torch.bmm(subA, subB)
# 'NN'
matA = torch.rand(b, lda, ldc, dtype=dtype, device=device)
matB = torch.rand(b, ldc, ldb, dtype=dtype, device=device)
subA = matA[:b, :m, :k]
subB = matB[:b, :k, :n]
torch.bmm(subA, subB)
# 'NT'
matA = torch.rand(b, ldc, lda, dtype=dtype, device=device).transpose(1, 2)
matB = torch.rand(b, ldc, ldb, dtype=dtype, device=device)
subA = matA[:b, :m, :k]
subB = matB[:b, :k, :n]
torch.bmm(subA, subB)
# 'TT'
matA = torch.rand(b, k, lda, dtype=dtype, device=device).transpose(1, 2)
matB = torch.rand(b, ldb, k, dtype=dtype, device=device).transpose(1, 2)
subA = matA[:b, :k, :m]
subB = matB[:b, :n, :k]
torch.bmm(subA, subB)
self.assertTrue(torch.cuda.tunable.is_enabled())
self.assertTrue(torch.cuda.tunable.tuning_is_enabled() is False)
untuned_filename = get_tunableop_untuned_filename()
# tuning the untuned GEMMs in file
torch.cuda.tunable.tuning_enable(True)
torch.cuda.tunable.record_untuned_enable(False)
# set these to single iterations to keep it short but still exercise the code
torch.cuda.tunable.set_max_tuning_duration(1)
torch.cuda.tunable.set_max_tuning_iterations(1)
ref_results = len(torch.cuda.tunable.get_results())
torch.cuda.tunable.tune_gemm_in_file(untuned_filename)
new_results = len(torch.cuda.tunable.get_results())
# This stores total number of cumulative results
total_num_results = new_results - ref_results
# There must be a new tuning results
self.assertEqual(total_num_results, 10)
results_filename = torch.cuda.tunable.get_filename()
self.assertTrue(os.path.exists(results_filename))
# Compare Param Signature of untuned and tuned results
ok = self._compare_untuned_tuned_entries()
self.assertTrue(ok)
@onlyCUDA
@skipCUDAIfNotRocm
@dtypes(torch.float32)
def test_ops_append_to_existing_file_tunableop(self, device, dtype):
"""If a TunableOp results file already exists (with matching Validator),
new results should be appended (not overwritten)."""
with self._tunableop_ctx():
torch.cuda.tunable.set_rotating_buffer_size(0)
# Seed the existing results file with Validator lines + 1 result line
results_filename = torch.cuda.tunable.get_filename()
validators = torch.cuda.tunable.get_validators() # Iterable[Tuple[str, str]]
seed_lines = []
# Each (k, v) becomes a "Validator" line
for k, v in validators:
seed_lines.append(f"Validator,{k},{v}")
# One arbitrary, plausible matmul result line
seed_lines.append(
"GemmAndBiasTunableOp_float_TN,tn_768_32_1024_ld_1024_1024_768,"
"Gemm_Hipblaslt_220580,0.0103395"
)
with open(results_filename, "w") as f:
f.write("\n".join(seed_lines) + "\n")
# Count initial (non-Validator) lines
with open(results_filename) as f:
initial_content = f.read()
initial_lines = [
l for l in initial_content.split("\n")
if l and not l.startswith("Validator")
]
initial_count = len(initial_lines)
self.assertGreater(initial_count, 0) # we seeded 1 result line
# Perform ONE simple matmul
A = torch.randn(27, 43, device=device, dtype=dtype)
B = torch.randn(43, 39, device=device, dtype=dtype)
_ = torch.matmul(A, B)
# Verify that new results were appended to the same file
with open(results_filename) as f:
final_content = f.read()
final_lines = [
l for l in final_content.split("\n")
if l and not l.startswith("Validator")
]
final_count = len(final_lines)
self.assertGreater(final_count, initial_count)
@onlyCUDA
@skipCUDAIfNotRocm
@dtypes(torch.float32)
def test_offline_tuning_append_to_existing_file_tunableop(self, device, dtype):
"""If an offline tuning untuned file already exists,
new untuned GEMMs should be appended (not overwritten).
"""
with self._tunableop_ctx():
torch.cuda.tunable.set_rotating_buffer_size(0)
# Enable offline tuning recording mode (record untuned, no tuning)
torch.cuda.tunable.tuning_enable(False)
torch.cuda.tunable.record_untuned_enable(True)
self.assertTrue(torch.cuda.tunable.record_untuned_is_enabled())
# Get the untuned file path
untuned_filename = get_tunableop_untuned_filename()
# Seed the existing untuned file with 1 entry
seed_lines = [
"GemmTunableOp_float_NT,nt_768_1024_512_ld_1024_1024_768"
]
with open(untuned_filename, "w") as f:
f.write("\n".join(seed_lines) + "\n")
# Count initial entries
with open(untuned_filename) as f:
initial_content = f.read()
initial_lines = [l.strip() for l in initial_content.split("\n") if l.strip()]
initial_count = len(initial_lines)
self.assertGreater(initial_count, 0) # we seeded 1 entry
# Perform a matmul with different dimensions
A = torch.randn(41, 59, device=device, dtype=dtype)
B = torch.randn(59, 31, device=device, dtype=dtype)
_ = torch.matmul(A, B)
# Verify that new untuned entries were appended to the same file
with open(untuned_filename) as f:
final_content = f.read()
final_lines = [l.strip() for l in final_content.split("\n") if l.strip()]
final_count = len(final_lines)
# The file should have more entries (appended), not the same or fewer (overwritten)
self.assertGreater(final_count, initial_count)
# Verify the seeded entry is still present (proving it wasn't overwritten)
self.assertIn(seed_lines[0], final_content)
@onlyCUDA
@skipCUDAIfNotRocm
@dtypes(torch.float32)
def test_matmul_empty_existing_file_tunableop(self, device, dtype):
""" Test that if an existing results file is empty/corrupted, then the default behaviour should hold """
with self._tunableop_ctx():
torch.cuda.tunable.set_rotating_buffer_size(0)
results_filename = torch.cuda.tunable.get_filename()
# Pre-create an empty results file
with open(results_filename, 'w') as f:
pass # Empty file
# Use unique random inputs for this test
A = torch.randn(37, 53, device=device, dtype=dtype)
B = torch.randn(53, 29, device=device, dtype=dtype)
# Direct matmul
C = torch.matmul(A, B)
with open(results_filename) as f:
content = f.read()
self.assertIn("Validator", content)
result_lines = [l for l in content.split('\n')
if l and not l.startswith('Validator')]
self.assertGreater(len(result_lines), 0)
@onlyCUDA
@skipCUDAIfNotRocm
@runOnRocmArch(MI300_ARCH)
@dtypes(torch.torch.float8_e4m3fnuz)
def test_rowwise_scaled_gemm_numerics_tunableop(self, device, dtype):
# Test Scaled GEMM rowwise numerics
# Compute rowwise scaled_gemm via non-TunableOp code path
# compare it with rowwise scaled_gemm via TunableOp Default
# code path.
n = m = k = 16
matA = torch.randn((m, k), dtype=torch.half, device=device).to(dtype)
matB = torch.randn((n, k), dtype=torch.half, device=device).to(dtype).t()
scaleA = torch.randn((matA.shape[0], 1), device=device)
scaleB = torch.randn((1, matB.shape[1]), device=device)
ref_scaled_mm = torch._scaled_mm(matA, matB, scale_a=scaleA, scale_b=scaleB, out_dtype=torch.bfloat16)
with self._tunableop_ctx():
# Deactivate Tuning so that rowwise scaledGEMM fallbacks to Default
# code path in TunableOp.
torch.cuda.tunable.tuning_enable(False)
tuned_default_scaled_mm = torch._scaled_mm(matA, matB, scale_a=scaleA, scale_b=scaleB, out_dtype=torch.bfloat16)
delta = tuned_default_scaled_mm - ref_scaled_mm
self.assertTrue(torch.all(delta == 0))
@onlyCUDA
@skipCUDAIfNotRocm
@dtypes(torch.float)
def test_call_count_tunableop(self, device, dtype):
# Test that after tuning a GEMM in TunableOp, we only call the GEMM kernel once
# per PyTorch API invocation.
# We use the torch profiler to get the call counts on the kernels
# Supported only for: MM, batch MM, and GEMM with bias (linear)
from torch.profiler import profile, ProfilerActivity
with self._tunableop_ctx():
# set these to single iterations to keep it short but still exercise the code
torch.cuda.tunable.set_max_tuning_iterations(1)
b = 2
M = 10
# MM
A = torch.rand(M, M, device=device)
C = torch.mm(A, A)
# Linear - GEMM BIAS
X = torch.rand(M, M, device='cuda')
bias = torch.rand(M, device='cuda')
Y = torch.nn.functional.linear(X, A, bias)
# BMM
batch_A = torch.rand((b, M, M), device='cuda')
batch_C = torch.bmm(batch_A, batch_A)
kernel_count = 0
with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
C = torch.mm(A, A)
Y = torch.nn.functional.linear(X, A, bias)
batch_C = torch.bmm(batch_A, batch_A)
# Check that after tuning, there was only one kernel
# launched per PyTorch API. The kernels have string
# that always starts with `Cijk*`
mm_key = 'Cijk'
events = prof.key_averages()
for evt in events:
if mm_key in evt.key:
self.assertEqual(evt.count, 1)
kernel_count = kernel_count + 1
# There must be exactly three kernels only
self.assertEqual(kernel_count, 3)
@onlyCUDA
@skipCUDAIfNotRocm
@dtypes(torch.float16)
def test_numerical_check_python_binding_tunableop(self, device, dtype):
with self._tunableop_ctx():
torch.cuda.tunable.enable(True)
torch.cuda.tunable.set_numerical_check_tolerances(True)
a = torch.randn(128, 128, device='cuda')
b = torch.randn(128, 128, device='cuda')
_ = a @ b
with self._tunableop_ctx():
torch.cuda.tunable.enable(True)
with self.assertRaisesRegex(RuntimeError, r"positive"):
torch.cuda.tunable.set_numerical_check_tolerances(True, -1e-5, 1e5)
with self.assertRaisesRegex(RuntimeError, r"positive"):
torch.cuda.tunable.set_numerical_check_tolerances(True, 1e-5, -1e5)
with self.assertRaisesRegex(RuntimeError, r"positive"):
torch.cuda.tunable.set_numerical_check_tolerances(True, -1e-5, -1e5)
    @onlyCUDA
    @skipCUDAIfNotRocm
    @dtypes(torch.float16, torch.float32)
    def test_numerical_check_accuracy_tunableop(self, device, dtype):
        """Tuned results under numerical checking must stay close to baseline.

        For a few odd-sized shapes, computes a matmul with TunableOp fully
        disabled, then again with TunableOp enabled and numerical-check
        tolerances set, and checks the two agree within (atol, rtol).
        """
        # NOTE(review): inputs are created with the default dtype on a
        # hard-coded 'cuda' device; the `dtype` parameter from @dtypes is
        # currently unused — confirm whether that is intentional.
        shapes = [(127, 193, 61), (251, 317, 73), (89, 149, 41)]
        atol, rtol = 1e-2, 1e-1
        for (m, k, n) in shapes:
            a = torch.randn(m, k, device='cuda')
            b = torch.randn(k, n, device='cuda')
            # Baseline: TunableOp and numerical checking both off.
            torch.cuda.tunable.enable(False)
            torch.cuda.tunable.set_numerical_check_tolerances(False)
            C_baseline = a @ b
            with self._tunableop_ctx():
                torch.cuda.tunable.enable(True)
                torch.cuda.tunable.set_numerical_check_tolerances(True, atol, rtol)
                C_numeric = a @ b
            self.assertTrue(torch.allclose(C_baseline, C_numeric, atol=atol, rtol=rtol))
@dtypes(torch.float, torch.complex64)
def test_matmul_out_kernel_errors_with_autograd(self, device, dtype):
a = torch.empty((256, 512), device=device, dtype=dtype, requires_grad=True).unsqueeze(0)
b = torch.empty((4, 128, 512), device=device, dtype=dtype, requires_grad=True).transpose(-1, -2)
c = torch.empty((256, 4, 128), device=device, dtype=dtype).movedim(1, 0)
torch.matmul(a.detach(), b.detach(), out=c)
with self.assertRaisesRegex(RuntimeError, "functions with out=... arguments don't support automatic differentiation"):
torch.matmul(a, b, out=c)
with torch.no_grad():
torch.matmul(a, b, out=c)
    @dtypes(torch.float, torch.complex64)
    def test_tensordot_out_kernel_errors_with_autograd(self, device, dtype):
        """tensordot with out= must reject an out tensor that requires grad.

        Also checks that resizing a wrong-shaped out tensor under no_grad
        emits exactly one warning.
        """
        a = torch.empty((4, 2), device=device, dtype=dtype, requires_grad=True)
        b = torch.empty((2, 4), device=device, dtype=dtype, requires_grad=True)
        c = torch.empty((2, 2), device=device, dtype=dtype, requires_grad=True)
        d = torch.empty((4, 4), device=device, dtype=dtype, requires_grad=False)
        err_msg = "the 'out' tensor was specified and requires gradients"
        # out requiring grad is an error while grad mode is enabled...
        with torch.set_grad_enabled(True), self.assertRaisesRegex(RuntimeError, err_msg):
            torch.tensordot(a, b, dims=([1], [0]), out=c)
        # ...but a non-differentiable out tensor is accepted.
        with torch.set_grad_enabled(True):
            torch.tensordot(a, b, dims=([1], [0]), out=d)
        with torch.set_grad_enabled(False), warnings.catch_warnings(record=True) as w:
            # Hack to avoid resize error for CUDA tensors as resize_cuda_ is different to resize_.
            c.requires_grad = False
            torch.tensordot(a, b, dims=([1], [0]), out=c)
            # The (2, 2) out is resized to (4, 4) -> exactly one resize warning.
            self.assertEqual(len(w), 1)
# 4GB should do, but we run tests in parallel in CI, so let's be generous
@onlyCUDA
@largeTensorTest('16GB', device='cuda')
def test_large_bmm_mm_backward(self, device):
A = torch.randn([1024, 2, 1024], device="cuda").mT.contiguous().mT
B = torch.randn([1024, 65536], device="cuda", requires_grad=True)
G = torch.randn([1024, 2, 65536], device="cuda")
# Should not create an intermediary tensor of size [1024, 1024, 65536] (256GB of memory) and OOM
(A @ B).backward(G)
# 4GB should do, but we run tests in parallel in CI, so let's be generous
@onlyCUDA
@largeTensorTest('16GB', device='cuda')
def test_large_bmm_backward(self, device):
A = torch.randn([1024, 2, 1024], device="cuda").mT.contiguous().mT
B = torch.randn([1, 1024, 65536], device="cuda", requires_grad=True)
G = torch.randn([1024, 2, 65536], device="cuda")
# Should not create an intermediary tensor of size [1024, 1024, 65536] (256GB of memory) and OOM
(A @ B).backward(G)
def test_linear_algebra_scalar_raises(self, device) -> None:
m = torch.randn(5, 5, device=device)
v = torch.randn(5, device=device)
s = torch.tensor(7, device=device)
self.assertRaises(RuntimeError, lambda: torch.mv(m, s))
self.assertRaises(RuntimeError, lambda: torch.addmv(v, m, s))
@dtypes(torch.float32, torch.complex64)
def test_cross(self, device, dtype):
x = torch.rand(100, 3, 100, dtype=dtype, device=device)
y = torch.rand(100, 3, 100, dtype=dtype, device=device)
res1 = torch.cross(x, y)
res2 = torch.tensor((), dtype=dtype, device=device)
torch.cross(x, y, out=res2)
self.assertEqual(res1, res2)
@dtypes(torch.float32, torch.complex64)
def test_linalg_cross(self, device, dtype):
x = torch.rand(100, 3, 100, dtype=dtype, device=device)
y = torch.rand(100, 3, 100, dtype=dtype, device=device)
res1 = torch.linalg.cross(x, y, dim=1)
res2 = torch.tensor((), dtype=dtype, device=device)
torch.linalg.cross(x, y, dim=1, out=res2)
self.assertEqual(res1, res2)
# test for broadcastable inputs
x = torch.rand(1, 3, 2, dtype=dtype, device=device)
y = torch.rand(4, 3, 1, dtype=dtype, device=device)
res1 = torch.linalg.cross(x, y, dim=1)
res2 = torch.tensor((), dtype=dtype, device=device)
torch.linalg.cross(x, y, dim=1, out=res2)
self.assertEqual(res1, res2)
@dtypes(torch.float32, torch.complex64)
def test_cross_with_and_without_dim(self, device, dtype):
x = torch.rand(100, 3, dtype=dtype, device=device)
y = torch.rand(100, 3, dtype=dtype, device=device)
res1 = torch.cross(x, y, dim=1)
res2 = torch.cross(x, y, dim=-1)
res3 = torch.cross(x, y)
self.assertEqual(res1, res2)
self.assertEqual(res1, res3)
@dtypes(torch.float32, torch.complex64)
def test_linalg_cross_with_and_without_dim(self, device, dtype):
x = torch.rand(100, 3, dtype=dtype, device=device)
y = torch.rand(100, 3, dtype=dtype, device=device)
res1 = torch.linalg.cross(x, y, dim=1)
res2 = torch.linalg.cross(x, y, dim=-1)
res3 = torch.linalg.cross(x, y)
self.assertEqual(res1, res2)
self.assertEqual(res1, res3)
def test_cross_error(self, device):
x = torch.randn(4, 3, device=device)
y = torch.randn(4, 3, device=device)
with self.assertRaisesRegex(RuntimeError, "input tensor and the written-to tensor refer to a single memory location"):
torch.cross(x, y, out=x)
with self.assertRaisesRegex(RuntimeError, "input tensor and the written-to tensor refer to a single memory location"):
torch.cross(y, x, out=x)
with self.assertRaisesRegex(RuntimeError, "input tensor and the written-to tensor refer to a single memory location"):
torch.linalg.cross(x, y, out=x)
with self.assertRaisesRegex(RuntimeError, "input tensor and the written-to tensor refer to a single memory location"):
torch.linalg.cross(y, x, out=x)
def test_renorm(self, device):
m1 = torch.randn(20, 20, device=device) # big enough to exercise vectorized path
res1 = torch.tensor((), device=device)
def renorm(matrix, value, dim, max_norm):
m1 = matrix.transpose(dim, 0).contiguous()
# collapse non-dim dimensions.
m2 = m1.clone().resize_(m1.size(0), int(math.floor(m1.nelement() / m1.size(0))))
norms = m2.norm(value, 1, True)
# clip
new_norms = norms.clone()
new_norms[torch.gt(norms, max_norm)] = max_norm
new_norms.div_(norms.add_(1e-7))
# renormalize
m1.mul_(new_norms.expand_as(m1))
return m1.transpose(dim, 0)
# note that the axis fed to torch.renorm is different (2~=1)
maxnorm = m1.norm(2, 1).mean()
m2 = renorm(m1, 2, 1, maxnorm)
m1.renorm_(2, 1, maxnorm)
self.assertEqual(m1, m2, atol=1e-5, rtol=0)
self.assertEqual(m1.norm(2, 0), m2.norm(2, 0), atol=1e-5, rtol=0)
m1 = torch.randn(3, 4, 5, device=device)
m2 = m1.transpose(1, 2).contiguous().clone().resize_(15, 4)
maxnorm = m2.norm(2, 0).mean()
m2 = renorm(m2, 2, 1, maxnorm)
m1.renorm_(2, 1, maxnorm)
m3 = m1.transpose(1, 2).contiguous().clone().resize_(15, 4)
self.assertEqual(m3, m2)
self.assertEqual(m3.norm(2, 0), m2.norm(2, 0))
    @skipCPUIfNoLapack
    @skipCUDAIfNoCusolver
    @dtypes(*floating_and_complex_types())
    def test_ormqr(self, device, dtype):
        """torch.ormqr must agree with explicit multiplication by Q from geqrf.

        For each left/right and transpose combination, the result is compared
        against Q (or Q^H) obtained from torch.linalg.qr in 'complete' mode.
        """
        def run_test(batch, m, n, fortran_contiguous):
            # Householder reflectors and tau as produced by LAPACK geqrf.
            A = make_tensor((*batch, m, n), dtype=dtype, device=device)
            reflectors, tau = torch.geqrf(A)
            if not fortran_contiguous:
                # geqrf output is column-major; also exercise the
                # row-major (C-contiguous) path.
                self.assertTrue(reflectors.mT.is_contiguous())
                reflectors = reflectors.contiguous()
            # Q is of size m x m
            Q, _ = torch.linalg.qr(A, mode='complete')
            C_right = make_tensor((*batch, m, n), dtype=dtype, device=device)
            C_left = make_tensor((*batch, n, m), dtype=dtype, device=device)
            expected = Q @ C_right
            actual = torch.ormqr(reflectors, tau, C_right, left=True, transpose=False)
            self.assertEqual(expected, actual)
            expected = C_left @ Q
            actual = torch.ormqr(reflectors, tau, C_left, left=False, transpose=False)
            self.assertEqual(expected, actual)
            expected = Q.mH @ C_right
            actual = torch.ormqr(reflectors, tau, C_right, left=True, transpose=True)
            self.assertEqual(expected, actual)
            expected = C_left @ Q.mH
            actual = torch.ormqr(reflectors, tau, C_left, left=False, transpose=True)
            self.assertEqual(expected, actual)
            # if tau is all zeros then the implicit matrix Q is the identity matrix
            # so the actual result should be C_right in this case
            zero_tau = torch.zeros_like(tau)
            actual = torch.ormqr(reflectors, zero_tau, C_right, left=True, transpose=False)
            self.assertEqual(C_right, actual)
        # Cover empty, single, and batched cases with degenerate sizes.
        batches = [(), (0, ), (2, ), (2, 1)]
        ns = [5, 2, 0]
        for batch, (m, n), fortran_contiguous in product(batches, product(ns, ns), [True, False]):
            run_test(batch, m, n, fortran_contiguous)
@skipCPUIfNoLapack
@skipCUDAIfNoCusolver
@dtypes(*floating_and_complex_types())
def test_ormqr_errors_and_warnings(self, device, dtype):
test_cases = [
# input1 size, input2 size, input3 size, left, error regex
((10,), (2,), (2,), True, r"input must have at least 2 dimensions"),
((2, 2), (2,), (2,), True, r"other must have at least 2 dimensions"),
((6, 6), (5,), (5, 5), True, r"other.shape\[-2\] must be equal to input.shape\[-2\]"),
((1, 2, 2), (2, 2), (1, 2, 2), True, r"batch dimensions of tau to be equal to input.shape\[:-2\]"),
((1, 2, 2), (1, 2), (2, 2, 2), True, r"batch dimensions of other to be equal to input.shape\[:-2\]"),
((2, 4, 3), (2, 2), (2, 3, 10), True, r"torch.ormqr: other.shape\[-2\] must be equal to input.shape\[-2\]"),
((2, 4, 3), (2, 2), (2, 3, 10), False, r"torch.ormqr: other.shape\[-1\] must be equal to input.shape\[-2\]")
]
for a_size, tau_size, c_size, left, error_regex in test_cases:
a = make_tensor(a_size, dtype=dtype, device=device)
tau = make_tensor(tau_size, dtype=dtype, device=device)
c = make_tensor(c_size, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, error_regex):
torch.ormqr(a, tau, c, left)
def test_blas_empty(self, device):
def fn(torchfn, *args, test_out=False, **kwargs):
def call_torch_fn(*args, **kwargs):
return torchfn(*tuple(torch.randn(shape, device=device) if isinstance(shape, tuple) else shape
for shape in args), **kwargs)
result = call_torch_fn(*args, **kwargs)
if not test_out:
return result
else:
out = torch.full_like(result, math.nan)
out1 = call_torch_fn(*args, **kwargs, out=out)
return out
# mm, addmm
self.assertEqual((0, 0), fn(torch.mm, (0, 0), (0, 0)).shape)
self.assertEqual((0, 5), fn(torch.mm, (0, 0), (0, 5)).shape)
self.assertEqual((5, 0), fn(torch.mm, (5, 0), (0, 0)).shape)
self.assertEqual((3, 0), fn(torch.mm, (3, 2), (2, 0)).shape)
self.assertEqual(torch.zeros((5, 6), device=device), fn(torch.mm, (5, 0), (0, 6)))
self.assertEqual(torch.zeros((5, 6), device=device), fn(torch.mm, (5, 0), (0, 6), test_out=True))
self.assertEqual((0, 0), fn(torch.addmm, (0, 0), (0, 0), (0, 0)).shape)
self.assertEqual((0, 1), fn(torch.addmm, (1, ), (0, 17), (17, 1)).shape)
t = torch.randn((5, 6), device=device)
self.assertEqual(t, fn(torch.addmm, t, (5, 0), (0, 6)))
self.assertEqual(t, fn(torch.addmm, t, (5, 0), (0, 6), test_out=True))
# mv, addmv
self.assertEqual((0,), fn(torch.mv, (0, 0), (0,)).shape)
self.assertEqual((0,), fn(torch.mv, (0, 2), (2,)).shape)
self.assertEqual(torch.zeros((3,), device=device), fn(torch.mv, (3, 0), (0,)))
self.assertEqual(torch.zeros((3,), device=device), fn(torch.mv, (3, 0), (0,), test_out=True))
self.assertEqual((0,), fn(torch.addmv, (0,), (0, 0), (0,)).shape)
t = torch.randn((3,), device=device)
self.assertEqual(t, fn(torch.addmv, t, (3, 0), (0,)))
self.assertEqual(t, fn(torch.addmv, t, (3, 0), (0,), test_out=True))
# bmm, baddbmm
self.assertEqual((0, 0, 0), fn(torch.bmm, (0, 0, 0), (0, 0, 0)).shape)
self.assertEqual((3, 0, 5), fn(torch.bmm, (3, 0, 0), (3, 0, 5)).shape)
self.assertEqual((0, 5, 6), fn(torch.bmm, (0, 5, 0), (0, 0, 6)).shape)
self.assertEqual(torch.zeros((3, 5, 6), device=device), fn(torch.bmm, (3, 5, 0), (3, 0, 6)))
self.assertEqual(torch.zeros((3, 5, 6), device=device), fn(torch.bmm, (3, 5, 0), (3, 0, 6), test_out=True))
self.assertEqual((0, 0, 0), fn(torch.baddbmm, (0, 0, 0), (0, 0, 0), (0, 0, 0)).shape)
self.assertEqual((3, 0, 5), fn(torch.baddbmm, (3, 0, 5), (3, 0, 0), (3, 0, 5)).shape)
self.assertEqual((0, 5, 6), fn(torch.baddbmm, (0, 5, 6), (0, 5, 0), (0, 0, 6)).shape)
self.assertEqual((3, 5, 6), fn(torch.baddbmm, (3, 5, 6), (3, 5, 0), (3, 0, 6)).shape)
c = torch.arange(30, dtype=torch.float32, device=device).reshape(3, 2, 5)
self.assertEqual(-2 * c, fn(torch.baddbmm, c, (3, 2, 0), (3, 0, 5), beta=-2)) # Issue #33467
self.assertEqual(-2 * c, fn(torch.baddbmm, c, (3, 2, 0), (3, 0, 5), beta=-2, test_out=True)) # Issue #33467
# addbmm
self.assertEqual((0, 0), fn(torch.addbmm, (0, 0), (0, 0, 0), (0, 0, 0)).shape)
self.assertEqual((0, 5), fn(torch.addbmm, (0, 5), (3, 0, 0), (3, 0, 5)).shape)
t = torch.randn((5, 6), device=device)
self.assertEqual(t, fn(torch.addbmm, t, (0, 5, 0), (0, 0, 6)))
self.assertEqual(t, fn(torch.addbmm, t, (0, 5, 0), (0, 0, 6), test_out=True))
# matmul
self.assertEqual(torch.tensor(0., device=device), fn(torch.matmul, (0,), (0,)))
self.assertEqual(torch.tensor(0., device=device), fn(torch.matmul, (0,), (0,), test_out=True))
self.assertEqual((0, 0), fn(torch.matmul, (0, 0), (0, 0)).shape)
self.assertEqual((0, 0, 0), fn(torch.matmul, (0, 0, 0), (0, 0, 0)).shape)
self.assertEqual((5, 0, 0), fn(torch.matmul, (5, 0, 0), (5, 0, 0)).shape)
self.assertEqual(torch.zeros((5, 3, 4), device=device), fn(torch.matmul, (5, 3, 0), (5, 0, 4)))
self.assertEqual(torch.zeros((5, 3, 4), device=device), fn(torch.matmul, (5, 3, 0), (5, 0, 4), test_out=True))
# dot
self.assertEqual(torch.tensor(0., device=device), fn(torch.dot, (0,), (0,)))
self.assertEqual(torch.tensor(0., device=device), fn(torch.dot, (0,), (0,), test_out=True))
    @precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
                        torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
    @dtypesIfCUDA(*floating_and_complex_types_and(
        torch.half,
        *[torch.bfloat16] if SM53OrLater else []
    ))
    @dtypes(*all_types_and_complex_and(torch.bfloat16))
    def test_corner_cases_of_cublasltmatmul(self, device, dtype):
        """Smoke-test matmul layouts that are corner cases for cublasLt.

        Only checks that the calls complete without error; no value checks.
        Exercises operands whose leading dimension far exceeds the logical
        row count (via slicing a larger buffer) and a dimension > 65535.
        """
        # common case
        M = torch.randn(128, device=device).to(dtype)
        m1 = torch.randn(2048, 2400, device=device).to(dtype)
        m2 = torch.randn(128, 2400, device=device).to(dtype)
        torch.nn.functional.linear(m1, m2, M)
        # Ntrans_B has ld >> rows
        m1 = torch.rand([128, 2400]).to(dtype).to(device).t()
        m2 = torch.rand([2048, 25272]).to(dtype).to(device).t()[21940:24340]
        M = torch.rand([128]).to(dtype).to(device)
        torch.addmm(M, m2.t(), m1)
        # trans_A has ld >> rows
        m1 = torch.rand([128, 25272]).to(dtype).to(device)[:, 21940:24340].t()
        m2 = torch.randn(2048, 2400, device=device).to(dtype)
        M = torch.rand([128]).to(dtype).to(device)
        torch.addmm(M, m2, m1)
        # large tensor dim > 65535
        M = torch.randn(16, device=device).to(dtype)
        m1 = torch.randn(32, 131071 , device=device).to(dtype)
        m2 = torch.randn(16, 131071, device=device).to(dtype)
        torch.nn.functional.linear(m1, m2, M)
    @dtypesIfCUDA(*floating_and_complex_types_and(
        torch.half,
        *[torch.bfloat16] if SM53OrLater else []
    ))
    @dtypes(*all_types_and_complex_and(torch.bfloat16, torch.half))
    def test_blas_alpha_beta_empty(self, device, dtype):
        """addmv/addmm with a zero-sized reduction dim must return beta * input.

        With mat/vec having an empty inner dimension, the alpha-scaled product
        term is empty, so only the beta-scaled input remains — checked both
        with and without an out= argument.
        """
        # This test is disabled on CUDA 9 due to:
        # See: https://github.com/pytorch/pytorch/issues/31006
        if dtype is torch.bfloat16 and self.device_type == 'xla':
            # TODO (@zasdfgbnm): this causes the following error on test
            # TestTorchDeviceTypeXLA.test_blas_alpha_beta_empty_xla_bfloat16:
            #
            # RuntimeError: _th_equal not supported on CPUType for BFloat16
            return
        # ensure beta is respected
        value = 11
        input = torch.full((2,), value, dtype=dtype, device=device)
        mat = torch.ones((2, 0), dtype=dtype, device=device)
        vec = torch.ones((0,), dtype=dtype, device=device)
        out = torch.empty((2,), dtype=dtype, device=device)
        # Complex alpha/beta exercise the complex scaling path.
        if dtype.is_complex:
            alpha = 6 + 7j
            beta = 3 + 4j
        else:
            alpha = 6
            beta = 3
        self.assertEqual(torch.full((2,), beta * value, dtype=dtype, device=device),
                         torch.addmv(input=input, mat=mat, vec=vec, alpha=alpha, beta=beta))
        self.assertEqual(torch.full((2,), beta * value, dtype=dtype, device=device),
                         torch.addmv(input=input, mat=mat, vec=vec, alpha=alpha, beta=beta, out=out))
        # torch.addmm
        # NOTE: `mat` (2, 0) from the addmv section above is reused as mat1.
        input = torch.full((2, 3), value, dtype=dtype, device=device)
        mat2 = torch.ones((0, 3), dtype=dtype, device=device)
        out = torch.empty((2, 3), dtype=dtype, device=device)
        self.assertEqual(torch.full((2, 3), beta * value, dtype=dtype, device=device),
                         torch.addmm(input=input, mat1=mat, mat2=mat2, alpha=alpha, beta=beta))
        self.assertEqual(torch.full((2, 3), beta * value, dtype=dtype, device=device),
                         torch.addmm(input=input, mat1=mat, mat2=mat2, alpha=alpha, beta=beta, out=out))
@dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
def test_blas_nan_out(self, device, dtype):
# These functions should work correctly with NaN filled outputs,
# but need special handling, see [NOTE: cpu_zero]
b = 3
n = 5
m = 7
p = 11
# torch.mv
nm = torch.randn((m, n), device=device).t()
_m = torch.randn((), device=device).expand(m)
_m_out = torch.full((m,), float('nan'), device=device)
self.assertEqual(torch.mv(nm, _m), torch.mv(nm, _m, out=_m_out))
self.assertEqual(0, torch.isnan(torch.mv(nm, _m)).sum())
# torch.mm
mp = torch.randn((p, m), device=device).t()
np_out = torch.full((n, p), float('nan'), device=device)
self.assertEqual(torch.mm(nm, mp), torch.mm(nm, mp, out=np_out))
# torch.bmm
bnm = torch.randn((b, m, n), device=device).transpose(1, 2)
bmp = torch.randn((b, p, m), device=device).transpose(1, 2)
bnp_out = torch.full((b, n, p), float('nan'), device=device)
self.assertEqual(torch.bmm(bnm, bmp), torch.bmm(bnm, bmp, out=bnp_out))
@onlyCPU # not supported by CUBLAS
def test_blas_mv_large_input(self, device):
# This would previously fail if the allocated output had NaNs, see:
# https://github.com/pytorch/pytorch/issues/31663 and [NOTE: cpu_zero]
n = 3000
m = 200
nm = torch.randn((m, n), device=device).t()
_m = torch.randn((), device=device).expand(m)
_m_out = torch.full((m,), 0., device=device)
self.assertEqual(torch.mv(nm, _m), torch.mv(nm, _m, out=_m_out))
@onlyCPU
def test_renorm_ps(self, device):
# full reduction
x = torch.randn(5, 5)
xn = x.numpy()
for p in [1, 2, 3, 4, inf]:
res = x.renorm(p, 1, 1)
expected = x / x.norm(p, 0, keepdim=True).clamp(min=1)
self.assertEqual(res, expected, msg=f"renorm failed for {p}-norm")
    @skipCPUIfNoLapack
    @skipCUDAIfNoCusolver
    @dtypes(*floating_and_complex_types())
    def test_householder_product(self, device, dtype):
        """torch.linalg.householder_product must reconstruct Q from reflectors.

        Reflectors/tau come from numpy's raw-mode QR (see the inner helper's
        note about complex input) and the product is compared against the
        Q factor from torch.linalg.qr.
        """
        def generate_reflectors_and_tau(A):
            """
            This function uses numpy.linalg.qr with mode "raw" to extract output of LAPACK's geqrf.
            There is torch.geqrf function but it doesn't work with complex-valued input.
            """
            if A.numel() > 0:
                A_cpu = A.cpu()
                # Flatten all batch dims so each matrix is processed in a loop.
                flattened_batch_shape = [-1, *A_cpu.shape[-2:]]
                reflectors = torch.empty_like(A_cpu).view(*flattened_batch_shape)
                tau_shape = [*A_cpu.shape[:-2], A_cpu.shape[-1]]
                tau = torch.empty(tau_shape, dtype=dtype).view(-1, A_cpu.shape[-1])
                for A_i, reflectors_i, tau_i in zip(A_cpu.contiguous().view(*flattened_batch_shape), reflectors, tau):
                    reflectors_tmp, tau_i[:] = (
                        torch.from_numpy(x) if isinstance(x, np.ndarray) else x for x in np.linalg.qr(A_i, mode='raw')
                    )
                    # np.linalg.qr(mode='raw') returns the reflectors transposed.
                    reflectors_i[:] = reflectors_tmp.T
                reflectors = reflectors.view(*A_cpu.shape)
                tau = tau.view(tau_shape)
                return reflectors.to(A.device), tau.to(A.device)
            # Empty input: return empty (uninitialized) tensors of the right shape.
            reflectors = torch.empty_like(A)
            tau = torch.empty(*A.shape[:-2], A.shape[-1], dtype=dtype, device=device)
            return reflectors, tau
        def run_test(shape):
            A = torch.randn(*shape, dtype=dtype, device=device)
            reflectors, tau = generate_reflectors_and_tau(A)
            expected, _ = torch.linalg.qr(A)
            actual = torch.linalg.householder_product(reflectors, tau)
            # torch.linalg.qr does not work correctly for zero batch dimension tensors
            # see https://github.com/pytorch/pytorch/issues/50576
            if (A.numel() > 0):
                self.assertEqual(expected, actual)
            else:
                self.assertTrue(actual.shape == shape)
            # if tau is empty and A is not the result should be a matrix with ones on the diagonal
            if (A.numel() > 0):
                tau_empty = torch.empty(*shape[:-2], 0, dtype=dtype, device=device)
                identity_mat = torch.zeros_like(reflectors)
                identity_mat.diagonal(dim1=-1, dim2=-2)[:] = 1
                actual = torch.linalg.householder_product(reflectors, tau_empty)
                self.assertEqual(actual, identity_mat)
            # The out= variant must match the functional result.
            out = torch.empty_like(A)
            ans = torch.linalg.householder_product(reflectors, tau, out=out)
            self.assertEqual(ans, out)
            if (A.numel() > 0):
                self.assertEqual(expected, out)
        shapes = [(0, 0), (5, 0),  # Empty matrix
                  (5, 5), (5, 3),  # Single matrix
                  (0, 0, 0), (0, 5, 5), (0, 5, 3),  # Zero batch dimension tensors
                  (2, 5, 5), (2, 5, 3),  # 3-dim tensors
                  (2, 1, 5, 5), (2, 1, 5, 3)]  # 4-dim tensors
        for shape in shapes:
            run_test(shape)
@skipCPUIfNoLapack
@skipCUDAIfNoCusolver
def test_householder_product_errors_and_warnings(self, device):
test_cases = [
# input1 size, input2 size, error regex
((10,), (2,), r"input must have at least 2 dimensions"),
((10, 6), (20,), r"input.shape\[-1\] must be greater than or equal to tau.shape\[-1\]"),
((6, 10), (5,), r"input.shape\[-2\] must be greater than or equal to input.shape\[-1\]"),
]
for a_size, tau_size, error_regex in test_cases:
a = torch.rand(*a_size, device=device)
tau = torch.rand(*tau_size, device=device)
with self.assertRaisesRegex(RuntimeError, error_regex):
torch.linalg.householder_product(a, tau)
# if out tensor with wrong shape is passed a warning is given
reflectors = torch.randn(3, 3, device=device)
tau = torch.randn(3, device=device)
out = torch.empty(2, 3, device=device)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.linalg.householder_product(reflectors, tau, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# dtypes should be safely castable
out = torch.empty_like(reflectors).to(torch.int)
with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
torch.linalg.householder_product(reflectors, tau, out=out)
with self.assertRaisesRegex(RuntimeError, "tau dtype Int does not match input dtype"):
torch.linalg.householder_product(reflectors, tau.to(torch.int))
if torch.cuda.is_available():
# device of out and input should match
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
out = torch.empty_like(reflectors).to(wrong_device)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.linalg.householder_product(reflectors, tau, out=out)
# device of tau and input should match
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
tau = tau.to(wrong_device)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.linalg.householder_product(reflectors, tau)
    @precisionOverride({torch.float32: 1e-2, torch.complex64: 1e-2})
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipIfTorchDynamo("Runtime error with torch._C._linalg.linalg_lu_factor")
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_linalg_lu_family(self, device, dtype):
        """Cross-check the LU family of ops against each other:
        torch.lu, torch.linalg.lu_factor, torch.linalg.lu_factor_ex,
        torch.lu_unpack, torch.linalg.lu, torch.linalg.lu_solve and
        torch.linalg.solve — over square/rectangular, batched/unbatched,
        singular/full-rank and empty inputs.
        """
        # make_arg_full yields full-rank matrices; make_arg arbitrary
        # (possibly singular) ones.
        make_arg_full = partial(make_fullrank_matrices_with_distinct_singular_values, device=device, dtype=dtype)
        make_arg = partial(make_tensor, device=device, dtype=dtype)
        def run_test(A, pivot, singular, fn):
            # Factor A with `fn`, reconstruct it from the factors, and for
            # full-rank square A also validate lu_solve / solve round-trips.
            k = min(A.shape[-2:])
            batch = A.shape[:-2]
            # Only torch.linalg.lu_factor reports failures via exceptions.
            check_errors = (fn == torch.linalg.lu_factor)
            if singular and check_errors:
                # It may or may not throw as the LU decomposition without pivoting
                # may still succeed for singular matrices
                try:
                    LU, pivots = fn(A, pivot=pivot)
                except RuntimeError:
                    return
            else:
                LU, pivots = fn(A, pivot=pivot)[:2]
            self.assertEqual(LU.size(), A.shape)
            self.assertEqual(pivots.size(), batch + (k,))
            if not pivot:
                # Without pivoting the pivot vector is the identity
                # permutation in the 1-based LAPACK convention.
                self.assertEqual(pivots, torch.arange(1, 1 + k, device=device, dtype=torch.int32).expand(batch + (k, )))
            P, L, U = torch.lu_unpack(LU, pivots, unpack_pivots=pivot)
            self.assertEqual(P @ L @ U if pivot else L @ U, A)
            # torch.linalg.lu must agree with factor + unpack.
            PLU = torch.linalg.lu(A, pivot=pivot)
            self.assertEqual(P, PLU.P)
            self.assertEqual(L, PLU.L)
            self.assertEqual(U, PLU.U)
            if not singular and A.size(-2) == A.size(-1):
                nrhs = ((), (1,), (3,))
                for left, rhs in product((True, False), nrhs):
                    # Vector case when left = False is not allowed
                    if not left and rhs == ():
                        continue
                    if left:
                        shape_B = A.shape[:-1] + rhs
                    else:
                        shape_B = A.shape[:-2] + rhs + A.shape[-1:]
                    B = make_arg(shape_B)
                    # Test linalg.lu_solve. It does not support vectors as rhs
                    # See https://github.com/pytorch/pytorch/pull/74045#issuecomment-1112304913
                    if rhs != ():
                        for adjoint in (True, False):
                            X = torch.linalg.lu_solve(LU, pivots, B, left=left, adjoint=adjoint)
                            A_adj = A.mH if adjoint else A
                            if left:
                                self.assertEqual(B, A_adj @ X)
                            else:
                                self.assertEqual(B, X @ A_adj)
                    # Test linalg.solve
                    X = torch.linalg.solve(A, B, left=left)
                    X_ = X.unsqueeze(-1) if rhs == () else X
                    B_ = B.unsqueeze(-1) if rhs == () else B
                    if left:
                        self.assertEqual(B_, A @ X_)
                    else:
                        self.assertEqual(B_, X_ @ A)
        sizes = ((3, 3), (5, 5), (4, 2), (3, 4), (0, 0), (0, 1), (1, 0))
        batches = ((0,), (), (1,), (2,), (3,), (1, 0), (3, 5))
        # Non pivoting just implemented for CUDA
        pivots = (True, False) if self.device_type == "cuda" else (True,)
        fns = (partial(torch.lu, get_infos=True), torch.linalg.lu_factor, torch.linalg.lu_factor_ex)
        for ms, batch, pivot, singular, fn in itertools.product(sizes, batches, pivots, (True, False), fns):
            shape = batch + ms
            A = make_arg(shape) if singular else make_arg_full(*shape)
            # Just do one of them on singular matrices
            if A.numel() == 0 and not singular:
                continue
            run_test(A, pivot, singular, fn)
            # Reproducer of a magma bug,
            # see https://bitbucket.org/icl/magma/issues/13/getrf_batched-kernel-produces-nans-on
            # This is also a bug in cuSOLVER < 11.3
            if (dtype == torch.double
                    and singular):
                A = torch.ones(batch + ms, dtype=dtype, device=device)
                run_test(A, pivot, singular, fn)
        # Info should be positive for rank deficient matrices
        A = torch.ones(5, 3, 3, device=device)
        self.assertTrue((torch.linalg.lu_factor_ex(A, pivot=True).info >= 0).all())
        if self.device_type == 'cpu':
            # Error checking, no pivoting variant on CPU
            fns = [torch.lu, torch.linalg.lu_factor, torch.linalg.lu_factor_ex, torch.linalg.lu]
            for f in fns:
                with self.assertRaisesRegex(RuntimeError, 'LU without pivoting is not implemented on the CPU'):
                    f(torch.empty(1, 2, 2), pivot=False)
    @precisionOverride({torch.float32: 1e-2, torch.complex64: 1e-2})
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @setLinalgBackendsToDefaultFinally
    @dtypes(*floating_and_complex_types())
    def test_linalg_lu_solve(self, device, dtype):
        """Validate torch.linalg.lu_solve for every left/adjoint combination
        across all available linalg backends (default, plus magma/cusolver on
        CUDA when present).  @setLinalgBackendsToDefaultFinally restores the
        preferred backend after the test.
        """
        make_arg = partial(make_tensor, dtype=dtype, device=device)
        backends = ["default"]
        if torch.device(device).type == 'cuda':
            if torch.cuda.has_magma:
                backends.append("magma")
            if has_cusolver():
                backends.append("cusolver")
        def gen_matrices():
            # Yield (A, B) pairs: square (possibly batched, possibly empty)
            # systems, plus larger shapes chosen to hit all dispatch paths.
            rhs = 3
            ns = (5, 2, 0)
            batches = ((), (0,), (1,), (2,), (2, 1), (0, 2))
            for batch, n in product(batches, ns):
                yield make_arg(batch + (n, n)), make_arg(batch + (n, rhs))
            # Shapes to exercise all the paths
            shapes = ((1, 64), (2, 128), (1025, 2))
            for b, n in shapes:
                yield make_arg((b, n, n)), make_arg((b, n, rhs))
        for A, B in gen_matrices():
            LU, pivots = torch.linalg.lu_factor(A)
            for backend in backends:
                torch.backends.cuda.preferred_linalg_library(backend)
                for left, adjoint in product((True, False), repeat=2):
                    # Right-hand solves need the trailing dims of B swapped.
                    B_left = B if left else B.mT
                    X = torch.linalg.lu_solve(LU, pivots, B_left, left=left, adjoint=adjoint)
                    A_adj = A.mH if adjoint else A
                    if left:
                        self.assertEqual(B_left, A_adj @ X)
                    else:
                        self.assertEqual(B_left, X @ A_adj)
@onlyCPU
@dtypes(*floating_and_complex_types())
def test_linalg_lu_cpu_errors(self, device, dtype):
# Square tests
sample = torch.randn(3, 2, 2, device=device, dtype=dtype)
B = torch.randn(3, 2, 2, device=device, dtype=dtype)
LU, pivots = torch.linalg.lu_factor(sample)
# This should run without issues
torch.linalg.lu_solve(LU, pivots, B, adjoint=True)
torch.lu_unpack(LU, pivots)
pivots[0] = 0
with self.assertRaisesRegex(RuntimeError, r"greater or equal to 1"):
torch.linalg.lu_solve(LU, pivots, B, adjoint=True)
with self.assertRaisesRegex(RuntimeError, r"between 1 and LU.size\(-2\)."):
torch.lu_unpack(LU, pivots)
pivots[0] = 3
with self.assertRaisesRegex(RuntimeError, r"smaller or equal to LU.size\(-2\)"):
torch.linalg.lu_solve(LU, pivots, B, adjoint=True)
with self.assertRaisesRegex(RuntimeError, r"between 1 and LU.size\(-2\)."):
torch.lu_unpack(LU, pivots)
# Rectangular tests
sample = torch.randn(3, 4, 2, device=device, dtype=dtype)
B = torch.randn(3, 4, 2, device=device, dtype=dtype)
LU, pivots = torch.linalg.lu_factor(sample)
# This should run without issues
torch.lu_unpack(LU, pivots)
pivots[0] = 0
with self.assertRaisesRegex(RuntimeError, r"between 1 and LU.size\(-2\)."):
torch.lu_unpack(LU, pivots)
pivots[0] = 5
with self.assertRaisesRegex(RuntimeError, r"between 1 and LU.size\(-2\)."):
torch.lu_unpack(LU, pivots)
# Rectangular tests
sample = torch.randn(2, 3, 5, device=device, dtype=dtype)
B = torch.randn(2, 3, 5, device=device, dtype=dtype)
LU, pivots = torch.linalg.lu_factor(sample)
# This should run without issues
torch.lu_unpack(LU, pivots)
pivots[0] = 0
with self.assertRaisesRegex(RuntimeError, r"between 1 and LU.size\(-2\)."):
torch.lu_unpack(LU, pivots)
pivots[0] = 4
with self.assertRaisesRegex(RuntimeError, r"between 1 and LU.size\(-2\)."):
torch.lu_unpack(LU, pivots)
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.double)
def test_lu_unpack_check_input(self, device, dtype):
x = torch.rand(5, 5, 5, device=device, dtype=dtype)
lu_data, lu_pivots = torch.linalg.lu_factor(x)
with self.assertRaisesRegex(RuntimeError, "torch.int32 dtype"):
torch.lu_unpack(lu_data, lu_pivots.long())
# check that once flags are unset, Nones are returned
p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_data=False)
self.assertTrue(l.numel() == 0 and u.numel() == 0)
p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_pivots=False)
self.assertTrue(p.numel() == 0)
p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_data=False, unpack_pivots=False)
self.assertTrue(p.numel() == 0 and l.numel() == 0 and u.numel() == 0)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.double)
    def test_lobpcg_basic(self, device, dtype):
        # Run the shared LOBPCG battery with the 'basic' iteration method.
        self._test_lobpcg_method(device, dtype, 'basic')
@skipCUDAIfNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_lobpcg_ortho(self, device, dtype):
if torch.version.hip:
torch.backends.cuda.preferred_linalg_library('magma')
self._test_lobpcg_method(device, dtype, 'ortho')
if torch.version.hip:
torch.backends.cuda.preferred_linalg_library('default')
    def _test_lobpcg_method(self, device, dtype, method):
        """Shared LOBPCG test battery for the 'basic' and 'ortho' methods.

        Covers a hard-coded regression case for issue #101075, dense
        (optionally batched) symmetric PD inputs, and sparse PD inputs, for
        both classical (B = I) and generalized eigenvalue problems.
        """
        from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
        from torch._linalg_utils import matmul, qform
        from torch._lobpcg import lobpcg
        def test_tracker(worker):
            # Per-iteration callback: checks residuals stay non-negative and,
            # once `k` eigenpairs have converged, validates convergence,
            # B-orthogonality and the block eigen-equation.
            k = worker.iparams['k']
            nc = worker.ivars['converged_count']
            # Regression test for PR #152789 (fixes issue #101075)
            # Ensure rerr is non-negative at each iteration
            rerr = worker.tvars['rerr']
            self.assertGreaterEqual(rerr.min(), 0.)
            if k <= nc:
                tol = worker.fparams['tol']
                rerr = worker.tvars['rerr']
                X = worker.X
                E = worker.E
                B = worker.B
                A = worker.A
                dtype = X.dtype
                device = X.device
                # Check convergence
                self.assertLessEqual(rerr[:k].max(), tol)
                # Check B-orthogonality
                I = torch.eye(k, k, dtype=dtype, device=device)
                self.assertEqual(qform(B, X[:, :k]), I)
                # Check block equation
                self.assertEqual(qform(A, X[:, :k]) / E[:k], I, atol=0.2, rtol=0)
        orig_lobpcg = lobpcg
        # Shadow `lobpcg` with a wrapper that always installs the tracker and
        # the method/iteration/tolerance settings under test.
        def lobpcg(*args, **kwargs):
            kwargs['tracker'] = test_tracker
            kwargs['niter'] = 1000
            kwargs['method'] = method
            kwargs['tol'] = 1e-8
            return orig_lobpcg(*args, **kwargs)
        prec = 5e-4
        mm = torch.matmul
        # Regression test for PR #152789 (fixes issue #101075)
        # https://github.com/pytorch/pytorch/issues/101075#issuecomment-1548483685
        # Demonstrates the original bug: negative residuals in the 2nd iteration
        A = torch.Tensor([
            [-0.56142016, 0.29639858, -0.16059532],
            [0.29639858, -0.69093563, 0.26248195],
            [-0.16059532, 0.26248195, -0.40236716]
        ])
        B = torch.Tensor([
            [1.89193057, -0.08174309, -0.3557846],
            [-0.08174309, 1.64589643, -0.46436347],
            [-0.3557846, -0.46436347, 1.67404367]
        ])
        X = torch.Tensor([[0.61591334, 0.63823109, 0.46185694]]).T
        E, V = lobpcg(A=A, B=B, X=X, k=1)
        self.assertEqual(matmul(A, V), mm(matmul(B, V), E.diag_embed()), atol=prec, rtol=0)
        # check dense input
        for batches in [(), (2,), (2, 3)]:
            for m, n, k in [
                    (9, 3, 1),
                    (9, 3, 2),
                    (9, 2, 2),
                    (100, 15, 5),
            ]:
                # skip tests that are known to fail with the basic
                # LOBPCG method due to calling cholesky on singular
                # input
                if method == 'basic' and (m, n, k) in [(9, 2, 2), (100, 15, 5)]:
                    continue
                A = random_symmetric_pd_matrix(m, *batches, device=device, dtype=dtype)
                B = random_symmetric_pd_matrix(m, *batches, device=device, dtype=dtype)
                # classical eigenvalue problem, smallest eigenvalues
                E, V = lobpcg(A, k=k, n=n, largest=False)
                self.assertEqual(E.shape, batches + (k,))
                self.assertEqual(V.shape, batches + (m, k))
                self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
                e = torch.linalg.eigvalsh(A)
                e_smallest = e[..., :k]
                self.assertEqual(E, e_smallest)
                # classical eigenvalue problem, largest eigenvalues
                E, V = lobpcg(A, k=k, n=n, largest=True)
                e_largest, _ = torch.sort(e[..., -k:], descending=True)
                self.assertEqual(E, e_largest, atol=prec, rtol=0)
                self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
                # generalized eigenvalue problem, smallest eigenvalues
                E, V = lobpcg(A, B=B, k=k, n=n, largest=False)
                self.assertEqual(matmul(A, V), mm(matmul(B, V), E.diag_embed()), atol=prec, rtol=0)
                # generalized eigenvalue problem, largest eigenvalues
                E, V = lobpcg(A, B=B, k=k, n=n, largest=True)
                self.assertEqual(matmul(A, V) / E.max(), mm(matmul(B, V), (E / E.max()).diag_embed()),
                                 atol=prec, rtol=0)
        # check sparse input
        for m, n, k, density in [
                (5, 1, 1, 0.8),
                (9, 3, 2, 0.5),
                (100, 1, 1, 0.1),
                (1000, 7, 3, 0.01),
        ]:
            # skip tests that are known to fail with the basic LOBCG
            # method due to insufficient accuracy
            if method == 'basic' and (m, n, k, density) in [(1000, 7, 3, 0.01)]:
                continue
            A = random_sparse_pd_matrix(m, density=density, device=device, dtype=dtype)
            B = random_sparse_pd_matrix(m, density=density, device=device, dtype=dtype)
            # random_sparse_pd_matrix has known eigenvalues i/m, i = 1..m.
            A_eigenvalues = torch.arange(1, m + 1, dtype=dtype) / m
            e_smallest = A_eigenvalues[..., :k]
            e_largest, _ = torch.sort(A_eigenvalues[..., -k:], descending=True)
            # classical eigenvalue problem, smallest eigenvalues
            E, V = lobpcg(A, k=k, n=n, largest=False)
            self.assertEqual(E, e_smallest)
            self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
            # classical eigenvalue problem, largest eigenvalues
            E, V = lobpcg(A, k=k, n=n, largest=True)
            self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
            self.assertEqual(E, e_largest)
            # generalized eigenvalue problem, smallest eigenvalues
            E, V = lobpcg(A, B=B, k=k, n=n, largest=False)
            self.assertEqual(matmul(A, V), matmul(B, mm(V, E.diag_embed())), atol=prec, rtol=0)
            # generalized eigenvalue problem, largest eigenvalues
            E, V = lobpcg(A, B=B, k=k, n=n, largest=True)
            self.assertEqual(matmul(A, V) / E.max(), mm(matmul(B, V), (E / E.max()).diag_embed()),
                             atol=prec, rtol=0)
@skipCPUIfNoLapack
@onlyCPU
@dtypes(torch.double)
def test_lobpcg_torchscript(self, device, dtype):
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
lobpcg = torch.jit.script(torch.lobpcg)
m = 500
k = 5
A1 = random_sparse_pd_matrix(m, density=2.0 / m, device=device, dtype=dtype)
X1 = torch.randn((m, k), dtype=dtype, device=device)
E1, V1 = lobpcg(A1, X=X1)
eq_err = torch.norm((mm(A1, V1) - V1 * E1), 2) / E1.max()
self.assertLess(eq_err, 1e-6)
    @unittest.skipIf(not TEST_SCIPY or (TEST_SCIPY and version.parse(scipy.__version__) < version.parse('1.4.1')),
                     "Scipy not found or older than 1.4.1")
    @skipCPUIfNoLapack
    @skipIfTorchDynamo("fails in tracing scipy.sparse.lobpcg")
    @onlyCPU
    @dtypes(torch.double)
    def test_lobpcg_scipy(self, device, dtype):
        """Compare torch.lobpcg against scipy.sparse.linalg.lobpcg.

        Checks residuals, eigenvalue agreement and iteration counts for the
        standard and generalized problems, prints rough timing comparisons,
        and exercises the very-small-tolerance path (tol=1e-100), where the
        scipy call is allowed to fail.
        """
        import time
        from torch.testing._internal.common_utils import random_sparse_pd_matrix
        from torch._linalg_utils import matmul as mm
        from scipy.sparse.linalg import lobpcg as scipy_lobpcg
        import scipy.sparse
        def toscipy(A):
            # Convert a torch dense or sparse-COO tensor to the scipy equivalent.
            if A.layout == torch.sparse_coo:
                values = A.coalesce().values().cpu().numpy().copy()
                indices = A.coalesce().indices().cpu().numpy().copy()
                return scipy.sparse.coo_matrix((values, (indices[0], indices[1])), A.shape)
            return A.cpu().numpy().copy()
        niter = 1000
        repeat = 10
        m = 500  # size of the square matrix
        k = 7  # the number of requested eigenpairs
        A1 = random_sparse_pd_matrix(m, density=2.0 / m, device=device, dtype=dtype)
        B1 = random_sparse_pd_matrix(m, density=2.0 / m, device=device, dtype=dtype)
        X1 = torch.randn((m, k), dtype=dtype, device=device)
        A2 = toscipy(A1)
        B2 = toscipy(B1)
        X2 = toscipy(X1)
        lambdas1 = []
        def tracker(worker):
            # Record one entry per iteration so iteration counts can be compared.
            lambdas1.append(worker.E[:])
        tol = 1e-8
        # tol for scipy lobpcg will be chosen so that the number of
        # iterations will be equal or very close to pytorch lobpcg
        # (that is around 170-180)
        # Standard eigenvalue problem
        E1, V1 = torch.lobpcg(A1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)
        E2, V2, lambdas2 = scipy_lobpcg(A2, X2, maxiter=niter, largest=True, retLambdaHistory=True, tol=1.1 * tol)
        iters1 = len(lambdas1)
        iters2 = len(lambdas2)
        self.assertLess(abs(iters1 - iters2), 0.05 * max(iters1, iters2))
        E2a, V2a = scipy_lobpcg(A2, X2, maxiter=niter, largest=False)
        eq_err = torch.norm((mm(A1, V1) - V1 * E1), 2) / E1.max()
        eq_err_scipy = (abs(A2.dot(V2) - V2 * E2)**2).sum() ** 0.5 / E2.max()
        self.assertLess(eq_err, 1e-6)  # std
        self.assertLess(eq_err_scipy, 1e-6)  # std
        self.assertEqual(E1, torch.from_numpy(E2.copy()))
        # Generalized eigenvalue problem
        lambdas1 = []
        def tracker(worker):
            lambdas1.append(worker.E[:])
        E1, V1 = torch.lobpcg(A1, B=B1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)
        E2, V2, lambdas2 = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=True, retLambdaHistory=True, tol=39 * tol)
        E2a, V2a = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=False)
        iters1 = len(lambdas1)
        iters2 = len(lambdas2)
        self.assertLess(abs(iters1 - iters2), 0.05 * max(iters1, iters2))
        eq_err = torch.norm((mm(A1, V1) - mm(B1, V1) * E1), 2) / E1.max()
        eq_err_scipy = (abs(A2.dot(V2) - B2.dot(V2) * E2)**2).sum() ** 0.5 / E2.max()
        self.assertLess(eq_err, 1e-6)  # general
        self.assertLess(eq_err_scipy, 1e-6)  # general
        self.assertEqual(E1, torch.from_numpy(E2.copy()))
        # Timings
        elapsed_ortho = 0
        elapsed_ortho_general = 0
        elapsed_scipy = 0
        elapsed_general_scipy = 0
        for _ in range(repeat):
            start = time.time()
            torch.lobpcg(A1, X=X1, niter=niter, method='ortho', tol=tol)
            end = time.time()
            elapsed_ortho += end - start
            start = time.time()
            torch.lobpcg(A1, X=X1, B=B1, niter=niter, method='ortho', tol=tol)
            end = time.time()
            elapsed_ortho_general += end - start
            start = time.time()
            scipy_lobpcg(A2, X2, maxiter=niter, tol=1.1 * tol)
            end = time.time()
            elapsed_scipy += end - start
            start = time.time()
            scipy_lobpcg(A2, X2, B=B2, maxiter=niter, tol=39 * tol)
            end = time.time()
            elapsed_general_scipy += end - start
        elapsed_ortho_ms = 1000.0 * elapsed_ortho / repeat
        elapsed_ortho_general_ms = 1000.0 * elapsed_ortho_general / repeat
        elapsed_scipy_ms = 1000.0 * elapsed_scipy / repeat
        elapsed_general_scipy_ms = 1000.0 * elapsed_general_scipy / repeat
        print(f'''
CPU timings: torch.lobpcg vs scipy.sparse.linalg.lobpcg
-------------------------------------------------------
              | standard    | generalized | method
torch.lobpcg  | {elapsed_ortho_ms:10.2f}  | {elapsed_ortho_general_ms:10.2f}  | ortho
scipy_lobpcg  | {elapsed_scipy_ms:10.2f}  | {elapsed_general_scipy_ms:10.2f}  | N/A
-(input size: {m:4}, eigenpairs:{k:2}, units: ms per call)-
''')
        # Handling of very small tolerance
        tol = 1e-100
        lambdas1 = []
        def tracker(worker):
            lambdas1.append(worker.E[:])
        E1, V1 = torch.lobpcg(A1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)
        iters1 = len(lambdas1)
        eq_err = torch.norm((mm(A1, V1) - V1 * E1), 2) / E1.max()
        try:
            E2, V2, lambdas2 = scipy_lobpcg(A2, X2, maxiter=niter, largest=True, retLambdaHistory=True, tol=tol)
            iters2 = len(lambdas2)
            eq_err_scipy = (abs(A2.dot(V2) - V2 * E2)**2).sum() ** 0.5 / E2.max()
        except Exception as msg:
            print('Calling scipy_lobpcg failed [standard]:', msg)
            iters2 = -1
            eq_err_scipy = -1
        lambdas1 = []
        def tracker(worker):
            lambdas1.append(worker.E[:])
        E1, V1 = torch.lobpcg(A1, X=X1, B=B1, niter=niter, largest=True, tracker=tracker, tol=tol)
        iters1_general = len(lambdas1)
        eq_err_general = torch.norm((mm(A1, V1) - mm(B1, V1) * E1), 2) / E1.max()
        try:
            E2, V2, lambdas2 = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=True, retLambdaHistory=True, tol=tol)
            iters2_general = len(lambdas2)
            eq_err_general_scipy = (abs(A2.dot(V2) - B2.dot(V2) * E2)**2).sum() ** 0.5 / E2.max()
        except Exception as msg:
            print('Calling scipy_lobpcg failed [generalized]:', msg)
            iters2_general = -1
            eq_err_general_scipy = -1
        print(f'''\
Handling of small tol={tol:6.0e}: torch.lobpcg vs scipy.sparse.linalg.lobpcg
----------------------------------------------------------------------------
              | standard    | generalized |  niter | method
torch.lobpcg  | {eq_err:10.2e}  | {eq_err_general:10.2e}  | {iters1:6} | ortho
scipy_lobpcg  | {eq_err_scipy:10.2e}  | {eq_err_general_scipy:10.2e}  | {iters2:6} | N/A
---(input size: {m:4}, eigenpairs:{k:2}, units: relative error, maxiter={niter:4})---
''')
    def _test_addmm_addmv(self, f, t, m, v, *, alpha=None, beta=None, transpose_out=False, activation=None):
        """Generic checker for addmm/addmv-style ops.

        Computes ``f(t, m, v, alpha=alpha, beta=beta)`` three ways — return
        value, preallocated ``out=`` buffer, and a NumPy reference — and
        asserts all three agree.

        f: op under test (torch.addmv, torch.addmm, torch._addmm_activation).
        t: additive input; m, v: matrix and vector/matrix operands.
        alpha/beta: scaling factors; dtype-dependent defaults so complex
        scalars are exercised for complex inputs.
        transpose_out: run the out= variant through a transposed buffer.
        activation: None, "relu" or "gelu" fused epilogue.
        """
        dtype = t.dtype
        # Reduced-precision inputs are checked against a float32 reference.
        numpy_dtype = dtype
        if dtype in {torch.bfloat16, torch.half}:
            numpy_dtype = torch.float
        if dtype.is_complex:
            alpha = 0.9 + 0.3j if alpha is None else alpha
            beta = 0.5 + 0.6j if beta is None else beta
        else:
            alpha = 1.2 if alpha is None else alpha
            beta = 0.8 if beta is None else beta
        # 1) functional result
        if activation == "gelu":
            res1 = f(t, m, v, alpha=alpha, beta=beta, use_gelu=True)
        else:
            res1 = f(t, m, v, alpha=alpha, beta=beta)
        # 2) out= result; NaN prefill ensures every element gets written.
        res2 = torch.full_like(res1, math.nan)
        if transpose_out:
            res2 = res2.t().clone(memory_format=torch.contiguous_format).t()
        if activation == "gelu":
            f(t, m, v, alpha=alpha, beta=beta, out=res2, use_gelu=True)
        else:
            f(t, m, v, alpha=alpha, beta=beta, out=res2)
        # 3) NumPy reference: alpha * (m @ v) + beta * t, then the activation.
        res3 = alpha * (m.to(numpy_dtype).cpu().numpy() @ v.to(numpy_dtype).cpu().numpy())
        if beta != 0:
            res3 += (beta * t).to(numpy_dtype).cpu().numpy()
        if activation == "relu":
            res3 = res3 * (res3 > 0)
        elif activation == "gelu":
            res3_t = torch.from_numpy(res3).to(dtype)
            # CUDA's fused gelu epilogue uses the tanh approximation.
            approximate = "tanh" if t.is_cuda else "none"
            res3_t = torch.nn.functional.gelu(res3_t, approximate=approximate)
            res3 = res3_t.to(numpy_dtype).cpu().numpy()
        else:
            assert activation is None, f"unsupported activation {activation}"
        res3 = torch.from_numpy(res3).to(dtype)
        self.assertEqual(res1, res2)
        self.assertEqual(res1, res3)
    @precisionOverride({torch.bfloat16: 1e-0, torch.half: 1e-3, torch.float: 1e-4, torch.double: 1e-8,
                        torch.cfloat: 1e-4, torch.cdouble: 1e-8})
    @dtypesIfCUDA(*floating_and_complex_types_and(
        *[torch.bfloat16] if TEST_WITH_ROCM or SM53OrLater else [],
        torch.half))
    @dtypes(torch.bfloat16, torch.half, torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_addmv(self, device, dtype):
        """torch.addmv over contiguous/expanded/broadcast operand layouts,
        including the beta=0-with-NaN-input special case."""
        # have to use torch.randn(...).to(bfloat16) instead of
        # torch.randn(..., dtype=bfloat16). randn does not support
        # bfloat16 yet.
        # "*0.2" to reduce errors for low precision
        ts = [
            0.2 * torch.randn(50, device=device).to(dtype),
            0.2 * torch.randn(1, device=device).to(dtype).expand(50),
        ]
        vs = [
            0.2 * torch.randn(100, device=device).to(dtype),
            0.2 * torch.ones(1, device=device).to(dtype).expand(100),  # to reduce errors for low precision
        ]
        ms = [
            # 0d
            0.2 * torch.ones((), device=device).to(dtype).expand(50, 100),  # to reduce errors for low precision
            # 1d
            0.2 * torch.randn((1, 100), device=device).to(dtype).expand(50, 100),
            # this initialization reduces errors for low precision for broadcasted matrices
            # by making sure that intermediate and result values are exactly representable
            # in low precision type
            0.2 * torch.randint(3, (50, 1), dtype=torch.float, device=device).to(dtype).expand(50, 100),
            # 2d
            0.2 * torch.randn((50, 100), device=device).to(dtype),
            0.2 * torch.randn((100, 50), device=device).to(dtype).t(),
        ]
        for m, v, t in itertools.product(ms, vs, ts):
            self._test_addmm_addmv(torch.addmv, t, m, v)
        # Test beta=0, t=nan
        t = torch.full((50,), math.nan, device=device).to(dtype)
        for m, v in itertools.product(ms, vs):
            self._test_addmm_addmv(torch.addmv, t, m, v, beta=0)
@dtypesIfCUDA(*floating_types_and(*[torch.bfloat16] if TEST_WITH_ROCM or
SM53OrLater else []))
@dtypes(torch.float, torch.double)
def test_addmv_rowmajor_colmajor_incx_incy_lda(self, device, dtype):
# tests (o, s)*(s). o is output size, s is summed size.
o = 5
s = 3
a_data = torch.arange(1, o * s + 1, device=device, dtype=dtype).view(o, s)
x_data = torch.arange(1, s + 1, 1, device=device, dtype=dtype)
y_data = torch.ones(o, device=device, dtype=dtype)
control = torch.tensor([15., 33., 51., 69., 87.], device=device, dtype=dtype)
def _test(row_major, incx, incy, lda_tail):
if row_major:
a_storage = torch.full((o, s + lda_tail), float('nan'), device=device, dtype=dtype)
else:
a_storage = torch.full((s, o + lda_tail), float('nan'), device=device, dtype=dtype).permute(1, 0)
a = a_storage[:o, :s].copy_(a_data)
x_storage = torch.full((s, incx), float('nan'), device=device, dtype=dtype)
x = x_storage[:, 0].copy_(x_data)
y_storage = torch.full((o, incy), float('nan'), device=device, dtype=dtype)
y = y_storage[:, 0].copy_(y_data)
self._test_addmm_addmv(torch.addmv, y, a, x)
for row_major, incx, incy, lda_tail in itertools.product((False, True), (1, 2), (1, 2), (0, 1)):
_test(row_major, incx, incy, lda_tail)
    def _test_addmm_impl(self, func, activation, device, dtype):
        """Shared driver for test_addmm / test_addmm_relu / test_addmm_gelu.

        Runs `func` (torch.addmm or torch._addmm_activation) through the
        generic `_test_addmm_addmv` checker over contiguous, 0-strided,
        NaN-with-beta=0 and transposed operand combinations.
        """
        M = torch.randn(10, 25, device=device).to(dtype)
        m1 = torch.randn(10, 50, device=device).to(dtype)
        m2 = torch.randn(50, 25, device=device).to(dtype)
        self._test_addmm_addmv(func, M, m1, m2, activation=activation)
        # vector (or with 1-len dims in shape[:-1])/matrix-shaped bias
        # and beta=1 result in epilogue fusion in CUDA
        V = torch.randn(25, device=device).to(dtype)
        for c in (V, V.unsqueeze(0), M):
            self._test_addmm_addmv(func, c, m1, m2, beta=1, activation=activation)
        # Test 0-strided
        M = torch.randn(10, 1, device=device).to(dtype).expand(10, 25)
        m1 = torch.randn(10, 1, device=device).to(dtype).expand(10, 50)
        m2 = torch.randn(50, 25, device=device).to(dtype)
        self._test_addmm_addmv(func, M, m1, m2, activation=activation)
        # Test beta=0, M=nan
        M = torch.full((10, 25), math.nan, device=device).to(dtype)
        m1 = torch.randn(10, 50, device=device).to(dtype)
        m2 = torch.randn(50, 25, device=device).to(dtype)
        self._test_addmm_addmv(func, M, m1, m2, beta=0, activation=activation)
        # Test transpose
        for t1, t2, t3, t4 in itertools.product([True, False], repeat=4):
            def maybe_transpose(cond, m):
                # Same logical content, transposed (non-contiguous) layout.
                if not cond:
                    return m
                return m.t().clone(memory_format=torch.contiguous_format).t()
            M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype))
            m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype))
            m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))
            for c, beta in itertools.product((M, V, V.unsqueeze(0)), (0, 1)):
                # beta=1 to test epilogue fusions with either vector or matrix input
                self._test_addmm_addmv(func, c, m1, m2, beta=beta, transpose_out=t4, activation=activation)
    @precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
                        torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
    @dtypesIfMPS(torch.float32)
    @dtypesIfCUDA(*floating_and_complex_types_and(
        *[torch.bfloat16] if TEST_WITH_ROCM or SM53OrLater else []))
    @dtypes(*floating_and_complex_types_and(torch.bfloat16, torch.half))
    @tf32_on_and_off(0.05)
    @reduced_f32_on_and_off(0.05)
    def test_addmm(self, device, dtype):
        # Plain torch.addmm (no fused activation epilogue).
        self._test_addmm_impl(torch.addmm, None, device, dtype)
    @precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 5e-2,
                        torch.half: 5e-2, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
    @dtypesIfCUDA(*floating_types_and(
        *[torch.bfloat16, torch.half] if TEST_WITH_ROCM or SM53OrLater else []))
    @dtypes(*floating_types_and(torch.bfloat16))
    @tf32_on_and_off(0.05)
    @reduced_f32_on_and_off(0.05)
    def test_addmm_relu(self, device, dtype):
        # addmm with a fused ReLU epilogue (torch._addmm_activation).
        self._test_addmm_impl(torch._addmm_activation, "relu", device, dtype)
    @onlyCUDA
    @skipCUDAIfNotRocm
    @precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 5e-2,
                        torch.half: 5e-2, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
    @dtypesIfCUDA(*floating_types_and(
        *[torch.bfloat16, torch.half] if TEST_WITH_ROCM or SM53OrLater else []))
    @dtypes(*floating_types_and(torch.bfloat16))
    @tf32_on_and_off(0.05)
    @reduced_f32_on_and_off(0.05)
    def test_addmm_relu_tunableop_rocm(self, device, dtype):
        # Same as test_addmm_relu but with TunableOp enabled (ROCm only);
        # _tunableop_ctx restores the TunableOp state on exit.
        with self._tunableop_ctx():
            # Keep the tuning step cheap: no rotating buffer, one iteration.
            torch.cuda.tunable.set_rotating_buffer_size(0)
            torch.cuda.tunable.set_max_tuning_iterations(1)
            self._test_addmm_impl(torch._addmm_activation, "relu", device, dtype)
    @precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 5e-2,
                        torch.half: 5e-2, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
    @dtypesIfCUDA(*floating_types_and(
        *[torch.bfloat16, torch.half] if TEST_WITH_ROCM or SM53OrLater else []))
    @dtypes(*floating_types_and(torch.bfloat16))
    @tf32_on_and_off(0.05)
    @reduced_f32_on_and_off(0.05)
    def test_addmm_gelu(self, device, dtype):
        # addmm with a fused GELU epilogue (torch._addmm_activation).
        self._test_addmm_impl(torch._addmm_activation, "gelu", device, dtype)
@skipIfRocmArch(MI300_ARCH)
@dtypes(torch.float, torch.double)
@dtypesIfCUDA(*floating_and_complex_types())
@tf32_on_and_off(0.005)
@reduced_f32_on_and_off(0.005)
def test_addmm_sizes(self, device, dtype):
for m in [0, 1, 25]:
for n in [0, 1, 10]:
for k in [0, 1, 8]:
M = torch.randn(n, m, device=device).to(dtype)
m1 = torch.randn(n, k, device=device).to(dtype)
m2 = torch.randn(k, m, device=device).to(dtype)
self._test_addmm_addmv(torch.addmm, M, m1, m2)
m1 = torch.randn(n, k + 1, device=device).to(dtype)
m2 = torch.randn(k, m, device=device).to(dtype)
self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.addmm(M, m1, m2))
self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.mm(m1, m2))
@dtypes(torch.half)
@onlyCUDA
def test_addmm_baddbmm_overflow(self, device, dtype):
orig = torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
inp = torch.zeros(128, 128, dtype=torch.half, device=device)
mat1 = torch.ones(128, 1000, dtype=torch.half, device=device) * 100
mat2 = torch.ones(1000, 128, dtype=torch.half, device=device) * 100
out = torch.addmm(inp, mat1, mat2, alpha=0.001, beta=0.)
# just check for no overflow on ROCM
if TEST_WITH_ROCM:
self.assertFalse(out.isinf().any())
else:
self.assertTrue((out == 10000.).all())
inp = torch.zeros(3, 128, 128, dtype=torch.half, device=device)
mat1 = torch.ones(3, 128, 1000, dtype=torch.half, device=device) * 100
mat2 = torch.ones(3, 1000, 128, dtype=torch.half, device=device) * 100
out = torch.baddbmm(inp, mat1, mat2, alpha=0.001, beta=0.)
if TEST_WITH_ROCM:
self.assertFalse(out.isinf().any())
else:
self.assertTrue((out == 10000.).all())
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = orig
@dtypes(torch.float)
def test_baddbmm_nan_input_with_zero_beta(self, device, dtype):
for shape in [[3, 2, 2], [2, 20, 20]]:
mat1, mat2 = (torch.randn(shape, dtype=dtype, device=device) for _ in range(2))
inputs = [torch.randn(shape, dtype=dtype, device=device),
torch.randn(shape, dtype=dtype, device=device).fill_(torch.nan)]
outs = [None, torch.randn(shape, dtype=dtype, device=device),
torch.randn(shape, dtype=dtype, device=device).fill_(torch.nan)]
options = itertools.product(inputs, outs)
for input, out in options:
y_ref = torch.bmm(mat1, mat2)
y = torch.baddbmm(input, mat1, mat2, beta=0.0, out=out)
self.assertEqual(y_ref, y)
@dtypes(torch.int16, torch.int32, torch.int64, torch.float16, torch.float32, torch.float64)
def test_baddbmm_input_dtypes_compatibility(self, device, dtype):
batch1 = torch.rand((1, 2, 2), dtype=torch.float32, device=device)
batch2 = torch.rand((1, 2, 2), dtype=torch.float32, device=device)
input_tensor = torch.rand((1, 2, 2), device=device).to(dtype)
if dtype != torch.float32:
with self.assertRaisesRegex(RuntimeError, "Input dtypes must be the same"):
y = torch.baddbmm(input_tensor, batch1, batch2, beta=0.0)
else:
out = torch.randn((1, 2, 2), dtype=dtype, device=device).fill_(torch.nan)
y_ref = torch.bmm(batch1, batch2)
y = torch.baddbmm(input_tensor, batch1, batch2, beta=0.0, out=out)
self.assertEqual(out, y_ref)
@unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
@onlyCUDA
def test_matmul_45724(self, device):
# https://github.com/pytorch/pytorch/issues/45724
a = torch.rand(65537, 22, 64, device=device, dtype=torch.half)
b = torch.rand(65537, 64, 22, device=device, dtype=torch.half)
c = torch.full((65537, 22, 22), math.nan, dtype=torch.half, device=device)
cpu_result = torch.matmul(a.cpu().float(), b.cpu().float()).cuda().half()
torch.matmul(a, b, out=c)
self.assertEqual(c, cpu_result)
    @unittest.skipIf(IS_WINDOWS, "Skipped on Windows!")
    @unittest.skipIf(SM90OrLater and not TEST_WITH_ROCM, "Expected failure on sm90")
    @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
    @onlyCUDA
    @parametrize("k", [16, 32])
    @parametrize("n", [16, 32])
    @parametrize("use_transpose_a", [True, False])
    @parametrize("use_transpose_b", [True, False])
    def test__int_mm(self, device, k, n, use_transpose_a, use_transpose_b):
        """Exercise torch._int_mm (int8 x int8 -> int32 GEMM) on CUDA.

        Covers transposed/non-transposed operand layouts and checks that
        layout/arch/CUDA-version combinations the code treats as unsupported
        raise the cuBLASLt error instead of computing silently.
        """
        def genf_int_float(x, y, use_transpose):
            # Return an int8 tensor and its float32 copy; when use_transpose
            # is set, allocate the swapped shape and return .t() views.
            if use_transpose:
                x, y = y, x
            x_int8 = torch.randint(-10, 10, (x, y), dtype=torch.int8, device=device)
            x_float = x_int8.to(torch.float32)
            if use_transpose:
                return x_int8.t(), x_float.t()
            return x_int8, x_float
        def _test(m, k, n, transpose_a, transpose_b, test_equal=True):
            # Run _int_mm (return-value and out= variants) and compare the
            # int32 result against a float32 torch.mm reference.
            a_int8, a_float = genf_int_float(m, k, transpose_a)
            b_int8, b_float = genf_int_float(k, n, transpose_b)
            c_int32 = torch._int_mm(a_int8, b_int8)
            self.assertTrue(c_int32.dtype is torch.int32)
            self.assertEqual(c_int32.device, torch.device(device))
            if test_equal:
                self.assertEqual(c_int32.float(), torch.mm(a_float, b_float))
            else:
                self.assertNotEqual(c_int32.float(), torch.mm(a_float, b_float))
            c_int32_result = c_int32.new_empty(c_int32.size())
            # Checking out variant
            torch._int_mm(a_int8, b_int8, out=c_int32_result)
            if test_equal:
                self.assertEqual(c_int32_result.float(), torch.mm(a_float, b_float))
            else:
                self.assertNotEqual(c_int32_result.float(), torch.mm(a_float, b_float))
        # NOTE: We're just exercising terrible failures here.
        version = _get_torch_cuda_version()
        SM80OrLater = torch.cuda.is_available() and torch.cuda.get_device_capability() >= (8, 0)
        SM70 = torch.cuda.is_available() and torch.cuda.get_device_capability() == (7, 0)
        SM75 = torch.cuda.is_available() and torch.cuda.get_device_capability() == (7, 5)
        if TEST_WITH_ROCM:
            # On ROCm every layout combination is expected to match exactly.
            _test(17, k, n, use_transpose_a, use_transpose_b, True)
        else:
            # On CUDA, whether a layout works depends on arch and CUDA version;
            # otherwise the call must raise CUBLAS_STATUS_NOT_SUPPORTED.
            if not use_transpose_a and use_transpose_b:
                if SM80OrLater or (version >= (12, 3) and (SM70 or SM75)):
                    # On CUDA <= 11.7 the result is expected to DIFFER from the
                    # reference (test_equal=False via `version > (11, 7)`).
                    _test(17, k, n, use_transpose_a, use_transpose_b, version > (11, 7))
                else:
                    with self.assertRaisesRegex(RuntimeError,
                                                "CUDA error: CUBLAS_STATUS_NOT_SUPPORTED when calling cublasLtMatmul"):
                        _test(17, k, n, use_transpose_a, use_transpose_b)
            if use_transpose_a and not use_transpose_b:
                with self.assertRaisesRegex(RuntimeError,
                                            "CUDA error: CUBLAS_STATUS_NOT_SUPPORTED when calling cublasLtMatmul"):
                    _test(17, k, n, use_transpose_a, use_transpose_b)
            if use_transpose_a and use_transpose_b:
                with self.assertRaisesRegex(RuntimeError,
                                            "CUDA error: CUBLAS_STATUS_NOT_SUPPORTED when calling cublasLtMatmul"):
                    _test(17, k, n, use_transpose_a, use_transpose_b)
            if not use_transpose_a and not use_transpose_b:
                if SM80OrLater or (version >= (12, 3) and (SM70 or SM75)):
                    _test(17, k, n, use_transpose_a, use_transpose_b)
                else:
                    with self.assertRaisesRegex(RuntimeError,
                                                "CUDA error: CUBLAS_STATUS_NOT_SUPPORTED when calling cublasLtMatmul"):
                        _test(17, k, n, use_transpose_a, use_transpose_b)
    @unittest.skipIf(IS_WINDOWS, "Skipped on Windows!")
    @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
    @onlyCUDA
    def test__int_mm_errors(self, device):
        """Verify torch._int_mm input validation raises the expected messages."""
        def genf_int(x, y):
            # Uninitialized int8 tensor; only shape/dtype matter here.
            return torch.empty((x, y), dtype=torch.int8, device=device)
        def _gen_pair(m, k, n):
            # A conforming (m, k) x (k, n) int8 operand pair.
            return genf_int(m, k), genf_int(k, n)
        # m must be strictly greater than 16.
        self.assertRaisesRegex(RuntimeError,
                               r"self.size\(0\) needs to be greater than 16, but got 16",
                               lambda: torch._int_mm(*_gen_pair(16, 8, 32)))
        # k must be a positive multiple of 8.
        self.assertRaisesRegex(RuntimeError,
                               r"self.size\(1\) needs to be greater than 0 and a multiple of 8, but got 7",
                               lambda: torch._int_mm(*_gen_pair(17, 7, 32)))
        # Inner dimensions must agree.
        self.assertRaisesRegex(RuntimeError,
                               r"self.size\(1\) needs to match mat2.size\(0\) but got 8 and 7",
                               lambda: torch._int_mm(genf_int(17, 8), genf_int(7, 32)))
        # n must be a positive multiple of 8.
        self.assertRaisesRegex(RuntimeError,
                               r"mat2.size\(1\) needs to be greater than 0 and a multiple of 8, but got 31",
                               lambda: torch._int_mm(*_gen_pair(17, 8, 31)))
        # Both operands must be int8 (Char).
        self.assertRaisesRegex(RuntimeError,
                               r"expected scalar type Char but found Float",
                               lambda: torch._int_mm(genf_int(17, 8).float(), genf_int(8, 32)))
        self.assertRaisesRegex(RuntimeError,
                               r"expected scalar type Char but found Float",
                               lambda: torch._int_mm(genf_int(17, 8), genf_int(8, 32).float()))
        # The out= tensor must be int32 and correctly shaped.
        self.assertRaisesRegex(RuntimeError,
                               r"Expected result dtype to be of type kInt but got float",
                               lambda: torch._int_mm(genf_int(17, 8), genf_int(8, 32), out=genf_int(16, 32).float()))
        self.assertRaisesRegex(RuntimeError,
                               r"Expected result.size\(0\) to be 17 but got 15",
                               lambda: torch._int_mm(genf_int(17, 8), genf_int(8, 32), out=genf_int(15, 32).int()))
        self.assertRaisesRegex(RuntimeError,
                               r"Expected result.size\(0\) to be 17 but got 16",
                               lambda: torch._int_mm(genf_int(17, 8), genf_int(8, 32), out=genf_int(16, 31).int()))
    @onlyCPU
    @parametrize("m", [0, 8, 17])
    @parametrize("k", [0, 16, 32])
    @parametrize("n", [16, 32])
    @parametrize("use_transpose_a", [True, False])
    @parametrize("use_transpose_b", [True, False])
    @parametrize("non_contig_type", [0, 1, 2])
    def test__int_mm_cpu(self, device, m, k, n, use_transpose_a, use_transpose_b, non_contig_type):
        """CPU torch._int_mm over contiguous, transposed and non-contiguous layouts."""
        # non_contig_type:
        # 0: the whole data buffer is contiguous (can be transposed)
        # 1: stride of one dimension is 1, but the whole buffer is not contiguous
        # 2: Neither stride is 1
        def genf_int_float(x, y, use_transpose, non_contig_type):
            # Build an int8 tensor (plus float32 copy) with the requested layout.
            if use_transpose:
                x, y = y, x
            if non_contig_type != 0:
                # Over-allocate columns so a narrowing slice below leaves the
                # buffer non-contiguous.
                y = y * 2
            x_int8 = torch.randint(-128, 127, (x, y), dtype=torch.int8, device=device)
            x_float = x_int8.to(torch.float32)
            if non_contig_type == 1:
                x_int8 = x_int8[:, : y // 2]
                x_float = x_float[:, : y // 2]
            elif non_contig_type == 2:
                x_int8 = x_int8[:, ::2]
                x_float = x_float[:, ::2]
            if use_transpose:
                return x_int8.t(), x_float.t()
            return x_int8, x_float
        if non_contig_type != 0 and (m == 0 or k == 0):
            # Empty shapes cannot exercise the non-contiguous layouts; skip.
            return
        a_int8, a_float = genf_int_float(m, k, use_transpose_a, non_contig_type)
        b_int8, b_float = genf_int_float(k, n, use_transpose_b, non_contig_type)
        c_int32 = torch._int_mm(a_int8, b_int8)
        self.assertTrue(c_int32.dtype is torch.int32)
        self.assertEqual(c_int32.device, torch.device(device))
        self.assertEqual(c_int32.float(), torch.mm(a_float, b_float))
        c_int32_result = c_int32.new_empty(c_int32.size())
        # Checking out variant
        torch._int_mm(a_int8, b_int8, out=c_int32_result)
        self.assertEqual(c_int32_result.float(), torch.mm(a_float, b_float))
    @unittest.skipIf(IS_WINDOWS, "Skipped on Windows!")
    @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
    @onlyNativeDeviceTypes
    @parametrize("m", [32, 64])
    @parametrize("k", [32, 64])
    @parametrize("n", [48, 64])
    def test__int4_mm(self, device, m, k, n):
        """Group-quantized int4 weight matmul vs a full-precision torch.mm reference."""
        if self.device_type == 'cuda' and not SM80OrLater:
            self.skipTest("requires SM80 or later")
        if TEST_WITH_ROCM:
            if not CDNA2OrLater():
                self.skipTest("_int4_mm is supported only for CDNA2 or later")
        q_group = 32
        inner_k_tiles = 2
        torch.manual_seed(1)
        a_bf16 = torch.rand((m, k), dtype=torch.bfloat16, device=device)
        b_bf16 = torch.rand((k, n), dtype=torch.bfloat16, device=device)
        def convert_weight_to_int4pack(b):
            # Group-quantize to 4 bits, then pack with the backend-specific op.
            b_tmp, b_scales_and_zeros = _group_quantize_tensor(
                b, n_bit=4, q_group_size=q_group
            )
            if self.device_type == 'cpu':
                b_int4pack = torch._convert_weight_to_int4pack_for_cpu(
                    b_tmp, inner_k_tiles
                )
            else:
                b_int4pack = torch._convert_weight_to_int4pack(
                    b_tmp, inner_k_tiles
                )
            return b_int4pack, b_scales_and_zeros
        def weight_int4pack_mm(a, b_int4pack, b_scales_and_zeros):
            # Dispatch to the CPU or non-CPU packed-int4 matmul, checking the
            # packed layout each backend produces (uint8/2-D vs int32/4-D).
            if self.device_type == 'cpu':
                self.assertTrue(b_int4pack.dtype is torch.uint8)
                self.assertTrue(b_int4pack.dim() == 2)
                c = torch._weight_int4pack_mm_for_cpu(
                    a, b_int4pack, q_group, b_scales_and_zeros
                )
                # test wrapper
                q_group_t = torch.tensor(q_group, dtype=torch.int64, device=device)
                c_2 = torch.ops.quantized.int4mm_packed_weight_cpu(
                    a, b_int4pack, q_group_t, b_scales_and_zeros
                )
                assert torch.equal(c, c_2)
                return c
            else:
                self.assertTrue(b_int4pack.dtype is torch.int32)
                self.assertTrue(b_int4pack.dim() == 4)
                return torch._weight_int4pack_mm(
                    a, b_int4pack, q_group, b_scales_and_zeros
                )
        b_int4pack, b_scales_and_zeros_bf16 = convert_weight_to_int4pack(b_bf16)
        # CPU additionally covers fp16/fp32 activations; other devices bf16 only.
        for dtype in [torch.bfloat16] + ([torch.float16, torch.float32] if device == "cpu" else []):
            a = a_bf16.to(dtype=dtype)
            b = b_bf16.to(dtype=dtype)
            b_scales_and_zeros = b_scales_and_zeros_bf16.to(dtype=dtype)
            ref = torch.mm(a, b)
            res = weight_int4pack_mm(a, b_int4pack, b_scales_and_zeros)
            # Quantization is lossy; accept up to 5% mean relative error.
            mean_err = ((res - ref).abs() / ref).mean()
            self.assertTrue(mean_err < 0.05)
    @unittest.skipIf(IS_WINDOWS, "Skipped on Windows!")
    @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
    @onlyNativeDeviceTypes
    @parametrize("m", [32, 64])
    @parametrize("k", [32, 64])
    @parametrize("n", [48, 64])
    def test_compile_int4_mm(self, device, m, k, n):
        """Like test__int4_mm, but pack + matmul run under torch.compile."""
        if self.device_type == 'cuda' and not SM80OrLater:
            self.skipTest("requires SM80 or later")
        if TEST_WITH_ROCM:
            if not CDNA2OrLater():
                self.skipTest("_int4_mm is supported only for CDNA2 or later")
        q_group = 32
        inner_k_tiles = 2
        torch.manual_seed(1)
        a = torch.rand((m, k), dtype=torch.bfloat16, device=device)
        b = torch.rand((k, n), dtype=torch.bfloat16, device=device)
        b_tmp, b_scales_and_zeros = _group_quantize_tensor(
            b, n_bit=4, q_group_size=q_group
        )
        @torch.compile
        def int4_mm(a, b_tmp, b_scales_and_zeros):
            # Pack the quantized weight then run the packed matmul, using the
            # CPU-specific ops on CPU and the generic ops elsewhere.
            if self.device_type == 'cpu':
                b_int4pack = torch._convert_weight_to_int4pack_for_cpu(
                    b_tmp, inner_k_tiles
                )
                self.assertTrue(b_int4pack.dtype is torch.uint8)
                self.assertTrue(b_int4pack.dim() == 2)
                return torch._weight_int4pack_mm_for_cpu(
                    a, b_int4pack, q_group, b_scales_and_zeros
                )
            else:
                b_int4pack = torch._convert_weight_to_int4pack(
                    b_tmp, inner_k_tiles
                )
                self.assertTrue(b_int4pack.dtype is torch.int32)
                self.assertTrue(b_int4pack.dim() == 4)
                return torch._weight_int4pack_mm(
                    a, b_int4pack, q_group, b_scales_and_zeros
                )
        res = int4_mm(a, b_tmp, b_scales_and_zeros)
        ref = torch.mm(a, b)
        # Quantization is lossy; accept up to 5% mean relative error.
        mean_err = ((res - ref).abs() / ref).mean()
        self.assertTrue(mean_err < 0.05)
    @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
    @unittest.skipIf(TEST_WITH_ROCM and IS_REMOTE_GPU, "ROCM is unsupported")
    @onlyNativeDeviceTypes
    @parametrize("k", [64, 256])
    @parametrize("n", [32, 48, 64, 128])
    def test__dyn_quant_pack_4bit_weight(self, device, k, n):
        """Shape check for torch._dyn_quant_pack_4bit_weight."""
        # TODO: Fix https://github.com/pytorch/pytorch/issues/131425 and use OpInfo instead
        # Weight shape is [K x N]
        if self.device_type == "cuda":
            self.skipTest("CUDA Backend is unsupported")
        torch.manual_seed(1)
        block_size = 32
        b = torch.rand((k, n), dtype=torch.bfloat16, device=device)
        in_features = b.size(0)
        out_features = b.size(1)
        # Symmetric 4-bit group quantization of the weight.
        b_uint8, b_scales_and_zeros = _group_quantize_tensor_symmetric(
            b, n_bit=4, groupsize=block_size
        )
        b_int4pack = torch._dyn_quant_pack_4bit_weight(
            b_uint8, b_scales_and_zeros, None, block_size, in_features, out_features
        )
        # NOTE(review): this call is byte-for-byte identical to the one above;
        # the "_meta" name suggests it was meant to run on the meta device to
        # compare meta vs real shape inference — confirm intent.
        b_int4pack_meta = torch._dyn_quant_pack_4bit_weight(
            b_uint8, b_scales_and_zeros, None, block_size, in_features, out_features
        )
        self.assertEqual(b_int4pack.shape, b_int4pack_meta.shape)
    @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
    @unittest.skipIf(TEST_WITH_ROCM and IS_REMOTE_GPU, "ROCM is unsupported")
    @onlyNativeDeviceTypes
    @parametrize("m", [1, 32])
    @parametrize("k", [64, 128])
    @parametrize("n", [4096, 11008])
    def test__dyn_quant_matmul_4bit(self, device, m, k, n):
        """Dynamically-quantized 4-bit matmul vs a float32 torch.mm reference."""
        if self.device_type == "cuda":
            self.skipTest("CUDA is unsupported")
        q_group = 32
        torch.manual_seed(1)
        a_float32 = torch.rand((m, k), dtype=torch.float32, device=device)
        b_float32 = torch.rand((k, n), dtype=torch.float32, device=device)
        in_features = b_float32.size(0)
        out_features = b_float32.size(1)
        def dyn_quant_pack_4bit_weight(b, in_features, out_features):
            # Symmetric 4-bit group quantization followed by packing.
            b_uint8, b_scales_and_zeros = _group_quantize_tensor_symmetric(
                b, n_bit=4, groupsize=q_group
            )
            if q_group == in_features:
                # One group spans all of K: keep scales in float32.
                b_scales_and_zeros = b_scales_and_zeros.to(torch.float)
            else:
                b_scales_and_zeros = b_scales_and_zeros.to(torch.bfloat16)
            b_int4pack = torch._dyn_quant_pack_4bit_weight(
                b_uint8, b_scales_and_zeros, None, q_group, in_features, out_features
            )
            return b_int4pack, b_scales_and_zeros
        def dyn_quant_matmul_4bit(
            a, b_int4pack, q_group, in_features, out_features
        ):
            # Thin wrapper around the packed dynamic-quant matmul op.
            return torch._dyn_quant_matmul_4bit(
                a,
                b_int4pack,
                q_group,
                in_features,
                out_features,
            )
        b_int4pack, b_scales_and_zeros = dyn_quant_pack_4bit_weight(
            b_float32, in_features, out_features
        )
        dtypes = [torch.float32]
        for dtype in dtypes:
            a = a_float32.to(dtype=dtype)
            b = b_float32.to(dtype=dtype)
            ref = torch.mm(a, b)
            res = dyn_quant_matmul_4bit(
                a,
                b_int4pack,
                q_group,
                in_features,
                out_features,
            )
            # Quantization is lossy: bound the mean relative error at 5% and
            # every element's relative error at 6%.
            mean_err = ((res - ref).abs() / ref).mean()
            self.assertTrue(mean_err < 0.05)
            elementwise_diff = (res - ref).abs()
            elementwise_relative_error = elementwise_diff / ref.abs().clamp(
                min=torch.finfo(ref.dtype).eps
            )
            all_elements_within_threshold = torch.all(elementwise_relative_error < 0.06)
            self.assertTrue(
                all_elements_within_threshold, "Some elements have error >= 0.06"
            )
    @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
    @unittest.skipIf(TEST_WITH_ROCM and IS_REMOTE_GPU, "ROCM is unsupported")
    @onlyNativeDeviceTypes
    @parametrize("m", [1, 32])
    @parametrize("k", [64, 128])
    @parametrize("n", [4096, 11008])
    def test_compile_dyn_quant_matmul_4bit(self, device, m, k, n):
        """Like test__dyn_quant_matmul_4bit, but pack + matmul run under torch.compile."""
        if self.device_type == "cuda":
            self.skipTest("CUDA is unsupported")
        q_group = 32
        torch.manual_seed(1)
        a_float32 = torch.rand((m, k), dtype=torch.float32, device=device)
        b_float32 = torch.rand((k, n), dtype=torch.float32, device=device)
        in_features = b_float32.size(0)
        out_features = b_float32.size(1)
        # Symmetric 4-bit group quantization of the weight.
        b_uint8, b_scales_and_zeros = _group_quantize_tensor_symmetric(
            b_float32, n_bit=4, groupsize=q_group
        )
        if q_group == in_features:
            # One group spans all of K: keep scales in float32.
            b_scales_and_zeros = b_scales_and_zeros.to(dtype=torch.float)
        else:
            b_scales_and_zeros = b_scales_and_zeros.to(dtype=torch.bfloat16)
        @torch.compile
        def dyn_quant_matmul_4bit(
            a, b_uint8, b_scales_and_zeros, q_group, in_features, out_features
        ):
            # Pack and multiply inside the compiled region.
            b_int4pack = torch._dyn_quant_pack_4bit_weight(
                b_uint8, b_scales_and_zeros, None, q_group, in_features, out_features
            )
            return torch._dyn_quant_matmul_4bit(
                a,
                b_int4pack,
                q_group,
                in_features,
                out_features,
            )
        res = dyn_quant_matmul_4bit(
            a_float32,
            b_uint8,
            b_scales_and_zeros,
            q_group,
            in_features,
            out_features,
        )
        ref = torch.mm(a_float32, b_float32)
        # Quantization is lossy: bound the mean relative error at 5% and
        # every element's relative error at 6%.
        mean_err = ((res - ref).abs() / ref).mean()
        self.assertTrue(mean_err < 0.05)
        elementwise_diff = (res - ref).abs()
        elementwise_relative_error = elementwise_diff / ref.abs().clamp(
            min=torch.finfo(ref.dtype).eps
        )
        all_elements_within_threshold = torch.all(elementwise_relative_error < 0.06)
        self.assertTrue(
            all_elements_within_threshold, "Some elements have error >= 0.06"
        )
    @onlyNativeDeviceTypes
    @parametrize("m", [32, 64])
    @parametrize("k", [32, 64])
    @parametrize("n", [48, 64])
    @parametrize("compile", [True, False])
    @parametrize("slice", [True, False])
    def test__int8_mm(self, device, m, k, n, compile, slice):
        """Per-channel int8 weight matmul vs torch.mm, eager and compiled,
        with contiguous and non-contiguous (sliced) activations."""
        torch.manual_seed(1)
        if slice:
            # logits are generated from LLaMA LM head like this -
            # the activation to LM head is a slice of final hidden state
            # of shape (batch_size, sequence_length, hidden dim),
            # but is non-contiguous
            # Using arbitrary batch-size here, since it'd be converted to 2D
            batch_size = 4
            a = torch.rand((batch_size, m, k), dtype=torch.bfloat16, device=device)
            # Make a non-contiguous
            a = a[:, -1:, :]
            a = a.view(-1, a.size(-1))
        else:
            a = torch.rand((m, k), dtype=torch.bfloat16, device=device)
        b = torch.rand((n, k), dtype=torch.bfloat16, device=device)
        def convert_weight_to_int8pack(b):
            # Per-channel quantization of the weight into int8 plus scales.
            b_int8pack, b_scales, _ = _dynamically_quantize_per_channel(
                b, -128, 127, torch.int8
            )
            return b_int8pack, b_scales
        def weight_int8pack_mm(a, b_int8pack, b_scales):
            # Thin wrapper so the same callable can be torch.compile'd.
            return torch._weight_int8pack_mm(
                a, b_int8pack, b_scales
            )
        b_int8pack, b_scales = convert_weight_to_int8pack(b)
        if compile:
            mod = torch.compile(weight_int8pack_mm)
        else:
            mod = weight_int8pack_mm
        res = mod(a, b_int8pack, b_scales)
        # b is stored (n, k); the reference multiplies by its transpose.
        ref = torch.mm(a, b.transpose(0, 1))
        # Quantization is lossy; accept up to 5% mean relative error.
        mean_err = ((res - ref).abs() / ref).mean()
        self.assertTrue(mean_err < 0.05)
@slowTest
@onlyCPU
@largeTensorTest('12GB', device='cpu')
def test__int8_mm_large_shape(self, device):
torch.manual_seed(1)
m = 65536
k = 64
n = 50400
a = torch.rand((m, k), dtype=torch.bfloat16, device=device)
b = torch.rand((n, k), dtype=torch.bfloat16, device=device)
def convert_weight_to_int8pack(b):
b_int8pack, b_scales, _ = _dynamically_quantize_per_channel(
b, -128, 127, torch.int8
)
return b_int8pack, b_scales
def weight_int8pack_mm(a, b_int8pack, b_scales):
return torch._weight_int8pack_mm(
a, b_int8pack, b_scales
)
b_int8pack, b_scales = convert_weight_to_int8pack(b)
# should pass without segfault
weight_int8pack_mm(a, b_int8pack, b_scales)
    @onlyCPU
    @parametrize("m", [32, 35, 36, 40, 64])
    @parametrize("k", [32, 35, 36, 40, 64])
    # NOTE: This is intended to cover fp16_gemv_trans in
    # BlasKernel.cpp. Currently, bounds being divisible by 32, 8-but-not-32, and 4-but-not-8
    # all matter.
    def test_fp16_mv_transposed_first_argument_arm_cpu(self, device, m, k):
        """Compare fp16 mat-vec with reduced-precision reduction off vs on."""
        torch.manual_seed(1)
        a = torch.rand((m, k), dtype=torch.half, device=device)
        b = torch.rand((1, k), dtype=torch.half, device=device)
        # Save the global flag so it can be restored no matter what happens.
        prev = torch._C._get_cpu_allow_fp16_reduced_precision_reduction()
        try:
            # Reference result with full-precision reduction.
            torch._C._set_cpu_allow_fp16_reduced_precision_reduction(False)
            ref = torch.mm(a, b.t())
            try:
                torch._C._set_cpu_allow_fp16_reduced_precision_reduction(True)
            except RuntimeError as e:
                # Reduced-precision reduction unavailable on this build; skip.
                raise unittest.SkipTest from e
            res = torch.mm(a, b.t())
            # Loose tolerances: reduced-precision accumulation is expected to
            # differ slightly from the full-precision reference.
            torch.testing.assert_close(res, ref, atol=1e-2, rtol=1e-2)
        finally:
            # Always restore the process-wide setting.
            torch._C._set_cpu_allow_fp16_reduced_precision_reduction(prev)
    @slowTest
    @onlyNativeDeviceTypes
    # bfloat16 doesn't have sufficient precision to pass this test
    @dtypes(torch.half, torch.float32, torch.float64, torch.int32, torch.int64, torch.cfloat, torch.cdouble)
    @dtypesIfCUDA(torch.float32, torch.float64, torch.cfloat, torch.cdouble)
    @tf32_on_and_off(0.01)
    @reduced_f32_on_and_off(0.01)
    def test_mm(self, device, dtype):
        """torch.mm against a naive triple-loop reference, covering contiguous,
        transposed, zero-stride and out= layouts for several shapes."""
        def _test_mm(n, m, p, dtype, genf):
            # helper function
            def matrixmultiply(mat1, mat2):
                # Naive O(n*m*p) reference; half inputs accumulate in float32
                # and are cast back to half at the end.
                n = mat1.size(0)
                m = mat1.size(1)
                p = mat2.size(1)
                dtype_ = torch.float if dtype == torch.half else dtype
                if dtype == torch.half:
                    mat1 = mat1.float()
                    mat2 = mat2.float()
                res = torch.zeros(n, p, dtype=dtype_, device=device)
                for i, j in iter_indices(res):
                    res[i, j] = sum(mat1[i, k] * mat2[k, j] for k in range(m))
                return res.half() if dtype == torch.half else res
            # contiguous case
            mat1 = genf(n, m)
            mat2 = genf(m, p)
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
            # non contiguous case 1
            mat1 = genf(n, m)
            mat2 = genf(p, m).t()
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
            # non contiguous case 2
            mat1 = genf(m, n).t()
            mat2 = genf(m, p)
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
            # non contiguous case 3
            mat1 = genf(m, n).t()
            mat2 = genf(p, m).t()
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
            # test with zero stride
            mat1 = genf(n, m)
            mat2 = genf(m, 1).expand(m, p)
            res = torch.mm(mat1, mat2)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
            # explicitly exercise the _out variant in torch.mm().
            # contiguous case
            mat1 = genf(n, m)
            mat2 = genf(m, p)
            res = genf(n, p)
            torch.mm(mat1, mat2, out=res)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
            # explicitly exercise the _out variant in torch.mm().
            # non contiguous case 3
            mat1 = genf(m, n).t()
            mat2 = genf(p, m).t()
            res = genf(n, p)
            torch.mm(mat1, mat2, out=res)
            res2 = matrixmultiply(mat1, mat2)
            self.assertEqual(res, res2)
        # Per-dtype input generators.
        def genf_int(x, y):
            return torch.randint(0, 100, (x, y), dtype=dtype, device=device)
        def genf_bfloat(x, y):
            # Scale down to keep bf16 rounding error small.
            return torch.randn(x, y, dtype=torch.float32, device=device).to(dtype) * 0.1
        def genf_float(x, y):
            return torch.randn(x, y, dtype=dtype, device=device)
        def genf_Half(x, y):
            return torch.randn(x, y, dtype=dtype, device=device)
        for (n, m, p) in [(20, 10, 15), (15, 20, 10), (25, 18, 10)]:
            if (dtype == torch.int32) or (dtype == torch.int64):
                genf = genf_int
            elif (dtype == torch.bfloat16):
                genf = genf_bfloat
            elif (dtype == torch.half):
                genf = genf_Half
            else:
                genf = genf_float
            _test_mm(n, m, p, dtype, genf)
@onlyNativeDeviceTypes
def test_mm_bmm_non_memory_dense(self, device):
def _slice(tensor, fn):
return fn(tensor)[..., ::2]
A = torch.randn(3, 6, dtype=torch.cfloat, device=device)
B = torch.randn(3, 3, dtype=torch.cfloat, device=device)
out = torch.empty(3, 3, device=device, dtype=torch.complex64).t()
out1 = torch.empty(3, 3, device=device, dtype=torch.complex64).t()
A_conj = _slice(A, torch.conj)
A_conj_physical = _slice(A, torch.conj_physical)
self.assertEqual(torch.mm(A_conj, B, out=out), torch.mm(A_conj_physical, B, out=out))
self.assertEqual(torch.mm(A_conj.t(), B, out=out), torch.mm(A_conj_physical.t(), B, out=out))
Ab = torch.randn(2, 3, 6, dtype=torch.cfloat, device=device)
Bb = torch.randn(2, 3, 3, dtype=torch.cfloat, device=device)
Bb_ = torch.randn(1, 3, 3, dtype=torch.cfloat, device=device).expand(2, 3, 3)
out_b = torch.empty(2, 3, 3, device=device, dtype=torch.complex64).mT
Ab_conj = _slice(Ab, torch.conj)
Ab_conj_physical = _slice(Ab, torch.conj_physical)
def t_b(tensor):
return tensor.mT
self.assertEqual(torch.bmm(Ab_conj, Bb, out=out_b), torch.bmm(Ab_conj_physical, Bb, out=out_b))
self.assertEqual(torch.bmm(t_b(Ab_conj), Bb, out=out_b), torch.bmm(t_b(Ab_conj_physical), Bb, out=out_b))
# test broadcasting
self.assertEqual(torch.bmm(Ab_conj, Bb_, out=out_b), torch.bmm(Ab_conj_physical, Bb_, out=out_b))
self.assertEqual(torch.bmm(t_b(Ab_conj), Bb_, out=out_b), torch.bmm(t_b(Ab_conj_physical), Bb_, out=out_b))
@onlyNativeDeviceTypes
def test_mm_conjtranspose(self, device):
A = torch.randn(3, 3, dtype=torch.cfloat, device=device)
B = torch.randn(3, 3, dtype=torch.cfloat, device=device)
# A conjtranspose
out1 = torch.mm(A.t().conj(), B)
out1_ref = torch.mm(A.t().conj_physical(), B)
self.assertEqual(out1, out1_ref)
# B conjtranspose
out1 = torch.mm(A, B.t().conj())
out1_ref = torch.mm(A, B.t().conj_physical())
self.assertEqual(out1, out1_ref)
# A&B conjtranspose
out1 = torch.mm(A.t().conj(), B.t().conj())
out1_ref = torch.mm(A.t().conj_physical(), B.t().conj_physical())
self.assertEqual(out1, out1_ref)
@onlyNativeDeviceTypes
def test_mm_empty_inputs_mixed_dtype_errors(self, device):
a = torch.randint(0, 10, [1, 10], dtype=torch.int16, device=device)
b = torch.randn(10, 20, dtype=torch.float32, device=device)
with self.assertRaisesRegex(RuntimeError, "expected .* and .* to have the same dtype, but got:"):
torch.mm(a, b)
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64)
def test_strided_mm_bmm(self, device, dtype):
# Tests strided view case with stride smaller than corresponding dimension size
x = torch.tensor([[1., 2., 3.], [4., 5., 6.]], dtype=dtype, device=device)
new_shape = [2, 2, 2]
new_stride = [3, 1, 1]
sx = torch.as_strided(x, size=new_shape, stride=new_stride)
torch_fn = lambda x: torch.bmm(x, x) # noqa: E731
np_fn = lambda x: np.matmul(x, x) # noqa: E731
self.compare_with_numpy(torch_fn, np_fn, sx)
torch_fn = lambda x: torch.mm(x, x) # noqa: E731
self.compare_with_numpy(torch_fn, np_fn, sx[0])
    @precisionOverride({torch.half: 0.05, torch.bfloat16: 0.05})
    @onlyNativeDeviceTypes
    @dtypes(*floating_and_complex_types_and(torch.bfloat16, torch.half))
    @tf32_on_and_off(0.05)
    @reduced_f32_on_and_off(0.05)
    def test_bmm(self, device, dtype):
        """torch.bmm vs a numpy matmul reference over permuted, broadcast and
        zero-sized batched inputs, including the out= variant."""
        if self.device_type == 'cuda' and dtype is torch.bfloat16 and not SM53OrLater:
            # cuBLAS does not guarantee BFloat16 support on SM < 53.
            # So on PyTorch, we consider BFloat16 support on SM < 53 as
            # undefined behavior
            return
        batch_sizes = [1, 10]
        M, N, O = 23, 15, 12
        # bfloat16 references are computed in float32 for accuracy.
        numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32
        is_supported = True
        if dtype == torch.bfloat16 and self.device_type == 'cuda':
            is_supported = TEST_WITH_ROCM or SM53OrLater
        if not is_supported:
            # Unsupported dtype/arch combinations must raise, not mis-compute.
            for num_batches in batch_sizes:
                b1 = torch.randn(num_batches, M, N, device=device).to(dtype)
                b2 = torch.randn(num_batches, N, O, device=device).to(dtype)
                self.assertRaisesRegex(RuntimeError, "type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED",
                                       lambda: torch.bmm(b1, b2))
            return
        def invert_perm(p):
            # Inverse of a length-3 permutation tuple.
            d = {x: i for i, x in enumerate(p)}
            return (d[0], d[1], d[2])
        def generate_inputs(num_batches):
            # transposed tensors
            for perm1, perm2 in itertools.product(itertools.permutations((0, 1, 2)), repeat=2):
                b1 = make_tensor((num_batches, M, N), dtype=dtype, device=device, low=-0.1, high=0.1)
                b2 = make_tensor((num_batches, N, O), dtype=dtype, device=device, low=-0.1, high=0.1)
                # permute + contiguous + inverse-permute: logical shape is
                # unchanged but the memory layout follows the permutation.
                b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
                b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
                yield b1, b2
            # broadcasting tensors
            for b1, b2, b3, b4, b5, b6 in itertools.product((True, False), repeat=6):
                shape1 = (num_batches if b1 else 1, M if b2 else 1, N if b3 else 1)
                shape2 = (num_batches if b4 else 1, N if b5 else 1, O if b6 else 1)
                b1 = make_tensor(shape1, dtype=dtype, device=device, low=-0.1, high=0.1).expand(num_batches, M, N)
                b2 = make_tensor(shape2, dtype=dtype, device=device, low=-0.1, high=0.1).expand(num_batches, N, O)
                yield b1, b2
            # zero-sized tensors
            for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
                shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
                shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
                b1 = torch.randn(shape1, dtype=dtype, device=device)
                b2 = torch.randn(shape2, dtype=dtype, device=device)
                yield b1, b2
        for num_batches in batch_sizes:
            for (b1, b2), perm3 in itertools.product(generate_inputs(num_batches), itertools.permutations((0, 1, 2))):
                res1 = torch.bmm(b1, b2)
                # out= buffer pre-filled with NaN so stale values are detected,
                # laid out according to perm3.
                res2 = torch.full((num_batches, M, O), math.nan, dtype=dtype, device=device) \
                    .permute(perm3).contiguous().permute(invert_perm(perm3))
                torch.bmm(b1, b2, out=res2)
                expect = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
                self.assertEqual(expect, res1)
                self.assertEqual(expect, res2)
                if self.device_type == 'cuda':
                    # check that mixed arguments are rejected
                    self.assertRaises(RuntimeError, lambda: torch.bmm(b1, b2.cpu()))
                    self.assertRaises(RuntimeError, lambda: torch.bmm(b1.cpu(), b2))
                    self.assertRaises(RuntimeError, lambda: torch.bmm(b1, b2, out=res2.cpu()))
    def _test_addbmm_baddbmm(self, func, b1, b2, ref, out_tensor):
        """Shared driver for addbmm/baddbmm checks.

        Exercises the in-place method, its deprecated positional alpha/beta
        overloads, the functional form, and beta=0 NaN masking.

        Args:
            func: "addbmm" or "baddbmm" (looked up via getattr).
            b1, b2: batched matrix operands.
            ref: expected value of the alpha-part (the batched product, summed
                over the batch dimension for addbmm).
            out_tensor: zero-initialized accumulator the in-place ops mutate.
        """
        # First in-place accumulate: out started at zero, so out == ref.
        getattr(out_tensor, func + "_")(b1, b2)
        self.assertEqual(out_tensor, ref)
        res3 = out_tensor.clone()
        # Deprecated positional-beta overload; after the second add, out == 2*ref.
        with self.assertWarnsOnceRegex(
                UserWarning, f"This overload of {func}_ is deprecated"):
            getattr(out_tensor, func + "_")(1, b1, b2)
        self.assertEqual(out_tensor, ref * 2)
        # Keyword form must match the deprecated positional form.
        getattr(res3, func + "_")(b1, b2, beta=1)
        self.assertEqual(out_tensor, res3)
        # Deprecated positional beta/alpha overload vs keyword equivalent.
        with self.assertWarnsOnceRegex(
                UserWarning, f"This overload of {func}_ is deprecated"):
            getattr(out_tensor, func + "_")(1., .5, b1, b2)
        self.assertEqual(out_tensor, ref * 2.5)
        getattr(res3, func + "_")(b1, b2, beta=1., alpha=.5)
        self.assertEqual(out_tensor, res3)
        # Deprecated functional overload with alpha=0 returns beta*out_tensor.
        with self.assertWarnsOnceRegex(
                UserWarning, f"This overload of {func} is deprecated"):
            self.assertEqual(out_tensor, getattr(torch, func)(1, out_tensor, 0, b1, b2))
        res4 = getattr(torch, func)(out_tensor, b1, b2, beta=1, alpha=.5)
        self.assertEqual(res4, ref * 3)
        # beta=0 must completely mask a NaN-filled input.
        nan = torch.full_like(out_tensor, math.nan)
        res5 = getattr(torch, func)(nan, b1, b2, beta=0, alpha=1)
        self.assertEqual(res5, ref)
        # Scalar complex beta/alpha for complex inputs, real otherwise.
        if b1.is_complex():
            res6 = getattr(torch, func)(out_tensor, b1, b2, beta=.1j, alpha=.5j)
            self.assertEqual(res6, out_tensor * .1j + .5j * ref)
        else:
            res6 = getattr(torch, func)(out_tensor, b1, b2, beta=.1, alpha=.5)
            self.assertEqual(res6, out_tensor * .1 + .5 * ref)
        # beta=0 with out=: a NaN-filled destination must be fully overwritten.
        res7 = torch.full_like(out_tensor, math.nan)
        getattr(torch, func)(nan, b1, b2, beta=0, out=res7)
        self.assertEqual(res7, ref)
    @precisionOverride({torch.half: 0.05, torch.bfloat16: 0.05})
    @onlyNativeDeviceTypes
    @dtypes(*floating_and_complex_types_and(torch.bfloat16, torch.half))
    @tf32_on_and_off(0.05)
    @reduced_f32_on_and_off(0.05)
    def test_addbmm(self, device, dtype):
        """torch.addbmm over permuted, broadcast and zero-sized inputs, driven
        through the shared _test_addbmm_baddbmm checks."""
        if self.device_type == 'cuda' and dtype is torch.bfloat16 and not SM53OrLater:
            # cuBLAS does not guarantee BFloat16 support on SM < 53.
            # So on PyTorch, we consider BFloat16 support on SM < 53 as
            # undefined behavior
            return
        num_batches = 2
        M, N, O = 16, 17, 18
        is_supported = True
        if dtype == torch.bfloat16:
            if self.device_type == 'cpu':
                self.precision = 1 # 43 vs 43.75
            else:
                is_supported = TEST_WITH_ROCM or SM53OrLater
        if not is_supported:
            # Unsupported dtype/arch combinations must raise, not mis-compute.
            b1 = make_tensor((num_batches, M, N), dtype=dtype, device=device, low=-1, high=1)
            b2 = make_tensor((num_batches, N, O), dtype=dtype, device=device, low=-1, high=1)
            t = make_tensor((M, O), dtype=dtype, device=device, low=-1, high=1)
            self.assertRaisesRegex(RuntimeError, "type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED",
                                   lambda: torch.addbmm(t, b1, b2))
            return
        def invert_perm(p):
            # Inverse of a length-3 permutation tuple.
            d = {x: i for i, x in enumerate(p)}
            return (d[0], d[1], d[2])
        def generate_tensor():
            # bfloat16 references are computed in float32 for accuracy.
            numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32
            # transposed tensors
            for perm1, perm2 in itertools.product(itertools.permutations((0, 1, 2)), repeat=2):
                for perm3 in itertools.permutations((0, 1)):
                    b1 = make_tensor((num_batches, M, N), dtype=dtype, device=device, low=-1, high=1) * 0.1
                    b2 = make_tensor((num_batches, N, O), dtype=dtype, device=device, low=-1, high=1) * 0.1
                    b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
                    b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
                    # Reference: numpy batched matmul summed over the batch dim.
                    ref = torch.from_numpy(
                        b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
                    ).to(device=device, dtype=dtype).sum(0)
                    out_tensor = torch.zeros_like(ref).permute(perm3).contiguous().permute(perm3)
                    yield b1, b2, ref, out_tensor
            # broadcasting tensors
            for s1, s2, s3, s4, s5, s6 in itertools.product((True, False), repeat=6):
                shape1 = (num_batches if s1 else 1, M if s2 else 1, N if s3 else 1)
                shape2 = (num_batches if s4 else 1, N if s5 else 1, O if s6 else 1)
                b1 = make_tensor(shape1, dtype=dtype, device=device, low=-1, high=1).expand(num_batches, M, N) * 0.1
                b2 = make_tensor(shape2, dtype=dtype, device=device, low=-1, high=1).expand(num_batches, N, O) * 0.1
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
                ).to(device=device, dtype=dtype).sum(0)
                out_tensor = torch.zeros_like(ref)
                yield b1, b2, ref, out_tensor
            # zero-sized tensors
            for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
                shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
                shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
                b1 = make_tensor(shape1, dtype=dtype, device=device, low=-1, high=1) * 0.1
                b2 = make_tensor(shape2, dtype=dtype, device=device, low=-1, high=1) * 0.1
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
                ).to(device=device, dtype=dtype).sum(0)
                out_tensor = torch.zeros_like(ref)
                yield b1, b2, ref, out_tensor
        for b1, b2, ref, out_tensor in generate_tensor():
            self._test_addbmm_baddbmm("addbmm", b1, b2, ref, out_tensor)
    @precisionOverride({torch.half: 0.1, torch.bfloat16: 0.5})
    @onlyNativeDeviceTypes
    @dtypes(*floating_and_complex_types_and(torch.bfloat16, torch.half))
    @tf32_on_and_off(0.05)
    @reduced_f32_on_and_off(0.05)
    def test_baddbmm(self, device, dtype):
        """Exercise torch.baddbmm against a NumPy matmul reference.

        Inputs are generated in three families: permuted (non-contiguous)
        batches, broadcasted (expanded) batches, and zero-sized batches.
        Each generated (b1, b2, ref, out_tensor) tuple is handed to the
        shared _test_addbmm_baddbmm helper for the actual checks.
        """
        if self.device_type == 'cuda' and dtype is torch.bfloat16 and not SM53OrLater:
            # cuBLAS does not guarantee BFloat16 support on SM < 53.
            # So on PyTorch, we consider BFloat16 support on SM < 53 as
            # undefined behavior
            return
        num_batches = 10
        M, N, O = 12, 8, 50
        is_supported = True
        if dtype == torch.bfloat16 and self.device_type == 'cuda':
            is_supported = TEST_WITH_ROCM or SM53OrLater
        if not is_supported:
            # Unsupported dtype/arch combinations must raise, not silently succeed.
            b1 = make_tensor((num_batches, M, N), dtype=dtype, device=device, low=-1, high=1)
            b2 = make_tensor((num_batches, N, O), dtype=dtype, device=device, low=-1, high=1)
            t = make_tensor((num_batches, M, O), dtype=dtype, device=device, low=-1, high=1)
            self.assertRaisesRegex(RuntimeError, "type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED",
                                   lambda: torch.baddbmm(t, b1, b2))
            return

        def invert_perm(p):
            # Return the permutation q such that q[p[i]] == i (inverse of a 3-perm).
            d = {x: i for i, x in enumerate(p)}
            return (d[0], d[1], d[2])

        def generate_tensor():
            # Reference math runs in float32 for the reduced-precision dtypes.
            numpy_dtype = dtype if dtype not in [torch.bfloat16, torch.half] else torch.float32
            # transposed tensors
            for perm1, perm2, perm3 in itertools.product(itertools.permutations((0, 1, 2)), repeat=3):
                b1 = make_tensor((num_batches, M, N), dtype=dtype, device=device, low=-1, high=1)
                b2 = make_tensor((num_batches, N, O), dtype=dtype, device=device, low=-1, high=1)
                # permute -> contiguous -> permute back: same logical shape,
                # non-standard memory layout
                b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
                b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
                out_tensor = torch.zeros_like(ref)
                out_tensor = out_tensor.permute(perm3).contiguous().permute(invert_perm(perm3))
                yield b1, b2, ref, out_tensor
            # broadcasting tensors
            for s1, s2, s3, s4, s5, s6 in itertools.product((True, False), repeat=6):
                shape1 = (num_batches if s1 else 1, M if s2 else 1, N if s3 else 1)
                shape2 = (num_batches if s4 else 1, N if s5 else 1, O if s6 else 1)
                b1 = make_tensor(shape1, dtype=dtype, device=device, low=-1, high=1).expand(num_batches, M, N)
                b2 = make_tensor(shape2, dtype=dtype, device=device, low=-1, high=1).expand(num_batches, N, O)
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
                out_tensor = torch.zeros_like(ref)
                yield b1, b2, ref, out_tensor
            # zero-sized tensors
            for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
                shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
                shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
                b1 = make_tensor(shape1, dtype=dtype, device=device, low=-2, high=2)
                b2 = make_tensor(shape2, dtype=dtype, device=device, low=-2, high=2)
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
                out_tensor = torch.zeros_like(ref)
                yield b1, b2, ref, out_tensor

        for b1, b2, ref, out_tensor in generate_tensor():
            self._test_addbmm_baddbmm("baddbmm", b1, b2, ref, out_tensor)
@precisionOverride({torch.float32: 5e-3, torch.complex64: 1e-3})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_pinverse(self, device, dtype):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(make_fullrank, device=device, dtype=dtype)
def run_test(M):
# Testing against definition for pseudo-inverses
MPI = torch.pinverse(M)
MPI_ = MPI.cpu().numpy()
M_ = M.cpu().numpy()
if M.numel() > 0:
self.assertEqual(M_, np.matmul(np.matmul(M_, MPI_), M_))
self.assertEqual(MPI_, np.matmul(np.matmul(MPI_, M_), MPI_))
self.assertEqual(np.matmul(M_, MPI_), np.matmul(M_, MPI_).swapaxes(-2, -1).conj())
self.assertEqual(np.matmul(MPI_, M_), np.matmul(MPI_, M_).swapaxes(-2, -1).conj())
else:
self.assertEqual(M.shape, MPI.shape[:-2] + (MPI.shape[-1], MPI.shape[-2]))
for sizes in [(5, 5), (3, 5, 5), (3, 7, 5, 5), # square matrices
(3, 2), (5, 3, 2), (7, 5, 3, 2), # fat matrices
(2, 3), (5, 2, 3), (7, 5, 2, 3), # thin matrices
(0, 0), (0, 2), (2, 0), (3, 0, 0), (0, 3, 0), (0, 0, 3)]: # zero numel matrices
M = torch.randn(*sizes, dtype=dtype, device=device)
run_test(M)
# Test inverse and pseudo-inverse for invertible matrix
for sizes in [(5, 5), (3, 5, 5), (3, 7, 5, 5)]:
matsize = sizes[-1]
batchdims = sizes[:-2]
M = make_arg(*batchdims, matsize, matsize)
self.assertEqual(torch.eye(matsize, dtype=dtype, device=device).expand(sizes), M.pinverse().matmul(M),
atol=1e-7, rtol=0, msg='pseudo-inverse for invertible matrix')
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(torch.double, torch.cdouble)
def test_matrix_power_non_negative(self, device, dtype):
def check(*size):
t = make_tensor(size, dtype=dtype, device=device)
for n in range(8):
res = torch.linalg.matrix_power(t, n)
ref = np.linalg.matrix_power(t.cpu().numpy(), n)
self.assertEqual(res.cpu(), torch.from_numpy(ref))
check(0, 0)
check(1, 1)
check(5, 5)
check(0, 3, 3)
check(2, 3, 3)
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(torch.double, torch.cdouble)
def test_matrix_power_negative(self, device, dtype):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(make_fullrank, device=device, dtype=dtype)
def check(*size):
t = make_arg(*size)
for n in range(-7, 0):
res = torch.linalg.matrix_power(t, n)
ref = np.linalg.matrix_power(t.cpu().numpy(), n)
self.assertEqual(res.cpu(), torch.from_numpy(ref))
check(0, 0)
check(5, 5)
check(2, 0, 0)
check(0, 3, 3)
check(2, 3, 3)
check(2, 3, 5, 5)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.complex64)
def test_linalg_matrix_exp_utils(self, device, dtype):
# test linear combination
def run_test(coeff_shape, data_shape):
coeffs = torch.rand(*coeff_shape, device=device, dtype=torch.float)
x = torch.rand(coeff_shape[1], *data_shape, device=device, dtype=dtype)
res1 = torch._compute_linear_combination(x, coeffs)
res2 = (x.unsqueeze(0) * coeffs.view(*coeff_shape, *([1] * len(data_shape)))).sum(1)
self.assertEqual(res1, res2, atol=1e-5, rtol=0.0)
# check `out=` version
res3 = torch.zeros(coeff_shape[0], *data_shape, device=device, dtype=dtype)
torch._compute_linear_combination(x, coeffs, out=res3)
self.assertEqual(res1, res3, atol=1e-5, rtol=0.0)
res4 = torch.ones(coeff_shape[0], *data_shape, device=device, dtype=dtype)
torch._compute_linear_combination(x, coeffs, out=res4)
self.assertEqual(res1, res4 - 1.0, atol=1e-5, rtol=0.0)
res5 = torch.ones(coeff_shape[0], *data_shape, device=device, dtype=dtype)
res5_clone = res5.clone()
torch._compute_linear_combination(x, coeffs, out=res5)
self.assertEqual(res1, res5 - res5_clone, atol=1e-5, rtol=0.0)
run_test([1, 3], [2, 2])
run_test([3, 1], [2, 2])
run_test([1, 10], [10, 10])
run_test([10, 1], [10, 10])
run_test([5, 3], [2, 2])
run_test([5, 3], [100, 100])
run_test([3, 4], [3, 3, 3])
run_test([3, 4], [3, 3, 3, 3])
# Regression test for https://github.com/pytorch/pytorch/issues/94124
with self.assertRaises(RuntimeError):
x = torch.rand([], device=device, dtype=dtype)
coeffs = torch.rand([2, 2], device=device, dtype=dtype)
res = torch._compute_linear_combination(x, coeffs)
@onlyCPU
@skipCPUIfNoLapack
@dtypes(torch.complex64)
def test_linalg_matrix_exp_no_warnings(self, device, dtype):
# this tests https://github.com/pytorch/pytorch/issues/80948
with freeze_rng_state():
torch.manual_seed(42)
tens = 0.5 * torch.randn(10, 3, 3, dtype=dtype, device=device)
tens = (0.5 * (tens.transpose(-1, -2) + tens))
with warnings.catch_warnings(record=True) as w:
tens.imag = torch.matrix_exp(tens.imag)
self.assertFalse(len(w))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.complex64, torch.complex128)
def test_linalg_matrix_exp_boundary_cases(self, device, dtype):
expm = torch.linalg.matrix_exp
with self.assertRaisesRegex(RuntimeError, "Expected a floating point or complex tensor"):
expm(torch.randn(3, 3).type(torch.int))
with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
expm(torch.randn(3))
with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
expm(torch.randn(3, 2, 1))
# check 1x1 matrices
x = torch.randn(3, 3, 1, 1)
self.assertEqual(expm(x), x.exp())
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.complex64, torch.complex128)
def test_matrix_exp_backward_input_validation(self, device, dtype):
scalar_tensor = torch.tensor(1.0, dtype=dtype, device=device)
grad_1d = torch.randn(1, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
torch.ops.aten.matrix_exp_backward(scalar_tensor, grad_1d)
non_square = torch.randn(2, 3, dtype=dtype, device=device)
grad_non_square = torch.randn(2, 3, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
torch.ops.aten.matrix_exp_backward(non_square, grad_non_square)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.complex64, torch.complex128)
def test_linalg_matrix_exp_perverse_nan_values(self, device, dtype):
expm = torch.linalg.matrix_exp
def with_nan(x):
x[0, 0, 0] = torch.nan
return x
# Check small batches
x = with_nan(torch.randn(1, 1, 1))
self.assertTrue(torch.isnan(expm(x)).any())
x = with_nan(torch.randn(1, 2, 2))
for v in [1, 2, 3, 4, 5, 6, 7, 8, 9, 100, 1000]:
self.assertTrue(torch.isnan(expm(x / v)).any())
# Check large batches
x = with_nan(torch.randn(2, 2, 2))
self.assertTrue(torch.isnan(expm(x)).any())
x = with_nan(torch.randn(4096, 2, 2))
self.assertTrue(torch.isnan(expm(x)).any())
    @slowTest
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_linalg_matrix_exp_analytic(self, device, dtype):
        """Compare torch.linalg.matrix_exp to the analytic eigendecomposition
        result exp(X) = Q diag(exp(d)) Q^-1 for diagonalizable inputs, across
        input norms chosen to straddle the Pade-approximation degree thresholds.
        """
        expm = torch.linalg.matrix_exp
        # check zero matrix
        x = torch.zeros(20, 20, dtype=dtype, device=device)
        self.assertTrue((expm(x) == torch.eye(20, 20, dtype=dtype, device=device)).all().item())

        def normalize_to_1_operator_norm(sample, desired_norm):
            # Rescale so the induced 1-norm (max abs column sum) equals desired_norm.
            sample_norm, _ = sample.abs().sum(-2).max(-1)
            sample_to_1_norm = sample / sample_norm.unsqueeze(-1).unsqueeze(-1)
            return sample_to_1_norm * desired_norm

        def gen_good_cond_number_matrices(*n):
            """
            Generates a diagonally-dominant matrix
            with the eigenvalues centered at 1
            and the radii at most (n[-1] - 1) / (n[-2] ** 2)
            """
            identity = torch.eye(n[-2], n[-1], dtype=dtype, device=device).expand(*n)
            x = torch.rand(*n, dtype=dtype, device=device) / (n[-1] ** 2)
            x = (x - x * identity) + identity
            return x

        def run_test(*n):
            # Norm thresholds at which the implementation switches Pade degree
            # (see the per-degree comments below).
            if dtype == torch.float:
                thetas = [
                    1.192092800768788e-07,  # deg 1
                    5.978858893805233e-04,  # deg 2
                    5.116619363445086e-02,  # deg 4
                    5.800524627688768e-01,  # deg 8
                    1.461661507209034e+00,  # deg 12
                    3.010066362817634e+00   # deg 18
                ]
            else:  # if torch.double
                thetas = [
                    2.220446049250313e-16,  # deg 1
                    2.580956802971767e-08,  # deg 2
                    3.397168839976962e-04,  # deg 4
                    4.991228871115323e-02,  # deg 8
                    2.996158913811580e-01,  # deg 12
                    1.090863719290036e+00   # deg 18
                ]

            # generate input: X = Q diag(d) Q^-1, so exp(X) has a closed form
            q = gen_good_cond_number_matrices(*n)
            q_ = q.cpu().numpy()
            qinv = torch.inverse(q)
            qinv_ = qinv.cpu().numpy()
            d = torch.randn(n[:-1], dtype=dtype, device=device)
            x = torch.from_numpy(
                np.matmul(q_, np.matmul(torch.diag_embed(d).cpu().numpy(), qinv_))).to(device)
            x_norm, _ = x.abs().sum(-2).max(-1)

            # test simple analytic whatever norm generated
            mexp = expm(x)
            mexp_analytic = np.matmul(
                q_,
                np.matmul(
                    torch.diag_embed(d.exp()).cpu().numpy(),
                    qinv_
                )
            )
            self.assertEqual(mexp, mexp_analytic, atol=1e-3, rtol=0.0)

            # generate norms to test different degree expansions:
            # midpoints between consecutive thresholds plus values below the
            # first and above the last threshold
            sample_norms = []
            for i in range(len(thetas) - 1):
                sample_norms.append(0.5 * (thetas[i] + thetas[i + 1]))
            sample_norms = [thetas[0] / 2] + sample_norms + [thetas[-1] * 2]

            # matrices to equal norm
            for sample_norm in sample_norms:
                x_normalized = normalize_to_1_operator_norm(x, sample_norm)
                mexp = expm(x_normalized)
                # rescaling X scales its eigenvalues d by the same factor
                mexp_analytic = np.matmul(
                    q_,
                    np.matmul(
                        torch.diag_embed((d / x_norm.unsqueeze(-1) * sample_norm).exp()).cpu().numpy(),
                        qinv_
                    )
                )
                self.assertEqual(mexp, mexp_analytic, atol=1e-3, rtol=0.0)

        # single matrix
        run_test(2, 2)
        run_test(3, 3)
        run_test(4, 4)
        run_test(5, 5)
        run_test(100, 100)
        run_test(200, 200)

        # small batch of matrices
        run_test(3, 2, 2)
        run_test(3, 3, 3)
        run_test(3, 4, 4)
        run_test(3, 5, 5)
        run_test(3, 100, 100)
        run_test(3, 200, 200)

        # large batch of matrices
        run_test(3, 3, 2, 2)
        run_test(3, 3, 3, 3)
        run_test(3, 3, 4, 4)
        run_test(3, 3, 5, 5)
        run_test(3, 3, 100, 100)
        run_test(3, 3, 200, 200)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double)
def test_linalg_matrix_exp_batch(self, device, dtype):
def run_test(*n):
tensors_batch = torch.zeros(n, dtype=dtype, device=device)
tensors_batch = tensors_batch.view(-1, n[-2], n[-1])
num_matrices = tensors_batch.size(0)
tensors_list = []
for _ in range(num_matrices):
tensors_list.append(torch.randn(n[-2], n[-1], dtype=dtype, device=device))
for i in range(num_matrices):
tensors_batch[i, ...] = tensors_list[i]
tensors_exp_map = (torch.linalg.matrix_exp(x) for x in tensors_list)
tensors_exp_batch = torch.linalg.matrix_exp(tensors_batch)
for i, tensor_exp in enumerate(tensors_exp_map):
self.assertEqual(tensors_exp_batch[i, ...], tensor_exp)
# small batch of matrices
run_test(3, 2, 2)
run_test(3, 3, 3)
run_test(3, 4, 4)
run_test(3, 5, 5)
# large batch of matrices
run_test(3, 3, 2, 2)
run_test(3, 3, 3, 3)
run_test(3, 3, 4, 4)
run_test(3, 3, 5, 5)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_linalg_matrix_exp_compare_with_taylor(self, device, dtype):
        """Compare torch.linalg.matrix_exp against a reference Taylor-series
        (scale-and-square) implementation, with input norms chosen around the
        per-degree thresholds used by the implementation.
        """
        def normalize_to_1_operator_norm(sample, desired_norm):
            # Rescale so the induced 1-norm (max abs column sum) equals desired_norm.
            sample_norm, _ = sample.abs().sum(-2).max(-1)
            sample_to_1_norm = sample / sample_norm.unsqueeze(-1).unsqueeze(-1)
            return sample_to_1_norm * desired_norm

        def gen_good_cond_number_matrices(*n):
            """
            Generates a diagonally-dominant matrix
            with the eigenvalues centered at 1
            and the radii at most (n[-1] - 1) / (n[-2] ** 2)
            """
            identity = torch.eye(n[-2], n[-1], dtype=dtype, device=device).expand(*n)
            x = torch.rand(*n, dtype=dtype, device=device) / (n[-1] ** 2)
            x = (x - x * identity) + identity
            return x

        def get_taylor_approximation(a, deg):
            # Truncated Taylor series: sum_{i=0..deg} a^i / i!
            a_ = a.cpu().numpy()
            identity = torch.eye(a.size(-2), a.size(-1), dtype=dtype, device=device).expand_as(a)
            res = identity.cpu().numpy()
            taylor_term = identity.cpu().numpy()
            for i in range(1, deg + 1):
                taylor_term = np.matmul(a_, taylor_term) / i
                res = res + taylor_term
            return res

        def scale_square(a, deg):
            # Scale-and-square: shrink `a` below unit Frobenius norm, take a
            # high-degree Taylor approximation, then square back up.
            if a.abs().pow(2).sum().sqrt() < 1.0:
                return get_taylor_approximation(a, 12)
            else:
                s = int(torch.log2(a.abs().pow(2).sum().sqrt()).ceil().item())
                b = a / (2 ** s)
                b = get_taylor_approximation(b, 18)
                for _ in range(s):
                    b = np.matmul(b, b)
                return torch.from_numpy(b).to(a.device)

        def run_test(*n):
            degs = [1, 2, 4, 8, 12, 18]
            # Norm thresholds at which the implementation switches degree.
            if dtype == torch.float:
                thetas = [
                    1.192092800768788e-07,  # deg 1
                    5.978858893805233e-04,  # deg 2
                    5.116619363445086e-02,  # deg 4
                    5.800524627688768e-01,  # deg 8
                    1.461661507209034e+00,  # deg 12
                    3.010066362817634e+00   # deg 18
                ]
            else:  # if torch.double
                thetas = [
                    2.220446049250313e-16,  # deg 1
                    2.580956802971767e-08,  # deg 2
                    3.397168839976962e-04,  # deg 4
                    4.991228871115323e-02,  # deg 8
                    2.996158913811580e-01,  # deg 12
                    1.090863719290036e+00   # deg 18
                ]

            # generate norms to test different degree expansions:
            # midpoints between thresholds plus values below/above the range
            sample_norms = []
            for i in range(len(thetas) - 1):
                sample_norms.append(0.5 * (thetas[i] + thetas[i + 1]))
            sample_norms = [thetas[0] / 2] + sample_norms + [thetas[-1] * 2]
            degs = [degs[0]] + degs

            for sample_norm, deg in zip(sample_norms, degs):
                x = gen_good_cond_number_matrices(*n)
                x = normalize_to_1_operator_norm(x, sample_norm)
                mexp = torch.linalg.matrix_exp(x)
                mexp_taylor = scale_square(x, deg)
                self.assertEqual(mexp, mexp_taylor, atol=1e-2, rtol=0.0)

        # single matrix
        run_test(2, 2)
        run_test(3, 3)
        run_test(4, 4)
        run_test(5, 5)

        # small batch of matrices
        run_test(3, 2, 2)
        run_test(3, 3, 3)
        run_test(3, 4, 4)
        run_test(3, 5, 5)

        # large batch of matrices
        run_test(3, 3, 2, 2)
        run_test(3, 3, 3, 3)
        run_test(3, 3, 4, 4)
        run_test(3, 3, 5, 5)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_slogdet(self, device, dtype):
        """Check torch.linalg.slogdet (sign and log|det|) against NumPy for
        batches mixing hermitian, PSD, PD, singular and non-singular matrices,
        including the out= variant and empty batches.
        """
        from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
                                                          random_hermitian_pd_matrix, random_square_matrix_of_rank)

        # mat_chars denotes matrix characteristics
        # possible values are: hermitian, hermitian_psd, hermitian_pd, singular, non_singular
        def run_test(matsize, batchdims, mat_chars):
            num_matrices = np.prod(batchdims)
            list_of_matrices = []
            if num_matrices != 0:
                # Cycle through the requested characteristics across the batch.
                for idx in range(num_matrices):
                    mat_type = idx % len(mat_chars)
                    if mat_chars[mat_type] == 'hermitian':
                        list_of_matrices.append(random_hermitian_matrix(matsize, dtype=dtype, device=device))
                    elif mat_chars[mat_type] == 'hermitian_psd':
                        list_of_matrices.append(random_hermitian_psd_matrix(matsize, dtype=dtype, device=device))
                    elif mat_chars[mat_type] == 'hermitian_pd':
                        list_of_matrices.append(random_hermitian_pd_matrix(matsize, dtype=dtype, device=device))
                    elif mat_chars[mat_type] == 'singular':
                        list_of_matrices.append(torch.ones(matsize, matsize, dtype=dtype, device=device))
                    elif mat_chars[mat_type] == 'non_singular':
                        list_of_matrices.append(random_square_matrix_of_rank(matsize, matsize, dtype=dtype, device=device))
                full_tensor = torch.stack(list_of_matrices, dim=0).reshape(batchdims + (matsize, matsize))
            else:
                # Empty batch: contents are irrelevant, only shapes matter.
                full_tensor = torch.randn(*batchdims, matsize, matsize, dtype=dtype, device=device)

            actual_value = torch.linalg.slogdet(full_tensor)
            expected_value = np.linalg.slogdet(full_tensor.cpu().numpy())
            self.assertEqual(expected_value[0], actual_value[0], atol=self.precision, rtol=self.precision)
            self.assertEqual(expected_value[1], actual_value[1], atol=self.precision, rtol=self.precision)

            # test out=variant
            sign_out = torch.empty_like(actual_value[0])
            logabsdet_out = torch.empty_like(actual_value[1])
            ans = torch.linalg.slogdet(full_tensor, out=(sign_out, logabsdet_out))
            self.assertEqual(ans[0], sign_out)
            self.assertEqual(ans[1], logabsdet_out)
            self.assertEqual(sign_out, actual_value[0])
            self.assertEqual(logabsdet_out, actual_value[1])

        for matsize, batchdims in itertools.product([0, 3, 5], [(0,), (3,), (5, 3)]):
            run_test(matsize, batchdims, mat_chars=['hermitian_pd'])
            run_test(matsize, batchdims, mat_chars=['singular'])
            run_test(matsize, batchdims, mat_chars=['non_singular'])
            run_test(matsize, batchdims, mat_chars=['hermitian', 'hermitian_pd', 'hermitian_psd'])
            run_test(matsize, batchdims, mat_chars=['singular', 'non_singular'])
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_slogdet_errors_and_warnings(self, device, dtype):
# slogdet requires the input to be a square matrix or batch of square matrices
a = torch.randn(2, 3, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
torch.linalg.slogdet(a)
# slogdet requires the input to be at least 2 dimensional tensor
a = torch.randn(2, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r'must have at least 2 dimensions'):
torch.linalg.slogdet(a)
a = torch.randn(2, 2, device=device, dtype=torch.bfloat16)
with self.assertRaisesRegex(RuntimeError, r'Low precision dtypes not supported'):
torch.linalg.slogdet(a)
# if non-empty out tensor with wrong shape is passed a warning is given
a = torch.randn(2, 3, 3, device=device, dtype=dtype)
sign_out = torch.empty(1, device=device, dtype=dtype)
real_dtype = a.real.dtype if dtype.is_complex else dtype
logabsdet_out = torch.empty(1, device=device, dtype=real_dtype)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# device should match
if torch.cuda.is_available():
wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
sign_out = torch.empty(0, device=wrong_device, dtype=dtype)
logabsdet_out = torch.empty(0, device=wrong_device, dtype=real_dtype)
with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
    # FIXME One of the backends of lu_factor fails in windows. I haven't investigated which or why
    # https://github.com/pytorch/pytorch/issues/75225
    @unittest.skipIf(IS_WINDOWS, "Skipped on Windows!")
    @skipCUDAIfNoCusolver
    @skipCPUIfNoLapack
    @dtypes(torch.double)
    def test_det_logdet_slogdet(self, device, dtype):
        """Cross-check det / logdet / slogdet / linalg.slogdet against NumPy
        and against each other, using matrices transformed by operations with
        known determinant effects (row scaling, row duplication, row swaps).
        """
        def reference_slogdet(M):
            # NumPy reference, returned as tensors matching M's dtype/device.
            sdet, logabsdet = np.linalg.slogdet(M.detach().cpu().numpy())
            return M.new_tensor(sdet), M.new_tensor(logabsdet)

        def test_single_det(M, target, desc):
            # Verify all four determinant entry points against (sign, log|det|).
            target_sdet, target_logabsdet = target

            det = M.det()
            logdet = M.logdet()
            sdet, logabsdet = M.slogdet()
            linalg_sdet, linalg_logabsdet = torch.linalg.slogdet(M)

            # Test det
            self.assertEqual(det, target_sdet * target_logabsdet.exp(),
                             atol=1e-6, rtol=0, msg=f'{desc} (det)')

            # Test slogdet
            # Compare the overall value rather than individual parts because of
            # precision issues when det is near zero.
            self.assertEqual(sdet * logabsdet.exp(), target_sdet * target_logabsdet.exp(),
                             atol=1e-6, rtol=0, msg=f'{desc} (slogdet)')
            self.assertEqual(linalg_sdet * linalg_logabsdet.exp(), target_sdet * target_logabsdet.exp(),
                             atol=1e-6, rtol=0, msg=f'{desc} (linalg_slogdet)')

            # Test logdet
            # Compare logdet against our own pytorch slogdet because they should
            # be consistent, while it may behave slightly differently with other
            # slogdet implementations when det is near zero due to precision
            # issues.
            if sdet.item() < 0:
                # negative determinant: logdet must be NaN (NaN != NaN)
                self.assertTrue(logdet.item() != logdet.item(), f'{desc} (logdet negative case)')
            else:
                self.assertEqual(logdet.exp(), target_logabsdet.exp(),
                                 atol=1e-6, rtol=0, msg=f'{desc} (logdet non-negative case)')

        eye = torch.eye(5, dtype=dtype, device=device)
        test_single_det(eye, (torch.ones((), dtype=dtype, device=device), torch.zeros((), dtype=dtype, device=device)), 'identity')
        # Testing bug in #34061 (https://github.com/pytorch/pytorch/issues/34061)
        for n in range(250, 551, 100):
            mat = torch.randn(n, n, dtype=dtype, device=device)
            q, _ = torch.qr(mat)
            ref_det, ref_logabsdet = reference_slogdet(q)
            test_single_det(q, (ref_det, ref_logabsdet), 'orthogonal')

        def test(M):
            # Run the full battery of determinant-transformation checks on M.
            assert M.size(0) >= 5, 'this helper fn assumes M to be at least 5x5'
            M = M.to(device)

            ref_M_sdet, ref_M_logabsdet = reference_slogdet(M)

            test_single_det(M, (ref_M_sdet, ref_M_logabsdet), 'basic')
            if ref_M_logabsdet.exp().item() >= 1e-6:  # skip singular
                M_inv = M.inverse()
                test_single_det(M_inv, reference_slogdet(M_inv), 'inverse')

            # det(M) == det(M^T) so the reference is reused
            test_single_det(M, (ref_M_sdet, ref_M_logabsdet), 'transpose')

            for x in [0, 2, 4]:
                for scale in [-2, -0.1, 0, 10]:
                    # scaling one row/column scales det by the same factor
                    if scale > 0:
                        target = ref_M_sdet, ref_M_logabsdet + math.log(scale)
                    elif scale == 0:
                        target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
                    else:
                        target = ref_M_sdet.neg(), ref_M_logabsdet + math.log(-scale)

                    # dim 0
                    M_clone = M.clone()
                    M_clone[:, x] *= scale
                    test_single_det(M_clone, target, 'scale a row')
                    # dim 1
                    M_clone = M.clone()
                    M_clone[x, :] *= scale
                    test_single_det(M_clone, target, 'scale a column')

            for x1, x2 in [(0, 3), (4, 1), (3, 2)]:
                assert x1 != x2, 'x1 and x2 needs to be different for this test'
                # duplicated rows/columns force a zero determinant
                target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
                # dim 0
                M_clone = M.clone()
                M_clone[:, x2] = M_clone[:, x1]
                test_single_det(M_clone, target, 'two rows are same')
                # dim 1
                M_clone = M.clone()
                M_clone[x2, :] = M_clone[x1, :]
                test_single_det(M_clone, target, 'two columns are same')

                for scale1, scale2 in [(0.3, -1), (0, 2), (10, 0.1)]:
                    # combined scale-and-swap: det scales by -scale1*scale2
                    det_scale = scale1 * scale2 * -1
                    if det_scale > 0:
                        target = ref_M_sdet, ref_M_logabsdet + math.log(det_scale)
                    elif det_scale == 0:
                        target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
                    else:
                        target = ref_M_sdet.neg(), ref_M_logabsdet + math.log(-det_scale)

                    # dim 0
                    M_clone = M.clone()
                    t = M_clone[:, x1] * scale1
                    M_clone[:, x1] += M_clone[:, x2] * scale2
                    M_clone[:, x2] = t
                    test_single_det(M_clone, target, 'exchanging rows')
                    # dim 1
                    M_clone = M.clone()
                    t = M_clone[x1, :] * scale1
                    M_clone[x1, :] += M_clone[x2, :] * scale2
                    M_clone[x2, :] = t
                    test_single_det(M_clone, target, 'exchanging columns')

        def get_random_mat_scale(n):
            # For matrices with values i.i.d. with 0 mean, unit variance, and
            # subexponential tail, we have:
            #   E[log det(A^2)] \approx log((n-1)!)
            #
            # Notice:
            #   log Var[det(A)] = log E[det(A^2)] >= E[log det(A^2)]
            #
            # So:
            #   stddev[det(A)] >= sqrt( (n-1)! )
            #
            # We use this as an intuitive guideline to scale random generated
            # matrices so our closeness tests can work more robustly:
            #   scale by sqrt( (n-1)! )^(-1/n) = ( (n-1)! )^(-1/(2n))
            #
            # source: https://arxiv.org/pdf/1112.0752.pdf
            # TODO: technically we need subexponential distn for this to hold,
            #       but we mostly use gaussian entries below. Consider switching
            #       to Chi-sq if this turns out not stable enough, since Chi-sq
            #       is easy enough to sample from.
            return math.factorial(n - 1) ** (-1.0 / (2 * n))

        for n in [5, 10, 25]:
            scale = get_random_mat_scale(n)
            test(torch.randn(n, n, dtype=dtype, device=device) * scale)
            r = torch.randn(n, n, dtype=dtype, device=device) * scale
            # symmetric psd
            test(r.mm(r.t()))
            # symmetric pd
            r = torch.randn(n, n, dtype=dtype, device=device) * scale
            test(r.mm(r.t()) + torch.eye(n, dtype=dtype, device=device) * 1e-6)
            # symmetric
            r = torch.randn(n, n, dtype=dtype, device=device) * scale
            for i in range(n):
                for j in range(i):
                    r[i, j] = r[j, i]
            test(r)
            # non-contiguous
            test((torch.randn(n, n, n + 1, dtype=dtype, device=device) * scale)[:, 2, 1:])
            # det = 0
            r = torch.randn(n, n, dtype=dtype, device=device) * scale
            u, s, v = r.svd()
            if reference_slogdet(u)[0] < 0:
                u = -u
            if reference_slogdet(v)[0] < 0:
                v = -v
                s[0] *= -1
            s[-1] = 0
            test(u.mm(s.diag()).mm(v))

        # Small values to test numerical stability. Note that we don't scale
        # this matrix.
        r = torch.randn(512, 512, dtype=dtype, device=device)
        u, s, v = r.svd()
        s.fill_(1. / (100 * s.numel()))
        test(u.mm(s.diag()).mm(v))
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.double)
    def test_det_logdet_slogdet_batched(self, device, dtype):
        """Batched det / logdet / slogdet must match the per-matrix results
        computed one matrix at a time, for batches mixing symmetric, PSD, PD,
        singular and non-singular matrices.
        """
        from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
                                                          random_symmetric_pd_matrix, random_square_matrix_of_rank)

        # mat_chars denotes matrix characteristics
        # possible values are: sym, sym_psd, sym_pd, sing, non_sym
        def run_test(matsize, batchdims, mat_chars):
            num_matrices = reduce(operator.mul, batchdims, 1)
            list_of_matrices = []

            # Cycle through the requested characteristics across the batch.
            for idx in range(num_matrices):
                mat_type = idx % len(mat_chars)
                if mat_chars[mat_type] == 'sym':
                    list_of_matrices.append(random_symmetric_matrix(matsize, dtype=dtype, device=device))
                elif mat_chars[mat_type] == 'sym_psd':
                    list_of_matrices.append(random_symmetric_psd_matrix(matsize, dtype=dtype, device=device))
                elif mat_chars[mat_type] == 'sym_pd':
                    list_of_matrices.append(random_symmetric_pd_matrix(matsize, dtype=dtype, device=device))
                elif mat_chars[mat_type] == 'sing':
                    list_of_matrices.append(torch.ones(matsize, matsize, dtype=dtype, device=device))
                elif mat_chars[mat_type] == 'non_sing':
                    list_of_matrices.append(random_square_matrix_of_rank(matsize, matsize, dtype=dtype, device=device))
            full_tensor = torch.stack(list_of_matrices, dim=0).reshape(batchdims + (matsize, matsize))
            # Scaling adapted from `get_random_mat_scale` in _test_det_logdet_slogdet
            full_tensor *= (math.factorial(matsize - 1) ** (-1.0 / (2 * matsize)))

            for fn in [torch.det, torch.logdet, torch.slogdet, torch.linalg.slogdet]:
                expected_value = []
                actual_value = fn(full_tensor)
                # Recompute one matrix at a time for the reference.
                for full_idx in itertools.product(*(list(range(x)) for x in batchdims)):
                    expected_value.append(fn(full_tensor[full_idx]))

                if fn == torch.slogdet or fn == torch.linalg.slogdet:
                    # slogdet-style functions return a (sign, logabsdet) pair.
                    sign_value = torch.stack([tup[0] for tup in expected_value], dim=0).reshape(batchdims)
                    expected_value = torch.stack([tup[1] for tup in expected_value], dim=0).reshape(batchdims)
                    self.assertEqual(sign_value, actual_value[0])
                    self.assertEqual(expected_value, actual_value[1])
                else:
                    expected_value = torch.stack(expected_value, dim=0).reshape(batchdims)
                    self.assertEqual(actual_value, expected_value)

        for matsize, batchdims in itertools.product([3, 5], [(3,), (5, 3)]):
            run_test(matsize, batchdims, mat_chars=['sym_pd'])
            run_test(matsize, batchdims, mat_chars=['sing'])
            run_test(matsize, batchdims, mat_chars=['non_sing'])
            run_test(matsize, batchdims, mat_chars=['sym', 'sym_pd', 'sym_psd'])
            run_test(matsize, batchdims, mat_chars=['sing', 'non_sing'])
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_cholesky_inverse(self, device, dtype):
        """torch.cholesky_inverse of a Cholesky factor must reproduce
        torch.inverse of the original hermitian PD matrix, for both upper and
        lower factors, contiguous and non-contiguous inputs, and the out= paths.
        """
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix

        def run_test(shape, batch, upper, contiguous):
            A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)
            if A.numel() > 0 and not contiguous:
                # mT gives a transposed (non-contiguous) view of the same matrix
                A = A.mT
                self.assertFalse(A.is_contiguous())
            L = torch.linalg.cholesky(A)
            expected_inverse = torch.inverse(A)
            # cholesky_inverse expects the factor matching its `upper` flag
            L = L.mH if upper else L
            actual_inverse = torch.cholesky_inverse(L, upper)
            self.assertEqual(actual_inverse, expected_inverse)

        shapes = (0, 3, 5)
        batches = ((), (0,), (3, ), (2, 2))
        for shape, batch, upper, contiguous in list(itertools.product(shapes, batches, (True, False), (True, False))):
            run_test(shape, batch, upper, contiguous)

        # check the out= variant
        A = random_hermitian_pd_matrix(3, 2, dtype=dtype, device=device)
        L = torch.linalg.cholesky(A)

        # There are two code paths currently for the out= variant
        # 1. When 'out' tensor is in Fortran (column-major) memory format
        # then the fast route is taken and the storage is reused directly in the computations
        # 2. When 'out' tensor is not in Fortran format then a temporary tensor is allocated internally
        # and the result is copied from the temporary tensor to 'out' tensor

        # This test checks the first code path
        out = torch.empty_like(A)
        out_t = out.mT.clone(memory_format=torch.contiguous_format)
        out = out_t.mT
        ans = torch.cholesky_inverse(L, out=out)
        self.assertEqual(ans, out)
        expected = torch.inverse(A)
        self.assertEqual(expected, out)

        # This test checks the second code path
        out = torch.empty_like(A)
        ans = torch.cholesky_inverse(L, out=out)
        self.assertEqual(ans, out)
        expected = torch.inverse(A)
        self.assertEqual(expected, out)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_cholesky_inverse_errors_and_warnings(self, device, dtype):
        """cholesky_inverse input validation: shape/dtype/device checks, the
        out= resize warning, and the CPU-vs-CUDA behavior on a zero diagonal.
        """
        # cholesky_inverse requires the input to be at least 2 dimensional tensor
        a = torch.randn(2, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
            torch.cholesky_inverse(a)

        # cholesky_inverse requires a square matrix
        a = torch.randn(2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.cholesky_inverse(a)

        # if non-empty out tensor with wrong shape is passed a warning is given
        a = torch.randn(3, 3, device=device, dtype=dtype)
        out = torch.empty(2, 3, device=device, dtype=dtype)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.cholesky_inverse(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

        # dtypes should be safely castable
        out = torch.empty(*a.shape, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.cholesky_inverse(a, out=out)

        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
                torch.cholesky_inverse(a, out=out)

        # cholesky_inverse raises an error for invalid inputs on CPU
        # for example if at least one diagonal element is zero
        a = torch.randn(3, 3, device=device, dtype=dtype)
        a[1, 1] = 0
        if self.device_type == 'cpu':
            with self.assertRaisesRegex(torch.linalg.LinAlgError, r"cholesky_inverse: The diagonal element 2 is zero"):
                torch.cholesky_inverse(a)
        # cholesky_inverse on GPU does not raise an error for this case
        elif self.device_type == 'cuda':
            out = torch.cholesky_inverse(a)
            # result is garbage (inf/nan) rather than an exception
            self.assertTrue(out.isinf().any() or out.isnan().any())
def _select_broadcastable_dims(self, dims_full=None):
# select full dimensionality
if dims_full is None:
dims_full = []
ndims = random.randint(1, 4)
dims_full = [random.randint(1, 8) for _ in range(ndims)]
else:
ndims = len(dims_full)
# select actual dimensions for ops:
# larger: full ndims, individual sizes may be reduced
# smaller: possibly reduced ndims, sizes may be reduced
smaller_ndims = random.randint(1, ndims)
dims_small = []
dims_large = []
for i in range(ndims - 1, -1, -1):
j = random.randint(1, 3)
if j == 1: # no reduced singleton dimension
ds = dims_full[i]
dl = dims_full[i]
elif j == 2: # larger may have reduced singleton dimension
ds = dims_full[i]
dl = 1 if len(dims_small) < smaller_ndims else dims_full[i]
elif j == 3: # smaller may have reduced singleton dimension
ds = 1
dl = dims_full[i]
dims_large = [dl] + dims_large
if len(dims_small) < smaller_ndims:
dims_small = [ds] + dims_small
return (dims_small, dims_large, dims_full)
def test_broadcast_fused_matmul(self, device):
    """Fused matmul ops must give the same result for a broadcastable
    first argument as for its fully expanded counterpart."""
    for op_name in ("baddbmm", "addbmm", "addmm", "addmv", "addr"):
        batch_dim = random.randint(1, 8)
        n_dim = random.randint(1, 8)
        m_dim = random.randint(1, 8)
        p_dim = random.randint(1, 8)
        # shapes of (self, mat1, mat2) for each fused op
        shape_table = {
            "baddbmm": ([batch_dim, n_dim, p_dim], [batch_dim, n_dim, m_dim], [batch_dim, m_dim, p_dim]),
            "addbmm": ([n_dim, p_dim], [batch_dim, n_dim, m_dim], [batch_dim, m_dim, p_dim]),
            "addmm": ([n_dim, p_dim], [n_dim, m_dim], [m_dim, p_dim]),
            "addmv": ([n_dim], [n_dim, m_dim], [m_dim]),
            "addr": ([n_dim, m_dim], [n_dim], [m_dim]),
        }
        if op_name not in shape_table:
            raise AssertionError("unknown function")
        t0_dims_full, t1_dims, t2_dims = shape_table[op_name]
        # shrink/broadcast only the first operand
        t0_dims_small, _, _ = self._select_broadcastable_dims(t0_dims_full)
        t0_small = torch.randn(*t0_dims_small, device=device).float()
        t1 = torch.randn(*t1_dims, device=device).float()
        t2 = torch.randn(*t2_dims, device=device).float()
        t0_full = t0_small.expand(*t0_dims_full).to(device)
        op = getattr(torch, op_name)
        self.assertEqual(op(t0_small, t1, t2), op(t0_full, t1, t2))
@skipIfRocmArch(MI300_ARCH)
@tf32_on_and_off(0.001)
@reduced_f32_on_and_off(0.001)
def test_broadcast_batched_matmul(self, device):
    """Check matmul broadcasting of batch dimensions against explicitly
    expanded operands and against torch.bmm on flattened batches."""
    n_dim = random.randint(1, 8)
    m_dim = random.randint(1, 8)
    p_dim = random.randint(1, 8)
    full_batch_dims = [random.randint(1, 3) for i in range(random.randint(1, 3))]
    (batch_dims_small, _, _) = self._select_broadcastable_dims(full_batch_dims)
    def verify_batched_matmul(full_lhs, one_dimensional):
        # full_lhs: which operand carries the full batch dimensions;
        # one_dimensional: whether the non-batched operand is a vector.
        if not one_dimensional:
            lhs_dims = [n_dim, m_dim]
            rhs_dims = [m_dim, p_dim]
            result_dims = [n_dim, p_dim]
        else:
            lhs_dims = [n_dim, m_dim] if full_lhs else [m_dim]
            rhs_dims = [m_dim, p_dim] if not full_lhs else [m_dim]
            result_dims = [n_dim] if full_lhs else [p_dim]
        # matrix-shaped views of possibly 1-D operands (vectors get a unit dim)
        lhs_mat_dims = lhs_dims if len(lhs_dims) != 1 else [1, m_dim]
        rhs_mat_dims = rhs_dims if len(rhs_dims) != 1 else [m_dim, 1]
        full_mat_dims = lhs_mat_dims if full_lhs else rhs_mat_dims
        dim0_dims = rhs_dims if full_lhs else lhs_dims
        small_dims = batch_dims_small + (rhs_mat_dims if full_lhs else lhs_mat_dims)
        small = torch.randn(*(small_dims), device=device).float()
        dim0 = torch.randn(*(dim0_dims), device=device).float()
        full = torch.randn(*(full_batch_dims + full_mat_dims), device=device).float()
        if not one_dimensional:
            (lhsTensors, rhsTensors) = ((full,), (small, dim0)) if full_lhs else ((small, dim0), (full,))
        else:
            (lhsTensors, rhsTensors) = ((full,), (dim0,)) if full_lhs else ((dim0,), (full,))
        def maybe_squeeze_result(l, r, result):
            # drop the dimension matmul adds when a 1-D operand was
            # promoted to a matrix for the expanded computation
            if len(lhs_dims) == 1 and l.dim() != 1:
                return result.squeeze(-2)
            elif len(rhs_dims) == 1 and r.dim() != 1:
                return result.squeeze(-1)
            else:
                return result
        for lhs in lhsTensors:
            lhs_expanded = lhs.expand(*(torch.Size(full_batch_dims) + torch.Size(lhs_mat_dims)))
            lhs_expanded_matmul_fn = lhs_expanded.matmul
            for rhs in rhsTensors:
                rhs_expanded = ((rhs if len(rhs_dims) != 1 else rhs.unsqueeze(-1)).
                                expand(*(torch.Size(full_batch_dims) + torch.Size(rhs_mat_dims))))
                # ground truth: matmul on fully expanded operands
                truth = maybe_squeeze_result(lhs_expanded, rhs_expanded, lhs_expanded_matmul_fn(rhs_expanded))
                for l in (lhs, lhs_expanded):
                    for r in (rhs, rhs_expanded):
                        l_matmul_fn = l.matmul
                        result = maybe_squeeze_result(l, r, l_matmul_fn(r))
                        self.assertEqual(truth, result)
                        # test torch.matmul function as well
                        torch_result = maybe_squeeze_result(l, r, torch.matmul(l, r))
                        self.assertEqual(truth, torch_result)
                        # test torch.matmul with out
                        out = torch.zeros_like(torch_result)
                        torch.matmul(l, r, out=out)
                        self.assertEqual(truth, maybe_squeeze_result(l, r, out))
                # compare to bmm on batches flattened into one leading dim
                bmm_result = (torch.bmm(lhs_expanded.contiguous().view(-1, *lhs_mat_dims),
                                        rhs_expanded.contiguous().view(-1, *rhs_mat_dims)))
                self.assertEqual(truth.view(-1, *result_dims), bmm_result.view(-1, *result_dims))
    # all four combinations of (full_lhs, one_dimensional)
    for indices in itertools.product((True, False), repeat=2):
        verify_batched_matmul(*indices)
def lu_solve_test_helper(self, A_dims, b_dims, pivot, device, dtype):
    """Build a random full-rank system and its LU factorization.

    Returns ``(b, A, LU_data, LU_pivots)`` and asserts the factorization
    reported no errors.  ``pivot`` is accepted for interface
    compatibility; it is not consumed here.
    """
    b = torch.randn(*b_dims, dtype=dtype, device=device)
    A = make_fullrank_matrices_with_distinct_singular_values(
        *A_dims, device=device, dtype=dtype)
    LU_data, LU_pivots, info = torch.linalg.lu_factor_ex(A)
    # every batch entry must factorize successfully (info == 0)
    self.assertEqual(info, torch.zeros_like(info))
    return b, A, LU_data, LU_pivots
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                    torch.float64: 1e-8, torch.complex128: 1e-8})
def test_lu_solve(self, device, dtype):
    """Solving A x = b via lu_solve must reproduce b when A is re-applied."""
    def check(pivot):
        for nrhs, size in ((2, 3), (3, 5), (5, 7)):
            b, A, LU_data, LU_pivots = self.lu_solve_test_helper(
                (size, size), (size, nrhs), pivot, device, dtype)
            x = torch.lu_solve(b, LU_data, LU_pivots)
            # verify the residual through NumPy as a cross-check
            self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
    check(True)
    # the non-pivoting factorization path is only exercised on CUDA
    if self.device_type == 'cuda':
        check(False)
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                    torch.float64: 1e-8, torch.complex128: 1e-8})
def test_lu_solve_batched(self, device, dtype):
    """Batched lu_solve must match stacked per-sample solves and satisfy A x = b."""
    def sub_test(pivot):
        def lu_solve_batch_test_helper(A_dims, b_dims, pivot):
            b, A, LU_data, LU_pivots = self.lu_solve_test_helper(A_dims, b_dims, pivot, device, dtype)
            # solve each batch entry individually, then stack
            x_exp_list = []
            for i in range(b_dims[0]):
                x_exp_list.append(torch.lu_solve(b[i], LU_data[i], LU_pivots[i]))
            x_exp = torch.stack(x_exp_list)  # Stacked output
            x_act = torch.lu_solve(b, LU_data, LU_pivots)  # Actual output
            self.assertEqual(x_exp, x_act)  # Equality check
            # cross-check the residual through NumPy
            Ax = np.matmul(A.cpu(), x_act.cpu())
            self.assertEqual(b, Ax)
        for batchsize in [1, 3, 4]:
            lu_solve_batch_test_helper((batchsize, 5, 5), (batchsize, 5, 10), pivot)
        # Tests tensors with 0 elements
        b = torch.randn(3, 0, 3, dtype=dtype, device=device)
        A = torch.randn(3, 0, 0, dtype=dtype, device=device)
        LU_data, LU_pivots = torch.linalg.lu_factor(A)
        self.assertEqual(torch.empty_like(b), b.lu_solve(LU_data, LU_pivots))
    sub_test(True)
    # the non-pivoting factorization path is only exercised on CUDA
    if self.device_type == 'cuda':
        sub_test(False)
@slowTest
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(*floating_and_complex_types())
def test_lu_solve_batched_many_batches(self, device, dtype):
    """Stress lu_solve with very large batch counts."""
    for batch in (65536, 262144):
        b, A, LU_data, LU_pivots = self.lu_solve_test_helper(
            (batch, 5, 5), (batch, 5, 10), True, device, dtype)
        x = torch.lu_solve(b, LU_data, LU_pivots)
        # re-applying A must reproduce the (broadcast) right-hand side
        reconstructed = torch.matmul(A, x)
        self.assertEqual(reconstructed, b.expand_as(reconstructed))
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(*floating_and_complex_types())
def test_lu_solve_batched_broadcasting(self, device, dtype):
    """lu_solve must broadcast the batch dims of A and b the same way
    numpy.linalg.solve does."""
    make_fullrank = make_fullrank_matrices_with_distinct_singular_values
    make_A = partial(make_fullrank, device=device, dtype=dtype)
    def run_test(A_dims, b_dims, pivot=True):
        A_matrix_size = A_dims[-1]
        A_batch_dims = A_dims[:-2]
        A = make_A(*A_batch_dims, A_matrix_size, A_matrix_size)
        b = make_tensor(b_dims, dtype=dtype, device=device)
        x_exp = np.linalg.solve(A.cpu(), b.cpu())
        LU_data, LU_pivots = torch.linalg.lu_factor(A)
        x = torch.lu_solve(b, LU_data, LU_pivots)
        self.assertEqual(x, x_exp)
    # test against numpy.linalg.solve
    run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6))  # no broadcasting
    run_test((2, 1, 3, 4, 4), (4, 6))  # broadcasting b
    run_test((4, 4), (2, 1, 3, 4, 2))  # broadcasting A
    run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5))  # broadcasting A & b
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
# this tests https://github.com/pytorch/pytorch/issues/36921
def test_lu_solve_large_matrices(self, device, dtype):
    """Regression test: lu_solve with a large number of right-hand sides."""
    b, A, LU_data, LU_pivots = self.lu_solve_test_helper(
        (1, 1), (1, 1, 1025), True, device, dtype)
    x = torch.lu_solve(b, LU_data, LU_pivots)
    # re-applying A must reproduce the (broadcast) right-hand side
    product = torch.matmul(A, x)
    self.assertEqual(product, b.expand_as(product))
@skipCUDAIfNoCusolver
@skipCPUIfNoLapack
def test_pca_lowrank(self, device):
    """Validate torch.pca_lowrank / torch.svd_lowrank on dense, sparse and
    batched inputs: shape contract, centered reconstruction, rank
    detection, and TorchScript support."""
    from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
    dtype = torch.double
    def run_subtest(guess_rank, actual_rank, matrix_size, batches, device, pca, **options):
        # density < 1 selects a sparse input; use_svd_lowrank routes
        # through torch.svd_lowrank with an explicit mean M
        density = options.pop('density', 1)
        use_svd_lowrank = options.pop('use_svd_lowrank', False)
        if isinstance(matrix_size, int):
            rows = columns = matrix_size
        else:
            rows, columns = matrix_size
        if density == 1:
            a_input = random_lowrank_matrix(actual_rank, rows, columns, *batches, device=device, dtype=dtype)
            a = a_input
        else:
            a_input = random_sparse_matrix(rows, columns, density, device=device, dtype=dtype)
            a = a_input.to_dense()
        if use_svd_lowrank:
            m = a_input.mean(dim=-2, keepdim=True)
            u, s, v = pca(a_input, q=guess_rank, M=m, **options)
        else:
            u, s, v = pca(a_input, q=guess_rank, **options)
        # shape contract of the truncated decomposition
        self.assertEqual(s.shape[-1], guess_rank)
        self.assertEqual(u.shape[-2], rows)
        self.assertEqual(u.shape[-1], guess_rank)
        self.assertEqual(v.shape[-1], guess_rank)
        self.assertEqual(v.shape[-2], columns)
        # u @ diag(s) @ v^T must reconstruct the column-centered input
        A1 = u.matmul(s.diag_embed()).matmul(v.mT)
        ones_m1 = torch.ones(batches + (rows, 1), dtype=a.dtype, device=device)
        c = a.sum(axis=-2) / rows
        c = c.reshape(batches + (1, columns))
        A2 = a - ones_m1.matmul(c)
        self.assertEqual(A1, A2)
        if density == 1:
            # actual rank is known only for dense input
            detect_rank = (s.abs() > 1e-5).sum(axis=-1)
            self.assertEqual(actual_rank * torch.ones(batches, device=device, dtype=torch.int64), detect_rank)
            # leading singular values must agree with a full SVD
            S = torch.linalg.svdvals(A2)
            self.assertEqual(s[..., :actual_rank], S[..., :actual_rank])
    all_batches = [(), (1,), (3,), (2, 3)]
    # NOTE: the loop deliberately rebinds all_batches (noqa: B020) so the
    # large 1000x1000 case runs only without batching
    for actual_rank, size, all_batches in [  # noqa: B020
        (2, (17, 4), all_batches),
        (2, (100, 4), all_batches),
        (6, (100, 40), all_batches),
        (12, (1000, 1000), [()]),
    ]:
        for batches in all_batches:
            for guess_rank in [
                actual_rank,
                actual_rank + 2,
                actual_rank + 6,
            ]:
                if guess_rank <= min(*size):
                    run_subtest(guess_rank, actual_rank, size, batches, device, torch.pca_lowrank)
                    run_subtest(guess_rank, actual_rank, size[::-1], batches, device, torch.pca_lowrank)
                    run_subtest(guess_rank, actual_rank, size, batches, device, torch.svd_lowrank, use_svd_lowrank=True)
                    run_subtest(guess_rank, actual_rank, size[::-1], batches, device, torch.svd_lowrank, use_svd_lowrank=True)
    # sparse input
    for guess_rank, size in [
            (4, (17, 4)), (4, (4, 17)), (16, (17, 17)),
            (21, (100, 40)), (20, (40, 100)), (600, (1000, 1000))]:
        for density in [0.005, 0.1]:
            run_subtest(guess_rank, None, size, (), device, torch.pca_lowrank, density=density)
    # jitting support
    jitted = torch.jit.script(torch.pca_lowrank)
    guess_rank, actual_rank, size, batches = 2, 2, (17, 4), ()
    run_subtest(guess_rank, actual_rank, size, batches, device, jitted)
# Ensure that nuclear_norm's out variant gives the same result as the non-out
@onlyNativeDeviceTypes
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64)
def test_nuclear_norm_out(self, device, dtype):
    """The out= variant of nuclear_norm must match the returning variant."""
    test_cases = [
        # input size, dim
        ((25, 25), None),
        ((25, 25), (0, 1)),
        ((25, 25), (1, 0)),
        ((25, 25, 25), (2, 0)),
        ((25, 25, 25), (0, 1)),
    ]
    for keepdim, (input_size, dim) in itertools.product([False, True], test_cases):
        msg = f'input_size: {input_size}, dim: {dim}, keepdim: {keepdim}'
        x = torch.randn(*input_size, device=device, dtype=dtype)
        result_out = torch.empty(0, device=device, dtype=dtype)
        # dim=None means "reduce everything" and must not be passed explicitly
        kwargs = {'keepdim': keepdim} if dim is None else {'keepdim': keepdim, 'dim': dim}
        result = torch.nuclear_norm(x, **kwargs)
        torch.nuclear_norm(x, out=result_out, **kwargs)
        self.assertEqual(result, result_out, msg=msg)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_geqrf(self, device, dtype):
    """Compare torch.geqrf against numpy.linalg.qr(mode='raw')."""
    def run_test(shape):
        # numpy.linalg.qr with mode = 'raw' computes the same operation as torch.geqrf
        # so this test compares against that function
        A = make_tensor(shape, dtype=dtype, device=device)
        # numpy.linalg.qr doesn't work with batched input
        m, n = A.shape[-2:]
        # tau has min(m, n) elements; encode that in the gufunc signature
        tau_size = "n" if m > n else "m"
        np_dtype = A.cpu().numpy().dtype
        ot = [np_dtype, np_dtype]
        numpy_geqrf_batched = np.vectorize(
            lambda x: np.linalg.qr(x, mode='raw'),
            otypes=ot,
            signature=f'(m,n)->(n,m),({tau_size})')
        expected = numpy_geqrf_batched(A.cpu())
        actual = torch.geqrf(A)
        # numpy.linalg.qr returns transposed result
        self.assertEqual(expected[0].swapaxes(-2, -1), actual[0])
        self.assertEqual(expected[1], actual[1])
    # exercise tall, wide, square and empty matrices across batch shapes
    batches = [(), (0, ), (2, ), (2, 1)]
    ns = [5, 2, 0]
    for batch, (m, n) in product(batches, product(ns, ns)):
        run_test((*batch, m, n))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
def test_lapack_empty(self, device):
    """Zero-sized inputs to a selection of LAPACK-backed ops must work."""
    # FIXME: these are just a selection of LAPACK functions -- we need a general strategy here.
    # The LAPACK functions themselves generally do NOT work with zero sized dimensions, although
    # numpy/sci often has a direct wrapper (e.g. lu_factor) and a wrapper that "does the right thing"
    # (e.g. lu). We often name our functions identically to the lapack function, so it will take work
    # to name / migrate-to better wrappers.
    def fn(torchfn, *args):
        # each tuple argument becomes a random tensor of that shape;
        # non-tuple arguments are forwarded unchanged
        return torchfn(*tuple(torch.randn(shape, device=device) if isinstance(shape, tuple) else shape
                              for shape in args))
    # inverse, pinverse
    self.assertEqual((0, 0), fn(torch.inverse, (0, 0)).shape)
    self.assertEqual((5, 0), fn(torch.pinverse, (0, 5)).shape)
    self.assertEqual((0, 5), fn(torch.pinverse, (5, 0)).shape)
    self.assertEqual((0, 0), fn(torch.pinverse, (0, 0)).shape)
    # det, logdet, slogdet: empty-product convention (det of 0x0 is 1)
    self.assertEqual(torch.tensor(1., device=device), fn(torch.det, (0, 0)))
    self.assertEqual(torch.tensor(0., device=device), fn(torch.logdet, (0, 0)))
    self.assertEqual((torch.tensor(1., device=device), torch.tensor(0., device=device)),
                     fn(torch.slogdet, (0, 0)))
@skipIfRocmArch(MI300_ARCH)
@tf32_on_and_off(0.005)
@reduced_f32_on_and_off(0.07, 0.005)
def test_tensordot(self, device):
    """Compare torch.tensordot against numpy.tensordot: explicit dim
    pairs, integer dims, defaults, scalars, and the scalar-reduction
    fast path."""
    a = torch.arange(60., device=device).reshape(3, 4, 5)
    b = torch.arange(24., device=device).reshape(4, 3, 2)
    c = torch.tensordot(a, b, dims=([1, 0], [0, 1])).cpu()
    cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy(),
                                       axes=([1, 0], [0, 1])))
    self.assertEqual(c, cn)
    # the out= variant must match
    cout = torch.zeros((5, 2), device=device)
    torch.tensordot(a, b, dims=([1, 0], [0, 1]), out=cout).cpu()
    self.assertEqual(c, cout)
    a = torch.randn(2, 3, 4, 5, device=device)
    b = torch.randn(4, 5, 6, 7, device=device)
    c = torch.tensordot(a, b, dims=2).cpu()
    cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy(),
                                       axes=2))
    # negative integer dims are rejected
    with self.assertRaisesRegex(RuntimeError, "expects dims >= 0"):
        torch.tensordot(a, b, dims=-1)
    self.assertEqual(c, cn)
    # default dims (2) must match numpy's default
    c = torch.tensordot(a, b).cpu()
    cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy()))
    self.assertEqual(c, cn)
    # scalar inputs with dims=0
    a = torch.tensordot(torch.tensor(0.), torch.tensor(0.), 0)
    an = torch.from_numpy(np.tensordot(np.zeros((), dtype=np.float32), np.zeros((), dtype=np.float32), 0))
    self.assertEqual(a, an)
    # Testing the fast path introduced in #145936,
    # i.e. reduction to a scalar has to be of right dim.
    a = torch.rand(2, 2, device=device)
    a_dims = [-1, -2]
    b = torch.rand(2, 2, device=device)
    b_dims = [-2, -1]
    for res_ndim in range(5):
        res_torch = torch.tensordot(a, b, [a_dims, b_dims])
        self.assertEqual(res_torch.ndim, res_ndim)
        res_numpy = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy(), [a_dims, b_dims]))
        self.assertEqual(res_torch, res_numpy)
        # grow one operand's rank each iteration, alternating sides,
        # so the result rank increases by one per loop
        if res_ndim % 2:
            b.unsqueeze_(0)
        else:
            a.unsqueeze_(0)
@skipCUDAIfNoCusolver
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@skipIfTorchDynamo("flaky, needs investigation")
@dtypes(*floating_and_complex_types())
def test_ldl_factor(self, device, dtype):
    """torch.linalg.ldl_factor_ex must reconstruct the input and agree
    with SciPy's LDL decomposition when SciPy is available."""
    from torch.testing._internal.common_utils import random_hermitian_pd_matrix
    def run_test(shape, batch, hermitian):
        A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)
        actual_factors, actual_pivots, info = torch.linalg.ldl_factor_ex(A, hermitian=hermitian)
        # the unit lower-triangular L lives in the strict lower triangle
        actual_L = torch.tril(actual_factors, diagonal=-1)
        actual_L.diagonal(0, -2, -1).fill_(1.0)
        # This test is designed only for inputs with 1x1 block diagonal matrix D.
        # That is for positive definite input matrices, the pivots tensor is always > 0.
        # If negative pivots are encountered, it means that the input matrix is not positive definite.
        # And matrix D is a 2x2 block diagonal matrix.
        self.assertTrue((actual_pivots > 0).all())
        # Construct a 1x1 block diagonal matrix D from factors.
        actual_D = torch.diag_embed(actual_factors.diagonal(0, -2, -1))
        def T(x):
            # conjugate transpose in hermitian mode, plain transpose otherwise
            return x.mH if hermitian else x.mT
        A_reconstructed = actual_L @ actual_D @ T(actual_L)
        def symmetric(A):
            # symmetrize from the lower triangle (the part the factorization reads)
            return A.tril() + A.tril(-1).mT
        self.assertEqual(symmetric(A) if not hermitian else A, A_reconstructed)
        # Now test against SciPy implementation
        if TEST_SCIPY:
            from scipy.linalg import ldl as scipy_ldl
            A_np = A.cpu().numpy()
            np_dtype = A_np.dtype
            scipy_ldl_batched = np.vectorize(
                lambda x: scipy_ldl(x, hermitian=hermitian, lower=True),
                otypes=[np_dtype, np_dtype, np.dtype('int64')],
                signature='(m,m)->(m,m),(m,m),(m)')
            expected = scipy_ldl_batched(A_np)
            expected_L, expected_D, expected_pivots = expected
            # apply scipy's row permutation before comparing the L factors
            if expected_pivots.ndim > 1:
                permuted_expected_L = np.stack(
                    [expected_L[i][expected_pivots[i], :] for i in range(expected_pivots.shape[0])]
                )
            else:
                permuted_expected_L = expected_L[expected_pivots, :]
            self.assertEqual(actual_L, permuted_expected_L)
            self.assertEqual(actual_D, expected_D)
        else:
            # without SciPy, at least verify the output shapes
            self.assertEqual(actual_factors.shape, A.shape)
            self.assertEqual(actual_pivots.shape, A.shape[:-1])
            self.assertEqual(info.shape, A.shape[:-2])
    # hermitian=True for complex inputs on CUDA is supported only with MAGMA 2.5.4+
    magma_254_available = self.device_type == 'cuda' and _get_magma_version() >= (2, 5, 4)
    hermitians = (True, False) if dtype.is_complex and (self.device_type == 'cpu' or magma_254_available) else (False,)
    shapes = (5,)
    batches = ((), (4,),)
    for shape, batch, hermitian in itertools.product(shapes, batches, hermitians):
        run_test(shape, batch, hermitian)
@skipCUDAIfNoCusolver
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@skipCUDAIfRocm
@dtypes(*floating_and_complex_types())
def test_ldl_solve(self, device, dtype):
    """torch.linalg.ldl_solve must satisfy A @ X == B for LDL-factored A."""
    from torch.testing._internal.common_utils import random_hermitian_pd_matrix
    def run_test(shape, batch, nrhs, hermitian):
        A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)
        B = make_tensor((*A.shape[:-1], nrhs), dtype=dtype, device=device)
        factors, pivots, info = torch.linalg.ldl_factor_ex(A, hermitian=hermitian)
        X = torch.linalg.ldl_solve(factors, pivots, B, hermitian=hermitian)
        def symmetric(A):
            # symmetrize from the lower triangle (the part the factorization reads)
            return A.tril() + A.tril(-1).mT
        # verify A @ X == B
        expected_B = symmetric(A) @ X if not hermitian else A @ X
        self.assertEqual(B, expected_B)
    # hermitian=True is not supported on CUDA yet
    hermitians = (True, False) if dtype.is_complex and self.device_type == 'cpu' else (False,)
    shapes = (5,)
    batches = ((), (4,), (2, 2))
    nrhss = (1, 7)
    for shape, batch, nrhs, hermitian in itertools.product(shapes, batches, nrhss, hermitians):
        run_test(shape, batch, nrhs, hermitian)
@onlyCUDA
@skipCUDAIfNoMagma
@skipCUDAIfNoCusolver
@setLinalgBackendsToDefaultFinally
def test_preferred_linalg_library(self):
    """Smoke-test switching the preferred linalg backend between calls.

    The main purpose is to make sure these "backend" calls work normally
    without raising exceptions.
    """
    x = torch.randint(2, 5, (2, 4, 4), device='cuda', dtype=torch.double)
    results = []
    for backend in ('cusolver', 'magma'):
        torch.backends.cuda.preferred_linalg_library(backend)
        results.append(torch.linalg.inv(x))
    torch.backends.cuda.preferred_linalg_library('default')
    # Although linalg preferred flags doesn't affect CPU currently,
    # we set this to make sure the flag can switch back to default normally.
    out_ref = torch.linalg.inv(x.cpu())
    self.assertEqual(out_ref, results[0].cpu())
    self.assertEqual(results[0], results[1])
@onlyCUDA
@unittest.skipIf(not blaslt_supported_device(), "blasLt not supported on current device")
@setBlasBackendsToDefaultFinally
def test_preferred_blas_library(self):
    """Smoke-test switching the preferred BLAS backend between calls.

    The main purpose is to make sure these "backend" calls work normally
    without raising exceptions.
    """
    m1 = torch.randint(2, 5, (2048, 2400), device='cuda', dtype=torch.float)
    m2 = torch.randint(2, 5, (128, 2400), device='cuda', dtype=torch.float)
    torch.backends.cuda.preferred_blas_library('cublaslt')
    out_lt = torch.nn.functional.linear(m1, m2)
    torch.backends.cuda.preferred_blas_library('cublas')
    out_cublas = torch.nn.functional.linear(m1, m2)
    # Although blas preferred flags don't affect CPU currently, the CPU run
    # also verifies the flag can switch back to default normally.
    out_ref = torch.nn.functional.linear(m1.cpu(), m2.cpu())
    self.assertEqual(out_lt, out_cublas)
    self.assertEqual(out_ref, out_cublas.cpu())
@onlyCUDA
@skipIfRocmArch(NAVI_ARCH)
@skipCUDAIfNotRocm
@unittest.skipIf(not blaslt_supported_device(), "blasLt not supported on current device")
@setBlasBackendsToDefaultFinally
def test_ck_blas_library(self):
    """Exercise the composable-kernel BLAS backend against a CPU reference."""
    lhs = torch.randint(2, 5, (7168, 8192), device='cuda', dtype=torch.float)
    rhs = torch.randint(2, 5, (1280, 8192), device='cuda', dtype=torch.float)
    torch.backends.cuda.preferred_blas_library('ck')
    gpu_out = torch.nn.functional.linear(lhs, rhs)
    reference = torch.nn.functional.linear(lhs.cpu(), rhs.cpu())
    self.assertEqual(gpu_out, reference)
def test_permute_matmul(self):
    """matmul with a permuted (non-contiguous) operand broadcasts correctly."""
    lhs = torch.ones([2, 5, 24, 24]).permute(0, 1, 3, 2)
    rhs = torch.ones([3, 2, 5, 24, 24])
    out = lhs.matmul(rhs)
    # all-ones inputs: every entry equals the inner dimension (24),
    # and the total is 3*2*5*24*24 entries * 24 = 414720
    self.assertEqual([out.min(), out.max(), out.sum()], [24, 24, 414720])
def test_lower_precision_accumulation_with_ref_path(self):
    """Reference-path gemm must accumulate bf16/fp16 inputs accurately."""
    # fix https://github.com/pytorch/pytorch/issues/95125
    # and https://github.com/pytorch/pytorch/issues/83863
    # for bf16 accumulation in gemm ref path
    def check_correctness(fn, dtype, *args):
        # reference: compute in full precision, then downcast
        expected = fn(*args).to(dtype=dtype)
        # disable oneDNN so the reference (non-mkldnn) path is exercised
        with torch.backends.mkldnn.flags(enabled=False):
            def test():
                lower_args = (arg.to(dtype=dtype) for arg in args)
                tmp_result = fn(*lower_args)
                return tmp_result
            c = test()
            assert (torch.all(c == expected)), "Incorrect result with\n" \
                                               f"expected: {expected}\n" \
                                               f"got: {c}\n"
    # test matmul
    for dtype in [torch.bfloat16, torch.half]:
        for transa in [True, False]:
            for transb in [True, False]:
                a = torch.ones(300, 300)
                b = torch.ones(300, 300)
                # the transpose/contiguous round trip yields a transposed
                # memory layout for the same logical values
                if transa:
                    a = a.transpose(0, 1).contiguous().transpose(0, 1)
                if transb:
                    b = b.transpose(0, 1).contiguous().transpose(0, 1)
                check_correctness(torch.matmul, dtype, a, b)
    # test bmm
    a = torch.ones(1, 1, 300)
    b = torch.ones(1, 300, 1)
    check_correctness(torch.bmm, torch.bfloat16, a, b)
    check_correctness(torch.bmm, torch.half, a, b)
    # test baddbmm
    a = torch.ones(1, 1, 300)
    b = torch.ones(1, 300, 1)
    c = torch.ones(1, 1, 1)
    check_correctness(torch.baddbmm, torch.bfloat16, c, a, b)
    check_correctness(torch.baddbmm, torch.half, c, a, b)
    # test mv/addmv
    for dtype in [torch.bfloat16, torch.half]:
        for trans in [True, False]:
            c = torch.ones(300) * -300
            a = torch.ones(300, 300)
            if trans:
                a = a.transpose(0, 1).contiguous().transpose(0, 1)
            b = torch.ones(300)
            check_correctness(torch.mv, dtype, a, b)
            check_correctness(torch.addmv, dtype, c, a, b)
    # test dot
    a = torch.ones(300)
    b = torch.ones(300)
    check_correctness(torch.dot, torch.bfloat16, a, b)
    check_correctness(torch.dot, torch.half, a, b)
@dtypes(torch.float, torch.half, torch.bfloat16)
@parametrize("transpose_a", [True, False])
@parametrize("transpose_b", [True, False])
@parametrize("alpha", [0.0, 0.2, 1.0])
@parametrize("beta", [0.0, 0.5, 1.0])
def test_addmm_mv(self, device, dtype, transpose_a, transpose_b, alpha, beta):
    """addmm must equal alpha * A @ B + beta * C for (possibly transposed)
    operands.

    Regression test for https://github.com/pytorch/pytorch/issues/136299;
    should only expose problems on aarch64, but run everywhere to be thorough.
    """
    def gen_mat(rows, cols, use_transpose: bool = False):
        # materialize either directly or as a transposed view
        if use_transpose:
            return torch.rand(cols, rows, dtype=dtype, device=device).t()
        return torch.rand(rows, cols, dtype=dtype, device=device)
    m, n, k = 1, 8, 32
    A = gen_mat(m, k, transpose_a)
    B = gen_mat(k, n, transpose_b)
    C = torch.ones(m, n, dtype=dtype, device=device)
    self.assertEqual(torch.addmm(C, A, B, alpha=alpha, beta=beta),
                     alpha * A @ B + beta * C)
@dtypes(torch.float, torch.half, torch.bfloat16)
@largeTensorTest('16GB')
def test_matmul_mv(self, device, dtype):
    """Matrix-vector product on a matrix larger than 4 GB.

    Regression test for https://github.com/pytorch/pytorch/issues/150637
    """
    side = 50_000
    mat = torch.ones(side, side, dtype=dtype, device=device)
    vec = torch.rand(side, dtype=dtype, device=device)
    # each row of ones dotted with vec is simply vec.sum()
    self.assertEqual(torch.matmul(mat, vec), vec.sum().expand(vec.shape))
@onlyCUDA
@largeTensorTest("40GB")
def test_triu_tril_large_matrix_64bit(self, device):
    """
    Test triu/tril with large matrices requiring 64-bit indexing.
    Regression test for https://github.com/pytorch/pytorch/issues/136611
    """
    # 100k x 100k matrix with 10B elements requires 64-bit indexing
    side = 100000
    mask = torch.full((side, side), float('-inf'), device=device, dtype=torch.float32)
    mask.triu_(1)
    # Row 42950 previously failed due to int32 overflow at row*col;
    # the causal mask keeps entries [0, row] zero, i.e. row + 1 zeros.
    probe = 42950
    self.assertEqual((mask[probe] == 0.0).sum().item(), probe + 1)
    # The final row must be entirely zero
    self.assertTrue((mask[-1] == 0.0).all())
@dtypes(*all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16))
def test_triu_tril_extreme_k_values(self, device, dtype):
    """
    Test triu/tril with extreme k values to verify overflow fix.
    Regression test for https://github.com/pytorch/pytorch/pull/153240
    """
    a = make_tensor((5, 5), dtype=dtype, device=device)
    int64_max = 2**63 - 1
    int64_min = -2**63
    # k = INT64_MAX: the diagonal lies far above the matrix, so triu
    # keeps nothing and tril keeps everything.
    self.assertEqual(torch.triu(a, int64_max), torch.zeros_like(a))
    self.assertEqual(torch.tril(a, int64_max), a.clone())
    # k = INT64_MIN: the diagonal lies far below the matrix, so triu
    # keeps everything and tril keeps nothing.
    self.assertEqual(torch.triu(a, int64_min), a.clone())
    self.assertEqual(torch.tril(a, int64_min), torch.zeros_like(a))
@dtypes(torch.float, torch.double)
@precisionOverride({torch.float32: 1e-4})
def test_1_sized_with_0_strided(self, device, dtype):
    """bmm must handle size-1 dimensions carrying a zero stride."""
    lhs_base = make_tensor((8, 1, 64), dtype=dtype, device=device)
    # give the singleton middle dimension a 0 stride
    lhs = torch.as_strided(lhs_base, size=[8, 1, 64], stride=[64, 0, 1])
    rhs_base = make_tensor((8, 64, 512), dtype=dtype, device=device)
    # deliberately unusual (non-contiguous) stride layout
    rhs = torch.as_strided(rhs_base, size=[8, 64, 512], stride=[64, 1, 512])
    res = torch.bmm(lhs, rhs)
    expect = torch.from_numpy(lhs.cpu().numpy() @ rhs.cpu().numpy())
    self.assertEqual(expect.to(device=device, dtype=dtype), res)
@onlyCUDA
def test_logaddexp_cpu_vs_cuda_complex(self, device):
    """logaddexp with complex values must produce the same results
    (up to machine precision) on CPU and CUDA, including at extreme
    values (inf, -inf, nan)."""
    # finite values: compare up to machine precision
    lhs_real = torch.tensor([0.052, -0.2115, 0.6913], dtype=torch.float64)
    lhs_imag = torch.tensor([-0.3229, -0.8374, 0.8391], dtype=torch.float64)
    lhs = torch.complex(lhs_real, lhs_imag).cuda()
    rhs_real = torch.tensor([0.2550, 0.8769, -0.4884], dtype=torch.float64)
    rhs_imag = torch.tensor([0.6063, 0.4343, -1.4166], dtype=torch.float64)
    rhs = torch.complex(rhs_real, rhs_imag).cuda()
    gpu_out = torch.logaddexp(input=lhs, other=rhs)
    cpu_out = torch.logaddexp(input=lhs.cpu(), other=rhs.cpu())
    torch.testing.assert_close(gpu_out.cpu(), cpu_out, rtol=1e-12, atol=1e-14)
    # extreme cases (inf, -inf, nan) must be handled identically
    inf = float('inf')
    nan = float('nan')
    extreme_pairs = [
        ((inf, inf), (inf, inf)),
        ((inf, inf), (inf, -inf)),
        ((-inf, inf), (inf, inf)),
        ((-inf, inf), (-inf, inf)),
        ((-inf, inf), (-inf, 2.)),
        ((2., inf), (inf, inf)),
        ((nan, inf), (inf, inf)),
    ]
    for (l_re, l_im), (r_re, r_im) in extreme_pairs:
        lhs = torch.complex(torch.tensor(l_re), torch.tensor(l_im))
        rhs = torch.complex(torch.tensor(r_re), torch.tensor(r_im))
        out_gpu = torch.logaddexp(input=lhs, other=rhs)
        out_cpu = torch.logaddexp(input=lhs.cpu(), other=rhs.cpu())
        self.assertEqual(out_gpu.cpu(), out_cpu)
# Register device-parametrized variants of TestLinalg into this module's globals.
instantiate_device_type_tests(TestLinalg, globals())
if __name__ == '__main__':
    # enable the default-dtype consistency check before running the suite
    TestCase._default_dtype_check_enabled = True
    run_tests()
| TestLinalg |
python | readthedocs__readthedocs.org | readthedocs/projects/views/mixins.py | {
"start": 3032,
"end": 4731
} | class ____:
"""Helpers to import a Project."""
def finish_import_project(self, request, project):
"""
Perform last steps to import a project into Read the Docs.
- Add the user from request as maintainer
- Send Django Signal
- Trigger initial build
It requires the Project was already saved into the DB.
:param request: Django Request object
:param project: Project instance just imported (already saved)
:param tags: tags to add to the project
"""
project.users.add(request.user)
log.info(
"Project imported.",
project_slug=project.slug,
user_username=request.user.username,
)
# TODO: this signal could be removed, or used for sync task
project_import.send(sender=project, request=request)
self.trigger_initial_build(project, request.user)
def trigger_initial_build(self, project, user):
"""
Trigger initial build after project is imported.
:param project: project's documentation to be built
:returns: Celery AsyncResult promise
"""
update_docs, build = prepare_build(project)
if (update_docs, build) == (None, None):
return None
from readthedocs.oauth.tasks import attach_webhook
task_promise = chain(
# TODO: Remove user_pk on the next release,
# it's used just to keep backward compatibility with the old task signature.
attach_webhook.si(project.pk, user.pk),
update_docs,
)
async_result = task_promise.apply_async()
return async_result
| ProjectImportMixin |
python | Farama-Foundation__Gymnasium | docs/tutorials/training_agents/mujoco_reinforce.py | {
"start": 6155,
"end": 12098
} | class ____:
"""REINFORCE algorithm."""
def __init__(self, obs_space_dims: int, action_space_dims: int):
"""Initializes an agent that learns a policy via REINFORCE algorithm [1]
to solve the task at hand (Inverted Pendulum v4).
Args:
obs_space_dims: Dimension of the observation space
action_space_dims: Dimension of the action space
"""
# Hyperparameters
self.learning_rate = 1e-4 # Learning rate for policy optimization
self.gamma = 0.99 # Discount factor
self.eps = 1e-6 # small number for mathematical stability
self.probs = [] # Stores probability values of the sampled action
self.rewards = [] # Stores the corresponding rewards
self.net = Policy_Network(obs_space_dims, action_space_dims)
self.optimizer = torch.optim.AdamW(self.net.parameters(), lr=self.learning_rate)
def sample_action(self, state: np.ndarray) -> float:
"""Returns an action, conditioned on the policy and observation.
Args:
state: Observation from the environment
Returns:
action: Action to be performed
"""
state = torch.tensor(np.array([state]))
action_means, action_stddevs = self.net(state)
# create a normal distribution from the predicted
# mean and standard deviation and sample an action
distrib = Normal(action_means[0] + self.eps, action_stddevs[0] + self.eps)
action = distrib.sample()
prob = distrib.log_prob(action)
action = action.numpy()
self.probs.append(prob)
return action
def update(self):
"""Updates the policy network's weights."""
running_g = 0
gs = []
# Discounted return (backwards) - [::-1] will return an array in reverse
for R in self.rewards[::-1]:
running_g = R + self.gamma * running_g
gs.insert(0, running_g)
deltas = torch.tensor(gs)
log_probs = torch.stack(self.probs).squeeze()
# Update the loss with the mean log probability and deltas
# Now, we compute the correct total loss by taking the sum of the element-wise products.
loss = -torch.sum(log_probs * deltas)
# Update the policy network
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# Empty / zero out all episode-centric/related variables
self.probs = []
self.rewards = []
# %%
# Now lets train the policy using REINFORCE to master the task of Inverted Pendulum.
#
# Following is the overview of the training procedure
#
# for seed in random seeds
# reinitialize agent
#
# for episode in range of max number of episodes
# until episode is done
# sample action based on current observation
#
# take action and receive reward and next observation
#
# store action take, its probability, and the observed reward
# update the policy
#
# Note: Deep RL is fairly brittle concerning random seed in a lot of common use cases (https://spinningup.openai.com/en/latest/spinningup/spinningup.html).
# Hence it is important to test out various seeds, which we will be doing.
# Create and wrap the environment
env = gym.make("InvertedPendulum-v4")
wrapped_env = gym.wrappers.RecordEpisodeStatistics(env, 50) # Records episode-reward
total_num_episodes = int(5e3) # Total number of episodes
# Observation-space of InvertedPendulum-v4 (4)
obs_space_dims = env.observation_space.shape[0]
# Action-space of InvertedPendulum-v4 (1)
action_space_dims = env.action_space.shape[0]
rewards_over_seeds = []
for seed in [1, 2, 3, 5, 8]: # Fibonacci seeds
# set seed
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
# Reinitialize agent every seed
agent = REINFORCE(obs_space_dims, action_space_dims)
reward_over_episodes = []
for episode in range(total_num_episodes):
# gymnasium v26 requires users to set seed while resetting the environment
obs, info = wrapped_env.reset(seed=seed)
done = False
while not done:
action = agent.sample_action(obs)
# Step return type - `tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]`
# These represent the next observation, the reward from the step,
# if the episode is terminated, if the episode is truncated and
# additional info from the step
obs, reward, terminated, truncated, info = wrapped_env.step(action)
agent.rewards.append(reward)
# End the episode when either truncated or terminated is true
# - truncated: The episode duration reaches max number of timesteps
# - terminated: Any of the state space values is no longer finite.
#
done = terminated or truncated
reward_over_episodes.append(wrapped_env.return_queue[-1])
agent.update()
if episode % 1000 == 0:
avg_reward = int(np.mean(wrapped_env.return_queue))
print("Episode:", episode, "Average Reward:", avg_reward)
rewards_over_seeds.append(reward_over_episodes)
# %%
# Plot learning curve
# ~~~~~~~~~~~~~~~~~~~
#
df1 = pd.DataFrame(rewards_over_seeds).melt()
df1.rename(columns={"variable": "episodes", "value": "reward"}, inplace=True)
sns.set(style="darkgrid", context="talk", palette="rainbow")
sns.lineplot(x="episodes", y="reward", data=df1).set(
title="REINFORCE for InvertedPendulum-v4"
)
plt.show()
# %%
# .. image:: /_static/img/tutorials/mujoco_reinforce_fig4.png
#
# Author: Siddarth Chandrasekar
#
# License: MIT License
#
# References
# ~~~~~~~~~~
#
# [1] Williams, Ronald J.. “Simple statistical gradient-following
# algorithms for connectionist reinforcement learning.” Machine Learning 8
# (2004): 229-256.
#
| REINFORCE |
python | pypa__pipenv | pipenv/vendor/click/parser.py | {
"start": 8052,
"end": 19067
} | class ____:
"""The option parser is an internal class that is ultimately used to
parse options and arguments. It's modelled after optparse and brings
a similar but vastly simplified API. It should generally not be used
directly as the high level Click classes wrap it for you.
It's not nearly as extensible as optparse or argparse as it does not
implement features that are implemented on a higher level (such as
types or defaults).
:param ctx: optionally the :class:`~click.Context` where this parser
should go with.
"""
def __init__(self, ctx: t.Optional["Context"] = None) -> None:
#: The :class:`~click.Context` for this parser. This might be
#: `None` for some advanced use cases.
self.ctx = ctx
#: This controls how the parser deals with interspersed arguments.
#: If this is set to `False`, the parser will stop on the first
#: non-option. Click uses this to implement nested subcommands
#: safely.
self.allow_interspersed_args: bool = True
#: This tells the parser how to deal with unknown options. By
#: default it will error out (which is sensible), but there is a
#: second mode where it will ignore it and continue processing
#: after shifting all the unknown options into the resulting args.
self.ignore_unknown_options: bool = False
if ctx is not None:
self.allow_interspersed_args = ctx.allow_interspersed_args
self.ignore_unknown_options = ctx.ignore_unknown_options
self._short_opt: t.Dict[str, Option] = {}
self._long_opt: t.Dict[str, Option] = {}
self._opt_prefixes = {"-", "--"}
self._args: t.List[Argument] = []
def add_option(
self,
obj: "CoreOption",
opts: t.Sequence[str],
dest: t.Optional[str],
action: t.Optional[str] = None,
nargs: int = 1,
const: t.Optional[t.Any] = None,
) -> None:
"""Adds a new option named `dest` to the parser. The destination
is not inferred (unlike with optparse) and needs to be explicitly
provided. Action can be any of ``store``, ``store_const``,
``append``, ``append_const`` or ``count``.
The `obj` can be used to identify the option in the order list
that is returned from the parser.
"""
opts = [normalize_opt(opt, self.ctx) for opt in opts]
option = Option(obj, opts, dest, action=action, nargs=nargs, const=const)
self._opt_prefixes.update(option.prefixes)
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
def add_argument(
self, obj: "CoreArgument", dest: t.Optional[str], nargs: int = 1
) -> None:
"""Adds a positional argument named `dest` to the parser.
The `obj` can be used to identify the option in the order list
that is returned from the parser.
"""
self._args.append(Argument(obj, dest=dest, nargs=nargs))
def parse_args(
self, args: t.List[str]
) -> t.Tuple[t.Dict[str, t.Any], t.List[str], t.List["CoreParameter"]]:
"""Parses positional arguments and returns ``(values, args, order)``
for the parsed options and arguments as well as the leftover
arguments if there are any. The order is a list of objects as they
appear on the command line. If arguments appear multiple times they
will be memorized multiple times as well.
"""
state = ParsingState(args)
try:
self._process_args_for_options(state)
self._process_args_for_args(state)
except UsageError:
if self.ctx is None or not self.ctx.resilient_parsing:
raise
return state.opts, state.largs, state.order
def _process_args_for_args(self, state: ParsingState) -> None:
pargs, args = _unpack_args(
state.largs + state.rargs, [x.nargs for x in self._args]
)
for idx, arg in enumerate(self._args):
arg.process(pargs[idx], state)
state.largs = args
state.rargs = []
def _process_args_for_options(self, state: ParsingState) -> None:
while state.rargs:
arg = state.rargs.pop(0)
arglen = len(arg)
# Double dashes always handled explicitly regardless of what
# prefixes are valid.
if arg == "--":
return
elif arg[:1] in self._opt_prefixes and arglen > 1:
self._process_opts(arg, state)
elif self.allow_interspersed_args:
state.largs.append(arg)
else:
state.rargs.insert(0, arg)
return
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt(
self, opt: str, explicit_value: t.Optional[str], state: ParsingState
) -> None:
if opt not in self._long_opt:
from difflib import get_close_matches
possibilities = get_close_matches(opt, self._long_opt)
raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx)
option = self._long_opt[opt]
if option.takes_value:
# At this point it's safe to modify rargs by injecting the
# explicit value, because no exception is raised in this
# branch. This means that the inserted value will be fully
# consumed.
if explicit_value is not None:
state.rargs.insert(0, explicit_value)
value = self._get_value_from_state(opt, option, state)
elif explicit_value is not None:
raise BadOptionUsage(
opt, _("Option {name!r} does not take a value.").format(name=opt)
)
else:
value = None
option.process(value, state)
def _match_short_opt(self, arg: str, state: ParsingState) -> None:
stop = False
i = 1
prefix = arg[0]
unknown_options = []
for ch in arg[1:]:
opt = normalize_opt(f"{prefix}{ch}", self.ctx)
option = self._short_opt.get(opt)
i += 1
if not option:
if self.ignore_unknown_options:
unknown_options.append(ch)
continue
raise NoSuchOption(opt, ctx=self.ctx)
if option.takes_value:
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
state.rargs.insert(0, arg[i:])
stop = True
value = self._get_value_from_state(opt, option, state)
else:
value = None
option.process(value, state)
if stop:
break
# If we got any unknown options we recombine the string of the
# remaining options and re-attach the prefix, then report that
# to the state as new larg. This way there is basic combinatorics
# that can be achieved while still ignoring unknown arguments.
if self.ignore_unknown_options and unknown_options:
state.largs.append(f"{prefix}{''.join(unknown_options)}")
def _get_value_from_state(
self, option_name: str, option: Option, state: ParsingState
) -> t.Any:
nargs = option.nargs
if len(state.rargs) < nargs:
if option.obj._flag_needs_value:
# Option allows omitting the value.
value = _flag_needs_value
else:
raise BadOptionUsage(
option_name,
ngettext(
"Option {name!r} requires an argument.",
"Option {name!r} requires {nargs} arguments.",
nargs,
).format(name=option_name, nargs=nargs),
)
elif nargs == 1:
next_rarg = state.rargs[0]
if (
option.obj._flag_needs_value
and isinstance(next_rarg, str)
and next_rarg[:1] in self._opt_prefixes
and len(next_rarg) > 1
):
# The next arg looks like the start of an option, don't
# use it as the value if omitting the value is allowed.
value = _flag_needs_value
else:
value = state.rargs.pop(0)
else:
value = tuple(state.rargs[:nargs])
del state.rargs[:nargs]
return value
def _process_opts(self, arg: str, state: ParsingState) -> None:
explicit_value = None
# Long option handling happens in two parts. The first part is
# supporting explicitly attached values. In any case, we will try
# to long match the option first.
if "=" in arg:
long_opt, explicit_value = arg.split("=", 1)
else:
long_opt = arg
norm_long_opt = normalize_opt(long_opt, self.ctx)
# At this point we will match the (assumed) long option through
# the long option matching code. Note that this allows options
# like "-foo" to be matched as long options.
try:
self._match_long_opt(norm_long_opt, explicit_value, state)
except NoSuchOption:
# At this point the long option matching failed, and we need
# to try with short options. However there is a special rule
# which says, that if we have a two character options prefix
# (applies to "--foo" for instance), we do not dispatch to the
# short option code and will instead raise the no option
# error.
if arg[:2] not in self._opt_prefixes:
self._match_short_opt(arg, state)
return
if not self.ignore_unknown_options:
raise
state.largs.append(arg)
| OptionParser |
python | FactoryBoy__factory_boy | tests/test_alchemy.py | {
"start": 8216,
"end": 8739
} | class ____(TransactionTestCase):
def test_create_raises_exception_when_no_session_was_set(self):
with self.assertRaises(RuntimeError):
NoSessionFactory.create()
def test_build_does_not_raises_exception_when_no_session_was_set(self):
NoSessionFactory.reset_sequence() # Make sure we start at test ID 0
inst0 = NoSessionFactory.build()
inst1 = NoSessionFactory.build()
self.assertEqual(inst0.id, 0)
self.assertEqual(inst1.id, 1)
| SQLAlchemyNoSessionTestCase |
python | docker__docker-py | docker/models/images.py | {
"start": 6472,
"end": 18023
} | class ____(Collection):
model = Image
def build(self, **kwargs):
"""
Build an image and return it. Similar to the ``docker build``
command. Either ``path`` or ``fileobj`` must be set.
If you already have a tar file for the Docker build context (including
a Dockerfile), pass a readable file-like object to ``fileobj``
and also pass ``custom_context=True``. If the stream is also
compressed, set ``encoding`` to the correct value (e.g ``gzip``).
If you want to get the raw output of the build, use the
:py:meth:`~docker.api.build.BuildApiMixin.build` method in the
low-level API.
Args:
path (str): Path to the directory containing the Dockerfile
fileobj: A file object to use as the Dockerfile. (Or a file-like
object)
tag (str): A tag to add to the final image
quiet (bool): Whether to return the status
nocache (bool): Don't use the cache when set to ``True``
rm (bool): Remove intermediate containers. The ``docker build``
command now defaults to ``--rm=true``, but we have kept the old
default of `False` to preserve backward compatibility
timeout (int): HTTP timeout
custom_context (bool): Optional if using ``fileobj``
encoding (str): The encoding for a stream. Set to ``gzip`` for
compressing
pull (bool): Downloads any updates to the FROM image in Dockerfiles
forcerm (bool): Always remove intermediate containers, even after
unsuccessful builds
dockerfile (str): path within the build context to the Dockerfile
buildargs (dict): A dictionary of build arguments
container_limits (dict): A dictionary of limits applied to each
container created by the build process. Valid keys:
- memory (int): set memory limit for build
- memswap (int): Total memory (memory + swap), -1 to disable
swap
- cpushares (int): CPU shares (relative weight)
- cpusetcpus (str): CPUs in which to allow execution, e.g.,
``"0-3"``, ``"0,1"``
shmsize (int): Size of `/dev/shm` in bytes. The size must be
greater than 0. If omitted the system uses 64MB
labels (dict): A dictionary of labels to set on the image
cache_from (list): A list of images used for build cache
resolution
target (str): Name of the build-stage to build in a multi-stage
Dockerfile
network_mode (str): networking mode for the run commands during
build
squash (bool): Squash the resulting images layers into a
single layer.
extra_hosts (dict): Extra hosts to add to /etc/hosts in building
containers, as a mapping of hostname to IP address.
platform (str): Platform in the format ``os[/arch[/variant]]``.
isolation (str): Isolation technology used during build.
Default: `None`.
use_config_proxy (bool): If ``True``, and if the docker client
configuration file (``~/.docker/config.json`` by default)
contains a proxy configuration, the corresponding environment
variables will be set in the container being built.
Returns:
(tuple): The first item is the :py:class:`Image` object for the
image that was built. The second item is a generator of the
build logs as JSON-decoded objects.
Raises:
:py:class:`docker.errors.BuildError`
If there is an error during the build.
:py:class:`docker.errors.APIError`
If the server returns any other error.
``TypeError``
If neither ``path`` nor ``fileobj`` is specified.
"""
resp = self.client.api.build(**kwargs)
if isinstance(resp, str):
return self.get(resp)
last_event = None
image_id = None
result_stream, internal_stream = itertools.tee(json_stream(resp))
for chunk in internal_stream:
if 'error' in chunk:
raise BuildError(chunk['error'], result_stream)
if 'stream' in chunk:
match = re.search(
r'(^Successfully built |sha256:)([0-9a-f]+)$',
chunk['stream']
)
if match:
image_id = match.group(2)
last_event = chunk
if image_id:
return (self.get(image_id), result_stream)
raise BuildError(last_event or 'Unknown', result_stream)
def get(self, name):
"""
Gets an image.
Args:
name (str): The name of the image.
Returns:
(:py:class:`Image`): The image.
Raises:
:py:class:`docker.errors.ImageNotFound`
If the image does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_image(name))
def get_registry_data(self, name, auth_config=None):
"""
Gets the registry data for an image.
Args:
name (str): The name of the image.
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
Returns:
(:py:class:`RegistryData`): The data object.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return RegistryData(
image_name=name,
attrs=self.client.api.inspect_distribution(name, auth_config),
client=self.client,
collection=self,
)
def list(self, name=None, all=False, filters=None):
"""
List images on the server.
Args:
name (str): Only show images belonging to the repository ``name``
all (bool): Show intermediate image layers. By default, these are
filtered out.
filters (dict): Filters to be processed on the image list.
Available filters:
- ``dangling`` (bool)
- `label` (str|list): format either ``"key"``, ``"key=value"``
or a list of such.
Returns:
(list of :py:class:`Image`): The images.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.images(name=name, all=all, filters=filters)
return [self.get(r["Id"]) for r in resp]
def load(self, data):
"""
Load an image that was previously saved using
:py:meth:`~docker.models.images.Image.save` (or ``docker save``).
Similar to ``docker load``.
Args:
data (binary): Image data to be loaded.
Returns:
(list of :py:class:`Image`): The images.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.load_image(data)
images = []
for chunk in resp:
if 'stream' in chunk:
match = re.search(
r'(^Loaded image ID: |^Loaded image: )(.+)$',
chunk['stream']
)
if match:
image_id = match.group(2)
images.append(image_id)
if 'errorDetail' in chunk:
raise ImageLoadError(chunk['errorDetail']['message'])
return [self.get(i) for i in images]
def pull(self, repository, tag=None, all_tags=False, **kwargs):
"""
Pull an image of the given name and return it. Similar to the
``docker pull`` command.
If ``tag`` is ``None`` or empty, it is set to ``latest``.
If ``all_tags`` is set, the ``tag`` parameter is ignored and all image
tags will be pulled.
If you want to get the raw pull output, use the
:py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
low-level API.
Args:
repository (str): The repository to pull
tag (str): The tag to pull
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
platform (str): Platform in the format ``os[/arch[/variant]]``
all_tags (bool): Pull all image tags
Returns:
(:py:class:`Image` or list): The image that has been pulled.
If ``all_tags`` is True, the method will return a list
of :py:class:`Image` objects belonging to this repository.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> # Pull the image tagged `latest` in the busybox repo
>>> image = client.images.pull('busybox')
>>> # Pull all tags in the busybox repo
>>> images = client.images.pull('busybox', all_tags=True)
"""
repository, image_tag = parse_repository_tag(repository)
tag = tag or image_tag or 'latest'
if 'stream' in kwargs:
warnings.warn(
'`stream` is not a valid parameter for this method'
' and will be overridden',
stacklevel=1,
)
del kwargs['stream']
pull_log = self.client.api.pull(
repository, tag=tag, stream=True, all_tags=all_tags, **kwargs
)
for _ in pull_log:
# We don't do anything with the logs, but we need
# to keep the connection alive and wait for the image
# to be pulled.
pass
if not all_tags:
sep = '@' if tag.startswith('sha256:') else ':'
return self.get(f'{repository}{sep}{tag}')
return self.list(repository)
def push(self, repository, tag=None, **kwargs):
return self.client.api.push(repository, tag=tag, **kwargs)
push.__doc__ = APIClient.push.__doc__
def remove(self, *args, **kwargs):
self.client.api.remove_image(*args, **kwargs)
remove.__doc__ = APIClient.remove_image.__doc__
def search(self, *args, **kwargs):
return self.client.api.search(*args, **kwargs)
search.__doc__ = APIClient.search.__doc__
def prune(self, filters=None):
return self.client.api.prune_images(filters=filters)
prune.__doc__ = APIClient.prune_images.__doc__
def prune_builds(self, *args, **kwargs):
return self.client.api.prune_builds(*args, **kwargs)
prune_builds.__doc__ = APIClient.prune_builds.__doc__
def normalize_platform(platform, engine_info):
if platform is None:
platform = {}
if 'os' not in platform:
platform['os'] = engine_info['Os']
if 'architecture' not in platform:
platform['architecture'] = engine_info['Arch']
return platform
| ImageCollection |
python | openai__openai-python | src/openai/resources/containers/containers.py | {
"start": 16506,
"end": 17183
} | class ____:
def __init__(self, containers: Containers) -> None:
self._containers = containers
self.create = _legacy_response.to_raw_response_wrapper(
containers.create,
)
self.retrieve = _legacy_response.to_raw_response_wrapper(
containers.retrieve,
)
self.list = _legacy_response.to_raw_response_wrapper(
containers.list,
)
self.delete = _legacy_response.to_raw_response_wrapper(
containers.delete,
)
@cached_property
def files(self) -> FilesWithRawResponse:
return FilesWithRawResponse(self._containers.files)
| ContainersWithRawResponse |
python | pydantic__pydantic | pydantic/mypy.py | {
"start": 50001,
"end": 50448
} | class ____(TypeTranslator):
"""A type translator used to change type of Any's, if explicit."""
def __init__(self, type_of_any: int) -> None:
self._type_of_any = type_of_any
super().__init__()
def visit_any(self, t: AnyType) -> Type: # noqa: D102
if t.type_of_any == TypeOfAny.explicit:
return t.copy_modified(type_of_any=self._type_of_any)
else:
return t
| ChangeExplicitTypeOfAny |
python | getsentry__sentry | src/sentry/integrations/utils/sync.py | {
"start": 1301,
"end": 8774
} | class ____(StrEnum):
EMAIL = "email"
EXTERNAL_ACTOR = "external_actor"
def should_sync_assignee_inbound(
organization: Organization | RpcOrganization, provider: str
) -> bool:
if provider == "github":
return features.has("organizations:integrations-github-project-management", organization)
return True
def _get_user_id(projects_by_user: dict[int, set[int]], group: Group) -> int | None:
user_ids = [
user_id
for user_id, project_ids in projects_by_user.items()
for project_id in project_ids
if group.project_id == project_id
]
if not user_ids:
return None
return user_ids[0]
def _get_affected_groups(
integration: RpcIntegration | Integration, external_issue_key: str | None
) -> QuerySet[Group]:
orgs_with_sync_enabled = where_should_sync(integration, "inbound_assignee")
return Group.objects.get_groups_by_external_issue(
integration,
orgs_with_sync_enabled,
external_issue_key,
)
def _handle_deassign(
groups: QuerySet[Group], integration: RpcIntegration | Integration
) -> QuerySet[Group]:
for group in groups:
if not should_sync_assignee_inbound(group.organization, integration.provider):
continue
GroupAssignee.objects.deassign(
group,
assignment_source=AssignmentSource.from_integration(integration),
)
return groups
def _handle_assign(
affected_groups: QuerySet[Group],
integration: RpcIntegration | Integration,
users: list[RpcUser],
) -> list[Group]:
groups_assigned: list[Group] = []
users_by_id = {user.id: user for user in users}
projects_by_user = Project.objects.get_by_users(users)
logger = logging.getLogger(f"sentry.integrations.{integration.provider}")
for group in affected_groups:
if not should_sync_assignee_inbound(group.organization, integration.provider):
continue
user_id = _get_user_id(projects_by_user, group)
user = users_by_id.get(user_id) if user_id is not None else None
if user:
logger.info(
"sync_group_assignee_inbound._handle_assign.assigning.group",
extra={
"group_id": group.id,
"user_id": user.id,
},
)
GroupAssignee.objects.assign(
group,
user,
assignment_source=AssignmentSource.from_integration(integration),
)
groups_assigned.append(group)
else:
logger.info(
"sync_group_assignee_inbound._handle_assign.user_not_found",
extra={
"group_id": group.id,
"user_id": user_id,
},
)
return groups_assigned
@region_silo_function
def sync_group_assignee_inbound_by_external_actor(
integration: RpcIntegration | Integration,
external_user_name: str,
external_issue_key: str | None,
assign: bool = True,
) -> QuerySet[Group] | list[Group]:
logger = logging.getLogger(f"sentry.integrations.{integration.provider}")
with ProjectManagementEvent(
action_type=ProjectManagementActionType.INBOUND_ASSIGNMENT_SYNC, integration=integration
).capture() as lifecycle:
affected_groups = _get_affected_groups(integration, external_issue_key)
log_context = {
"integration_id": integration.id,
"external_user_name": external_user_name,
"issue_key": external_issue_key,
"method": AssigneeInboundSyncMethod.EXTERNAL_ACTOR.value,
"assign": assign,
}
if not affected_groups:
logger.info("no-affected-groups", extra=log_context)
return []
if not assign:
return _handle_deassign(affected_groups, integration)
external_actors = ExternalActor.objects.filter(
provider=EXTERNAL_PROVIDERS_REVERSE[ExternalProviderEnum(integration.provider)].value,
external_name__iexact=external_user_name,
integration_id=integration.id,
user_id__isnull=False,
).values_list("user_id", flat=True)
user_ids = [
external_actor for external_actor in external_actors if external_actor is not None
]
log_context["user_ids"] = user_ids
logger.info("sync_group_assignee_inbound_by_external_actor.user_ids", extra=log_context)
users = user_service.get_many_by_id(ids=user_ids)
groups_assigned = _handle_assign(affected_groups, integration, users)
if len(groups_assigned) != len(affected_groups):
log_context["groups_assigned_count"] = len(groups_assigned)
log_context["affected_groups_count"] = len(affected_groups)
lifecycle.record_halt(
ProjectManagementHaltReason.SYNC_INBOUND_ASSIGNEE_NOT_FOUND, extra=log_context
)
return groups_assigned
@region_silo_function
def sync_group_assignee_inbound(
integration: RpcIntegration | Integration,
email: str | None,
external_issue_key: str | None,
assign: bool = True,
) -> QuerySet[Group] | list[Group]:
"""
Given an integration, user email address and an external issue key,
assign linked groups to matching users. Checks project membership.
Returns a list of groups that were successfully assigned.
"""
logger = logging.getLogger(f"sentry.integrations.{integration.provider}")
with ProjectManagementEvent(
action_type=ProjectManagementActionType.INBOUND_ASSIGNMENT_SYNC, integration=integration
).capture() as lifecycle:
affected_groups = _get_affected_groups(integration, external_issue_key)
log_context = {
"integration_id": integration.id,
"email": email,
"issue_key": external_issue_key,
"method": AssigneeInboundSyncMethod.EMAIL.value,
"assign": assign,
}
if not affected_groups:
logger.info("no-affected-groups", extra=log_context)
return []
if not assign:
return _handle_deassign(affected_groups, integration)
users = user_service.get_many_by_email(emails=[email], is_verified=True)
groups_assigned = _handle_assign(affected_groups, integration, users)
if len(groups_assigned) != len(affected_groups):
lifecycle.record_halt(
ProjectManagementHaltReason.SYNC_INBOUND_ASSIGNEE_NOT_FOUND, extra=log_context
)
return groups_assigned
def sync_group_assignee_outbound(
group: Group,
user_id: int | None,
assign: bool = True,
assignment_source: AssignmentSource | None = None,
) -> None:
from sentry.models.grouplink import GroupLink
external_issue_ids = GroupLink.objects.filter(
project_id=group.project_id, group_id=group.id, linked_type=GroupLink.LinkedType.issue
).values_list("linked_id", flat=True)
for external_issue_id in external_issue_ids:
sync_assignee_outbound.apply_async(
kwargs={
"external_issue_id": external_issue_id,
"user_id": user_id,
"assign": assign,
"assignment_source_dict": (
assignment_source.to_dict() if assignment_source else None
),
}
)
| AssigneeInboundSyncMethod |
python | cython__cython | Cython/Build/IpythonMagic.py | {
"start": 2156,
"end": 21453
} | class ____(Magics):
def __init__(self, shell):
super().__init__(shell)
self._reloads = {}
self._code_cache = {}
self._pyximport_installed = False
def _import_all(self, module):
mdict = module.__dict__
if '__all__' in mdict:
keys = mdict['__all__']
else:
keys = [k for k in mdict if not k.startswith('_')]
for k in keys:
try:
self.shell.push({k: mdict[k]})
except KeyError:
msg = "'module' object has no attribute '%s'" % k
raise AttributeError(msg)
@cell_magic
def cython_inline(self, line, cell):
"""Compile and run a Cython code cell using Cython.inline.
This magic simply passes the body of the cell to Cython.inline
and returns the result. If the variables `a` and `b` are defined
in the user's namespace, here is a simple example that returns
their sum::
%%cython_inline
return a+b
For most purposes, we recommend the usage of the `%%cython` magic.
"""
locs = self.shell.user_global_ns
globs = self.shell.user_ns
return cython_inline(cell, locals=locs, globals=globs)
@cell_magic
def cython_pyximport(self, line, cell):
"""Compile and import a Cython code cell using pyximport.
The contents of the cell are written to a `.pyx` file in the current
working directory, which is then imported using `pyximport`. This
magic requires a module name to be passed::
%%cython_pyximport modulename
def f(x):
return 2.0*x
The compiled module is then imported and all of its symbols are
injected into the user's namespace. For most purposes, we recommend
the usage of the `%%cython` magic.
"""
module_name = line.strip()
if not module_name:
raise ValueError('module name must be given')
fname = module_name + '.pyx'
with open(fname, 'w', encoding='utf-8') as f:
f.write(cell)
if 'pyximport' not in sys.modules or not self._pyximport_installed:
import pyximport
pyximport.install()
self._pyximport_installed = True
if module_name in self._reloads:
module = self._reloads[module_name]
# Note: reloading extension modules is not actually supported
# (requires PEP-489 reinitialisation support).
# Don't know why this should ever have worked as it reads here.
# All we really need to do is to update the globals below.
#reload(module)
else:
__import__(module_name)
module = sys.modules[module_name]
self._reloads[module_name] = module
self._import_all(module)
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'-a', '--annotate', action='store_const', const='default', dest='annotate',
help="Produce a colorized HTML version of the source."
)
@magic_arguments.argument(
'--annotate-fullc', action='store_const', const='fullc', dest='annotate',
help="Produce a colorized HTML version of the source "
"which includes entire generated C/C++-code."
)
@magic_arguments.argument(
'-+', '--cplus', action='store_true', default=False,
help="Output a C++ rather than C file."
)
@magic_arguments.argument(
'-3', dest='language_level', action='store_const', const=3, default=None,
help="Select Python 3 syntax."
)
@magic_arguments.argument(
'-2', dest='language_level', action='store_const', const=2, default=None,
help="Select Python 2 syntax."
)
@magic_arguments.argument(
'-f', '--force', action='store_true', default=False,
help="Force the compilation of a new module, even if the source has been "
"previously compiled."
)
@magic_arguments.argument(
'-c', '--compile-args', action='append', default=[],
help="Extra flags to pass to compiler via the `extra_compile_args` "
"Extension flag (can be specified multiple times)."
)
@magic_arguments.argument(
'--link-args', action='append', default=[],
help="Extra flags to pass to linker via the `extra_link_args` "
"Extension flag (can be specified multiple times)."
)
@magic_arguments.argument(
'-l', '--lib', action='append', default=[],
help="Add a library to link the extension against (can be specified "
"multiple times)."
)
@magic_arguments.argument(
'-n', '--name',
help="Specify a name for the Cython module."
)
@magic_arguments.argument(
'-L', dest='library_dirs', metavar='dir', action='append', default=[],
help="Add a path to the list of library directories (can be specified "
"multiple times)."
)
@magic_arguments.argument(
'-I', '--include', action='append', default=[],
help="Add a path to the list of include directories (can be specified "
"multiple times)."
)
@magic_arguments.argument(
'-S', '--src', action='append', default=[],
help="Add a path to the list of src files (can be specified "
"multiple times)."
)
@magic_arguments.argument(
'--pgo', dest='pgo', action='store_true', default=False,
help=("Enable profile guided optimisation in the C compiler. "
"Compiles the cell twice and executes it in between to generate a runtime profile.")
)
@magic_arguments.argument(
'--verbose', dest='quiet', action='store_false', default=True,
help=("Print debug information like generated .c/.cpp file location "
"and exact gcc/g++ command invoked.")
)
@cell_magic
def cython(self, line, cell):
"""Compile and import everything from a Cython code cell.
The contents of the cell are written to a `.pyx` file in the
directory returned by `get_ipython_cache_dir()/cython` using a filename
with the hash of the code. This file is then cythonized and compiled.
The resulting module is imported and all of its symbols are injected
into the user's namespace. The usage is similar to that of
`%%cython_pyximport` but you don't have to pass a module name::
%%cython
def f(x):
return 2.0*x
To compile OpenMP codes, pass the required `--compile-args`
and `--link-args`. For example with gcc::
%%cython --compile-args=-fopenmp --link-args=-fopenmp
...
To enable profile guided optimisation, pass the ``--pgo`` option.
Note that the cell itself needs to take care of establishing a suitable
profile when executed. This can be done by implementing the functions to
optimise, and then calling them directly in the same cell on some realistic
training data like this::
%%cython --pgo
def critical_function(data):
for item in data:
...
# execute function several times to build profile
from somewhere import some_typical_data
for _ in range(100):
critical_function(some_typical_data)
In Python 3.5 and later, you can distinguish between the profile and
non-profile runs as follows::
if "_pgo_" in __name__:
... # execute critical code here
"""
args = magic_arguments.parse_argstring(self.cython, line)
code = cell if cell.endswith('\n') else cell + '\n'
lib_dir = os.path.join(get_ipython_cache_dir(), 'cython')
key = (code, line, sys.version_info, sys.executable, cython_version)
if not os.path.exists(lib_dir):
os.makedirs(lib_dir)
if args.pgo:
key += ('pgo',)
if args.force:
# Force a new module name by adding the current time to the
# key which is hashed to determine the module name.
key += (time.time(),)
if args.name:
module_name = str(args.name) # no-op in Py3
else:
module_name = "_cython_magic_" + hashlib.sha256(str(key).encode('utf-8')).hexdigest()
html_file = os.path.join(lib_dir, module_name + '.html')
module_path = os.path.join(lib_dir, module_name + self.so_ext)
have_module = os.path.isfile(module_path)
need_cythonize = args.pgo or not have_module
if args.annotate:
if not os.path.isfile(html_file):
need_cythonize = True
extension = None
if need_cythonize:
extensions = self._cythonize(module_name, code, lib_dir, args, quiet=args.quiet)
if extensions is None:
# Compilation failed and printed error message
return None
assert len(extensions) == 1
extension = extensions[0]
self._code_cache[key] = module_name
if args.pgo:
self._profile_pgo_wrapper(extension, lib_dir)
def print_compiler_output(stdout, stderr, where):
# On windows, errors are printed to stdout, we redirect both to sys.stderr.
print_captured(stdout, where, "Content of stdout:\n")
print_captured(stderr, where, "Content of stderr:\n")
get_stderr = get_stdout = None
try:
with captured_fd(1) as get_stdout:
with captured_fd(2) as get_stderr:
self._build_extension(
extension, lib_dir, pgo_step_name='use' if args.pgo else None, quiet=args.quiet)
except (distutils.errors.CompileError, distutils.errors.LinkError):
# Build failed, print error message from compiler/linker
print_compiler_output(get_stdout(), get_stderr(), sys.stderr)
return None
# Build seems ok, but we might still want to show any warnings that occurred
print_compiler_output(get_stdout(), get_stderr(), sys.stdout)
module = load_dynamic(module_name, module_path)
self._import_all(module)
if args.annotate:
try:
with open(html_file, encoding='utf-8') as f:
annotated_html = f.read()
except OSError as e:
# File could not be opened. Most likely the user has a version
# of Cython before 0.15.1 (when `cythonize` learned the
# `force` keyword argument) and has already compiled this
# exact source without annotation.
print('Cython completed successfully but the annotated '
'source could not be read.', file=sys.stderr)
print(e, file=sys.stderr)
else:
return display.HTML(self.clean_annotated_html(annotated_html))
def _profile_pgo_wrapper(self, extension, lib_dir):
"""
Generate a .c file for a separate extension module that calls the
module init function of the original module. This makes sure that the
PGO profiler sees the correct .o file of the final module, but it still
allows us to import the module under a different name for profiling,
before recompiling it into the PGO optimised module. Overwriting and
reimporting the same shared library is not portable.
"""
extension = copy.copy(extension) # shallow copy, do not modify sources in place!
module_name = extension.name
pgo_module_name = '_pgo_' + module_name
pgo_wrapper_c_file = os.path.join(lib_dir, pgo_module_name + '.c')
with open(pgo_wrapper_c_file, 'w', encoding='utf-8') as f:
f.write(textwrap.dedent("""
#include "Python.h"
extern PyMODINIT_FUNC PyInit_%(module_name)s(void);
PyMODINIT_FUNC PyInit_%(pgo_module_name)s(void); /*proto*/
PyMODINIT_FUNC PyInit_%(pgo_module_name)s(void) {
return PyInit_%(module_name)s();
}
""" % {'module_name': module_name, 'pgo_module_name': pgo_module_name}))
extension.sources = extension.sources + [pgo_wrapper_c_file] # do not modify in place!
extension.name = pgo_module_name
self._build_extension(extension, lib_dir, pgo_step_name='gen')
# import and execute module code to generate profile
so_module_path = os.path.join(lib_dir, pgo_module_name + self.so_ext)
load_dynamic(pgo_module_name, so_module_path)
def _cythonize(self, module_name, code, lib_dir, args, quiet=True):
pyx_file = os.path.join(lib_dir, module_name + '.pyx')
c_include_dirs = args.include
c_src_files = list(map(str, args.src))
if 'numpy' in code:
import numpy
c_include_dirs.append(numpy.get_include())
with open(pyx_file, 'w', encoding='utf-8') as f:
f.write(code)
extension = Extension(
name=module_name,
sources=[pyx_file] + c_src_files,
include_dirs=c_include_dirs,
library_dirs=args.library_dirs,
extra_compile_args=args.compile_args,
extra_link_args=args.link_args,
libraries=args.lib,
language='c++' if args.cplus else 'c',
)
try:
opts = dict(
quiet=quiet,
annotate=args.annotate,
force=True,
language_level=min(3, sys.version_info[0]),
)
if args.language_level is not None:
assert args.language_level in (2, 3)
opts['language_level'] = args.language_level
return cythonize([extension], **opts)
except CompileError:
return None
def _build_extension(self, extension, lib_dir, temp_dir=None, pgo_step_name=None, quiet=True):
build_extension = self._get_build_extension(
extension, lib_dir=lib_dir, temp_dir=temp_dir, pgo_step_name=pgo_step_name)
old_threshold = None
try:
if not quiet:
old_threshold = distutils.log.set_threshold(distutils.log.DEBUG)
build_extension.run()
finally:
if not quiet and old_threshold is not None:
distutils.log.set_threshold(old_threshold)
def _add_pgo_flags(self, build_extension, step_name, temp_dir):
compiler_type = build_extension.compiler.compiler_type
if compiler_type == 'unix':
compiler_cmd = build_extension.compiler.compiler_so
# TODO: we could try to call "[cmd] --version" for better insights
if not compiler_cmd:
pass
elif 'clang' in compiler_cmd or 'clang' in compiler_cmd[0]:
compiler_type = 'clang'
elif 'icc' in compiler_cmd or 'icc' in compiler_cmd[0]:
compiler_type = 'icc'
elif 'gcc' in compiler_cmd or 'gcc' in compiler_cmd[0]:
compiler_type = 'gcc'
elif 'g++' in compiler_cmd or 'g++' in compiler_cmd[0]:
compiler_type = 'gcc'
config = PGO_CONFIG.get(compiler_type)
orig_flags = []
if config and step_name in config:
flags = [f.format(TEMPDIR=temp_dir) for f in config[step_name]]
for extension in build_extension.extensions:
orig_flags.append((extension.extra_compile_args, extension.extra_link_args))
extension.extra_compile_args = extension.extra_compile_args + flags
extension.extra_link_args = extension.extra_link_args + flags
else:
print("No PGO %s configuration known for C compiler type '%s'" % (step_name, compiler_type),
file=sys.stderr)
return orig_flags
@property
def so_ext(self):
"""The extension suffix for compiled modules."""
try:
return self._so_ext
except AttributeError:
self._so_ext = self._get_build_extension().get_ext_filename('')
return self._so_ext
def _clear_distutils_mkpath_cache(self):
"""clear distutils mkpath cache
prevents distutils from skipping re-creation of dirs that have been removed
"""
try:
from distutils.dir_util import _path_created
except ImportError:
pass
else:
_path_created.clear()
def _get_build_extension(self, extension=None, lib_dir=None, temp_dir=None,
pgo_step_name=None, _build_ext=build_ext):
self._clear_distutils_mkpath_cache()
dist = Distribution()
config_files = dist.find_config_files()
try:
config_files.remove('setup.cfg')
except ValueError:
pass
dist.parse_config_files(config_files)
if not temp_dir:
temp_dir = lib_dir
add_pgo_flags = self._add_pgo_flags
if pgo_step_name:
base_build_ext = _build_ext
class _build_ext(_build_ext):
def build_extensions(self):
add_pgo_flags(self, pgo_step_name, temp_dir)
base_build_ext.build_extensions(self)
build_extension = _build_ext(dist)
build_extension.finalize_options()
if temp_dir:
build_extension.build_temp = temp_dir
if lib_dir:
build_extension.build_lib = lib_dir
if extension is not None:
build_extension.extensions = [extension]
return build_extension
@staticmethod
def clean_annotated_html(html, include_style=True):
"""Clean up the annotated HTML source.
Strips the link to the generated C or C++ file, which we do not
present to the user.
Returns an HTML snippet (no <html>, <head>, or <body>),
containing only the style tag(s) and _contents_ of the body,
appropriate for embedding multiple times in cell output.
"""
# extract CSS and body, rather than full HTML document
chunks = []
if include_style:
styles = re.findall("<style.*</style>", html, re.MULTILINE | re.DOTALL)
chunks.extend(styles)
# extract body
body = re.search(
r"<body[^>]*>(.+)</body>", html, re.MULTILINE | re.DOTALL
).group(1)
# exclude link to generated file
r = re.compile('<p>Raw output: <a href="(.*)">(.*)</a>')
for line in body.splitlines():
if not r.match(line):
chunks.append(line)
return "\n".join(chunks)
__doc__ = __doc__.format(
# rST doesn't see the -+ flag as part of an option list, so we
# hide it from the module-level docstring.
CYTHON_DOC=dedent(CythonMagics.cython.__doc__
.replace('-+, --cplus', '--cplus ')),
CYTHON_INLINE_DOC=dedent(CythonMagics.cython_inline.__doc__),
CYTHON_PYXIMPORT_DOC=dedent(CythonMagics.cython_pyximport.__doc__),
)
| CythonMagics |
python | pytorch__pytorch | benchmarks/dynamo/pr_time_benchmarks/benchmarks/symint_sum.py | {
"start": 69,
"end": 1450
} | class ____(BenchmarkBase):
N = 200
def __init__(self, use_loop=False):
self.use_loop = use_loop
super().__init__(
category="symint_sum",
backend="inductor",
device="cpu",
)
def name(self):
if self.use_loop:
return f"{self.category()}_loop"
return self.category()
def description(self):
return "see https://docs.google.com/document/d/11xJXl1etSmefUxPiVyk885e0Dl-4o7QwxYcPiMIo2iY/edit"
def _prepare_once(self):
torch._dynamo.config.capture_scalar_outputs = True
torch.manual_seed(0)
self.splits = torch.randint(10, (self.N,))
def _prepare(self):
torch._dynamo.reset()
def _work(self):
@torch.compile(fullgraph=True)
def f(a):
xs = a.tolist()
y = 0
if self.use_loop:
for i in xs:
y += i
else:
y = sum(xs)
return torch.tensor(y)
f(self.splits)
def main():
result_path = sys.argv[1]
Benchmark(
use_loop=False
).enable_compile_time_instruction_count().collect_all().append_results(result_path)
Benchmark(
use_loop=True
).enable_compile_time_instruction_count().collect_all().append_results(result_path)
if __name__ == "__main__":
main()
| Benchmark |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_superfences.py | {
"start": 29080,
"end": 30509
} | class ____(util.MdCase):
"""Test fence ids and classes with attribute lists and with no Pygments."""
extension = ['pymdownx.highlight', 'pymdownx.superfences', 'markdown.extensions.attr_list']
extension_configs = {
"pymdownx.highlight": {
"use_pygments": False
}
}
def test_classes(self):
"""Test extra classes."""
self.check_markdown(
r'''
```{.python .more}
import test
```
''',
r'''
<pre class="highlight"><code class="language-python more">import test</code></pre>
''',
True
)
def test_id(self):
"""Test extra id."""
self.check_markdown(
r'''
```{.python #id}
import test
```
''',
r'''
<pre class="highlight"><code id="id" class="language-python">import test</code></pre>
''',
True
)
def test_attr(self):
"""Test extra attributes."""
self.check_markdown(
r'''
```{.python #id attr="test"}
import test
```
''',
r'''
<pre class="highlight"><code id="id" class="language-python" attr="test">import test</code></pre>
''',
True
)
| TestSuperFencesClassesIdsAttrListNoPygments |
python | astropy__astropy | astropy/wcs/wcsapi/high_level_api.py | {
"start": 1315,
"end": 11717
} | class ____(metaclass=abc.ABCMeta):
"""
Abstract base class for the high-level WCS interface.
This is described in `APE 14: A shared Python interface for World Coordinate
Systems <https://doi.org/10.5281/zenodo.1188875>`_.
"""
@property
@abc.abstractmethod
def low_level_wcs(self):
"""
Returns a reference to the underlying low-level WCS object.
"""
@abc.abstractmethod
def pixel_to_world(self, *pixel_arrays):
"""
Convert pixel coordinates to world coordinates (represented by
high-level objects).
If a single high-level object is used to represent the world coordinates
(i.e., if ``len(wcs.world_axis_object_classes) == 1``), it is returned
as-is (not in a tuple/list), otherwise a tuple of high-level objects is
returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values` for pixel
indexing and ordering conventions.
"""
def array_index_to_world(self, *index_arrays):
"""
Convert array indices to world coordinates (represented by Astropy
objects).
If a single high-level object is used to represent the world coordinates
(i.e., if ``len(wcs.world_axis_object_classes) == 1``), it is returned
as-is (not in a tuple/list), otherwise a tuple of high-level objects is
returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.array_index_to_world_values` for
pixel indexing and ordering conventions.
"""
return self.pixel_to_world(*index_arrays[::-1])
@abc.abstractmethod
def world_to_pixel(self, *world_objects):
"""
Convert world coordinates (represented by Astropy objects) to pixel
coordinates.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_pixel_values` for pixel
indexing and ordering conventions.
"""
def world_to_array_index(self, *world_objects):
"""
Convert world coordinates (represented by Astropy objects) to array
indices.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_array_index_values` for
pixel indexing and ordering conventions. The indices should be returned
as rounded integers.
"""
if self.low_level_wcs.pixel_n_dim == 1:
return _toindex(self.world_to_pixel(*world_objects))
else:
return tuple(
np.asarray(x)
for x in _toindex(self.world_to_pixel(*world_objects)[::-1])
)
def high_level_objects_to_values(*world_objects, low_level_wcs):
"""
Convert the input high level object to low level values.
This function uses the information in ``wcs.world_axis_object_classes`` and
``wcs.world_axis_object_components`` to convert the high level objects
(such as `~.SkyCoord`) to low level "values" which should be scalars or
Numpy arrays.
This is used in `.HighLevelWCSMixin.world_to_pixel`, but provided as a
separate function for use in other places where needed.
Parameters
----------
*world_objects: object
High level coordinate objects.
low_level_wcs: `.BaseLowLevelWCS`
The WCS object to use to interpret the coordinates.
"""
# Cache the classes and components since this may be expensive
serialized_classes = low_level_wcs.world_axis_object_classes
components = low_level_wcs.world_axis_object_components
# Deserialize world_axis_object_classes using the default order
classes = OrderedDict()
for key in default_order(components):
if low_level_wcs.serialized_classes:
classes[key] = deserialize_class(serialized_classes[key], construct=False)
else:
classes[key] = serialized_classes[key]
# Check that the number of classes matches the number of inputs
if len(world_objects) != len(classes):
raise ValueError(
f"Number of world inputs ({len(world_objects)}) does not match expected"
f" ({len(classes)})"
)
# Determine whether the classes are uniquely matched, that is we check
# whether there is only one of each class.
world_by_key = {}
unique_match = True
for w in world_objects:
matches = []
for key, (klass, *_) in classes.items():
if isinstance(w, klass):
matches.append(key)
if len(matches) == 1:
world_by_key[matches[0]] = w
else:
unique_match = False
break
# If the match is not unique, the order of the classes needs to match,
# whereas if all classes are unique, we can still intelligently match
# them even if the order is wrong.
objects = {}
if unique_match:
for key, (klass, args, kwargs, *rest) in classes.items():
if len(rest) == 0:
klass_gen = klass
elif len(rest) == 1:
klass_gen = rest[0]
else:
raise ValueError(
"Tuples in world_axis_object_classes should have length 3 or 4"
)
# FIXME: For now SkyCoord won't auto-convert upon initialization
# https://github.com/astropy/astropy/issues/7689
from astropy.coordinates import SkyCoord
if isinstance(world_by_key[key], SkyCoord):
if "frame" in kwargs:
objects[key] = world_by_key[key].transform_to(kwargs["frame"])
else:
objects[key] = world_by_key[key]
else:
objects[key] = klass_gen(world_by_key[key], *args, **kwargs)
else:
for ikey, key in enumerate(classes):
klass, args, kwargs, *rest = classes[key]
if len(rest) == 0:
klass_gen = klass
elif len(rest) == 1:
klass_gen = rest[0]
else:
raise ValueError(
"Tuples in world_axis_object_classes should have length 3 or 4"
)
w = world_objects[ikey]
if not isinstance(w, klass):
raise ValueError(
"Expected the following order of world arguments:"
f" {', '.join([k.__name__ for (k, *_) in classes.values()])}"
)
# FIXME: For now SkyCoord won't auto-convert upon initialization
# https://github.com/astropy/astropy/issues/7689
from astropy.coordinates import SkyCoord
if isinstance(w, SkyCoord):
if "frame" in kwargs:
objects[key] = w.transform_to(kwargs["frame"])
else:
objects[key] = w
else:
objects[key] = klass_gen(w, *args, **kwargs)
# We now extract the attributes needed for the world values
world = []
for key, _, attr in components:
if callable(attr):
world.append(attr(objects[key]))
else:
world.append(rec_getattr(objects[key], attr))
# Check the type of the return values - should be scalars or plain Numpy
# arrays, not e.g. Quantity. Note that we deliberately use type(w) because
# we don't want to match Numpy subclasses.
for w in world:
if not isinstance(w, numbers.Number) and not type(w) == np.ndarray:
raise TypeError(
f"WCS world_axis_object_components results in "
f"values which are not scalars or plain Numpy "
f"arrays (got {type(w)})"
)
return world
def values_to_high_level_objects(*world_values, low_level_wcs):
"""
Convert low level values into high level objects.
This function uses the information in ``wcs.world_axis_object_classes`` and
``wcs.world_axis_object_components`` to convert low level "values"
`~.Quantity` objects, to high level objects (such as `~.SkyCoord`).
This is used in `.HighLevelWCSMixin.pixel_to_world`, but provided as a
separate function for use in other places where needed.
Parameters
----------
*world_values: object
Low level, "values" representations of the world coordinates.
low_level_wcs: `.BaseLowLevelWCS`
The WCS object to use to interpret the coordinates.
"""
# Check the type of the input values - should be scalars or plain Numpy
# arrays, not e.g. Quantity. Note that we deliberately use type(w) because
# we don't want to match Numpy subclasses.
for w in world_values:
if not isinstance(w, numbers.Number) and not type(w) == np.ndarray:
raise TypeError(
f"Expected world coordinates as scalars or plain Numpy "
f"arrays (got {type(w)})"
)
# Cache the classes and components since this may be expensive
components = low_level_wcs.world_axis_object_components
classes = low_level_wcs.world_axis_object_classes
# Deserialize classes
if low_level_wcs.serialized_classes:
classes_new = {}
for key, value in classes.items():
classes_new[key] = deserialize_class(value, construct=False)
classes = classes_new
args = defaultdict(list)
kwargs = defaultdict(dict)
for i, (key, attr, _) in enumerate(components):
if isinstance(attr, str):
kwargs[key][attr] = world_values[i]
else:
while attr > len(args[key]) - 1:
args[key].append(None)
args[key][attr] = world_values[i]
result = []
for key in default_order(components):
klass, ar, kw, *rest = classes[key]
if len(rest) == 0:
klass_gen = klass
elif len(rest) == 1:
klass_gen = rest[0]
else:
raise ValueError(
"Tuples in world_axis_object_classes should have length 3 or 4"
)
result.append(klass_gen(*args[key], *ar, **kwargs[key], **kw))
return result
| BaseHighLevelWCS |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-upstage/llama_index/readers/upstage/base.py | {
"start": 2843,
"end": 13712
} | class ____(BaseReader):
"""
Upstage Layout Analysis Reader.
To use, you should have the environment variable `UPSTAGE_API_KEY`
set with your API key or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from llama_index.readers.file import UpstageLayoutAnalysisReader
reader = UpstageLayoutAnalysisReader()
docs = reader.load_data("path/to/file.pdf")
"""
def __init__(
self,
api_key: Optional[str] = None,
use_ocr: bool = False,
exclude: list = ["header", "footer"],
):
"""
Initializes an instance of the Upstage class.
Args:
api_key (str, optional): The API key for accessing the Upstage API.
Defaults to None, in which case it will be
fetched from the environment variable
`UPSTAGE_API_KEY`.
use_ocr (bool, optional): Extract text from images in the document.
Defaults to False. (Use text info in PDF file)
exclude (list, optional): Exclude specific elements from the output.
Defaults to [] (all included).
"""
self.api_key = get_from_param_or_env(
"UPSTAGE_API_KEY", api_key, "UPSTAGE_API_KEY"
)
self.use_ocr = use_ocr
self.exclude = exclude
validate_api_key(self.api_key)
def _get_response(self, files: Dict) -> List:
"""
Sends a POST request to the API endpoint with the provided files and
returns the response.
Args:
files (dict): A dictionary containing the files to be sent in the request.
Returns:
dict: The JSON response from the API.
Raises:
ValueError: If there is an error in the API call.
"""
try:
headers = {"Authorization": f"Bearer {self.api_key}"}
options = {"ocr": self.use_ocr}
response = requests.post(
LAYOUT_ANALYSIS_URL, headers=headers, files=files, data=options
)
response.raise_for_status()
result = response.json().get("elements", [])
return [
element for element in result if element["category"] not in self.exclude
]
except requests.RequestException as req_err:
# Handle any request-related exceptions
print(f"Request Exception: {req_err}")
raise ValueError(f"Failed to send request to Upstage API: {req_err}")
except json.JSONDecodeError as json_err:
# Handle JSON decode errors
print(f"JSON Decode Error: {json_err}")
raise ValueError(f"Failed to decode JSON response: {json_err}")
def _split_and_request(
self,
full_docs: fitzDocument,
start_page: int,
num_pages: int,
) -> List:
"""
Splits the full pdf document into partial pages and sends a request to the
server.
Args:
full_docs (str): The full document to be split and requested.
start_page (int): The starting page number for splitting the document.
num_pages (int, optional): The number of pages to split the document
into.
Defaults to DEFAULT_NUMBER_OF_PAGE.
Returns:
response: The response from the server.
"""
with fitz.open() as chunk_pdf:
chunk_pdf.insert_pdf(
full_docs,
from_page=start_page,
to_page=start_page + num_pages - 1,
)
pdf_bytes = chunk_pdf.write()
with io.BytesIO(pdf_bytes) as f:
return self._get_response({"document": f})
def _element_document(
self, element: Dict, output_type: OutputType, split: SplitType
) -> Document:
"""
Converts an elements into a Document object.
Args:
element (Dict): The element to be converted into a Document object.
output_type (OutputType): The output type of the document.
split (SplitType): The split type of the document.
Returns:
Document: A Document object representing the element with its content
and metadata.
"""
return Document(
text=(parse_output(element, output_type)),
extra_info={
"page": element["page"],
"id": element["id"],
"type": output_type,
"split": split,
"bounding_box": json.dumps(element["bounding_box"]),
},
)
def _page_document(
self, elements: List, output_type: OutputType, split: SplitType
) -> List[Document]:
"""
Combines elements with the same page number into a single Document object.
Args:
elements (List): A list of elements containing page numbers.
output_type (OutputType): The output type of the document.
split (SplitType): The split type of the document.
Returns:
List[Document]: A list of Document objects, each representing a page
with its content and metadata.
"""
_docs = []
pages = sorted({x["page"] for x in elements})
page_group = [
[element for element in elements if element["page"] == x] for x in pages
]
for group in page_group:
page_content = " ".join(
[parse_output(element, output_type) for element in group]
)
_docs.append(
Document(
text=page_content.strip(),
extra_info={
"page": group[0]["page"],
"type": output_type,
"split": split,
},
)
)
return _docs
def lazy_load_data(
self,
file_path: Union[str, Path, List[str], List[Path]],
output_type: Union[OutputType, dict] = "html",
split: SplitType = "none",
) -> Iterable[Document]:
"""
Load data from a file or list of files lazily.
Args:
file_path (Union[str, Path, List[str], List[Path]]): The path or list of paths to the file(s) to load.
output_type (Union[OutputType, dict], optional): The desired output type. Defaults to "html".
- If a dict is provided, it should be in the format {"category": "output_type", ...}.
- The category could possibly include the following:
- "paragraph"
- "caption"
- "table"
- "figure"
- "equation"
- "footer"
- "header"
- The output_type can be "text" or "html".
split (SplitType, optional): The type of splitting to apply. Defaults to "none".
Returns:
List[Document]: A list of Document objects containing the loaded data.
Raises:
ValueError: If an invalid split type is provided or if file_path is required.
"""
# Check if the file path is a list of paths
if isinstance(file_path, list):
for path in file_path:
docs = self.load_data(path, output_type, split)
yield from docs
else:
num_pages = DEFAULT_NUMBER_OF_PAGE
if not file_path:
raise ValueError("file_path is required.")
validate_file_path(file_path)
full_docs = fitz.open(file_path)
number_of_pages = full_docs.page_count
if split == "none":
if full_docs.is_pdf:
result = ""
start_page = 0
for _ in range(number_of_pages):
if start_page >= number_of_pages:
break
elements = self._split_and_request(
full_docs, start_page, num_pages
)
for element in elements:
result += parse_output(element, output_type)
start_page += num_pages
else:
with open(file_path, "rb") as f:
elements = self._get_response({"document": f})
result = ""
for element in elements:
result += parse_output(element, output_type)
yield Document(
text=result,
extra_info={
"total_pages": number_of_pages,
"type": output_type,
"split": split,
},
)
elif split == "element":
if full_docs.is_pdf:
start_page = 0
for _ in range(number_of_pages):
if start_page >= number_of_pages:
break
elements = self._split_and_request(
full_docs, start_page, num_pages
)
for element in elements:
yield self._element_document(element, output_type, split)
start_page += num_pages
else:
with open(file_path, "rb") as f:
elements = self._get_response({"document": f})
for element in elements:
yield self._element_document(element, output_type, split)
elif split == "page":
if full_docs.is_pdf:
start_page = 0
for _ in range(number_of_pages):
if start_page >= number_of_pages:
break
elements = self._split_and_request(
full_docs, start_page, num_pages
)
yield from self._page_document(elements, output_type, split)
start_page += num_pages
else:
with open(file_path, "rb") as f:
elements = self._get_response({"document": f})
yield from self._page_document(elements, output_type, split)
else:
raise ValueError(f"Invalid split type: {split}")
| UpstageLayoutAnalysisReader |
python | ZoranPandovski__al-go-rithms | data_structures/linked-queue/python/linked_queue.py | {
"start": 231,
"end": 2074
} | class ____:
#-------------------------------------------------------------------
# Construct Queue object self as an empty Queue object.
def __init__(self):
self._first = None # Reference to first _Node
self._last = None # Reference to last _Node
self._length = 0 # Number of items
#-------------------------------------------------------------------
# Return True if self is empty, and False otherwise.
def isEmpty(self):
return self._first is None
    #-------------------------------------------------------------------
    # Add item to the end of self.
    def enqueue(self, item):
        # O(1): wrap the item in a new tail node with no successor.
        oldLast = self._last
        self._last = _Node(item, None)
        # isEmpty() still reflects the pre-insert state, because only
        # _last has been reassigned so far (_first is untouched).
        if self.isEmpty():
            # Queue was empty: the new node is both head and tail.
            self._first = self._last
        else:
            # Link the previous tail to the new node.
            oldLast.next = self._last
        self._length += 1
#-------------------------------------------------------------------
# Remove the first item of self and return it.
def dequeue(self):
item = self._first.item
self._first = self._first.next
if self.isEmpty():
self._last = None
self._length -= 1
return item
#-------------------------------------------------------------------
# Return the number of items in self.
def __len__(self):
return self._length
#-------------------------------------------------------------------
# Return a string representation of self.
def __str__(self):
s = ''
cur = self._first
while cur is not None:
s += str(cur.item) + ' '
cur = cur.next
return s
#----------------------------------------------------------------------
# A _Node object references an item and a next _Node object.
# A Queue object is composed of _Node objects.
| Queue |
python | scipy__scipy | scipy/signal/tests/test_ltisys.py | {
"start": 32581,
"end": 33225
} | class ____:
    def test_initialization(self):
        # Check that all initializations work
        # Scalars, plain lists and ndarrays must all be accepted for the
        # zeros, poles and gain arguments.
        ZerosPolesGain(1, 1, 1)
        ZerosPolesGain([1], [2], 1)
        ZerosPolesGain(np.array([1]), np.array([2]), 1)
    def test_conversion(self):
        #Check the conversion functions
        s = ZerosPolesGain(1, 2, 3)
        assert isinstance(s.to_ss(), StateSpace)
        assert isinstance(s.to_tf(), TransferFunction)
        assert isinstance(s.to_zpk(), ZerosPolesGain)
        # Make sure copies work: converting (or copy-constructing) must
        # return a new object, never the original instance.
        assert ZerosPolesGain(s) is not s
        assert s.to_zpk() is not s
@make_xp_test_case(abcd_normalize)
| TestZerosPolesGain |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 37908,
"end": 38110
} | class ____(Blockwise):
    # Positional expression parameters: the scalar-producing frame and
    # the index label for the resulting one-row Series.
    _parameters = ["frame", "index"]
    # By default the single value is placed at index 0.
    _defaults = {"index": 0}
    @staticmethod
    def operation(value, index=0):
        """Wrap a scalar ``value`` into a length-1 pandas Series."""
        return pd.Series(value, index=[index])
| ScalarToSeries |
python | kamyu104__LeetCode-Solutions | Python/find-triangular-sum-of-an-array.py | {
"start": 1699,
"end": 2075
} | class ____(object):
    def triangularSum(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        The repeated pairwise-sum reduction is a Pascal-triangle
        convolution, so the final digit is
        sum(C(n-1, i) * nums[i]) mod 10 with n = len(nums).
        """
        result = 0
        nCr = 1  # running binomial coefficient C(len(nums)-1, i)
        for i in xrange(len(nums)):  # xrange: this file targets Python 2
            result = (result+nCr*nums[i])%10
            # Advance C(n-1, i) -> C(n-1, i+1) with exact integer math.
            nCr *= (len(nums)-1)-i
            nCr //= i+1
        return result
# Time: O(n^2)
# Space: O(1)
# simulation
| Solution2 |
python | getsentry__sentry | src/sentry/users/api/serializers/user.py | {
"start": 12223,
"end": 12322
} | class ____(UserSerializerResponse):
permissions: Sequence[str]
| DetailedSelfUserSerializerResponse |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 85623,
"end": 85878
} | class ____(BaseModel, extra="forbid"):
id: "ExtendedPointId" = Field(..., description="")
vector: "VectorStruct" = Field(..., description="")
payload: Optional["Payload"] = Field(default=None, description="Payload values (optional)")
| PointStruct |
python | django-extensions__django-extensions | django_extensions/management/commands/sqldiff.py | {
"start": 48245,
"end": 51206
} | class ____(SQLDiff):
can_detect_notnull_differ = True
can_detect_unsigned_differ = False
    def load_null(self):
        """Record the nullability of every column of every DB table."""
        for table_name in self.db_tables:
            # sqlite does not support tablespaces
            tablespace = "public"
            # index, column_name, column_type, nullable, default_value
            # see: https://www.sqlite.org/pragma.html#pragma_table_info
            for table_info in self.sql_to_dict(
                "PRAGMA table_info('%s');" % table_name, []
            ):
                key = (tablespace, table_name, table_info["name"])
                # PRAGMA reports "notnull"; invert to store "is nullable".
                self.null[key] = not table_info["notnull"]
    def load_unsigned(self):
        """No-op: SQLite has no unsigned column types to record."""
        pass
# Unique does not seem to be implied on Sqlite for Primary_key's
# if this is more generic among databases this might be useful
# to add to the superclass's find_unique_missing_in_db method
def find_unique_missing_in_db(
self, meta, table_indexes, table_constraints, table_name, skip_list=None
):
if skip_list is None:
skip_list = []
unique_columns = [
field.db_column or field.attname
for field in all_local_fields(meta)
if field.unique
]
for constraint in table_constraints.values():
columns = constraint["columns"]
if len(columns) == 1:
column = columns[0]
if column in unique_columns and (
constraint["unique"] or constraint["primary_key"]
):
skip_list.append(column)
unique_together = self.get_unique_together(meta)
db_unique_columns = normalize_together(
[v["columns"] for v in table_constraints.values() if v["unique"]]
)
for unique_columns in unique_together:
if unique_columns in db_unique_columns:
skip_list.append(unique_columns)
super().find_unique_missing_in_db(
meta, table_indexes, table_constraints, table_name, skip_list=skip_list
)
    # Finding Indexes by using the get_indexes dictionary doesn't seem to work
    # for sqlite.
    def find_index_missing_in_db(
        self, meta, table_indexes, table_constraints, table_name
    ):
        """No-op: index introspection is unreliable on SQLite (see above)."""
        pass
    def find_index_missing_in_model(
        self, meta, table_indexes, table_constraints, table_name
    ):
        """No-op: index introspection is unreliable on SQLite (see above)."""
        pass
def get_field_db_type(self, description, field=None, table_name=None):
db_type = super().get_field_db_type(description, field, table_name)
if not db_type:
return None
if field:
field_type = self.get_field_model_type(field)
# Fix char/varchar inconsistencies
if (
self.strip_parameters(field_type) == "char"
and self.strip_parameters(db_type) == "varchar"
):
db_type = db_type.lstrip("var")
return db_type
| SqliteSQLDiff |
python | sympy__sympy | sympy/functions/combinatorial/numbers.py | {
"start": 47263,
"end": 51744
} | class ____(DefinedFunction):
r"""
Genocchi numbers / Genocchi polynomials / Genocchi function
The Genocchi numbers are a sequence of integers `G_n` that satisfy the
relation:
.. math:: \frac{-2t}{1 + e^{-t}} = \sum_{n=0}^\infty \frac{G_n t^n}{n!}
They are related to the Bernoulli numbers by
.. math:: G_n = 2 (1 - 2^n) B_n
and generalize like the Bernoulli numbers to the Genocchi polynomials and
function as
.. math:: \operatorname{G}(s, a) = 2 \left(\operatorname{B}(s, a) -
2^s \operatorname{B}\left(s, \frac{a+1}{2}\right)\right)
.. versionchanged:: 1.12
``genocchi(1)`` gives `-1` instead of `1`.
Examples
========
>>> from sympy import genocchi, Symbol
>>> [genocchi(n) for n in range(9)]
[0, -1, -1, 0, 1, 0, -3, 0, 17]
>>> n = Symbol('n', integer=True, positive=True)
>>> genocchi(2*n + 1)
0
>>> x = Symbol('x')
>>> genocchi(4, x)
-4*x**3 + 6*x**2 - 1
See Also
========
bell, bernoulli, catalan, euler, fibonacci, harmonic, lucas, partition, tribonacci
sympy.polys.appellseqs.genocchi_poly
References
==========
.. [1] https://en.wikipedia.org/wiki/Genocchi_number
.. [2] https://mathworld.wolfram.com/GenocchiNumber.html
.. [3] Peter Luschny, "An introduction to the Bernoulli function",
https://arxiv.org/abs/2009.06743
"""
    @classmethod
    def eval(cls, n, x=None):
        # Automatic-evaluation hook; returning None leaves the
        # expression unevaluated.
        if x is S.One:
            # G(n, 1) equals the Genocchi number G_n.
            return cls(n)
        elif n.is_integer is False or n.is_nonnegative is False:
            return
        # Genocchi numbers
        elif x is None:
            if n.is_odd and (n-1).is_positive:
                # Odd-index numbers vanish for n >= 3.
                return S.Zero
            elif n.is_Number:
                # G_n = 2 (1 - 2**n) B_n (see class docstring).
                return 2 * (1-S(2)**n) * bernoulli(n)
        # Genocchi polynomials
        elif n.is_Number:
            return genocchi_poly(n, x)
    def _eval_rewrite_as_bernoulli(self, n, x=1, **kwargs):
        """Rewrite via G_n = 2(1-2^n)B_n or its polynomial analogue."""
        if x == 1 and n.is_integer and n.is_nonnegative:
            return 2 * (1-S(2)**n) * bernoulli(n)
        return 2 * (bernoulli(n, x) - 2**n * bernoulli(n, (x+1) / 2))
    def _eval_rewrite_as_dirichlet_eta(self, n, x=1, **kwargs):
        """Rewrite as -2*n*eta(1-n, x) via the Dirichlet eta function."""
        from sympy.functions.special.zeta_functions import dirichlet_eta
        return -2*n * dirichlet_eta(1-n, x)
    def _eval_is_integer(self):
        # Only the Genocchi *numbers* (x omitted or 1) are integers;
        # polynomial values generally are not.
        if len(self.args) > 1 and self.args[1] != 1:
            return
        n = self.args[0]
        if n.is_integer and n.is_nonnegative:
            return True
    def _eval_is_negative(self):
        if len(self.args) > 1 and self.args[1] != 1:
            return
        n = self.args[0]
        if n.is_integer and n.is_nonnegative:
            if n.is_odd:
                # G_1 = -1 is the only negative odd-index value;
                # G_3, G_5, ... are zero.
                return fuzzy_not((n-1).is_positive)
            # Even indices alternate in sign: negative iff n/2 is odd
            # (G_2 = -1, G_6 = -3, ...).
            return (n/2).is_odd
    def _eval_is_positive(self):
        if len(self.args) > 1 and self.args[1] != 1:
            return
        n = self.args[0]
        if n.is_integer and n.is_nonnegative:
            if n.is_zero or n.is_odd:
                # G_0 = 0, G_1 = -1 and higher odd indices are 0.
                return False
            # Even indices are positive iff n/2 is even (G_4 = 1, G_8 = 17).
            return (n/2).is_even
    def _eval_is_even(self):
        if len(self.args) > 1 and self.args[1] != 1:
            return
        n = self.args[0]
        if n.is_integer and n.is_nonnegative:
            if n.is_even:
                # Even n yields odd values except G_0 = 0.
                return n.is_zero
            # Odd n: zero (even) for n >= 3, but G_1 = -1 is odd.
            return (n-1).is_positive
    def _eval_is_odd(self):
        # Mirror image of _eval_is_even, using three-valued fuzzy logic.
        if len(self.args) > 1 and self.args[1] != 1:
            return
        n = self.args[0]
        if n.is_integer and n.is_nonnegative:
            if n.is_even:
                return fuzzy_not(n.is_zero)
            return fuzzy_not((n-1).is_positive)
    def _eval_is_prime(self):
        if len(self.args) > 1 and self.args[1] != 1:
            return
        n = self.args[0]
        # only G_6 = -3 and G_8 = 17 are prime,
        # but SymPy does not consider negatives as prime
        # so only n=8 is tested
        return (n-8).is_zero
    def _eval_evalf(self, prec):
        # Numeric evaluation is delegated to the Bernoulli representation.
        if all(i.is_number for i in self.args):
            return self.rewrite(bernoulli)._eval_evalf(prec)
#----------------------------------------------------------------------------#
# #
# Andre numbers #
# #
#----------------------------------------------------------------------------#
| genocchi |
python | ray-project__ray | doc/source/serve/doc_code/grpc_proxy/user_defined_protos_pb2_grpc.py | {
"start": 3748,
"end": 6249
} | class ____(object):
"""Missing associated documentation comment in .proto file."""
    @staticmethod
    def __call__(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        # protoc-generated unary-unary convenience stub: opens a channel
        # to ``target`` and invokes the __call__ RPC once. Do not edit by
        # hand; regenerate from the .proto file instead.
        return grpc.experimental.unary_unary(
            request,
            target,
            "/userdefinedprotos.UserDefinedService/__call__",
            user__defined__protos__pb2.UserDefinedMessage.SerializeToString,
            user__defined__protos__pb2.UserDefinedResponse.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
    @staticmethod
    def Multiplexing(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        # protoc-generated unary-unary stub for the Multiplexing RPC
        # (UserDefinedMessage2 -> UserDefinedResponse2). Do not edit by
        # hand; regenerate from the .proto file instead.
        return grpc.experimental.unary_unary(
            request,
            target,
            "/userdefinedprotos.UserDefinedService/Multiplexing",
            user__defined__protos__pb2.UserDefinedMessage2.SerializeToString,
            user__defined__protos__pb2.UserDefinedResponse2.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
    @staticmethod
    def Streaming(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        # protoc-generated unary-stream stub for the Streaming RPC: the
        # server returns an iterator of responses. Do not edit by hand;
        # regenerate from the .proto file instead.
        return grpc.experimental.unary_stream(
            request,
            target,
            "/userdefinedprotos.UserDefinedService/Streaming",
            user__defined__protos__pb2.UserDefinedMessage.SerializeToString,
            user__defined__protos__pb2.UserDefinedResponse.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
| UserDefinedService |
python | pytorch__pytorch | test/test_datapipe.py | {
"start": 131328,
"end": 142107
} | class ____(TestCase):
r"""
Each `IterDataPipe` can only have one active iterator. Whenever a new iterator is created, older
iterators are invalidated. These tests aim to ensure `IterDataPipe` follows this behavior.
"""
    def _check_single_iterator_invalidation_logic(self, source_dp: IterDataPipe):
        r"""
        Given a IterDataPipe, verifies that the iterator can be read, reset, and the creation of
        a second iterator invalidates the first one.

        Shared helper: the pipe is expected to yield exactly range(10).
        """
        it1 = iter(source_dp)
        self.assertEqual(list(range(10)), list(it1))
        it1 = iter(source_dp)
        self.assertEqual(
            list(range(10)), list(it1)
        )  # A fresh iterator can be read in full again
        it1 = iter(source_dp)
        self.assertEqual(0, next(it1))
        it2 = iter(source_dp)  # This should invalidate `it1`
        self.assertEqual(0, next(it2))  # Should read from the beginning again
        # Reading the stale iterator must raise, not silently continue.
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it1)
    def test_iterdatapipe_singleton_generator(self):
        r"""
        Testing for the case where IterDataPipe's `__iter__` is a generator function.
        """
        # Functional Test: Check if invalidation logic is correct
        source_dp: IterDataPipe = dp.iter.IterableWrapper(range(10))
        self._check_single_iterator_invalidation_logic(source_dp)
        # Functional Test: extend the test to a pipeline
        dps = source_dp.map(_fake_fn).filter(_fake_filter_fn)
        self._check_single_iterator_invalidation_logic(dps)
        # Functional Test: multiple simultaneous references to the same DataPipe fails
        # (zip creates two live iterators over one pipe).
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            for _ in zip(source_dp, source_dp):
                pass
        # Function Test: sequential references work
        # (each list() call fully exhausts its iterator before the next starts).
        for _ in zip(list(source_dp), list(source_dp)):
            pass
    def test_iterdatapipe_singleton_self_next(self):
        r"""
        Testing for the case where IterDataPipe's `__iter__` returns `self` and there is a `__next__` method
        Note that the following DataPipe by is singleton by default (because `__iter__` returns `self`).
        """
        class _CustomIterDP_Self(IterDataPipe):
            def __init__(self, iterable):
                self.source = iterable
                self.iterable = iter(iterable)
            def __iter__(self):
                # Restart from the beginning on every __iter__ call.
                self.reset()
                return self
            def __next__(self):
                return next(self.iterable)
            def reset(self):
                self.iterable = iter(self.source)
        # Functional Test: Check that every `__iter__` call returns the same object
        source_dp = _CustomIterDP_Self(range(10))
        res = list(source_dp)
        it = iter(source_dp)
        self.assertEqual(res, list(it))
        # Functional Test: Check if invalidation logic is correct
        source_dp = _CustomIterDP_Self(range(10))
        self._check_single_iterator_invalidation_logic(source_dp)
        self.assertEqual(
            1, next(source_dp)
        )  # `source_dp` is still valid and can be read
        # Functional Test: extend the test to a pipeline
        source_dp = _CustomIterDP_Self(
            dp.iter.IterableWrapper(range(10)).map(_fake_fn).filter(_fake_filter_fn)
        )
        self._check_single_iterator_invalidation_logic(source_dp)
        self.assertEqual(
            1, next(source_dp)
        )  # `source_dp` is still valid and can be read
        # Functional Test: multiple simultaneous references to the same DataPipe fails
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            for _ in zip(source_dp, source_dp):
                pass
    def test_iterdatapipe_singleton_new_object(self):
        r"""
        Testing for the case where IterDataPipe's `__iter__` isn't a generator nor returns `self`,
        and there isn't a `__next__` method.
        """
        class _CustomIterDP(IterDataPipe):
            def __init__(self, iterable):
                self.iterable = iter(iterable)
            def __iter__(self):  # Note that this doesn't reset
                return self.iterable  # Intentionally not returning `self`
        # Functional Test: Check if invalidation logic is correct
        # (no reset, so the second iterator continues from element 1).
        source_dp = _CustomIterDP(range(10))
        it1 = iter(source_dp)
        self.assertEqual(0, next(it1))
        it2 = iter(source_dp)
        self.assertEqual(1, next(it2))
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it1)
        # Functional Test: extend the test to a pipeline
        source_dp = _CustomIterDP(
            dp.iter.IterableWrapper(range(10)).map(_fake_fn).filter(_fake_filter_fn)
        )
        it1 = iter(source_dp)
        self.assertEqual(0, next(it1))
        it2 = iter(source_dp)
        self.assertEqual(1, next(it2))
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it1)
        # Functional Test: multiple simultaneous references to the same DataPipe fails
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            for _ in zip(source_dp, source_dp):
                pass
    def test_iterdatapipe_singleton_buggy(self):
        r"""
        Buggy test case case where IterDataPipe's `__iter__` returns a new object, but also has
        a `__next__` method.
        """
        class _CustomIterDP(IterDataPipe):
            def __init__(self, iterable):
                self.source = iterable
                self.iterable = iter(iterable)
            def __iter__(self):
                return iter(self.source)  # Intentionally not returning `self`
            def __next__(self):
                return next(self.iterable)
        # Functional Test: Check if invalidation logic is correct
        source_dp = _CustomIterDP(range(10))
        self._check_single_iterator_invalidation_logic(source_dp)
        self.assertEqual(0, next(source_dp))  # `__next__` is unrelated with `__iter__`
        # Functional Test: Special case to show `__next__` is unrelated with `__iter__`
        # (they advance independent underlying iterators in this buggy pipe).
        source_dp = _CustomIterDP(range(10))
        self.assertEqual(0, next(source_dp))
        it1 = iter(source_dp)
        self.assertEqual(0, next(it1))
        self.assertEqual(1, next(source_dp))
        it2 = iter(source_dp)  # invalidates both `it1`
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it1)
        self.assertEqual(2, next(source_dp))  # not impacted by the creation of `it2`
        self.assertEqual(
            list(range(10)), list(it2)
        )  # `it2` still works because it is a new object
    def test_iterdatapipe_singleton_constraint_multiple_outputs(self):
        r"""
        Testing for the case where IterDataPipe has multiple child DataPipes as outputs.
        """
        # Functional Test: all previous related iterators should be invalidated when a new iterator
        # is created from a ChildDataPipe
        source_dp: IterDataPipe = dp.iter.IterableWrapper(range(10))
        cdp1, cdp2 = source_dp.fork(num_instances=2)
        it1, it2 = iter(cdp1), iter(cdp2)
        self.assertEqual(list(range(10)), list(it1))
        self.assertEqual(list(range(10)), list(it2))
        it1, it2 = iter(cdp1), iter(cdp2)
        with warnings.catch_warnings(record=True) as wa:
            it3 = iter(cdp1)  # This should invalidate `it1` and `it2`
            self.assertEqual(len(wa), 1)
            self.assertRegex(str(wa[0].message), r"child DataPipes are not exhausted")
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it1)
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it2)
        self.assertEqual(0, next(it3))
        # The next line should not invalidate anything, as there was no new iterator created
        # for `cdp2` after `it2` was invalidated
        it4 = iter(cdp2)
        self.assertEqual(1, next(it3))  # An error shouldn't be raised here
        self.assertEqual(list(range(10)), list(it4))
        # Functional Test: invalidation when a new iterator is created from `source_dp`
        source_dp = dp.iter.IterableWrapper(range(10))
        cdp1, cdp2 = source_dp.fork(num_instances=2)
        it1, it2 = iter(cdp1), iter(cdp2)
        self.assertEqual(list(range(10)), list(it1))
        self.assertEqual(list(range(10)), list(it2))
        it1, it2 = iter(cdp1), iter(cdp2)
        self.assertEqual(0, next(it1))
        self.assertEqual(0, next(it2))
        it3 = iter(source_dp)  # note that a new iterator is created from `source_dp`
        self.assertEqual(
            0, next(it3)
        )  # `it3` should invalidate `it1` and `it2` since they both use `source_dp`
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it1)
        self.assertEqual(1, next(it3))
        # Function Test: Extending test to pipeline
        # (same two scenarios as above, but with map/filter stages between
        # the wrapper and the fork).
        source_dp = (
            dp.iter.IterableWrapper(range(10)).map(_fake_fn).filter(_fake_filter_fn)
        )
        cdp1, cdp2 = source_dp.fork(num_instances=2)
        it1, it2 = iter(cdp1), iter(cdp2)
        self.assertEqual(list(range(10)), list(it1))
        self.assertEqual(list(range(10)), list(it2))
        it1, it2 = iter(cdp1), iter(cdp2)
        with warnings.catch_warnings(record=True) as wa:
            it3 = iter(cdp1)  # This should invalidate `it1` and `it2`
            self.assertEqual(len(wa), 1)
            self.assertRegex(str(wa[0].message), r"child DataPipes are not exhausted")
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it1)
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it2)
        with warnings.catch_warnings(record=True) as wa:
            it1, it2 = iter(cdp1), iter(cdp2)
            self.assertEqual(len(wa), 1)
            self.assertRegex(str(wa[0].message), r"child DataPipes are not exhausted")
        self.assertEqual(0, next(it1))
        self.assertEqual(0, next(it2))
        it3 = iter(source_dp)  # note that a new iterator is created from `source_dp`
        self.assertEqual(
            0, next(it3)
        )  # `it3` should invalidate `it1` and `it2` since they both use `source_dp`
        with self.assertRaisesRegex(RuntimeError, "This iterator has been invalidated"):
            next(it1)
        self.assertEqual(1, next(it3))
| TestIterDataPipeSingletonConstraint |
python | boto__boto3 | tests/functional/test_resource.py | {
"start": 733,
"end": 1430
} | class ____(unittest.TestCase):
    def setUp(self):
        # A real botocore session is required so handlers registered on it
        # are seen by the boto3 Session built from it in the tests below.
        self.botocore_session = botocore.session.get_session()
    def add_new_method(self, name):
        """Return an event handler that injects ``identity`` as attribute ``name``."""
        def handler(class_attributes, **kwargs):
            class_attributes[name] = identity
        return handler
    def test_can_inject_method_onto_resource(self):
        """The creating-resource-class event can add methods to a resource."""
        session = boto3.Session(botocore_session=self.botocore_session)
        self.botocore_session.register(
            'creating-resource-class.s3', self.add_new_method(name='my_method')
        )
        resource = session.resource('s3')
        assert hasattr(resource, 'my_method')
        # The injected method is `identity`, so it echoes its argument.
        assert resource.my_method('anything') == 'anything'
| TestResourceCustomization |
python | pytorch__pytorch | test/dynamo/cpython/3_13/seq_tests.py | {
"start": 2981,
"end": 3194
} | class ____:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
    def __iter__(self):
        # The object is its own iterator.
        return self
    def __next__(self):
        # Deliberately raise ZeroDivisionError on the first advance, to
        # test that exceptions raised mid-iteration propagate to callers.
        3 // 0
| IterGenExc |
python | tensorflow__tensorflow | tensorflow/python/keras/engine/base_layer.py | {
"start": 129208,
"end": 130672
} | class ____(Layer):
"""Adds its inputs as a metric.
Attributes:
aggregation: 'mean' or None. How the inputs should be aggregated.
metric_name: The name to use for this metric.
"""
    def __init__(self, aggregation=None, metric_name=None, **kwargs):
        # aggregation: 'mean' or None; metric_name: name reported for the
        # metric (see class docstring). Remaining kwargs go to Layer.
        super(AddMetric, self).__init__(**kwargs)
        self.aggregation = aggregation
        self.metric_name = metric_name
    def call(self, inputs):
        # Identity layer: records its input as a metric as a side effect
        # and passes the input through unchanged.
        self.add_metric(inputs, aggregation=self.aggregation, name=self.metric_name)
        return inputs
    def get_config(self):
        """Return the serialization config, including this layer's fields."""
        config = super(AddMetric, self).get_config()
        config.update({
            'aggregation': self.aggregation,
            'metric_name': self.metric_name
        })
        return config
def _in_functional_construction_mode(layer, inputs, args, kwargs, input_list):  # pylint: disable=unused-argument
  """Check the arguments to see if we are constructing a functional model."""
  # We are constructing a functional model if any of the inputs
  # are KerasTensors
  return any(
      isinstance(tensor, keras_tensor.KerasTensor)
      for tensor in nest.flatten([inputs, args, kwargs]))
def _convert_numpy_or_python_types(x):
  # Convert plain numerics and ndarrays to tensors; pass everything else
  # (e.g. values that are already tensors) through unchanged.
  if isinstance(x, (np_arrays.ndarray, np.ndarray, float, int)):
    return tensor_conversion.convert_to_tensor_v2_with_dispatch(x)
  return x
# Avoid breaking users who directly import this symbol from this file.
# TODO(fchollet): remove this.
InputSpec = input_spec.InputSpec # pylint:disable=invalid-name
| AddMetric |
python | pennersr__django-allauth | allauth/socialaccount/providers/tumblr/provider.py | {
"start": 366,
"end": 736
} | class ____(OAuthProvider):
id = "tumblr"
name = "Tumblr"
account_class = TumblrAccount
oauth_adapter_class = TumblrOAuthAdapter
    def extract_uid(self, data):
        # The blog "name" is used as the unique account identifier.
        # NOTE(review): assumes Tumblr user payloads always carry "name";
        # a missing key would raise KeyError here — confirm.
        return data["name"]
    def extract_common_fields(self, data):
        # Only a display name is available to prefill from Tumblr.
        return dict(
            first_name=data.get("name"),
        )
provider_classes = [TumblrProvider]
| TumblrProvider |
python | haoel__leetcode | algorithms/python/SerializeAndDeserializeBST/serialize.py | {
"start": 0,
"end": 761
} | class ____:
def serialize(self, root):
preorder = []
def helper(node):
if node:
preorder.append(node.val)
helper(node.left)
helper(node.right)
helper(root)
return ' '.join(map(str, preorder))
    def deserialize(self, data):
        """Rebuild the BST from its space-separated preorder string.

        Preorder plus BST value bounds uniquely determine the tree: the
        next value is consumed only while it fits inside the open
        (minval, maxval) window of the current subtree; otherwise build
        returns None implicitly (empty subtree).
        """
        vals = collections.deque(int(val) for val in data.split())
        def build(minval, maxval):
            if vals and minval < vals[0] < maxval:
                val = vals.popleft()
                node = TreeNode(val)
                node.left = build(minval, val)
                node.right = build(val, maxval)
                return node
        return build(float('-infinity'), float('infinity'))
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 20020,
"end": 20182
} | class ____(BaseModel, extra="forbid"):
    # A context example: a vector the result should be close to
    # (positive) and one it should be far from (negative).
    positive: "VectorInput" = Field(..., description="")
    negative: "VectorInput" = Field(..., description="")
python | kamyu104__LeetCode-Solutions | Python/maximum-score-after-applying-operations-on-a-tree.py | {
"start": 54,
"end": 1116
} | class ____(object):
    def maximumScoreAfterOperations(self, edges, values):
        """
        :type edges: List[List[int]]
        :type values: List[int]
        :rtype: int

        Complement trick: the tree stays healthy as long as every
        root-to-leaf path keeps at least one unpicked node, so the answer
        is sum(values) minus the cheapest such "blocking" set.
        dp[u] = min cost to block every root-to-leaf path through u.
        """
        def iter_dfs():
            dp = [0]*len(values)
            # (step, node, parent): step 1 = pre-visit, step 2 = post-visit.
            stk = [(1, 0, -1)]
            while stk:
                step, u, p = stk.pop()
                if step == 1:
                    # Leaf test: degree 1, except the root which is a
                    # leaf only at degree 0.
                    if len(adj[u]) == (1 if u else 0):
                        # A leaf must block its own path.
                        dp[u] = values[u]
                        continue
                    stk.append((2, u, p))
                    for v in reversed(adj[u]):
                        if v != p:
                            stk.append((1, v, u))
                elif step == 2:
                    dp[u] = min(sum(dp[v] for v in adj[u] if v != p), values[u]) # min(pick u, not pick u)
            return dp[0]
        adj = [[] for _ in xrange(len(values))]  # xrange: Python 2 file
        for u, v in edges:
            adj[u].append(v)
            adj[v].append(u)
        return sum(values)-iter_dfs()
# Time: O(n)
# Space: O(n)
# dfs, tree dp
| Solution |
python | pola-rs__polars | py-polars/src/polars/series/list.py | {
"start": 575,
"end": 28748
} | class ____:
"""Namespace for list related methods."""
    # NOTE(review): the list methods below are docstring-only stubs; the
    # implementations appear to be attached via this accessor name by the
    # namespace plumbing — confirm against the accessor registration code.
    _accessor = "list"
    def __init__(self, series: Series) -> None:
        # Keep a handle on the backing PySeries for the namespace methods.
        self._s: PySeries = series._s
def all(self) -> Series:
"""
Evaluate whether all boolean values in a list are true.
Returns
-------
Series
Series of data type :class:`Boolean`.
Notes
-----
If there are no non-null elements in a row, the output is `True`.
Examples
--------
>>> s = pl.Series(
... [[True, True], [False, True], [False, False], [None], [], None],
... dtype=pl.List(pl.Boolean),
... )
>>> s.list.all()
shape: (6,)
Series: '' [bool]
[
true
false
false
true
true
null
]
"""
def any(self) -> Series:
"""
Evaluate whether any boolean value in a list is true.
Returns
-------
Series
Series of data type :class:`Boolean`.
Notes
-----
If there are no non-null elements in a row, the output is `False`.
Examples
--------
>>> s = pl.Series(
... [[True, True], [False, True], [False, False], [None], [], None],
... dtype=pl.List(pl.Boolean),
... )
>>> s.list.any()
shape: (6,)
Series: '' [bool]
[
true
true
false
false
false
null
]
"""
def len(self) -> Series:
"""
Return the number of elements in each list.
Null values count towards the total.
Returns
-------
Series
Series of data type :class:`UInt32`.
Examples
--------
>>> s = pl.Series([[1, 2, None], [5]])
>>> s.list.len()
shape: (2,)
Series: '' [u32]
[
3
1
]
"""
def drop_nulls(self) -> Series:
"""
Drop all null values in the list.
The original order of the remaining elements is preserved.
Examples
--------
>>> s = pl.Series("values", [[None, 1, None, 2], [None], [3, 4]])
>>> s.list.drop_nulls()
shape: (3,)
Series: 'values' [list[i64]]
[
[1, 2]
[]
[3, 4]
]
"""
def sample(
self,
n: int | IntoExprColumn | None = None,
*,
fraction: float | IntoExprColumn | None = None,
with_replacement: bool = False,
shuffle: bool = False,
seed: int | None = None,
) -> Series:
"""
Sample from this list.
Parameters
----------
n
Number of items to return. Cannot be used with `fraction`. Defaults to 1 if
`fraction` is None.
fraction
Fraction of items to return. Cannot be used with `n`.
with_replacement
Allow values to be sampled more than once.
shuffle
Shuffle the order of sampled data points.
seed
Seed for the random number generator. If set to None (default), a
random seed is generated for each sample operation.
Examples
--------
>>> s = pl.Series("values", [[1, 2, 3], [4, 5]])
>>> s.list.sample(n=pl.Series("n", [2, 1]), seed=1)
shape: (2,)
Series: 'values' [list[i64]]
[
[2, 3]
[5]
]
"""
def sum(self) -> Series:
"""
Sum all the arrays in the list.
Notes
-----
If there are no non-null elements in a row, the output is `0`.
Examples
--------
>>> s = pl.Series("values", [[1], [2, 3]])
>>> s.list.sum()
shape: (2,)
Series: 'values' [i64]
[
1
5
]
"""
def max(self) -> Series:
"""
Compute the max value of the arrays in the list.
Examples
--------
>>> s = pl.Series("values", [[4, 1], [2, 3]])
>>> s.list.max()
shape: (2,)
Series: 'values' [i64]
[
4
3
]
"""
def min(self) -> Series:
"""
Compute the min value of the arrays in the list.
Examples
--------
>>> s = pl.Series("values", [[4, 1], [2, 3]])
>>> s.list.min()
shape: (2,)
Series: 'values' [i64]
[
1
2
]
"""
def mean(self) -> Series:
"""
Compute the mean value of the arrays in the list.
Examples
--------
>>> s = pl.Series("values", [[3, 1], [3, 3]])
>>> s.list.mean()
shape: (2,)
Series: 'values' [f64]
[
2.0
3.0
]
"""
def median(self) -> Series:
"""
Compute the median value of the arrays in the list.
Examples
--------
>>> s = pl.Series("values", [[-1, 0, 1], [1, 10]])
>>> s.list.median()
shape: (2,)
Series: 'values' [f64]
[
0.0
5.5
]
"""
def std(self, ddof: int = 1) -> Series:
"""
Compute the std value of the arrays in the list.
Examples
--------
>>> s = pl.Series("values", [[-1, 0, 1], [1, 10]])
>>> s.list.std()
shape: (2,)
Series: 'values' [f64]
[
1.0
6.363961
]
"""
def var(self, ddof: int = 1) -> Series:
"""
Compute the var value of the arrays in the list.
Examples
--------
>>> s = pl.Series("values", [[-1, 0, 1], [1, 10]])
>>> s.list.var()
shape: (2,)
Series: 'values' [f64]
[
1.0
40.5
]
"""
def sort(
self,
*,
descending: bool = False,
nulls_last: bool = False,
multithreaded: bool = True,
) -> Series:
"""
Sort the arrays in this column.
Parameters
----------
descending
Sort in descending order.
nulls_last
Place null values last.
multithreaded
Sort using multiple threads.
Examples
--------
>>> s = pl.Series("a", [[3, 2, 1], [9, 1, 2]])
>>> s.list.sort()
shape: (2,)
Series: 'a' [list[i64]]
[
[1, 2, 3]
[1, 2, 9]
]
>>> s.list.sort(descending=True)
shape: (2,)
Series: 'a' [list[i64]]
[
[3, 2, 1]
[9, 2, 1]
]
"""
def reverse(self) -> Series:
"""
Reverse the arrays in the list.
Examples
--------
>>> s = pl.Series("a", [[3, 2, 1], [9, 1, 2]])
>>> s.list.reverse()
shape: (2,)
Series: 'a' [list[i64]]
[
[1, 2, 3]
[2, 1, 9]
]
"""
def unique(self, *, maintain_order: bool = False) -> Series:
"""
Get the unique/distinct values in the list.
Parameters
----------
maintain_order
Maintain order of data. This requires more work.
Examples
--------
>>> s = pl.Series("a", [[1, 1, 2], [2, 3, 3]])
>>> s.list.unique()
shape: (2,)
Series: 'a' [list[i64]]
[
[1, 2]
[2, 3]
]
"""
def n_unique(self) -> Series:
"""
Count the number of unique values in every sub-lists.
Examples
--------
>>> s = pl.Series("a", [[1, 1, 2], [2, 3, 4]])
>>> s.list.n_unique()
shape: (2,)
Series: 'a' [u32]
[
2
3
]
"""
def concat(self, other: list[Series] | Series | list[Any]) -> Series:
"""
Concat the arrays in a Series dtype List in linear time.
Parameters
----------
other
Columns to concat into a List Series
Examples
--------
>>> s1 = pl.Series("a", [["a", "b"], ["c"]])
>>> s2 = pl.Series("b", [["c"], ["d", None]])
>>> s1.list.concat(s2)
shape: (2,)
Series: 'a' [list[str]]
[
["a", "b", "c"]
["c", "d", null]
]
"""
def get(
self,
index: int | Series | list[int],
*,
null_on_oob: bool = False,
) -> Series:
"""
Get the value by index in the sublists.
So index `0` would return the first item of every sublist
and index `-1` would return the last item of every sublist
if an index is out of bounds, it will return a `None`.
Parameters
----------
index
Index to return per sublist
null_on_oob
Behavior if an index is out of bounds:
* True -> set as null
* False -> raise an error
Examples
--------
>>> s = pl.Series("a", [[3, 2, 1], [], [1, 2]])
>>> s.list.get(0, null_on_oob=True)
shape: (3,)
Series: 'a' [i64]
[
3
null
1
]
"""
def gather(
self,
indices: Series | list[int] | list[list[int]],
*,
null_on_oob: bool = False,
) -> Series:
"""
Take sublists by multiple indices.
The indices may be defined in a single column, or by sublists in another
column of dtype `List`.
Parameters
----------
indices
Indices to return per sublist
null_on_oob
Behavior if an index is out of bounds:
True -> set as null
False -> raise an error
Note that defaulting to raising an error is much cheaper
Examples
--------
>>> s = pl.Series("a", [[3, 2, 1], [], [1, 2]])
>>> s.list.gather([0, 2], null_on_oob=True)
shape: (3,)
Series: 'a' [list[i64]]
[
[3, 1]
[null, null]
[1, null]
]
"""
def gather_every(
self, n: int | IntoExprColumn, offset: int | IntoExprColumn = 0
) -> Series:
"""
Take every n-th value start from offset in sublists.
Parameters
----------
n
Gather every n-th element.
offset
Starting index.
Examples
--------
>>> s = pl.Series("a", [[1, 2, 3], [], [6, 7, 8, 9]])
>>> s.list.gather_every(2, offset=1)
shape: (3,)
Series: 'a' [list[i64]]
[
[2]
[]
[7, 9]
]
"""
def __getitem__(self, item: int) -> Series:
return self.get(item)
def join(self, separator: IntoExprColumn, *, ignore_nulls: bool = True) -> Series:
"""
Join all string items in a sublist and place a separator between them.
This errors if inner type of list `!= String`.
Parameters
----------
separator
string to separate the items with
ignore_nulls
Ignore null values (default).
If set to ``False``, null values will be propagated.
If the sub-list contains any null values, the output is ``None``.
Returns
-------
Series
Series of data type :class:`String`.
Examples
--------
>>> s = pl.Series([["foo", "bar"], ["hello", "world"]])
>>> s.list.join(separator="-")
shape: (2,)
Series: '' [str]
[
"foo-bar"
"hello-world"
]
"""
def first(self) -> Series:
"""
Get the first value of the sublists.
Examples
--------
>>> s = pl.Series("a", [[3, 2, 1], [], [1, 2]])
>>> s.list.first()
shape: (3,)
Series: 'a' [i64]
[
3
null
1
]
"""
def last(self) -> Series:
"""
Get the last value of the sublists.
Examples
--------
>>> s = pl.Series("a", [[3, 2, 1], [], [1, 2]])
>>> s.list.last()
shape: (3,)
Series: 'a' [i64]
[
1
null
2
]
"""
@unstable()
def item(self) -> Series:
"""
Get the single value of the sublists.
This errors if the sublist length is not exactly one.
See Also
--------
:meth:`Series.list.get` : Get the value by index in the sublists.
Examples
--------
>>> s = pl.Series("a", [[1], [4], [6]])
>>> s.list.item()
shape: (3,)
Series: 'a' [i64]
[
1
4
6
]
>>> df = pl.Series("a", [[3, 2, 1], [1], [2]])
>>> df.list.item()
Traceback (most recent call last):
...
polars.exceptions.ComputeError: aggregation 'item' expected a single value, got 3 values
""" # noqa: W505
def contains(self, item: IntoExpr, *, nulls_equal: bool = True) -> Series:
"""
Check if sublists contain the given item.
Parameters
----------
item
Item that will be checked for membership
nulls_equal : bool, default True
If True, treat null as a distinct value. Null values will not propagate.
Returns
-------
Series
Series of data type :class:`Boolean`.
Examples
--------
>>> s = pl.Series("a", [[3, 2, 1], [], [1, 2]])
>>> s.list.contains(1)
shape: (3,)
Series: 'a' [bool]
[
true
false
true
]
"""
def arg_min(self) -> Series:
"""
Retrieve the index of the minimal value in every sublist.
Returns
-------
Series
Series of data type :class:`UInt32` or :class:`UInt64`
(depending on compilation).
Examples
--------
>>> s = pl.Series("a", [[1, 2], [2, 1]])
>>> s.list.arg_min()
shape: (2,)
Series: 'a' [u32]
[
0
1
]
"""
def arg_max(self) -> Series:
"""
Retrieve the index of the maximum value in every sublist.
Returns
-------
Series
Series of data type :class:`UInt32` or :class:`UInt64`
(depending on compilation).
Examples
--------
>>> s = pl.Series("a", [[1, 2], [2, 1]])
>>> s.list.arg_max()
shape: (2,)
Series: 'a' [u32]
[
1
0
]
"""
def diff(self, n: int = 1, null_behavior: NullBehavior = "ignore") -> Series:
"""
Calculate the first discrete difference between shifted items of every sublist.
Parameters
----------
n
Number of slots to shift.
null_behavior : {'ignore', 'drop'}
How to handle null values.
Examples
--------
>>> s = pl.Series("a", [[1, 2, 3, 4], [10, 2, 1]])
>>> s.list.diff()
shape: (2,)
Series: 'a' [list[i64]]
[
[null, 1, … 1]
[null, -8, -1]
]
>>> s.list.diff(n=2)
shape: (2,)
Series: 'a' [list[i64]]
[
[null, null, … 2]
[null, null, -9]
]
>>> s.list.diff(n=2, null_behavior="drop")
shape: (2,)
Series: 'a' [list[i64]]
[
[2, 2]
[-9]
]
"""
def shift(self, n: int | IntoExprColumn = 1) -> Series:
"""
Shift list values by the given number of indices.
Parameters
----------
n
Number of indices to shift forward. If a negative value is passed, values
are shifted in the opposite direction instead.
Notes
-----
This method is similar to the `LAG` operation in SQL when the value for `n`
is positive. With a negative value for `n`, it is similar to `LEAD`.
Examples
--------
By default, list values are shifted forward by one index.
>>> s = pl.Series([[1, 2, 3], [4, 5]])
>>> s.list.shift()
shape: (2,)
Series: '' [list[i64]]
[
[null, 1, 2]
[null, 4]
]
Pass a negative value to shift in the opposite direction instead.
>>> s.list.shift(-2)
shape: (2,)
Series: '' [list[i64]]
[
[3, null, null]
[null, null]
]
"""
def slice(self, offset: int | Expr, length: int | Expr | None = None) -> Series:
"""
Slice every sublist.
Parameters
----------
offset
Start index. Negative indexing is supported.
length
Length of the slice. If set to `None` (default), the slice is taken to the
end of the list.
Examples
--------
>>> s = pl.Series("a", [[1, 2, 3, 4], [10, 2, 1]])
>>> s.list.slice(1, 2)
shape: (2,)
Series: 'a' [list[i64]]
[
[2, 3]
[2, 1]
]
"""
def head(self, n: int | Expr = 5) -> Series:
"""
Slice the first `n` values of every sublist.
Parameters
----------
n
Number of values to return for each sublist.
Examples
--------
>>> s = pl.Series("a", [[1, 2, 3, 4], [10, 2, 1]])
>>> s.list.head(2)
shape: (2,)
Series: 'a' [list[i64]]
[
[1, 2]
[10, 2]
]
"""
def tail(self, n: int | Expr = 5) -> Series:
"""
Slice the last `n` values of every sublist.
Parameters
----------
n
Number of values to return for each sublist.
Examples
--------
>>> s = pl.Series("a", [[1, 2, 3, 4], [10, 2, 1]])
>>> s.list.tail(2)
shape: (2,)
Series: 'a' [list[i64]]
[
[3, 4]
[2, 1]
]
"""
def explode(self, *, empty_as_null: bool = True, keep_nulls: bool = True) -> Series:
"""
Returns a column with a separate row for every list element.
Parameters
----------
empty_as_null
Explode an empty list into a `null`.
keep_nulls
Explode a `null` list into a `null`.
Returns
-------
Series
Series with the data type of the list elements.
See Also
--------
Series.reshape : Reshape this Series to a flat Series or a Series of Lists.
Examples
--------
>>> s = pl.Series("a", [[1, 2, 3], [4, 5, 6]])
>>> s.list.explode()
shape: (6,)
Series: 'a' [i64]
[
1
2
3
4
5
6
]
"""
def count_matches(self, element: IntoExpr) -> Series:
"""
Count how often the value produced by `element` occurs.
Parameters
----------
element
An expression that produces a single value
Examples
--------
>>> s = pl.Series("a", [[0], [1], [1, 2, 3, 2], [1, 2, 1], [4, 4]])
>>> s.list.count_matches(1)
shape: (5,)
Series: 'a' [u32]
[
0
1
1
2
0
]
"""
def to_array(self, width: int) -> Series:
"""
Convert a List column into an Array column with the same inner data type.
Parameters
----------
width
Width of the resulting Array column.
Returns
-------
Series
Series of data type :class:`Array`.
Examples
--------
>>> s = pl.Series([[1, 2], [3, 4]], dtype=pl.List(pl.Int8))
>>> s.list.to_array(2)
shape: (2,)
Series: '' [array[i8, 2]]
[
[1, 2]
[3, 4]
]
"""
def to_struct(
self,
n_field_strategy: ListToStructWidthStrategy = "first_non_null",
fields: Callable[[int], str] | Sequence[str] | None = None,
) -> Series:
"""
Convert the series of type `List` to a series of type `Struct`.
Parameters
----------
n_field_strategy : {'first_non_null', 'max_width'}
Strategy to determine the number of fields of the struct.
* "first_non_null": set number of fields equal to the length of the
first non zero-length sublist.
* "max_width": set number of fields as max length of all sublists.
fields
If the name and number of the desired fields is known in advance
a list of field names can be given, which will be assigned by index.
Otherwise, to dynamically assign field names, a custom function can be
used; if neither are set, fields will be `field_0, field_1 .. field_n`.
Examples
--------
Convert list to struct with default field name assignment:
>>> s1 = pl.Series("n", [[0, 1, 2], [0, 1]])
>>> s2 = s1.list.to_struct()
>>> s2
shape: (2,)
Series: 'n' [struct[3]]
[
{0,1,2}
{0,1,null}
]
>>> s2.struct.fields
['field_0', 'field_1', 'field_2']
Convert list to struct with field name assignment by function/index:
>>> s3 = s1.list.to_struct(fields=lambda idx: f"n{idx:02}")
>>> s3.struct.fields
['n00', 'n01', 'n02']
Convert list to struct with field name assignment by index from a list of names:
>>> s1.list.to_struct(fields=["one", "two", "three"]).struct.unnest()
shape: (2, 3)
┌─────┬─────┬───────┐
│ one ┆ two ┆ three │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 │
╞═════╪═════╪═══════╡
│ 0 ┆ 1 ┆ 2 │
│ 0 ┆ 1 ┆ null │
└─────┴─────┴───────┘
"""
if isinstance(fields, Sequence):
s = wrap_s(self._s)
return (
s.to_frame()
.select_seq(F.col(s.name).list.to_struct(fields=fields))
.to_series()
)
return wrap_s(self._s.list_to_struct(n_field_strategy, fields))
def eval(self, expr: Expr, *, parallel: bool = False) -> Series:
"""
Run any polars expression against the lists' elements.
Parameters
----------
expr
Expression to run. Note that you can select an element with `pl.first()`, or
`pl.col()`
parallel
Run all expression parallel. Don't activate this blindly.
Parallelism is worth it if there is enough work to do per thread.
This likely should not be use in the group by context, because we already
parallel execution per group
Examples
--------
>>> s = pl.Series("a", [[1, 4], [8, 5], [3, 2]])
>>> s.list.eval(pl.element().rank())
shape: (3,)
Series: 'a' [list[f64]]
[
[1.0, 2.0]
[2.0, 1.0]
[2.0, 1.0]
]
"""
def agg(self, expr: Expr) -> Series:
"""
Run any polars aggregation expression against the list' elements.
Parameters
----------
expr
Expression to run. Note that you can select an element with `pl.element()`.
Examples
--------
>>> s = pl.Series("a", [[1, None], [42, 13], [None, None]])
>>> s.list.agg(pl.element().null_count())
shape: (3,)
Series: 'a' [u32]
[
1
0
2
]
>>> s.list.agg(pl.element().drop_nulls())
shape: (3,)
Series: 'a' [list[i64]]
[
[1]
[42, 13]
[]
]
"""
def filter(self, predicate: Expr) -> Series:
"""
Filter elements in each list by a boolean expression, returning a new Series of lists.
Parameters
----------
predicate
A boolean expression evaluated on each list element.
Use `pl.element()` to refer to the current element.
Examples
--------
>>> import polars as pl
>>> s = pl.Series("a", [[1, 4], [8, 5], [3, 2]])
>>> s.list.filter(pl.element() % 2 == 0)
shape: (3,)
Series: 'a' [list[i64]]
[
[4]
[8]
[2]
]
""" # noqa: W505
def set_union(self, other: Series | Collection[Any]) -> Series:
"""
Compute the SET UNION between the elements in this list and the elements of `other`.
Parameters
----------
other
Right hand side of the set operation.
Examples
--------
>>> a = pl.Series([[1, 2, 3], [], [None, 3], [5, 6, 7]])
>>> b = pl.Series([[2, 3, 4], [3], [3, 4, None], [6, 8]])
>>> a.list.set_union(b) # doctest: +IGNORE_RESULT
shape: (4,)
Series: '' [list[i64]]
[
[1, 2, 3, 4]
[3]
[null, 3, 4]
[5, 6, 7, 8]
]
""" # noqa: W505
def set_difference(self, other: Series | Collection[Any]) -> Series:
"""
Compute the SET DIFFERENCE between the elements in this list and the elements of `other`.
Parameters
----------
other
Right hand side of the set operation.
See Also
--------
polars.Series.list.diff: Calculates the n-th discrete difference of every sublist.
Examples
--------
>>> a = pl.Series([[1, 2, 3], [], [None, 3], [5, 6, 7]])
>>> b = pl.Series([[2, 3, 4], [3], [3, 4, None], [6, 8]])
>>> a.list.set_difference(b)
shape: (4,)
Series: '' [list[i64]]
[
[1]
[]
[]
[5, 7]
]
""" # noqa: W505
def set_intersection(self, other: Series | Collection[Any]) -> Series:
"""
Compute the SET INTERSECTION between the elements in this list and the elements of `other`.
Parameters
----------
other
Right hand side of the set operation.
Examples
--------
>>> a = pl.Series([[1, 2, 3], [], [None, 3], [5, 6, 7]])
>>> b = pl.Series([[2, 3, 4], [3], [3, 4, None], [6, 8]])
>>> a.list.set_intersection(b)
shape: (4,)
Series: '' [list[i64]]
[
[2, 3]
[]
[null, 3]
[6]
]
""" # noqa: W505
def set_symmetric_difference(self, other: Series | Collection[Any]) -> Series:
"""
Compute the SET SYMMETRIC DIFFERENCE between the elements in this list and the elements of `other`.
Parameters
----------
other
Right hand side of the set operation.
Examples
--------
>>> a = pl.Series([[1, 2, 3], [], [None, 3], [5, 6, 7]])
>>> b = pl.Series([[2, 3, 4], [3], [3, 4, None], [6, 8]])
>>> a.list.set_symmetric_difference(b)
shape: (4,)
Series: '' [list[i64]]
[
[1, 4]
[3]
[4]
[5, 7, 8]
]
""" # noqa: W505
| ListNameSpace |
python | jpadilla__pyjwt | jwt/algorithms.py | {
"start": 5451,
"end": 9121
} | class ____(ABC):
"""
The interface for an algorithm used to sign and verify tokens.
"""
# pyjwt-964: Validate to ensure the key passed in was decoded to the correct cryptography key family
_crypto_key_types: tuple[type[AllowedKeys], ...] | None = None
def compute_hash_digest(self, bytestr: bytes) -> bytes:
"""
Compute a hash digest using the specified algorithm's hash algorithm.
If there is no hash algorithm, raises a NotImplementedError.
"""
# lookup self.hash_alg if defined in a way that mypy can understand
hash_alg = getattr(self, "hash_alg", None)
if hash_alg is None:
raise NotImplementedError
if (
has_crypto
and isinstance(hash_alg, type)
and issubclass(hash_alg, hashes.HashAlgorithm)
):
digest = hashes.Hash(hash_alg(), backend=default_backend())
digest.update(bytestr)
return bytes(digest.finalize())
else:
return bytes(hash_alg(bytestr).digest())
def check_crypto_key_type(self, key: PublicKeyTypes | PrivateKeyTypes):
"""Check that the key belongs to the right cryptographic family.
Note that this method only works when ``cryptography`` is installed.
:param key: Potentially a cryptography key
:type key: :py:data:`PublicKeyTypes <cryptography.hazmat.primitives.asymmetric.types.PublicKeyTypes>` | :py:data:`PrivateKeyTypes <cryptography.hazmat.primitives.asymmetric.types.PrivateKeyTypes>`
:raises ValueError: if ``cryptography`` is not installed, or this method is called by a non-cryptography algorithm
:raises InvalidKeyError: if the key doesn't match the expected key classes
"""
if not has_crypto or self._crypto_key_types is None:
raise ValueError(
"This method requires the cryptography library, and should only be used by cryptography-based algorithms."
)
if not isinstance(key, self._crypto_key_types):
valid_classes = (cls.__name__ for cls in self._crypto_key_types)
actual_class = key.__class__.__name__
self_class = self.__class__.__name__
raise InvalidKeyError(
f"Expected one of {valid_classes}, got: {actual_class}. Invalid Key type for {self_class}"
)
@abstractmethod
def prepare_key(self, key: Any) -> Any:
"""
Performs necessary validation and conversions on the key and returns
the key value in the proper format for sign() and verify().
"""
@abstractmethod
def sign(self, msg: bytes, key: Any) -> bytes:
"""
Returns a digital signature for the specified message
using the specified key value.
"""
@abstractmethod
def verify(self, msg: bytes, key: Any, sig: bytes) -> bool:
"""
Verifies that the specified digital signature is valid
for the specified message and key values.
"""
@overload
@staticmethod
@abstractmethod
def to_jwk(key_obj, as_dict: Literal[True]) -> JWKDict: ... # pragma: no cover
@overload
@staticmethod
@abstractmethod
def to_jwk(key_obj, as_dict: Literal[False] = False) -> str: ... # pragma: no cover
@staticmethod
@abstractmethod
def to_jwk(key_obj, as_dict: bool = False) -> JWKDict | str:
"""
Serializes a given key into a JWK
"""
@staticmethod
@abstractmethod
def from_jwk(jwk: str | JWKDict) -> Any:
"""
Deserializes a given key from JWK back into a key object
"""
| Algorithm |
python | pytorch__pytorch | torch/_dynamo/variables/dicts.py | {
"start": 61896,
"end": 62918
} | class ____(DictViewVariable):
kv = "items"
@property
def view_items_vt(self) -> list[VariableTracker]:
# Returns an iterable of the unpacked items
return [variables.TupleVariable([k.vt, v]) for k, v in self.view_items]
def python_type(self) -> type:
return dict_items
def call_method(
self,
tx: "InstructionTranslator",
name: str,
args: list[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
# TODO(guilhermeleobas): This should actually check if args[0]
# implements the mapping protocol.
if name == "__eq__":
if len(args) != 1:
raise_args_mismatch(tx, name, "1 args", f"{len(args)} args")
if isinstance(args[0], DictItemsVariable):
return self.dv_dict.call_method(tx, "__eq__", [args[0].dv_dict], {})
return ConstantVariable.create(False)
return super().call_method(tx, name, args, kwargs)
| DictItemsVariable |
python | ethereum__web3.py | web3/utils/caching.py | {
"start": 137,
"end": 230
} | class ____(Enum):
FINALIZED = "finalized"
SAFE = "safe"
| RequestCacheValidationThreshold |
python | matplotlib__matplotlib | galleries/examples/animation/pause_resume.py | {
"start": 738,
"end": 1683
} | class ____:
def __init__(self):
fig, ax = plt.subplots()
ax.set_title('Click to pause/resume the animation')
x = np.linspace(-0.1, 0.1, 1000)
# Start with a normal distribution
self.n0 = (1.0 / ((4 * np.pi * 2e-4 * 0.1) ** 0.5)
* np.exp(-x ** 2 / (4 * 2e-4 * 0.1)))
self.p, = ax.plot(x, self.n0)
self.animation = animation.FuncAnimation(
fig, self.update, frames=200, interval=50, blit=True)
self.paused = False
fig.canvas.mpl_connect('button_press_event', self.toggle_pause)
def toggle_pause(self, *args, **kwargs):
if self.paused:
self.animation.resume()
else:
self.animation.pause()
self.paused = not self.paused
def update(self, i):
self.n0 += i / 100 % 5
self.p.set_ydata(self.n0 % 20)
return (self.p,)
pa = PauseAnimation()
plt.show()
| PauseAnimation |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/language/ast.py | {
"start": 26801,
"end": 27922
} | class ____(TypeDefinition):
__slots__ = ('loc', 'name', 'fields', 'directives',)
_fields = ('name', 'fields',)
def __init__(self, name, fields, loc=None, directives=None):
self.loc = loc
self.name = name
self.fields = fields
self.directives = directives
def __eq__(self, other):
return (
self is other or (
isinstance(other, InterfaceTypeDefinition) and
# self.loc == other.loc and
self.name == other.name and
self.fields == other.fields and
self.directives == other.directives
)
)
def __repr__(self):
return ('InterfaceTypeDefinition('
'name={self.name!r}'
', fields={self.fields!r}'
', directives={self.directives!r}'
')').format(self=self)
def __copy__(self):
return type(self)(
self.name,
self.fields,
self.loc,
self.directives,
)
def __hash__(self):
return id(self)
| InterfaceTypeDefinition |
python | langchain-ai__langchain | libs/partners/prompty/tests/unit_tests/fake_output_parser.py | {
"start": 733,
"end": 1330
} | class ____(AgentOutputParser):
def parse(self, text: str) -> AgentAction | AgentFinish:
action, input = extract_action_details(text)
if action:
log = f"\nInvoking: `{action}` with `{input}"
return AgentAction(tool=action, tool_input=(input or ""), log=log)
elif "Final Answer" in text:
return AgentFinish({"output": text}, text)
return AgentAction(
"Intermediate Answer", "after_colon", "Final Answer: This should end"
)
@property
def _type(self) -> str:
return "self_ask"
| FakeOutputParser |
python | kubernetes-client__python | kubernetes/client/models/v1_job_condition.py | {
"start": 383,
"end": 8111
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'last_probe_time': 'datetime',
'last_transition_time': 'datetime',
'message': 'str',
'reason': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'last_probe_time': 'lastProbeTime',
'last_transition_time': 'lastTransitionTime',
'message': 'message',
'reason': 'reason',
'status': 'status',
'type': 'type'
}
def __init__(self, last_probe_time=None, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1JobCondition - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._last_probe_time = None
self._last_transition_time = None
self._message = None
self._reason = None
self._status = None
self._type = None
self.discriminator = None
if last_probe_time is not None:
self.last_probe_time = last_probe_time
if last_transition_time is not None:
self.last_transition_time = last_transition_time
if message is not None:
self.message = message
if reason is not None:
self.reason = reason
self.status = status
self.type = type
@property
def last_probe_time(self):
"""Gets the last_probe_time of this V1JobCondition. # noqa: E501
Last time the condition was checked. # noqa: E501
:return: The last_probe_time of this V1JobCondition. # noqa: E501
:rtype: datetime
"""
return self._last_probe_time
@last_probe_time.setter
def last_probe_time(self, last_probe_time):
"""Sets the last_probe_time of this V1JobCondition.
Last time the condition was checked. # noqa: E501
:param last_probe_time: The last_probe_time of this V1JobCondition. # noqa: E501
:type: datetime
"""
self._last_probe_time = last_probe_time
@property
def last_transition_time(self):
"""Gets the last_transition_time of this V1JobCondition. # noqa: E501
Last time the condition transit from one status to another. # noqa: E501
:return: The last_transition_time of this V1JobCondition. # noqa: E501
:rtype: datetime
"""
return self._last_transition_time
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
"""Sets the last_transition_time of this V1JobCondition.
Last time the condition transit from one status to another. # noqa: E501
:param last_transition_time: The last_transition_time of this V1JobCondition. # noqa: E501
:type: datetime
"""
self._last_transition_time = last_transition_time
@property
def message(self):
"""Gets the message of this V1JobCondition. # noqa: E501
Human readable message indicating details about last transition. # noqa: E501
:return: The message of this V1JobCondition. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1JobCondition.
Human readable message indicating details about last transition. # noqa: E501
:param message: The message of this V1JobCondition. # noqa: E501
:type: str
"""
self._message = message
@property
def reason(self):
"""Gets the reason of this V1JobCondition. # noqa: E501
(brief) reason for the condition's last transition. # noqa: E501
:return: The reason of this V1JobCondition. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1JobCondition.
(brief) reason for the condition's last transition. # noqa: E501
:param reason: The reason of this V1JobCondition. # noqa: E501
:type: str
"""
self._reason = reason
@property
def status(self):
"""Gets the status of this V1JobCondition. # noqa: E501
Status of the condition, one of True, False, Unknown. # noqa: E501
:return: The status of this V1JobCondition. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1JobCondition.
Status of the condition, one of True, False, Unknown. # noqa: E501
:param status: The status of this V1JobCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
@property
def type(self):
"""Gets the type of this V1JobCondition. # noqa: E501
Type of job condition, Complete or Failed. # noqa: E501
:return: The type of this V1JobCondition. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1JobCondition.
Type of job condition, Complete or Failed. # noqa: E501
:param type: The type of this V1JobCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1JobCondition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1JobCondition):
return True
return self.to_dict() != other.to_dict()
| V1JobCondition |
python | pennersr__django-allauth | allauth/socialaccount/providers/mailru/views.py | {
"start": 206,
"end": 1354
} | class ____(OAuth2Adapter):
provider_id = "mailru"
access_token_url = "https://connect.mail.ru/oauth/token" # nosec
authorize_url = "https://connect.mail.ru/oauth/authorize"
profile_url = "https://www.appsmail.ru/platform/api"
def complete_login(self, request, app, token, **kwargs):
uid = kwargs["response"]["x_mailru_vid"]
data = {
"method": "users.getInfo",
"app_id": app.client_id,
"secure": "1",
"uids": uid,
}
param_list = sorted([item + "=" + data[item] for item in data])
# See: https://api.mail.ru/docs/guides/restapi/
data["sig"] = md5(
("".join(param_list) + app.secret).encode("utf-8")
).hexdigest() # nosec
response = (
get_adapter().get_requests_session().get(self.profile_url, params=data)
)
extra_data = response.json()[0]
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(MailRuOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(MailRuOAuth2Adapter)
| MailRuOAuth2Adapter |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 132138,
"end": 142597
} | class ____(AssertsCompiledSQL, fixtures.TestBase):
__dialect__ = "postgresql"
def setup_test(self):
metadata = MetaData()
self.test_table = Table(
"test_table",
metadata,
Column("id", Integer, primary_key=True),
Column("hash", HSTORE),
)
self.hashcol = self.test_table.c.hash
def _test_where(self, whereclause, expected):
stmt = select(self.test_table).where(whereclause)
self.assert_compile(
stmt,
"SELECT test_table.id, test_table.hash FROM test_table "
"WHERE %s" % expected,
)
def test_bind_serialize_default(self):
dialect = postgresql.dialect(use_native_hstore=False)
proc = self.test_table.c.hash.type._cached_bind_processor(dialect)
eq_(
proc(util.OrderedDict([("key1", "value1"), ("key2", "value2")])),
'"key1"=>"value1", "key2"=>"value2"',
)
def test_bind_serialize_with_slashes_and_quotes(self):
dialect = postgresql.dialect(use_native_hstore=False)
proc = self.test_table.c.hash.type._cached_bind_processor(dialect)
eq_(proc({'\\"a': '\\"1'}), '"\\\\\\"a"=>"\\\\\\"1"')
def test_parse_error(self):
dialect = postgresql.dialect(use_native_hstore=False)
proc = self.test_table.c.hash.type._cached_result_processor(
dialect, None
)
assert_raises_message(
ValueError,
r"""After u?'\[\.\.\.\], "key1"=>"value1", ', could not parse """
r"""residual at position 36: u?'crapcrapcrap, "key3"\[\.\.\.\]""",
proc,
'"key2"=>"value2", "key1"=>"value1", '
'crapcrapcrap, "key3"=>"value3"',
)
def test_result_deserialize_default(self):
dialect = postgresql.dialect(use_native_hstore=False)
proc = self.test_table.c.hash.type._cached_result_processor(
dialect, None
)
eq_(
proc('"key2"=>"value2", "key1"=>"value1"'),
{"key1": "value1", "key2": "value2"},
)
def test_result_deserialize_with_slashes_and_quotes(self):
dialect = postgresql.dialect(use_native_hstore=False)
proc = self.test_table.c.hash.type._cached_result_processor(
dialect, None
)
eq_(proc('"\\\\\\"a"=>"\\\\\\"1"'), {'\\"a': '\\"1'})
def test_bind_serialize_psycopg2(self):
from sqlalchemy.dialects.postgresql import psycopg2
dialect = psycopg2.PGDialect_psycopg2()
dialect._has_native_hstore = True
proc = self.test_table.c.hash.type._cached_bind_processor(dialect)
is_(proc, None)
dialect = psycopg2.PGDialect_psycopg2()
dialect._has_native_hstore = False
proc = self.test_table.c.hash.type._cached_bind_processor(dialect)
eq_(
proc(util.OrderedDict([("key1", "value1"), ("key2", "value2")])),
'"key1"=>"value1", "key2"=>"value2"',
)
def test_result_deserialize_psycopg2(self):
from sqlalchemy.dialects.postgresql import psycopg2
dialect = psycopg2.PGDialect_psycopg2()
dialect._has_native_hstore = True
proc = self.test_table.c.hash.type._cached_result_processor(
dialect, None
)
is_(proc, None)
dialect = psycopg2.PGDialect_psycopg2()
dialect._has_native_hstore = False
proc = self.test_table.c.hash.type._cached_result_processor(
dialect, None
)
eq_(
proc('"key2"=>"value2", "key1"=>"value1"'),
{"key1": "value1", "key2": "value2"},
)
def test_ret_type_text(self):
col = column("x", HSTORE())
is_(col["foo"].type.__class__, Text)
def test_ret_type_custom(self):
class MyType(types.UserDefinedType):
pass
col = column("x", HSTORE(text_type=MyType))
is_(col["foo"].type.__class__, MyType)
def test_where_has_key(self):
self._test_where(
self.hashcol.has_key("foo"),
"test_table.hash ? %(hash_1)s",
)
def test_where_has_all(self):
self._test_where(
self.hashcol.has_all(postgresql.array(["1", "2"])),
"test_table.hash ?& ARRAY[%(param_1)s, %(param_2)s]",
)
def test_where_has_any(self):
self._test_where(
self.hashcol.has_any(postgresql.array(["1", "2"])),
"test_table.hash ?| ARRAY[%(param_1)s, %(param_2)s]",
)
def test_where_defined(self):
self._test_where(
self.hashcol.defined("foo"),
"defined(test_table.hash, %(defined_1)s)",
)
def test_where_contains(self):
self._test_where(
self.hashcol.contains({"foo": "1"}),
"test_table.hash @> %(hash_1)s",
)
def test_where_contained_by(self):
self._test_where(
self.hashcol.contained_by({"foo": "1", "bar": None}),
"test_table.hash <@ %(hash_1)s",
)
def test_where_has_key_any(self):
self._test_where(
self.hashcol.has_key(any_(array(["foo"]))),
"test_table.hash ? ANY (ARRAY[%(param_1)s])",
)
def test_where_has_all_any(self):
self._test_where(
self.hashcol.has_all(any_(postgresql.array(["1", "2"]))),
"test_table.hash ?& ANY (ARRAY[%(param_1)s, %(param_2)s])",
)
def test_where_has_any_any(self):
self._test_where(
self.hashcol.has_any(any_(postgresql.array(["1", "2"]))),
"test_table.hash ?| ANY (ARRAY[%(param_1)s, %(param_2)s])",
)
def test_where_contains_any(self):
self._test_where(
self.hashcol.contains(any_(array(["foo"]))),
"test_table.hash @> ANY (ARRAY[%(param_1)s])",
)
def test_where_contained_by_any(self):
self._test_where(
self.hashcol.contained_by(any_(array(["foo"]))),
"test_table.hash <@ ANY (ARRAY[%(param_1)s])",
)
def test_where_getitem(self):
self._test_where(
self.hashcol["bar"] == None, # noqa
"test_table.hash[%(hash_1)s] IS NULL",
)
def test_where_getitem_any(self):
self._test_where(
self.hashcol["bar"] == any_(array(["foo"])), # noqa
"test_table.hash[%(hash_1)s] = ANY (ARRAY[%(param_1)s])",
)
# Test combinations that don't use subscript operator
@testing.combinations(
(
lambda self: self.hashcol.delete("foo"),
"delete(test_table.hash, %(delete_2)s) AS delete_1",
True,
),
(
lambda self: self.hashcol.delete(postgresql.array(["foo", "bar"])),
(
"delete(test_table.hash, ARRAY[%(param_1)s, %(param_2)s]) "
"AS delete_1"
),
True,
),
(
lambda self: self.hashcol.delete(hstore("1", "2")),
(
"delete(test_table.hash, hstore(%(hstore_1)s, %(hstore_2)s)) "
"AS delete_1"
),
True,
),
(
lambda self: self.hashcol.slice(postgresql.array(["1", "2"])),
(
"slice(test_table.hash, ARRAY[%(param_1)s, %(param_2)s]) "
"AS slice_1"
),
True,
),
(
lambda self: self.hashcol.concat(
hstore(cast(self.test_table.c.id, Text), "3")
),
(
"test_table.hash || hstore(CAST(test_table.id AS TEXT), "
"%(hstore_1)s) AS anon_1"
),
True,
),
(
lambda self: hstore("foo", "bar") + self.hashcol,
"hstore(%(hstore_1)s, %(hstore_2)s) || test_table.hash AS anon_1",
True,
),
(
# hide from 2to3
lambda self: getattr(self.hashcol, "keys")(),
"akeys(test_table.hash) AS akeys_1",
True,
),
(
lambda self: self.hashcol.vals(),
"avals(test_table.hash) AS avals_1",
True,
),
(
lambda self: self.hashcol.array(),
"hstore_to_array(test_table.hash) AS hstore_to_array_1",
True,
),
(
lambda self: self.hashcol.matrix(),
"hstore_to_matrix(test_table.hash) AS hstore_to_matrix_1",
True,
),
)
def test_cols(self, colclause_fn, expected, from_):
colclause = colclause_fn(self)
stmt = select(colclause)
self.assert_compile(
stmt,
("SELECT %s" + (" FROM test_table" if from_ else "")) % expected,
)
# Test combinations that use subscript operator (PG 14+ uses [] syntax)
@testing.combinations(
(
lambda self: self.hashcol["foo"],
"test_table.hash[%(hash_1)s] AS anon_1",
True,
),
(
lambda self: hstore("foo", "3")["foo"],
"(hstore(%(hstore_1)s, %(hstore_2)s))[%(hstore_3)s] AS anon_1",
False,
),
(
lambda self: hstore(
postgresql.array(["1", "2"]), postgresql.array(["3", None])
)["1"],
(
"(hstore(ARRAY[%(param_1)s, %(param_2)s], "
"ARRAY[%(param_3)s, NULL]))[%(hstore_1)s] AS anon_1"
),
False,
),
(
lambda self: hstore(postgresql.array(["1", "2", "3", None]))["3"],
(
"(hstore(ARRAY[%(param_1)s, %(param_2)s, %(param_3)s, NULL]))"
"[%(hstore_1)s] AS anon_1"
),
False,
),
(
lambda self: (self.hashcol + self.hashcol)["foo"],
"(test_table.hash || test_table.hash)[%(param_1)s] AS anon_1",
True,
),
(
lambda self: self.hashcol["foo"] != None, # noqa
"test_table.hash[%(hash_1)s] IS NOT NULL AS anon_1",
True,
),
)
def test_cols_subscript(self, colclause_fn, expected, from_):
colclause = colclause_fn(self)
stmt = select(colclause)
self.assert_compile(
stmt,
("SELECT %s" + (" FROM test_table" if from_ else "")) % expected,
)
| HStoreTest |
python | ray-project__ray | python/ray/train/v2/_internal/metrics/base.py | {
"start": 2422,
"end": 4386
} | class ____(Metric, Generic[E]):
"""A metric for tracking enum values."""
DEFAULT_VALUE = 0
RECORDED_VALUE = 1
def __init__(
self,
name: str,
description: str,
base_tags: Dict[str, str],
enum_tag_key: str,
):
self._enum_tag_key = enum_tag_key
self._current_value: Optional[E] = None
super().__init__(
name=name,
default=self.DEFAULT_VALUE,
description=description,
base_tags=base_tags,
)
def record(self, enum_value: E):
"""Record a specific enum value.
The metric will be reset to 0 for the previous value and set to 1 for the new value.
Args:
enum_value: The enum value to record for.
"""
if enum_value == self._current_value:
return
if self._current_value is not None:
previous_tags = self._get_tags(self._current_value)
self._gauge.set(self._default, previous_tags)
current_tags = self._get_tags(enum_value)
self._gauge.set(self.RECORDED_VALUE, current_tags)
self._current_value = enum_value
def get_value(self, enum_value: E) -> int:
"""Get the value for a specific enum value.
Args:
enum_value: The enum value to get the value for
Returns:
The value for the enum value
"""
return int(enum_value == self._current_value)
def reset(self):
if self._current_value is not None:
tags = self._get_tags(self._current_value)
self._gauge.set(self._default, tags)
self._current_value = None
def _get_tag_keys(self) -> Tuple[str, ...]:
return tuple(self._base_tags.keys()) + (self._enum_tag_key,)
def _get_tags(self, enum_value: E) -> Dict[str, str]:
tags = self._base_tags.copy()
tags[self._enum_tag_key] = enum_value.name
return tags
| EnumMetric |
python | huggingface__transformers | src/transformers/models/deberta_v2/modeling_deberta_v2.py | {
"start": 23670,
"end": 28506
} | class ____(nn.Module):
"""Modified BertEncoder with relative position bias support"""
def __init__(self, config):
super().__init__()
self.layer = nn.ModuleList([DebertaV2Layer(config) for _ in range(config.num_hidden_layers)])
self.relative_attention = getattr(config, "relative_attention", False)
if self.relative_attention:
self.max_relative_positions = getattr(config, "max_relative_positions", -1)
if self.max_relative_positions < 1:
self.max_relative_positions = config.max_position_embeddings
self.position_buckets = getattr(config, "position_buckets", -1)
pos_ebd_size = self.max_relative_positions * 2
if self.position_buckets > 0:
pos_ebd_size = self.position_buckets * 2
self.rel_embeddings = nn.Embedding(pos_ebd_size, config.hidden_size)
self.norm_rel_ebd = [x.strip() for x in getattr(config, "norm_rel_ebd", "none").lower().split("|")]
if "layer_norm" in self.norm_rel_ebd:
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True)
self.conv = ConvLayer(config) if getattr(config, "conv_kernel_size", 0) > 0 else None
self.gradient_checkpointing = False
def get_rel_embedding(self):
rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
if rel_embeddings is not None and ("layer_norm" in self.norm_rel_ebd):
rel_embeddings = self.LayerNorm(rel_embeddings)
return rel_embeddings
def get_attention_mask(self, attention_mask):
if attention_mask.dim() <= 2:
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
elif attention_mask.dim() == 3:
attention_mask = attention_mask.unsqueeze(1)
return attention_mask
def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
if self.relative_attention and relative_pos is None:
if query_states is not None:
relative_pos = build_relative_position(
query_states,
hidden_states,
bucket_size=self.position_buckets,
max_position=self.max_relative_positions,
)
else:
relative_pos = build_relative_position(
hidden_states,
hidden_states,
bucket_size=self.position_buckets,
max_position=self.max_relative_positions,
)
return relative_pos
def forward(
self,
hidden_states,
attention_mask,
output_hidden_states=True,
output_attentions=False,
query_states=None,
relative_pos=None,
return_dict=True,
):
if attention_mask.dim() <= 2:
input_mask = attention_mask
else:
input_mask = attention_mask.sum(-2) > 0
attention_mask = self.get_attention_mask(attention_mask)
relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
all_hidden_states: Optional[tuple[torch.Tensor]] = (hidden_states,) if output_hidden_states else None
all_attentions = () if output_attentions else None
next_kv = hidden_states
rel_embeddings = self.get_rel_embedding()
for i, layer_module in enumerate(self.layer):
output_states, attn_weights = layer_module(
next_kv,
attention_mask,
query_states=query_states,
relative_pos=relative_pos,
rel_embeddings=rel_embeddings,
output_attentions=output_attentions,
)
if output_attentions:
all_attentions = all_attentions + (attn_weights,)
if i == 0 and self.conv is not None:
output_states = self.conv(hidden_states, output_states, input_mask)
if output_hidden_states:
all_hidden_states = all_hidden_states + (output_states,)
if query_states is not None:
query_states = output_states
if isinstance(hidden_states, Sequence):
next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
else:
next_kv = output_states
if not return_dict:
return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions
)
@auto_docstring
| DebertaV2Encoder |
python | getsentry__sentry | tests/sentry/utils/test_types.py | {
"start": 143,
"end": 3224
} | class ____(TestCase):
def test_any(self) -> None:
assert Any("foo") == "foo"
assert Any(1) == 1
assert Any(None) is None
assert Any() is None
assert Any.test(None)
assert Any.test("foo")
assert Any.test("bar")
def test_bool(self) -> None:
assert Bool(True) is True
assert Bool(1) is True
assert Bool("y") is True
assert Bool("YES") is True
assert Bool("t") is True
assert Bool("true") is True
assert Bool("True") is True
assert Bool("1") is True
assert Bool("on") is True
assert Bool(False) is False
assert Bool(0) is False
assert Bool("n") is False
assert Bool("NO") is False
assert Bool("f") is False
assert Bool("false") is False
assert Bool("False") is False
assert Bool("0") is False
assert Bool("off") is False
assert Bool() is False
assert Bool.test(None) is False
assert Bool(True) is True
assert Bool.test("foo") is False
with pytest.raises(InvalidTypeError):
Bool("foo")
def test_int(self) -> None:
assert Int(1) == 1
assert Int("1") == 1
assert Int("-1") == -1
assert Int() == 0
with pytest.raises(InvalidTypeError):
Int("foo")
with pytest.raises(InvalidTypeError):
Int("1.1")
def test_float(self) -> None:
assert Float(1.0) == 1.0
assert Float("1") == 1.0
assert Float("-1.1") == -1.1
assert Float(1) == 1.0
assert Float() == 0.0
with pytest.raises(InvalidTypeError):
Float("foo")
def test_string(self) -> None:
assert String("foo") == "foo"
assert String("foo") == "foo"
assert String() == ""
with pytest.raises(InvalidTypeError):
String(0)
def test_dict(self) -> None:
assert Dict({}) == {}
assert Dict({"foo": "bar"}) == {"foo": "bar"}
assert Dict("{foo: bar}") == {"foo": "bar"}
assert Dict() == {}
with pytest.raises(InvalidTypeError):
assert Dict("[]")
with pytest.raises(InvalidTypeError):
assert Dict([])
with pytest.raises(InvalidTypeError):
assert Dict("")
with pytest.raises(InvalidTypeError):
# malformed yaml/json (a plain scalar, "b: ar", cannot contain ": ")
assert Dict("{foo: b: ar}")
def test_sequence(self) -> None:
assert Sequence(()) == []
assert Sequence([]) == []
assert Sequence((1, 2, 3)) == [1, 2, 3]
assert Sequence([1, 2, 3]) == [1, 2, 3]
assert Sequence("[1,2,3]") == [1, 2, 3]
with pytest.raises(InvalidTypeError):
Sequence("{}")
with pytest.raises(InvalidTypeError):
Sequence({})
with pytest.raises(InvalidTypeError):
Sequence("")
with pytest.raises(InvalidTypeError):
# malformed yaml/json
Sequence("[1,")
| OptionsTypesTest |
python | python-attrs__attrs | tests/test_make.py | {
"start": 69286,
"end": 79118
} | class ____:
@pytest.mark.parametrize("C", [BareC, BareSlottedC])
def test_determine_detects_non_presence_correctly(self, C):
"""
On an empty class, nothing should be detected.
"""
assert True is _determine_whether_to_implement(
C, None, True, ("__init__",)
)
assert True is _determine_whether_to_implement(
C, None, True, ("__repr__",)
)
assert True is _determine_whether_to_implement(
C, None, True, ("__eq__", "__ne__")
)
assert True is _determine_whether_to_implement(
C, None, True, ("__le__", "__lt__", "__ge__", "__gt__")
)
def test_make_all_by_default(self, slots, frozen):
"""
If nothing is there to be detected, imply init=True, repr=True,
unsafe_hash=None, eq=True, order=True.
"""
@attr.s(auto_detect=True, slots=slots, frozen=frozen)
class C:
x = attr.ib()
i = C(1)
o = object()
assert i.__init__ is not o.__init__
assert i.__repr__ is not o.__repr__
assert i.__eq__ is not o.__eq__
assert i.__ne__ is not o.__ne__
assert i.__le__ is not o.__le__
assert i.__lt__ is not o.__lt__
assert i.__ge__ is not o.__ge__
assert i.__gt__ is not o.__gt__
def test_detect_auto_init(self, slots, frozen):
"""
If auto_detect=True and an __init__ exists, don't write one.
"""
@attr.s(auto_detect=True, slots=slots, frozen=frozen)
class CI:
x = attr.ib()
def __init__(self):
object.__setattr__(self, "x", 42)
assert 42 == CI().x
def test_detect_auto_repr(self, slots, frozen):
"""
If auto_detect=True and an __repr__ exists, don't write one.
"""
@attr.s(auto_detect=True, slots=slots, frozen=frozen)
class C:
x = attr.ib()
def __repr__(self):
return "hi"
assert "hi" == repr(C(42))
def test_hash_uses_eq(self, slots, frozen):
"""
If eq is passed in, then __hash__ should use the eq callable
to generate the hash code.
"""
@attr.s(slots=slots, frozen=frozen, unsafe_hash=True)
class C:
x = attr.ib(eq=str)
@attr.s(slots=slots, frozen=frozen, unsafe_hash=True)
class D:
x = attr.ib()
# These hashes should be the same because 1 is turned into
# string before hashing.
assert hash(C("1")) == hash(C(1))
assert hash(D("1")) != hash(D(1))
def test_detect_auto_hash(self, slots, frozen):
"""
If auto_detect=True and an __hash__ exists, don't write one.
"""
@attr.s(auto_detect=True, slots=slots, frozen=frozen)
class C:
x = attr.ib()
def __hash__(self):
return 0xC0FFEE
assert 0xC0FFEE == hash(C(42))
def test_detect_auto_eq(self, slots, frozen):
"""
If auto_detect=True and an __eq__ or an __ne__, exist, don't write one.
"""
@attr.s(auto_detect=True, slots=slots, frozen=frozen)
class C:
x = attr.ib()
def __eq__(self, o):
raise ValueError("worked")
with pytest.raises(ValueError, match="worked"):
C(1) == C(1)
@attr.s(auto_detect=True, slots=slots, frozen=frozen)
class D:
x = attr.ib()
def __ne__(self, o):
raise ValueError("worked")
with pytest.raises(ValueError, match="worked"):
D(1) != D(1)
def test_detect_auto_order(self, slots, frozen):
"""
If auto_detect=True and an __ge__, __gt__, __le__, or and __lt__ exist,
don't write one.
It's surprisingly difficult to test this programmatically, so we do it
by hand.
"""
def assert_not_set(cls, ex, meth_name):
__tracebackhide__ = True
a = getattr(cls, meth_name)
if meth_name == ex:
assert a == 42
else:
assert a is getattr(object, meth_name)
def assert_none_set(cls, ex):
__tracebackhide__ = True
for m in ("le", "lt", "ge", "gt"):
assert_not_set(cls, ex, "__" + m + "__")
@attr.s(auto_detect=True, slots=slots, frozen=frozen)
class LE:
__le__ = 42
@attr.s(auto_detect=True, slots=slots, frozen=frozen)
class LT:
__lt__ = 42
@attr.s(auto_detect=True, slots=slots, frozen=frozen)
class GE:
__ge__ = 42
@attr.s(auto_detect=True, slots=slots, frozen=frozen)
class GT:
__gt__ = 42
assert_none_set(LE, "__le__")
assert_none_set(LT, "__lt__")
assert_none_set(GE, "__ge__")
assert_none_set(GT, "__gt__")
def test_override_init(self, slots, frozen):
"""
If init=True is passed, ignore __init__.
"""
@attr.s(init=True, auto_detect=True, slots=slots, frozen=frozen)
class C:
x = attr.ib()
def __init__(self):
pytest.fail("should not be called")
assert C(1) == C(1)
def test_override_repr(self, slots, frozen):
"""
If repr=True is passed, ignore __repr__.
"""
@attr.s(repr=True, auto_detect=True, slots=slots, frozen=frozen)
class C:
x = attr.ib()
def __repr__(self):
pytest.fail("should not be called")
assert "C(x=1)" == repr(C(1))
def test_override_hash(self, slots, frozen):
"""
If unsafe_hash=True is passed, ignore __hash__.
"""
@attr.s(unsafe_hash=True, auto_detect=True, slots=slots, frozen=frozen)
class C:
x = attr.ib()
def __hash__(self):
pytest.fail("should not be called")
assert hash(C(1))
def test_override_eq(self, slots, frozen):
"""
If eq=True is passed, ignore __eq__ and __ne__.
"""
@attr.s(eq=True, auto_detect=True, slots=slots, frozen=frozen)
class C:
x = attr.ib()
def __eq__(self, o):
pytest.fail("should not be called")
def __ne__(self, o):
pytest.fail("should not be called")
assert C(1) == C(1)
@pytest.mark.parametrize(
("eq", "order", "cmp"),
[
(True, None, None),
(True, True, None),
(None, True, None),
(None, None, True),
],
)
def test_override_order(self, slots, frozen, eq, order, cmp):
"""
If order=True is passed, ignore __le__, __lt__, __gt__, __ge__.
eq=True and cmp=True both imply order=True so test it too.
"""
def meth(self, o):
pytest.fail("should not be called")
@attr.s(
cmp=cmp,
order=order,
eq=eq,
auto_detect=True,
slots=slots,
frozen=frozen,
)
class C:
x = attr.ib()
__le__ = __lt__ = __gt__ = __ge__ = meth
assert C(1) < C(2)
assert C(1) <= C(2)
assert C(2) > C(1)
assert C(2) >= C(1)
@pytest.mark.parametrize("first", [True, False])
def test_total_ordering(self, slots, first):
"""
functools.total_ordering works as expected if an order method and an eq
method are detected.
Ensure the order doesn't matter.
"""
class C:
x = attr.ib()
own_eq_called = attr.ib(default=False)
own_le_called = attr.ib(default=False)
def __eq__(self, o):
self.own_eq_called = True
return self.x == o.x
def __le__(self, o):
self.own_le_called = True
return self.x <= o.x
if first:
C = functools.total_ordering(
attr.s(auto_detect=True, slots=slots)(C)
)
else:
C = attr.s(auto_detect=True, slots=slots)(
functools.total_ordering(C)
)
c1, c2 = C(1), C(2)
assert c1 < c2
assert c1.own_le_called
c1, c2 = C(1), C(2)
assert c2 > c1
assert c2.own_le_called
c1, c2 = C(1), C(2)
assert c2 != c1
assert c1 == c1
assert c1.own_eq_called
def test_detects_setstate_getstate(self, slots):
"""
__getstate__ and __setstate__ are not overwritten if either is present.
"""
@attr.s(slots=slots, auto_detect=True)
class C:
def __getstate__(self):
return ("hi",)
assert getattr(object, "__setstate__", None) is getattr(
C, "__setstate__", None
)
@attr.s(slots=slots, auto_detect=True)
class C:
called = attr.ib(False)
def __setstate__(self, state):
self.called = True
i = C()
assert False is i.called
i.__setstate__(())
assert True is i.called
assert getattr(object, "__getstate__", None) is getattr(
C, "__getstate__", None
)
@pytest.mark.skipif(PY_3_10_PLUS, reason="Pre-3.10 only.")
def test_match_args_pre_310(self):
"""
__match_args__ is not created on Python versions older than 3.10.
"""
@attr.s
class C:
a = attr.ib()
assert None is getattr(C, "__match_args__", None)
@pytest.mark.skipif(
not PY_3_10_PLUS, reason="Structural pattern matching is 3.10+"
)
| TestAutoDetect |
python | numpy__numpy | numpy/lib/tests/test_function_base.py | {
"start": 108052,
"end": 116746
} | class ____:
def test_exceptions(self):
assert_raises(ValueError, interp, 0, [], [])
assert_raises(ValueError, interp, 0, [0], [1, 2])
assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0)
assert_raises(ValueError, interp, 0, [], [], period=360)
assert_raises(ValueError, interp, 0, [0], [1, 2], period=360)
def test_basic(self):
x = np.linspace(0, 1, 5)
y = np.linspace(0, 1, 5)
x0 = np.linspace(0, 1, 50)
assert_almost_equal(np.interp(x0, x, y), x0)
def test_right_left_behavior(self):
# Needs range of sizes to test different code paths.
# size ==1 is special cased, 1 < size < 5 is linear search, and
# size >= 5 goes through local search and possibly binary search.
for size in range(1, 10):
xp = np.arange(size, dtype=np.double)
yp = np.ones(size, dtype=np.double)
incpts = np.array([-1, 0, size - 1, size], dtype=np.double)
decpts = incpts[::-1]
incres = interp(incpts, xp, yp)
decres = interp(decpts, xp, yp)
inctgt = np.array([1, 1, 1, 1], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
incres = interp(incpts, xp, yp, left=0)
decres = interp(decpts, xp, yp, left=0)
inctgt = np.array([0, 1, 1, 1], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
incres = interp(incpts, xp, yp, right=2)
decres = interp(decpts, xp, yp, right=2)
inctgt = np.array([1, 1, 1, 2], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
incres = interp(incpts, xp, yp, left=0, right=2)
decres = interp(decpts, xp, yp, left=0, right=2)
inctgt = np.array([0, 1, 1, 2], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
def test_scalar_interpolation_point(self):
x = np.linspace(0, 1, 5)
y = np.linspace(0, 1, 5)
x0 = 0
assert_almost_equal(np.interp(x0, x, y), x0)
x0 = .3
assert_almost_equal(np.interp(x0, x, y), x0)
x0 = np.float32(.3)
assert_almost_equal(np.interp(x0, x, y), x0)
x0 = np.float64(.3)
assert_almost_equal(np.interp(x0, x, y), x0)
x0 = np.nan
assert_almost_equal(np.interp(x0, x, y), x0)
def test_non_finite_behavior_exact_x(self):
x = [1, 2, 2.5, 3, 4]
xp = [1, 2, 3, 4]
fp = [1, 2, np.inf, 4]
assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.inf, np.inf, 4])
fp = [1, 2, np.nan, 4]
assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4])
@pytest.fixture(params=[
np.float64,
lambda x: _make_complex(x, 0),
lambda x: _make_complex(0, x),
lambda x: _make_complex(x, np.multiply(x, -2))
], ids=[
'real',
'complex-real',
'complex-imag',
'complex-both'
])
def sc(self, request):
""" scale function used by the below tests """
return request.param
def test_non_finite_any_nan(self, sc):
""" test that nans are propagated """
assert_equal(np.interp(0.5, [np.nan, 1], sc([ 0, 10])), sc(np.nan))
assert_equal(np.interp(0.5, [ 0, np.nan], sc([ 0, 10])), sc(np.nan))
assert_equal(np.interp(0.5, [ 0, 1], sc([np.nan, 10])), sc(np.nan))
assert_equal(np.interp(0.5, [ 0, 1], sc([ 0, np.nan])), sc(np.nan))
def test_non_finite_inf(self, sc):
""" Test that interp between opposite infs gives nan """
inf = np.inf
nan = np.nan
assert_equal(np.interp(0.5, [-inf, +inf], sc([ 0, 10])), sc(nan))
assert_equal(np.interp(0.5, [ 0, 1], sc([-inf, +inf])), sc(nan))
assert_equal(np.interp(0.5, [ 0, 1], sc([+inf, -inf])), sc(nan))
# unless the y values are equal
assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 10, 10])), sc(10))
def test_non_finite_half_inf_xf(self, sc):
""" Test that interp where both axes have a bound at inf gives nan """
inf = np.inf
nan = np.nan
assert_equal(np.interp(0.5, [-inf, 1], sc([-inf, 10])), sc(nan))
assert_equal(np.interp(0.5, [-inf, 1], sc([+inf, 10])), sc(nan))
assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, -inf])), sc(nan))
assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, +inf])), sc(nan))
assert_equal(np.interp(0.5, [ 0, +inf], sc([-inf, 10])), sc(nan))
assert_equal(np.interp(0.5, [ 0, +inf], sc([+inf, 10])), sc(nan))
assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, -inf])), sc(nan))
assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, +inf])), sc(nan))
def test_non_finite_half_inf_x(self, sc):
""" Test interp where the x axis has a bound at inf """
assert_equal(np.interp(0.5, [-np.inf, -np.inf], sc([0, 10])), sc(10))
assert_equal(np.interp(0.5, [-np.inf, 1 ], sc([0, 10])), sc(10)) # noqa: E202
assert_equal(np.interp(0.5, [ 0, +np.inf], sc([0, 10])), sc(0))
assert_equal(np.interp(0.5, [+np.inf, +np.inf], sc([0, 10])), sc(0))
def test_non_finite_half_inf_f(self, sc):
""" Test interp where the f axis has a bound at inf """
assert_equal(np.interp(0.5, [0, 1], sc([ 0, -np.inf])), sc(-np.inf))
assert_equal(np.interp(0.5, [0, 1], sc([ 0, +np.inf])), sc(+np.inf))
assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, 10])), sc(-np.inf))
assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, 10])), sc(+np.inf))
assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, -np.inf])), sc(-np.inf))
assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, +np.inf])), sc(+np.inf))
def test_complex_interp(self):
# test complex interpolation
x = np.linspace(0, 1, 5)
y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5)) * 1.0j
x0 = 0.3
y0 = x0 + (1 + x0) * 1.0j
assert_almost_equal(np.interp(x0, x, y), y0)
# test complex left and right
x0 = -1
left = 2 + 3.0j
assert_almost_equal(np.interp(x0, x, y, left=left), left)
x0 = 2.0
right = 2 + 3.0j
assert_almost_equal(np.interp(x0, x, y, right=right), right)
# test complex non finite
x = [1, 2, 2.5, 3, 4]
xp = [1, 2, 3, 4]
fp = [1, 2 + 1j, np.inf, 4]
y = [1, 2 + 1j, np.inf + 0.5j, np.inf, 4]
assert_almost_equal(np.interp(x, xp, fp), y)
# test complex periodic
x = [-180, -170, -185, 185, -10, -5, 0, 365]
xp = [190, -190, 350, -350]
fp = [5 + 1.0j, 10 + 2j, 3 + 3j, 4 + 4j]
y = [7.5 + 1.5j, 5. + 1.0j, 8.75 + 1.75j, 6.25 + 1.25j, 3. + 3j, 3.25 + 3.25j,
3.5 + 3.5j, 3.75 + 3.75j]
assert_almost_equal(np.interp(x, xp, fp, period=360), y)
def test_zero_dimensional_interpolation_point(self):
x = np.linspace(0, 1, 5)
y = np.linspace(0, 1, 5)
x0 = np.array(.3)
assert_almost_equal(np.interp(x0, x, y), x0)
xp = np.array([0, 2, 4])
fp = np.array([1, -1, 1])
actual = np.interp(np.array(1), xp, fp)
assert_equal(actual, 0)
assert_(isinstance(actual, np.float64))
actual = np.interp(np.array(4.5), xp, fp, period=4)
assert_equal(actual, 0.5)
assert_(isinstance(actual, np.float64))
def test_if_len_x_is_small(self):
xp = np.arange(0, 10, 0.0001)
fp = np.sin(xp)
assert_almost_equal(np.interp(np.pi, xp, fp), 0.0)
def test_period(self):
x = [-180, -170, -185, 185, -10, -5, 0, 365]
xp = [190, -190, 350, -350]
fp = [5, 10, 3, 4]
y = [7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75]
assert_almost_equal(np.interp(x, xp, fp, period=360), y)
x = np.array(x, order='F').reshape(2, -1)
y = np.array(y, order='C').reshape(2, -1)
assert_almost_equal(np.interp(x, xp, fp, period=360), y)
quantile_methods = [
'inverted_cdf', 'averaged_inverted_cdf', 'closest_observation',
'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear',
'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher',
'midpoint']
methods_supporting_weights = ["inverted_cdf"]
| TestInterp |
python | pdm-project__pdm | src/pdm/cli/commands/run.py | {
"start": 3578,
"end": 4247
} | class ____(NamedTuple):
kind: str
name: str
args: str | Sequence[str]
options: TaskOptions
def __str__(self) -> str:
return f"<task [primary]{self.name}[/]>"
@property
def short_description(self) -> str:
"""
A short one line task description
"""
if self.kind == "composite":
fallback = f" {termui.Emoji.ARROW_SEPARATOR} ".join(self.args)
else:
lines = [line.strip() for line in str(self.args).splitlines() if line.strip()]
fallback = f"{lines[0]}{termui.Emoji.ELLIPSIS}" if len(lines) > 1 else lines[0]
return self.options.get("help", fallback)
| Task |
python | walkccc__LeetCode | solutions/1224. Maximum Equal Frequency/1224.py | {
"start": 0,
"end": 470
} | class ____:
def maxEqualFreq(self, nums: list[int]) -> int:
ans = 0
maxFreq = 0
count = collections.Counter()
freq = collections.Counter()
for i, num in enumerate(nums):
freq[count[num]] -= 1
count[num] += 1
freq[count[num]] += 1
maxFreq = max(maxFreq, count[num])
if maxFreq == 1 or maxFreq * freq[maxFreq] == i or (maxFreq - 1) * (
freq[maxFreq - 1] + 1) == i:
ans = i + 1
return ans
| Solution |
python | cython__cython | Cython/Plex/Scanners.py | {
"start": 237,
"end": 11938
} | class ____:
"""
A Scanner is used to read tokens from a stream of characters
using the token set specified by a Plex.Lexicon.
Constructor:
Scanner(lexicon, stream, name = '')
See the docstring of the __init__ method for details.
Methods:
See the docstrings of the individual methods for more
information.
read() --> (value, text)
Reads the next lexical token from the stream.
position() --> (name, line, col)
Returns the position of the last token read using the
read() method.
begin(state_name)
Causes scanner to change state.
produce(value [, text])
Causes return of a token value to the caller of the
Scanner.
"""
# lexicon = None # Lexicon
# stream = None # file-like object
# name = ''
# buffer = ''
#
# These positions are used by the scanner to track its internal state:
# buf_start_pos = 0 # position in input of start of buffer
# next_pos = 0 # position in input of next char to read
# cur_pos = 0 # position in input of current char
# cur_line = 1 # line number of current char
# cur_line_start = 0 # position in input of start of current line
# start_pos = 0 # position in input of start of token
# current_scanner_position_tuple = ("", 0, 0)
# tuple of filename, line number and position in line, really mainly for error reporting
#
# These positions are used to track what was read from the queue
# (which may differ from the internal state when tokens are replaced onto the queue)
# last_token_position_tuple = ("", 0, 0) # tuple of filename, line number and position in line
# text = None # text of last token read
# initial_state = None # Node
# state_name = '' # Name of initial state
# queue = None # list of tokens and positions to be returned
# trace = 0
def __init__(self, lexicon, stream, name='', initial_pos=None):
"""
Scanner(lexicon, stream, name = '')
|lexicon| is a Plex.Lexicon instance specifying the lexical tokens
to be recognised.
|stream| can be a file object or anything which implements a
compatible read() method.
|name| is optional, and may be the name of the file being
scanned or any other identifying string.
"""
self.trace = 0
self.buffer = ''
self.buf_start_pos = 0
self.next_pos = 0
self.cur_pos = 0
self.cur_line = 1
self.start_pos = 0
self.current_scanner_position_tuple = ("", 0, 0)
self.last_token_position_tuple = ("", 0, 0)
self.text = None
self.state_name = None
self.lexicon = lexicon
self.stream = stream
self.name = name
self.queue = []
self.initial_state = None
self.begin('')
self.next_pos = 0
self.cur_pos = 0
self.cur_line_start = 0
self.cur_char = BOL
self.input_state = 1
if initial_pos is not None:
self.cur_line, self.cur_line_start = initial_pos[1], -initial_pos[2]
def read(self):
"""
Read the next lexical token from the stream and return a
tuple (value, text), where |value| is the value associated with
the token as specified by the Lexicon, and |text| is the actual
string read from the stream. Returns (None, '') on end of file.
"""
queue = self.queue
while not queue:
self.text, action = self.scan_a_token()
if action is None:
self.produce(None)
self.eof()
else:
value = action.perform(self, self.text)
if value is not None:
self.produce(value)
result, self.last_token_position_tuple = queue[0]
del queue[0]
return result
def unread(self, token, value, position):
self.queue.insert(0, ((token, value), position))
def get_current_scan_pos(self):
# distinct from the position of the last token due to the queue
return self.current_scanner_position_tuple
def scan_a_token(self):
"""
Read the next input sequence recognised by the machine
and return (text, action). Returns ('', None) on end of
file.
"""
self.start_pos = self.cur_pos
self.current_scanner_position_tuple = (
self.name, self.cur_line, self.cur_pos - self.cur_line_start
)
action = self.run_machine_inlined()
if action is not None:
if self.trace:
print("Scanner: read: Performing %s %d:%d" % (
action, self.start_pos, self.cur_pos))
text = self.buffer[
self.start_pos - self.buf_start_pos:
self.cur_pos - self.buf_start_pos]
return (text, action)
else:
if self.cur_pos == self.start_pos:
if self.cur_char is None or self.cur_char is EOF:
return ('', None)
raise Errors.UnrecognizedInput(self, self.state_name)
@cython.final
def run_machine_inlined(self):
"""
Inlined version of run_machine for speed.
"""
state: dict = self.initial_state
cur_pos: cython.Py_ssize_t = self.cur_pos
cur_line: cython.Py_ssize_t = self.cur_line
cur_line_start: cython.Py_ssize_t = self.cur_line_start
cur_char = self.cur_char
input_state: cython.long = self.input_state
next_pos: cython.Py_ssize_t = self.next_pos
data: str
buffer: str = self.buffer
buf_start_pos: cython.Py_ssize_t = self.buf_start_pos
buf_len: cython.Py_ssize_t = len(buffer)
buf_index: cython.Py_ssize_t
discard: cython.Py_ssize_t
b_action, b_cur_pos, b_cur_line, b_cur_line_start, b_cur_char, b_input_state, b_next_pos = \
None, 0, 0, 0, '', 0, 0
trace: cython.bint = self.trace
while 1:
if trace:
print("State %d, %d/%d:%s -->" % (
state['number'], input_state, cur_pos, repr(cur_char)))
# Begin inlined self.save_for_backup()
action = state['action']
if action is not None:
b_action, b_cur_pos, b_cur_line, b_cur_line_start, b_cur_char, b_input_state, b_next_pos = \
action, cur_pos, cur_line, cur_line_start, cur_char, input_state, next_pos
# End inlined self.save_for_backup()
c = cur_char
new_state = state.get(c, NOT_FOUND)
if new_state is NOT_FOUND:
new_state = c and state.get('else')
if new_state:
if trace:
print("State %d" % new_state['number'])
state = new_state
# Begin inlined: self.next_char()
if input_state == 1:
cur_pos = next_pos
# Begin inlined: c = self.read_char()
buf_index = next_pos - buf_start_pos
if buf_index < buf_len:
c = buffer[buf_index]
next_pos += 1
else:
discard = self.start_pos - buf_start_pos
data = self.stream.read(0x1000)
buffer = self.buffer[discard:] + data
self.buffer = buffer
buf_start_pos += discard
self.buf_start_pos = buf_start_pos
buf_len = len(buffer)
buf_index -= discard
if data:
c = buffer[buf_index]
next_pos += 1
else:
c = ''
# End inlined: c = self.read_char()
if c == '\n':
cur_char = EOL
input_state = 2
elif not c:
cur_char = EOL
input_state = 4
else:
cur_char = c
elif input_state == 2: # after EoL (1) -> BoL (3)
cur_char = '\n'
input_state = 3
elif input_state == 3: # start new code line
cur_line += 1
cur_line_start = cur_pos = next_pos
cur_char = BOL
input_state = 1
elif input_state == 4: # after final line (1) -> EoF (5)
cur_char = EOF
input_state = 5
else: # input_state == 5 (EoF)
cur_char = ''
# End inlined self.next_char()
else: # not new_state
if trace:
print("blocked")
# Begin inlined: action = self.back_up()
if b_action is not None:
(action, cur_pos, cur_line, cur_line_start,
cur_char, input_state, next_pos) = \
(b_action, b_cur_pos, b_cur_line, b_cur_line_start,
b_cur_char, b_input_state, b_next_pos)
else:
action = None
break # while 1
# End inlined: action = self.back_up()
self.cur_pos = cur_pos
self.cur_line = cur_line
self.cur_line_start = cur_line_start
self.cur_char = cur_char
self.input_state = input_state
self.next_pos = next_pos
if trace:
if action is not None:
print("Doing %s" % action)
return action
def position(self) -> tuple:
"""
Return a tuple (name, line, col) representing the location of
the last token read using the read() method. |name| is the
name that was provided to the Scanner constructor; |line|
is the line number in the stream (1-based); |col| is the
position within the line of the first character of the token
(0-based).
"""
return self.last_token_position_tuple
def get_position(self):
"""
Python accessible wrapper around position(), only for error reporting.
"""
return self.position()
def begin(self, state_name):
"""Set the current state of the scanner to the named state."""
self.initial_state = (
self.lexicon.get_initial_state(state_name))
self.state_name = state_name
def produce(self, value, text=None):
"""
Called from an action procedure, causes |value| to be returned
as the token value from read(). If |text| is supplied, it is
returned in place of the scanned text.
produce() can be called more than once during a single call to an action
procedure, in which case the tokens are queued up and returned one
at a time by subsequent calls to read(), until the queue is empty,
whereupon scanning resumes.
"""
if text is None:
text = self.text
self.queue.append(((value, text), self.current_scanner_position_tuple))
def eof(self):
"""
Override this method if you want something to be done at
end of file.
"""
pass
@property
def start_line(self):
return self.last_token_position_tuple[1]
| Scanner |
python | allegroai__clearml | clearml/backend_api/services/v2_23/events.py | {
"start": 177223,
"end": 178311
} | class ____(Response):
"""
Response of events.scalar_metrics_iter_histogram endpoint.
:param images:
:type images: Sequence[dict]
"""
_service = "events"
_action = "scalar_metrics_iter_histogram"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {"images": {"items": {"type": "object"}, "type": ["array", "null"]}},
"type": "object",
}
def __init__(self, images: Optional[List[dict]] = None, **kwargs: Any) -> None:
super(ScalarMetricsIterHistogramResponse, self).__init__(**kwargs)
self.images = images
@schema_property("images")
def images(self) -> Optional[List[dict]]:
return self._property_images
@images.setter
def images(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_images = None
return
self.assert_isinstance(value, "images", (list, tuple))
self.assert_isinstance(value, "images", (dict,), is_array=True)
self._property_images = value
| ScalarMetricsIterHistogramResponse |
python | doocs__leetcode | solution/2000-2099/2021.Brightest Position on Street/Solution.py | {
"start": 0,
"end": 382
} | class ____:
def brightestPosition(self, lights: List[List[int]]) -> int:
d = defaultdict(int)
for i, j in lights:
l, r = i - j, i + j
d[l] += 1
d[r + 1] -= 1
ans = s = mx = 0
for k in sorted(d):
s += d[k]
if mx < s:
mx = s
ans = k
return ans
| Solution |
python | kubernetes-client__python | kubernetes/client/models/v1beta2_device_request.py | {
"start": 383,
"end": 7443
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'exactly': 'V1beta2ExactDeviceRequest',
'first_available': 'list[V1beta2DeviceSubRequest]',
'name': 'str'
}
attribute_map = {
'exactly': 'exactly',
'first_available': 'firstAvailable',
'name': 'name'
}
def __init__(self, exactly=None, first_available=None, name=None, local_vars_configuration=None): # noqa: E501
"""V1beta2DeviceRequest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._exactly = None
self._first_available = None
self._name = None
self.discriminator = None
if exactly is not None:
self.exactly = exactly
if first_available is not None:
self.first_available = first_available
self.name = name
@property
def exactly(self):
"""Gets the exactly of this V1beta2DeviceRequest. # noqa: E501
:return: The exactly of this V1beta2DeviceRequest. # noqa: E501
:rtype: V1beta2ExactDeviceRequest
"""
return self._exactly
@exactly.setter
def exactly(self, exactly):
"""Sets the exactly of this V1beta2DeviceRequest.
:param exactly: The exactly of this V1beta2DeviceRequest. # noqa: E501
:type: V1beta2ExactDeviceRequest
"""
self._exactly = exactly
@property
def first_available(self):
"""Gets the first_available of this V1beta2DeviceRequest. # noqa: E501
FirstAvailable contains subrequests, of which exactly one will be selected by the scheduler. It tries to satisfy them in the order in which they are listed here. So if there are two entries in the list, the scheduler will only check the second one if it determines that the first one can not be used. DRA does not yet implement scoring, so the scheduler will select the first set of devices that satisfies all the requests in the claim. And if the requirements can be satisfied on more than one node, other scheduling features will determine which node is chosen. This means that the set of devices allocated to a claim might not be the optimal set available to the cluster. Scoring will be implemented later. # noqa: E501
:return: The first_available of this V1beta2DeviceRequest. # noqa: E501
:rtype: list[V1beta2DeviceSubRequest]
"""
return self._first_available
@first_available.setter
def first_available(self, first_available):
"""Sets the first_available of this V1beta2DeviceRequest.
FirstAvailable contains subrequests, of which exactly one will be selected by the scheduler. It tries to satisfy them in the order in which they are listed here. So if there are two entries in the list, the scheduler will only check the second one if it determines that the first one can not be used. DRA does not yet implement scoring, so the scheduler will select the first set of devices that satisfies all the requests in the claim. And if the requirements can be satisfied on more than one node, other scheduling features will determine which node is chosen. This means that the set of devices allocated to a claim might not be the optimal set available to the cluster. Scoring will be implemented later. # noqa: E501
:param first_available: The first_available of this V1beta2DeviceRequest. # noqa: E501
:type: list[V1beta2DeviceSubRequest]
"""
self._first_available = first_available
@property
def name(self):
"""Gets the name of this V1beta2DeviceRequest. # noqa: E501
Name can be used to reference this request in a pod.spec.containers[].resources.claims entry and in a constraint of the claim. References using the name in the DeviceRequest will uniquely identify a request when the Exactly field is set. When the FirstAvailable field is set, a reference to the name of the DeviceRequest will match whatever subrequest is chosen by the scheduler. Must be a DNS label. # noqa: E501
:return: The name of this V1beta2DeviceRequest. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1beta2DeviceRequest.
Name can be used to reference this request in a pod.spec.containers[].resources.claims entry and in a constraint of the claim. References using the name in the DeviceRequest will uniquely identify a request when the Exactly field is set. When the FirstAvailable field is set, a reference to the name of the DeviceRequest will match whatever subrequest is chosen by the scheduler. Must be a DNS label. # noqa: E501
:param name: The name of this V1beta2DeviceRequest. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2DeviceRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta2DeviceRequest):
return True
return self.to_dict() != other.to_dict()
| V1beta2DeviceRequest |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/ndb/property_subclasses/my_models.py | {
"start": 3192,
"end": 3446
} | class ____(ndb.Model):
name = ndb.StringProperty()
birth = FuzzyDateProperty()
death = FuzzyDateProperty()
# Parallel lists:
event_dates = FuzzyDateProperty(repeated=True)
event_names = ndb.StringProperty(repeated=True)
| HistoricPerson |
python | spack__spack | lib/spack/spack/test/cmd/url.py | {
"start": 362,
"end": 5594
} | class ____:
def __init__(self, name, versions):
self.name = name
self.versions = versions
def test_name_parsed_correctly():
# Expected True
assert name_parsed_correctly(MyPackage("netcdf", []), "netcdf")
assert name_parsed_correctly(MyPackage("r-devtools", []), "devtools")
assert name_parsed_correctly(MyPackage("py-numpy", []), "numpy")
assert name_parsed_correctly(MyPackage("octave-splines", []), "splines")
assert name_parsed_correctly(MyPackage("th-data", []), "TH.data")
assert name_parsed_correctly(MyPackage("imagemagick", []), "ImageMagick")
# Expected False
assert not name_parsed_correctly(MyPackage("", []), "hdf5")
assert not name_parsed_correctly(MyPackage("hdf5", []), "")
assert not name_parsed_correctly(MyPackage("yaml-cpp", []), "yamlcpp")
assert not name_parsed_correctly(MyPackage("yamlcpp", []), "yaml-cpp")
assert not name_parsed_correctly(MyPackage("r-py-parser", []), "parser")
assert not name_parsed_correctly(MyPackage("oce", []), "oce-0.18.0")
def test_version_parsed_correctly():
# Expected True
assert version_parsed_correctly(MyPackage("", ["1.2.3"]), "1.2.3")
assert version_parsed_correctly(MyPackage("", ["5.4a", "5.4b"]), "5.4a")
assert version_parsed_correctly(MyPackage("", ["5.4a", "5.4b"]), "5.4b")
assert version_parsed_correctly(MyPackage("", ["1.63.0"]), "1_63_0")
assert version_parsed_correctly(MyPackage("", ["0.94h"]), "094h")
# Expected False
assert not version_parsed_correctly(MyPackage("", []), "1.2.3")
assert not version_parsed_correctly(MyPackage("", ["1.2.3"]), "")
assert not version_parsed_correctly(MyPackage("", ["1.2.3"]), "1.2.4")
assert not version_parsed_correctly(MyPackage("", ["3.4a"]), "3.4")
assert not version_parsed_correctly(MyPackage("", ["3.4"]), "3.4b")
assert not version_parsed_correctly(MyPackage("", ["0.18.0"]), "oce-0.18.0")
def test_url_parse():
url("parse", "http://zlib.net/fossils/zlib-1.2.10.tar.gz")
def test_url_with_no_version_fails():
# No version in URL
with pytest.raises(UndetectableVersionError):
url("parse", "http://www.netlib.org/voronoi/triangle.zip")
def test_url_list(mock_packages):
out = url("list")
total_urls = len(out.split("\n"))
# The following two options should not change the number of URLs printed.
out = url("list", "--color", "--extrapolation")
colored_urls = len(out.split("\n"))
assert colored_urls == total_urls
# The following options should print fewer URLs than the default.
# If they print the same number of URLs, something is horribly broken.
# If they say we missed 0 URLs, something is probably broken too.
out = url("list", "--incorrect-name")
incorrect_name_urls = len(out.split("\n"))
assert 0 < incorrect_name_urls < total_urls
out = url("list", "--incorrect-version")
incorrect_version_urls = len(out.split("\n"))
assert 0 < incorrect_version_urls < total_urls
out = url("list", "--correct-name")
correct_name_urls = len(out.split("\n"))
assert 0 < correct_name_urls < total_urls
out = url("list", "--correct-version")
correct_version_urls = len(out.split("\n"))
assert 0 < correct_version_urls < total_urls
def test_url_summary(mock_packages):
"""Test the URL summary command."""
# test url_summary, the internal function that does the work
(total_urls, correct_names, correct_versions, name_count_dict, version_count_dict) = (
url_summary(None)
)
assert 0 < correct_names <= sum(name_count_dict.values()) <= total_urls
assert 0 < correct_versions <= sum(version_count_dict.values()) <= total_urls
# make sure it agrees with the actual command.
out = url("summary")
out_total_urls = int(re.search(r"Total URLs found:\s*(\d+)", out).group(1))
assert out_total_urls == total_urls
out_correct_names = int(re.search(r"Names correctly parsed:\s*(\d+)", out).group(1))
assert out_correct_names == correct_names
out_correct_versions = int(re.search(r"Versions correctly parsed:\s*(\d+)", out).group(1))
assert out_correct_versions == correct_versions
def test_url_stats(capfd, mock_packages):
with capfd.disabled():
output = url("stats")
npkgs = "%d packages" % len(spack.repo.all_package_names())
assert npkgs in output
assert "url" in output
assert "git" in output
assert "schemes" in output
assert "versions" in output
assert "resources" in output
output = url("stats", "--show-issues")
npkgs = "%d packages" % len(spack.repo.all_package_names())
assert npkgs in output
assert "url" in output
assert "git" in output
assert "schemes" in output
assert "versions" in output
assert "resources" in output
assert "Package URLs with md5 hashes" in output
assert "needs-relocation" in output
assert "https://cmake.org/files/v3.4/cmake-0.0.0.tar.gz" in output
assert "Package URLs with http urls" in output
assert "zmpi" in output
assert "http://www.spack-fake-zmpi.org/downloads/zmpi-1.0.tar.gz" in output
| MyPackage |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_arn.py | {
"start": 121,
"end": 3438
} | class ____(RegexBasedColumnMapExpectation):
"""Expect values in this column to be a valid amazon arn."""
# These values will be used to configure the metric created by your expectation
regex_camel_name = "AmazonResourceName"
regex = r"^arn:([^:\n]*):([^:\n]*):([^:\n]*):([^:\n]*):(([^:\/\n]*)[:\/])?(.*)$"
semantic_type_name_plural = "arns"
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_arns": [
"arn:aws:s3:::my-bucket/my-object",
"arn:partition:service:region:account-id:resource",
],
"invalid_alphanumeric": [
"apz8",
"bubba:arn:123",
],
"invalid_arn": [
"arn:aws:::::::my-bucket/my-object",
"arn::::",
],
"empty": ["", None],
},
"suppress_test_for": ["mssql", "bigquery", "snowflake"],
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_arns"},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_alphanumeric", "mostly": 1},
"out": {
"success": False,
},
},
{
"title": "invalid_non_alphanumeric",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_arn", "mostly": 1},
"out": {
"success": False,
},
},
{
"title": "empty",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "empty", "mostly": 1},
"out": {
"success": False,
},
},
],
}
]
# Here your regex is used to create a custom metric for this expectation
map_metric = RegexBasedColumnMapExpectation.register_metric(
regex_camel_name=regex_camel_name,
regex_=regex,
)
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"amazon",
"arn",
"expectation",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@rdodev", # Don't forget to add your github handle here!
"@mkopec87",
],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidArn().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidArn |
python | jmcnamara__XlsxWriter | xlsxwriter/test/utility/test_xl_cell_to_rowcol.py | {
"start": 283,
"end": 1597
} | class ____(unittest.TestCase):
"""
Test xl_cell_to_rowcol() utility function.
"""
def test_xl_cell_to_rowcol(self):
"""Test xl_cell_to_rowcol()"""
tests = [
# row, col, A1 string
(0, 0, "A1"),
(0, 1, "B1"),
(0, 2, "C1"),
(0, 9, "J1"),
(1, 0, "A2"),
(2, 0, "A3"),
(9, 0, "A10"),
(1, 24, "Y2"),
(7, 25, "Z8"),
(9, 26, "AA10"),
(1, 254, "IU2"),
(1, 255, "IV2"),
(1, 256, "IW2"),
(0, 16383, "XFD1"),
(1048576, 16384, "XFE1048577"),
]
for row, col, string in tests:
exp = (row, col)
got = xl_cell_to_rowcol(string)
self.assertEqual(exp, got)
def test_xl_cell_to_rowcol_abs(self):
"""Test xl_cell_to_rowcol() with absolute references"""
tests = [
# row, col, row_abs, col_abs, A1 string
(0, 0, 0, 0, "A1"),
(0, 0, 1, 0, "A$1"),
(0, 0, 0, 1, "$A1"),
(0, 0, 1, 1, "$A$1"),
]
for row, col, row_abs, col_abs, string in tests:
exp = (row, col)
got = xl_cell_to_rowcol(string)
self.assertEqual(exp, got)
| TestUtility |
python | fluentpython__example-code | 16-coroutine/taxi_sim.py | {
"start": 2763,
"end": 7938
} | class ____:
def __init__(self, procs_map):
self.events = queue.PriorityQueue()
self.procs = dict(procs_map)
def run(self, end_time): # <1>
"""Schedule and display events until time is up"""
# schedule the first event for each cab
for _, proc in sorted(self.procs.items()): # <2>
first_event = next(proc) # <3>
self.events.put(first_event) # <4>
# main loop of the simulation
sim_time = 0 # <5>
while sim_time < end_time: # <6>
if self.events.empty(): # <7>
print('*** end of events ***')
break
current_event = self.events.get() # <8>
sim_time, proc_id, previous_action = current_event # <9>
print('taxi:', proc_id, proc_id * ' ', current_event) # <10>
active_proc = self.procs[proc_id] # <11>
next_time = sim_time + compute_duration(previous_action) # <12>
try:
next_event = active_proc.send(next_time) # <13>
except StopIteration:
del self.procs[proc_id] # <14>
else:
self.events.put(next_event) # <15>
else: # <16>
msg = '*** end of simulation time: {} events pending ***'
print(msg.format(self.events.qsize()))
# END TAXI_SIMULATOR
def compute_duration(previous_action):
"""Compute action duration using exponential distribution"""
if previous_action in ['leave garage', 'drop off passenger']:
# new state is prowling
interval = SEARCH_DURATION
elif previous_action == 'pick up passenger':
# new state is trip
interval = TRIP_DURATION
elif previous_action == 'going home':
interval = 1
else:
raise ValueError('Unknown previous_action: %s' % previous_action)
return int(random.expovariate(1/interval)) + 1
def main(end_time=DEFAULT_END_TIME, num_taxis=DEFAULT_NUMBER_OF_TAXIS,
seed=None):
"""Initialize random generator, build procs and run simulation"""
if seed is not None:
random.seed(seed) # get reproducible results
taxis = {i: taxi_process(i, (i+1)*2, i*DEPARTURE_INTERVAL)
for i in range(num_taxis)}
sim = Simulator(taxis)
sim.run(end_time)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Taxi fleet simulator.')
parser.add_argument('-e', '--end-time', type=int,
default=DEFAULT_END_TIME,
help='simulation end time; default = %s'
% DEFAULT_END_TIME)
parser.add_argument('-t', '--taxis', type=int,
default=DEFAULT_NUMBER_OF_TAXIS,
help='number of taxis running; default = %s'
% DEFAULT_NUMBER_OF_TAXIS)
parser.add_argument('-s', '--seed', type=int, default=None,
help='random generator seed (for testing)')
args = parser.parse_args()
main(args.end_time, args.taxis, args.seed)
"""
Sample run from the command line, seed=3, maximum elapsed time=120::
# BEGIN TAXI_SAMPLE_RUN
$ python3 taxi_sim.py -s 3 -e 120
taxi: 0 Event(time=0, proc=0, action='leave garage')
taxi: 0 Event(time=2, proc=0, action='pick up passenger')
taxi: 1 Event(time=5, proc=1, action='leave garage')
taxi: 1 Event(time=8, proc=1, action='pick up passenger')
taxi: 2 Event(time=10, proc=2, action='leave garage')
taxi: 2 Event(time=15, proc=2, action='pick up passenger')
taxi: 2 Event(time=17, proc=2, action='drop off passenger')
taxi: 0 Event(time=18, proc=0, action='drop off passenger')
taxi: 2 Event(time=18, proc=2, action='pick up passenger')
taxi: 2 Event(time=25, proc=2, action='drop off passenger')
taxi: 1 Event(time=27, proc=1, action='drop off passenger')
taxi: 2 Event(time=27, proc=2, action='pick up passenger')
taxi: 0 Event(time=28, proc=0, action='pick up passenger')
taxi: 2 Event(time=40, proc=2, action='drop off passenger')
taxi: 2 Event(time=44, proc=2, action='pick up passenger')
taxi: 1 Event(time=55, proc=1, action='pick up passenger')
taxi: 1 Event(time=59, proc=1, action='drop off passenger')
taxi: 0 Event(time=65, proc=0, action='drop off passenger')
taxi: 1 Event(time=65, proc=1, action='pick up passenger')
taxi: 2 Event(time=65, proc=2, action='drop off passenger')
taxi: 2 Event(time=72, proc=2, action='pick up passenger')
taxi: 0 Event(time=76, proc=0, action='going home')
taxi: 1 Event(time=80, proc=1, action='drop off passenger')
taxi: 1 Event(time=88, proc=1, action='pick up passenger')
taxi: 2 Event(time=95, proc=2, action='drop off passenger')
taxi: 2 Event(time=97, proc=2, action='pick up passenger')
taxi: 2 Event(time=98, proc=2, action='drop off passenger')
taxi: 1 Event(time=106, proc=1, action='drop off passenger')
taxi: 2 Event(time=109, proc=2, action='going home')
taxi: 1 Event(time=110, proc=1, action='going home')
*** end of events ***
# END TAXI_SAMPLE_RUN
"""
| Simulator |
python | run-llama__llama_index | llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/events.py | {
"start": 683,
"end": 809
} | class ____(TextMessageContentEvent, Event):
type: EventType = EventType.TEXT_MESSAGE_CONTENT
| TextMessageContentWorkflowEvent |
python | allegroai__clearml | clearml/binding/frameworks/tensorflow_bind.py | {
"start": 52806,
"end": 54432
} | class ____(object):
"""Model adapter which extends the save and save_weights methods of a Keras Model instance"""
_model: Any = None
_output_model: OutputModel = None
def __init__(self, model: Any, output_model: OutputModel) -> None:
super(_ModelAdapter, self).__init__()
super(_ModelAdapter, self).__setattr__("_model", model)
super(_ModelAdapter, self).__setattr__("_output_model", output_model)
super(_ModelAdapter, self).__setattr__("_logger", LoggerRoot.get_base_logger(TensorflowBinding))
def __getattr__(self, attr: str) -> Any:
return getattr(self._model, attr)
def __setattr__(self, key: str, value: Any) -> None:
return setattr(self._model, key, value)
def save(
self,
filepath: str,
overwrite: bool = True,
include_optimizer: bool = True,
) -> None:
self._model.save(filepath=filepath, overwrite=overwrite, include_optimizer=include_optimizer)
# TODO: auto generate new objects of filename changes
try:
self._output_model.update_weights(weights_filename=filepath, auto_delete_file=True)
except Exception as ex:
self._logger.error(str(ex))
def save_weights(self, filepath: str, overwrite: bool = True) -> None:
self._model.save_weights(filepath=filepath, overwrite=overwrite)
# TODO: auto generate new objects of filename changes
try:
self._output_model.update_weights(weights_filename=filepath, auto_delete_file=True)
except Exception as ex:
self._logger.error(str(ex))
| _ModelAdapter |
python | yaml__pyyaml | lib/yaml/dumper.py | {
"start": 1051,
"end": 1950
} | class ____(Emitter, Serializer, SafeRepresenter, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=False,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None, sort_keys=True):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
SafeRepresenter.__init__(self, default_style=default_style,
default_flow_style=default_flow_style, sort_keys=sort_keys)
Resolver.__init__(self)
| SafeDumper |
python | python-poetry__poetry | tests/plugins/test_plugin_manager.py | {
"start": 1842,
"end": 18876
} | class ____:
group = "poetry.plugin"
def activate(self, poetry: Poetry, io: IO) -> None:
io.write_line("Updating version")
poetry.package.version = Version.parse("9.9.9")
@pytest.fixture
def repo() -> Repository:
repo = Repository("repo")
repo.add_package(Package("my-other-plugin", "1.0"))
for version in ("1.0", "2.0"):
package = Package("my-application-plugin", version)
package.add_dependency(Dependency("some-lib", version))
repo.add_package(package)
repo.add_package(Package("some-lib", version))
return repo
@pytest.fixture
def pool(repo: Repository) -> RepositoryPool:
pool = RepositoryPool()
pool.add_repository(repo)
return pool
@pytest.fixture
def poetry(fixture_dir: FixtureDirGetter, config: Config) -> Poetry:
project_path = fixture_dir("simple_project")
poetry = Poetry(
project_path / "pyproject.toml",
{},
ProjectPackage("simple-project", "1.2.3"),
Locker(project_path / "poetry.lock", {}),
config,
)
return poetry
@pytest.fixture
def poetry_with_plugins(
fixture_dir: FixtureDirGetter, pool: RepositoryPool, tmp_path: Path
) -> Poetry:
orig_path = fixture_dir("project_plugins")
project_path = tmp_path / "project"
project_path.mkdir()
shutil.copy(orig_path / "pyproject.toml", project_path / "pyproject.toml")
poetry = Factory().create_poetry(project_path)
poetry.set_pool(pool)
return poetry
@pytest.fixture()
def io() -> BufferedIO:
return BufferedIO()
@pytest.fixture()
def manager_factory(poetry: Poetry, io: BufferedIO) -> ManagerFactory:
def _manager(group: str = Plugin.group) -> PluginManager:
return PluginManager(group)
return _manager
@pytest.fixture
def with_my_plugin(mocker: MockerFixture) -> None:
mock_metadata_entry_points(mocker, MyPlugin)
@pytest.fixture
def with_invalid_plugin(mocker: MockerFixture) -> None:
mock_metadata_entry_points(mocker, InvalidPlugin)
def test_load_plugins_and_activate(
manager_factory: ManagerFactory,
poetry: Poetry,
io: BufferedIO,
with_my_plugin: None,
) -> None:
manager = manager_factory()
manager.load_plugins()
manager.activate(poetry, io)
assert poetry.package.readmes == (Path("README.md"),)
assert io.fetch_output() == "Setting readmes\n"
def test_load_plugins_with_invalid_plugin(
manager_factory: ManagerFactory,
poetry: Poetry,
io: BufferedIO,
with_invalid_plugin: None,
) -> None:
manager = manager_factory()
with pytest.raises(ValueError):
manager.load_plugins()
def test_add_project_plugin_path(
poetry_with_plugins: Poetry,
io: BufferedIO,
system_env: Env,
fixture_dir: FixtureDirGetter,
) -> None:
dist_info_1 = "my_application_plugin-1.0.dist-info"
dist_info_2 = "my_application_plugin-2.0.dist-info"
cache = ProjectPluginCache(poetry_with_plugins, io)
shutil.copytree(
fixture_dir("project_plugins") / dist_info_1, cache._path / dist_info_1
)
shutil.copytree(
fixture_dir("project_plugins") / dist_info_2, system_env.purelib / dist_info_2
)
assert {
f"{p.name} {p.version}" for p in InstalledRepository.load(system_env).packages
} == {"my-application-plugin 2.0"}
PluginManager.add_project_plugin_path(poetry_with_plugins.pyproject_path.parent)
assert {
f"{p.name} {p.version}" for p in InstalledRepository.load(system_env).packages
} == {"my-application-plugin 1.0"}
def test_ensure_plugins_no_plugins_no_output(poetry: Poetry, io: BufferedIO) -> None:
PluginManager.ensure_project_plugins(poetry, io)
assert not (poetry.pyproject_path.parent / ProjectPluginCache.PATH).exists()
assert io.fetch_output() == ""
assert io.fetch_error() == ""
def test_ensure_plugins_no_plugins_existing_cache_is_removed(
poetry: Poetry, io: BufferedIO
) -> None:
plugin_path = poetry.pyproject_path.parent / ProjectPluginCache.PATH
plugin_path.mkdir(parents=True)
PluginManager.ensure_project_plugins(poetry, io)
assert not plugin_path.exists()
assert io.fetch_output() == (
"No project plugins defined. Removing the project's plugin cache\n\n"
)
assert io.fetch_error() == ""
@pytest.mark.parametrize("debug_out", [False, True])
def test_ensure_plugins_no_output_if_fresh(
poetry_with_plugins: Poetry, io: BufferedIO, debug_out: bool
) -> None:
io.set_verbosity(Verbosity.DEBUG if debug_out else Verbosity.NORMAL)
cache = ProjectPluginCache(poetry_with_plugins, io)
cache._write_config()
cache.ensure_plugins()
assert cache._config_file.exists()
assert (
cache._gitignore_file.exists()
and cache._gitignore_file.read_text(encoding="utf-8") == "*"
)
assert io.fetch_output() == (
"The project's plugin cache is up to date.\n\n" if debug_out else ""
)
assert io.fetch_error() == ""
@pytest.mark.parametrize("debug_out", [False, True])
def test_ensure_plugins_ignore_irrelevant_markers(
poetry_with_plugins: Poetry, io: BufferedIO, debug_out: bool
) -> None:
io.set_verbosity(Verbosity.DEBUG if debug_out else Verbosity.NORMAL)
poetry_with_plugins.local_config["requires-plugins"] = {
"irrelevant": {"version": "1.0", "markers": "python_version < '3'"}
}
cache = ProjectPluginCache(poetry_with_plugins, io)
cache.ensure_plugins()
assert cache._config_file.exists()
assert (
cache._gitignore_file.exists()
and cache._gitignore_file.read_text(encoding="utf-8") == "*"
)
assert io.fetch_output() == (
"No relevant project plugins for Poetry's environment defined.\n\n"
if debug_out
else ""
)
assert io.fetch_error() == ""
def test_ensure_plugins_remove_outdated(
poetry_with_plugins: Poetry, io: BufferedIO, fixture_dir: FixtureDirGetter
) -> None:
# Test with irrelevant plugins because this is the first return
# where it is relevant that an existing cache is removed.
poetry_with_plugins.local_config["requires-plugins"] = {
"irrelevant": {"version": "1.0", "markers": "python_version < '3'"}
}
fixture_path = fixture_dir("project_plugins")
cache = ProjectPluginCache(poetry_with_plugins, io)
cache._path.mkdir(parents=True)
dist_info = "my_application_plugin-1.0.dist-info"
shutil.copytree(fixture_path / dist_info, cache._path / dist_info)
cache._config_file.touch()
cache.ensure_plugins()
assert cache._config_file.exists()
assert not (cache._path / dist_info).exists()
assert io.fetch_output() == (
"Removing the project's plugin cache because it is outdated\n"
)
assert io.fetch_error() == ""
def test_ensure_plugins_ignore_already_installed_in_system_env(
poetry_with_plugins: Poetry,
io: BufferedIO,
system_env: Env,
fixture_dir: FixtureDirGetter,
) -> None:
fixture_path = fixture_dir("project_plugins")
for dist_info in (
"my_application_plugin-2.0.dist-info",
"my_other_plugin-1.0.dist-info",
):
shutil.copytree(fixture_path / dist_info, system_env.purelib / dist_info)
cache = ProjectPluginCache(poetry_with_plugins, io)
cache.ensure_plugins()
assert cache._config_file.exists()
assert (
cache._gitignore_file.exists()
and cache._gitignore_file.read_text(encoding="utf-8") == "*"
)
assert io.fetch_output() == (
"Ensuring that the Poetry plugins required by the project are available...\n"
"All required plugins have already been installed in Poetry's environment.\n\n"
)
assert io.fetch_error() == ""
def test_ensure_plugins_install_missing_plugins(
poetry_with_plugins: Poetry,
io: BufferedIO,
system_env: Env,
fixture_dir: FixtureDirGetter,
mocker: MockerFixture,
) -> None:
cache = ProjectPluginCache(poetry_with_plugins, io)
install_spy = mocker.spy(cache, "_install")
execute_mock = mocker.patch(
"poetry.plugins.plugin_manager.Installer._execute", return_value=0
)
cache.ensure_plugins()
install_spy.assert_called_once_with(
[
Dependency("my-application-plugin", ">=2.0"),
Dependency("my-other-plugin", ">=1.0"),
],
system_env,
[],
)
execute_mock.assert_called_once()
assert [repr(op) for op in execute_mock.call_args.args[0] if not op.skipped] == [
"<Install some-lib (2.0)>",
"<Install my-application-plugin (2.0)>",
"<Install my-other-plugin (1.0)>",
]
assert cache._config_file.exists()
assert (
cache._gitignore_file.exists()
and cache._gitignore_file.read_text(encoding="utf-8") == "*"
)
assert io.fetch_output() == (
"Ensuring that the Poetry plugins required by the project are available...\n"
"The following Poetry plugins are required by the project"
" but are not installed in Poetry's environment:\n"
" - my-application-plugin (>=2.0)\n"
" - my-other-plugin (>=1.0)\n"
"Installing Poetry plugins only for the current project...\n"
"Updating dependencies\n"
"Resolving dependencies...\n\n"
"Writing lock file\n\n"
)
assert io.fetch_error() == ""
def test_ensure_plugins_install_only_missing_plugins(
poetry_with_plugins: Poetry,
io: BufferedIO,
system_env: Env,
fixture_dir: FixtureDirGetter,
mocker: MockerFixture,
) -> None:
fixture_path = fixture_dir("project_plugins")
for dist_info in (
"my_application_plugin-2.0.dist-info",
"some_lib-2.0.dist-info",
):
shutil.copytree(fixture_path / dist_info, system_env.purelib / dist_info)
cache = ProjectPluginCache(poetry_with_plugins, io)
install_spy = mocker.spy(cache, "_install")
execute_mock = mocker.patch(
"poetry.plugins.plugin_manager.Installer._execute", return_value=0
)
cache.ensure_plugins()
install_spy.assert_called_once_with(
[Dependency("my-other-plugin", ">=1.0")],
system_env,
[Package("my-application-plugin", "2.0"), Package("some-lib", "2.0")],
)
execute_mock.assert_called_once()
assert [repr(op) for op in execute_mock.call_args.args[0] if not op.skipped] == [
"<Install my-other-plugin (1.0)>"
]
assert cache._config_file.exists()
assert (
cache._gitignore_file.exists()
and cache._gitignore_file.read_text(encoding="utf-8") == "*"
)
assert io.fetch_output() == (
"Ensuring that the Poetry plugins required by the project are available...\n"
"The following Poetry plugins are required by the project"
" but are not installed in Poetry's environment:\n"
" - my-other-plugin (>=1.0)\n"
"Installing Poetry plugins only for the current project...\n"
"Updating dependencies\n"
"Resolving dependencies...\n\n"
"Writing lock file\n\n"
)
assert io.fetch_error() == ""
@pytest.mark.parametrize("debug_out", [False, True])
def test_ensure_plugins_install_overwrite_wrong_version_plugins(
poetry_with_plugins: Poetry,
io: BufferedIO,
system_env: Env,
fixture_dir: FixtureDirGetter,
mocker: MockerFixture,
debug_out: bool,
) -> None:
io.set_verbosity(Verbosity.DEBUG if debug_out else Verbosity.NORMAL)
fixture_path = fixture_dir("project_plugins")
for dist_info in (
"my_application_plugin-1.0.dist-info",
"some_lib-2.0.dist-info",
):
shutil.copytree(fixture_path / dist_info, system_env.purelib / dist_info)
cache = ProjectPluginCache(poetry_with_plugins, io)
install_spy = mocker.spy(cache, "_install")
execute_mock = mocker.patch(
"poetry.plugins.plugin_manager.Installer._execute", return_value=0
)
cache.ensure_plugins()
install_spy.assert_called_once_with(
[
Dependency("my-application-plugin", ">=2.0"),
Dependency("my-other-plugin", ">=1.0"),
],
system_env,
[Package("some-lib", "2.0")],
)
execute_mock.assert_called_once()
assert [repr(op) for op in execute_mock.call_args.args[0] if not op.skipped] == [
"<Install my-application-plugin (2.0)>",
"<Install my-other-plugin (1.0)>",
]
assert cache._config_file.exists()
assert (
cache._gitignore_file.exists()
and cache._gitignore_file.read_text(encoding="utf-8") == "*"
)
start = (
"Ensuring that the Poetry plugins required by the project are available...\n"
)
opt = (
"The following Poetry plugins are required by the project"
" but are not satisfied by the installed versions:\n"
" - my-application-plugin (>=2.0)\n"
" installed: my-application-plugin (1.0)\n"
)
end = (
"The following Poetry plugins are required by the project"
" but are not installed in Poetry's environment:\n"
" - my-application-plugin (>=2.0)\n"
" - my-other-plugin (>=1.0)\n"
"Installing Poetry plugins only for the current project...\n"
)
expected = (start + opt + end) if debug_out else (start + end)
assert io.fetch_output().startswith(expected)
assert io.fetch_error() == ""
def test_ensure_plugins_pins_other_installed_packages(
poetry_with_plugins: Poetry,
io: BufferedIO,
system_env: Env,
fixture_dir: FixtureDirGetter,
mocker: MockerFixture,
) -> None:
fixture_path = fixture_dir("project_plugins")
for dist_info in (
"my_application_plugin-1.0.dist-info",
"some_lib-1.0.dist-info",
):
shutil.copytree(fixture_path / dist_info, system_env.purelib / dist_info)
cache = ProjectPluginCache(poetry_with_plugins, io)
install_spy = mocker.spy(cache, "_install")
execute_mock = mocker.patch(
"poetry.plugins.plugin_manager.Installer._execute", return_value=0
)
with pytest.raises(SolverProblemError):
cache.ensure_plugins()
install_spy.assert_called_once_with(
[
Dependency("my-application-plugin", ">=2.0"),
Dependency("my-other-plugin", ">=1.0"),
],
system_env,
# pinned because it might be a dependency of another plugin or Poetry itself
[Package("some-lib", "1.0")],
)
execute_mock.assert_not_called()
assert not cache._config_file.exists()
assert (
cache._gitignore_file.exists()
and cache._gitignore_file.read_text(encoding="utf-8") == "*"
)
assert io.fetch_output() == (
"Ensuring that the Poetry plugins required by the project are available...\n"
"The following Poetry plugins are required by the project"
" but are not installed in Poetry's environment:\n"
" - my-application-plugin (>=2.0)\n"
" - my-other-plugin (>=1.0)\n"
"Installing Poetry plugins only for the current project...\n"
"Updating dependencies\n"
"Resolving dependencies...\n"
)
assert io.fetch_error() == ""
@pytest.mark.parametrize("other_version", [False, True])
def test_project_plugins_are_installed_in_project_folder(
poetry_with_plugins: Poetry,
io: BufferedIO,
system_env: Env,
fixture_dir: FixtureDirGetter,
tmp_path: Path,
other_version: bool,
) -> None:
orig_purelib = system_env.purelib
orig_platlib = system_env.platlib
# make sure that the path dependency is on the same drive (for Windows tests in CI)
orig_wheel_path = (
fixture_dir("wheel_with_no_requires_dist") / "demo-0.1.0-py2.py3-none-any.whl"
)
wheel_path = tmp_path / orig_wheel_path.name
shutil.copy(orig_wheel_path, wheel_path)
if other_version:
WheelInstaller(system_env).install(wheel_path)
dist_info = orig_purelib / "demo-0.1.0.dist-info"
metadata = dist_info / "METADATA"
metadata.write_text(
metadata.read_text(encoding="utf-8").replace("0.1.0", "0.1.2"),
encoding="utf-8",
)
dist_info.rename(orig_purelib / "demo-0.1.2.dist-info")
cache = ProjectPluginCache(poetry_with_plugins, io)
# just use a file dependency so that we do not have to set up a repository
cache._install([FileDependency("demo", wheel_path)], system_env, [])
project_site_packages = [p.name for p in cache._path.iterdir()]
assert "demo" in project_site_packages
assert "demo-0.1.0.dist-info" in project_site_packages
orig_site_packages = [p.name for p in orig_purelib.iterdir()]
if other_version:
assert "demo" in orig_site_packages
assert "demo-0.1.2.dist-info" in orig_site_packages
assert "demo-0.1.0.dist-info" not in orig_site_packages
else:
assert not any(p.startswith("demo") for p in orig_site_packages)
if orig_platlib != orig_purelib:
assert not any(p.name.startswith("demo") for p in orig_platlib.iterdir())
| InvalidPlugin |
python | mlflow__mlflow | mlflow/telemetry/events.py | {
"start": 638,
"end": 704
} | class ____(Event):
name: str = "create_prompt"
| CreatePromptEvent |
python | spack__spack | lib/spack/spack/modules/common.py | {
"start": 11007,
"end": 16885
} | class ____:
"""Manipulates the information needed to generate a module file to make
querying easier. It needs to be sub-classed for specific module types.
"""
default_projections = {"all": "{name}/{version}-{compiler.name}-{compiler.version}"}
def __init__(self, spec: spack.spec.Spec, module_set_name: str, explicit: bool) -> None:
# Spec for which we want to generate a module file
self.spec = spec
self.name = module_set_name
self.explicit = explicit
# Dictionary of configuration options that should be applied to the spec
self.conf = merge_config_rules(self.module.configuration(self.name), self.spec)
@property
def module(self):
return inspect.getmodule(self)
@property
def projections(self):
"""Projection from specs to module names"""
# backwards compatibility for naming_scheme key
conf = self.module.configuration(self.name)
if "naming_scheme" in conf:
default = {"all": conf["naming_scheme"]}
else:
default = self.default_projections
projections = conf.get("projections", default)
# Ensure the named tokens we are expanding are allowed, see
# issue #2884 for reference
msg = "some tokens cannot be part of the module naming scheme"
for projection in projections.values():
_check_tokens_are_valid(projection, message=msg)
return projections
@property
def template(self):
"""Returns the name of the template to use for the module file
or None if not specified in the configuration.
"""
return self.conf.get("template", None)
@property
def defaults(self):
"""Returns the specs configured as defaults or []."""
return self.conf.get("defaults", [])
@property
def env(self):
"""List of environment modifications that should be done in the
module.
"""
return spack.schema.environment.parse(self.conf.get("environment", {}))
@property
def suffixes(self):
"""List of suffixes that should be appended to the module
file name.
"""
suffixes = []
for constraint, suffix in self.conf.get("suffixes", {}).items():
if constraint in self.spec:
suffixes.append(suffix)
suffixes = list(dedupe(suffixes))
# For hidden modules we can always add a fixed length hash as suffix, since it guards
# against file name clashes, and the module is not exposed to the user anyways.
if self.hidden:
suffixes.append(self.spec.dag_hash(length=7))
elif self.hash:
suffixes.append(self.hash)
return suffixes
@property
def hash(self):
"""Hash tag for the module or None"""
hash_length = self.conf.get("hash_length", 7)
if hash_length != 0:
return self.spec.dag_hash(length=hash_length)
return None
@property
def conflicts(self):
"""Conflicts for this module file"""
return self.conf.get("conflict", [])
@property
def excluded(self):
"""Returns True if the module has been excluded, False otherwise."""
# A few variables for convenience of writing the method
spec = self.spec
conf = self.module.configuration(self.name)
# Compute the list of matching include / exclude rules, and whether excluded as implicit
include_matches = [x for x in conf.get("include", []) if spec.satisfies(x)]
exclude_matches = [x for x in conf.get("exclude", []) if spec.satisfies(x)]
excluded_as_implicit = not self.explicit and conf.get("exclude_implicits", False)
def debug_info(line_header, match_list):
if match_list:
tty.debug(f"\t{line_header} : {spec.cshort_spec}")
for rule in match_list:
tty.debug(f"\t\tmatches rule: {rule}")
debug_info("INCLUDE", include_matches)
debug_info("EXCLUDE", exclude_matches)
if excluded_as_implicit:
tty.debug(f"\tEXCLUDED_AS_IMPLICIT : {spec.cshort_spec}")
return not include_matches and (exclude_matches or excluded_as_implicit)
@property
def hidden(self):
"""Returns True if the module has been hidden, False otherwise."""
conf = self.module.configuration(self.name)
hidden_as_implicit = not self.explicit and conf.get("hide_implicits", False)
if hidden_as_implicit:
tty.debug(f"\tHIDDEN_AS_IMPLICIT : {self.spec.cshort_spec}")
return hidden_as_implicit
@property
def context(self):
return self.conf.get("context", {})
@property
def specs_to_load(self):
"""List of specs that should be loaded in the module file."""
return self._create_list_for("autoload")
@property
def literals_to_load(self):
"""List of literal modules to be loaded."""
return self.conf.get("load", [])
@property
def specs_to_prereq(self):
"""List of specs that should be prerequisite of the module file."""
return self._create_list_for("prerequisites")
@property
def exclude_env_vars(self):
"""List of variables that should be left unmodified."""
filter_subsection = self.conf.get("filter", {})
return filter_subsection.get("exclude_env_vars", {})
def _create_list_for(self, what):
include = []
for item in self.conf[what]:
if not self.module.make_configuration(item, self.name).excluded:
include.append(item)
return include
@property
def verbose(self):
"""Returns True if the module file needs to be verbose, False
otherwise
"""
return self.conf.get("verbose")
| BaseConfiguration |
python | sympy__sympy | sympy/plotting/pygletplot/plot_modes.py | {
"start": 436,
"end": 915
} | class ____(PlotCurve):
i_vars, d_vars = 'x', 'y'
intervals = [[-5, 5, 100]]
aliases = ['cartesian']
is_default = True
def _get_sympy_evaluator(self):
fy = self.d_vars[0]
x = self.t_interval.v
@float_vec3
def e(_x):
return (_x, fy.subs(x, _x), 0.0)
return e
def _get_lambda_evaluator(self):
fy = self.d_vars[0]
x = self.t_interval.v
return lambdify([x], [x, fy, 0.0])
| Cartesian2D |
python | weaviate__weaviate-python-client | weaviate/collections/classes/aggregate.py | {
"start": 2190,
"end": 2489
} | class ____:
"""The property that the collection was grouped by."""
prop: str
value: Union[
str,
int,
float,
bool,
List[str],
List[int],
List[float],
List[bool],
GeoCoordinate,
None,
]
@dataclass
| GroupedBy |
python | google__jax | jax/experimental/jet.py | {
"start": 8076,
"end": 10788
} | class ____(core.Trace):
__slots__ = ("tag", "parent_trace", "order")
def __init__(self, tag, parent_trace, order):
super().__init__()
self.tag = tag
self.parent_trace = parent_trace
self.order = order
def to_primal_terms_pair(self, val):
if isinstance(val, JetTracer) and val._trace.tag is self.tag:
return val.primal, val.terms
else:
return val, zero_series
def process_primitive(self, primitive, tracers, params):
order = self.order # pytype: disable=attribute-error
primals_in, series_in = unzip2(map(self.to_primal_terms_pair, tracers))
if all(t is zero_series for t in series_in):
primal_out = primitive.bind_with_trace(self.parent_trace, primals_in, params)
if primitive.multiple_results:
return [JetTracer(self, p, zero_series) for p in primal_out]
else:
return JetTracer(self, primal_out, zero_series)
series_in = [[zero_term] * order if s is zero_series else s
for s in series_in]
with core.set_current_trace(self.parent_trace):
# TODO(mattjj): avoid always instantiating zeros
series_in = [[jnp.zeros(np.shape(x), dtype=jnp.result_type(x))
if t is zero_term else t for t in series]
for x, series in zip(primals_in, series_in)]
rule = jet_rules[primitive]
primal_out, terms_out = rule(primals_in, series_in, **params)
if not primitive.multiple_results:
return JetTracer(self, primal_out, terms_out)
else:
return [JetTracer(self, p, ts) for p, ts in zip(primal_out, terms_out)]
def process_call(self, call_primitive, f, tracers, params):
primals_in, series_in = unzip2(map(self.to_primal_terms_pair, tracers))
primals_and_series, in_tree_def = tree_flatten((primals_in, series_in))
f_jet, out_tree_def = traceable(jet_subtrace(f, self.main), in_tree_def)
update_params = call_param_updaters.get(call_primitive)
new_params = (update_params(params, len(primals_and_series))
if update_params else params)
result = call_primitive.bind(f_jet, *primals_and_series, **new_params)
primals_out, series_out = tree_unflatten(out_tree_def(), result)
return [JetTracer(self, p, ts) for p, ts in zip(primals_out, series_out)]
def process_custom_jvp_call(self, primitive, fun, jvp, tracers, *,
symbolic_zeros):
# TODO(mattjj): don't just ignore custom jvp rules?
del primitive, jvp # Unused.
return fun.call_wrapped(*tracers)
def process_custom_vjp_call(self, primitive, fun, fwd, bwd, tracers, out_trees):
del primitive, fwd, bwd, out_trees # Unused.
return fun.call_wrapped(*tracers)
| JetTrace |
python | huggingface__transformers | src/transformers/models/dia/configuration_dia.py | {
"start": 4583,
"end": 9904
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`DiaDecoder`]. It is used to instantiate a Dia
decoder according to the specified arguments, defining the decoder architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
max_position_embeddings (`int`, *optional*, defaults to 3072):
The maximum sequence length that this model might ever be used with.
num_hidden_layers (`int`, *optional*, defaults to 18):
Number of hidden layers in the Transformer decoder.
hidden_size (`int`, *optional*, defaults to 2048):
Dimensionality of the decoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 8192):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 4):
Number of key and value heads for each attention layer in the Transformer decoder.
head_dim (`int`, *optional*, defaults to 128):
Dimensionality of the attention head.
cross_num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each cross-attention layer in the Transformer decoder.
cross_head_dim (`int`, *optional*, defaults to 128):
Dimensionality of the cross-attention head.
cross_num_key_value_heads (`int`, *optional*, defaults to 16):
Number of key and value heads for each cross-attention layer in the Transformer decoder.
cross_hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the cross-attention layers.
norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the normalization layers.
vocab_size (`int`, *optional*, defaults to 1028):
Vocabulary size of the Dia model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`DiaModel`].
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder. If string, `"gelu"`, `"relu"`,
`"swish"` and `"gelu_new"` are supported.
num_channels (`int`, *optional*, defaults to 9):
Number of channels for the Dia decoder.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Indicating that this model is part of an encoder-decoder architecture.
"""
model_type = "dia_decoder"
def __init__(
self,
max_position_embeddings: int = 3072,
num_hidden_layers: int = 18,
hidden_size: int = 2048,
intermediate_size: int = 8192,
num_attention_heads: int = 16,
num_key_value_heads: int = 4,
head_dim: int = 128,
cross_num_attention_heads: int = 16,
cross_head_dim: int = 128,
cross_num_key_value_heads: int = 16,
cross_hidden_size: int = 1024,
norm_eps: float = 1e-5,
vocab_size: int = 1028,
hidden_act: str = "silu",
num_channels: int = 9,
rope_parameters: Optional[RopeParameters] = None,
initializer_range: float = 0.02,
use_cache: bool = True,
is_encoder_decoder: bool = True,
**kwargs,
):
self.max_position_embeddings = max_position_embeddings
self.num_hidden_layers = num_hidden_layers
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.head_dim = head_dim
self.cross_num_key_value_heads = cross_num_key_value_heads
self.cross_num_attention_heads = cross_num_attention_heads
self.cross_head_dim = cross_head_dim
self.cross_hidden_size = cross_hidden_size
self.norm_eps = norm_eps
self.vocab_size = vocab_size
self.hidden_act = hidden_act
self.num_channels = num_channels
self.initializer_range = initializer_range
self.use_cache = use_cache
self.rope_parameters = rope_parameters
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
| DiaDecoderConfig |
python | miyuchina__mistletoe | test/test_span_token.py | {
"start": 1438,
"end": 2704
} | class ____(TestBranchToken):
def test_parse(self):
self._test_parse(span_token.Emphasis, '*some text*', 'some text')
self._test_parse(span_token.Emphasis, '_some text_', 'some text')
def test_emphasis_with_straight_quote(self):
tokens = iter(span_token.tokenize_inner('_Book Title_\'s author'))
self._test_token(next(tokens), 'Book Title', children=True)
self._test_token(next(tokens), '\'s author', children=False)
def test_emphasis_with_smart_quote(self):
tokens = iter(span_token.tokenize_inner('_Book Title_’s author'))
self._test_token(next(tokens), 'Book Title', children=True)
self._test_token(next(tokens), '’s author', children=False)
def test_no_emphasis_for_underscore_without_punctuation(self):
tokens = iter(span_token.tokenize_inner('_an example without_punctuation'))
self._test_token(next(tokens), '_an example without_punctuation', children=True)
def test_emphasis_for_asterisk_without_punctuation(self):
tokens = iter(span_token.tokenize_inner('*an example without*punctuation'))
self._test_token(next(tokens), 'an example without', children=True)
self._test_token(next(tokens), 'punctuation', children=False)
| TestEmphasis |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/uninitializedVariable1.py | {
"start": 178,
"end": 424
} | class ____:
# This should generate an error if reportUninitializedInstanceVariable
# is enabled.
v1: int
v2: int
v3 = 2
v4: int = 3
def __init__(self) -> None:
self.v2 = 3
super().__init__()
@dataclass
| A |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 20810,
"end": 21437
} | class ____(BaseModel):
"""
Serializer for React App Plugin responses.
"""
model_config = ConfigDict(
extra="allow",
)
name: Annotated[str, Field(title="Name")]
icon: Annotated[str | None, Field(title="Icon")] = None
icon_dark_mode: Annotated[str | None, Field(title="Icon Dark Mode")] = None
url_route: Annotated[str | None, Field(title="Url Route")] = None
category: Annotated[str | None, Field(title="Category")] = None
bundle_url: Annotated[str, Field(title="Bundle Url")]
destination: Annotated[Destination1 | None, Field(title="Destination")] = "nav"
| ReactAppResponse |
python | scikit-learn__scikit-learn | sklearn/utils/tests/test_metaestimators.py | {
"start": 86,
"end": 2107
} | class ____:
"""This estimator's `available` parameter toggles the presence of a method"""
def __init__(self, available=True, return_value=1):
self.available = available
self.return_value = return_value
@available_if(lambda est: est.available)
def available_func(self):
"""This is a mock available_if function"""
return self.return_value
def test_available_if_docstring():
assert "This is a mock available_if function" in str(
AvailableParameterEstimator.__dict__["available_func"].__doc__
)
assert "This is a mock available_if function" in str(
AvailableParameterEstimator.available_func.__doc__
)
assert "This is a mock available_if function" in str(
AvailableParameterEstimator().available_func.__doc__
)
def test_available_if():
assert hasattr(AvailableParameterEstimator(), "available_func")
assert not hasattr(AvailableParameterEstimator(available=False), "available_func")
def test_available_if_unbound_method():
# This is a non regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/20614
# to make sure that decorated functions can be used as an unbound method,
# for instance when monkeypatching.
est = AvailableParameterEstimator()
AvailableParameterEstimator.available_func(est)
est = AvailableParameterEstimator(available=False)
with pytest.raises(
AttributeError,
match="This 'AvailableParameterEstimator' has no attribute 'available_func'",
):
AvailableParameterEstimator.available_func(est)
def test_available_if_methods_can_be_pickled():
"""Check that available_if methods can be pickled.
Non-regression test for #21344.
"""
return_value = 10
est = AvailableParameterEstimator(available=True, return_value=return_value)
pickled_bytes = pickle.dumps(est.available_func)
unpickled_func = pickle.loads(pickled_bytes)
assert unpickled_func() == return_value
| AvailableParameterEstimator |
python | pytorch__pytorch | test/dynamo/test_autograd_function.py | {
"start": 1714,
"end": 1816
} | class ____(torch.nn.Module):
def forward(self, foo):
return CustomFunc3().apply(foo)
| Module5 |
python | streamlit__streamlit | lib/streamlit/external/langchain/streamlit_callback_handler.py | {
"start": 2964,
"end": 3029
} | class ____(NamedTuple):
name: str
input_str: str
| ToolRecord |
python | pydantic__pydantic | pydantic/_internal/_repr.py | {
"start": 799,
"end": 1075
} | class ____(str):
"""String class where repr doesn't include quotes. Useful with Representation when you want to return a string
representation of something that is valid (or pseudo-valid) python.
"""
def __repr__(self) -> str:
return str(self)
| PlainRepr |
python | rapidsai__cudf | python/cudf/cudf/pandas/fast_slow_proxy.py | {
"start": 33754,
"end": 50785
} | class ____(FallbackError):
"""Raises when cuDF produces a TypeError"""
pass
def _raise_fallback_error(err, name):
"""Raises a fallback error."""
err_message = f"Falling back to the slow path. The exception was {err}. \
The function called was {name}."
exception_map = {
(RMMError, MemoryError): OOMFallbackError,
NotImplementedError: NotImplementedFallbackError,
AttributeError: AttributeFallbackError,
TypeError: TypeFallbackError,
}
for err_type, fallback_err_type in exception_map.items():
if isinstance(err, err_type):
raise fallback_err_type(err_message) from err
raise FallbackError(err_message) from err
def _fast_function_call():
"""
Placeholder fast function for pytest profiling purposes.
"""
return None
def _slow_function_call():
"""
Placeholder slow function for pytest profiling purposes.
"""
return None
def _fast_slow_function_call(
func: Callable,
transfer_block: _BlockState | None = None,
/,
*args,
**kwargs,
) -> Any:
"""
Call `func` with all `args` and `kwargs` converted to their
respective fast type. If that fails, call `func` with all
`args` and `kwargs` converted to their slow type.
Wrap the result in a fast-slow proxy if it is a type we know how
to wrap.
"""
from .module_accelerator import disable_module_accelerator
fast = False
block_transfer_to_fast = False
try:
if transfer_block is _BlockState.TO_FAST:
raise Exception("Forcing slow path due to transfer blocking")
with nvtx.annotate(
"EXECUTE_FAST",
color=_CUDF_PANDAS_NVTX_COLORS["EXECUTE_FAST"],
domain="cudf_pandas",
):
fast_args, fast_kwargs = _fast_arg(args), _fast_arg(kwargs)
result = func(*fast_args, **fast_kwargs)
if result is NotImplemented:
# try slow path
raise Exception()
fast = True
_fast_function_call()
if _env_get_bool("CUDF_PANDAS_DEBUGGING", False):
try:
with nvtx.annotate(
"EXECUTE_SLOW_DEBUG",
color=_CUDF_PANDAS_NVTX_COLORS["EXECUTE_SLOW"],
domain="cudf_pandas",
):
slow_args, slow_kwargs = (
_slow_arg(args),
_slow_arg(kwargs),
)
with disable_module_accelerator():
slow_result = func(*slow_args, **slow_kwargs)
except Exception as e:
warnings.warn(
"The result from pandas could not be computed. "
f"The exception was {e}."
)
else:
try:
_assert_fast_slow_eq(result, slow_result)
except AssertionError as e:
warnings.warn(
"The results from cudf and pandas were different. "
f"The exception was {e}."
)
except Exception as e:
warnings.warn(
"Pandas debugging mode failed. "
f"The exception was {e}."
)
except Exception as err:
if type(err) is cudf.errors.MixedTypeError:
block_transfer_to_fast = True
with nvtx.annotate(
"EXECUTE_SLOW",
color=_CUDF_PANDAS_NVTX_COLORS["EXECUTE_SLOW"],
domain="cudf_pandas",
):
slow_args, slow_kwargs = _slow_arg(args), _slow_arg(kwargs)
if _env_get_bool("CUDF_PANDAS_FAIL_ON_FALLBACK", False):
_raise_fallback_error(err, slow_args[0].__name__)
if _env_get_bool("LOG_FAST_FALLBACK", False):
from ._logger import log_fallback
log_fallback(slow_args, slow_kwargs, err)
_slow_function_call()
with disable_module_accelerator():
result = func(*slow_args, **slow_kwargs)
result = _maybe_wrap_result(result, func, *args, **kwargs)
if block_transfer_to_fast and isinstance(result, _FastSlowProxy):
result.force_state(_State.SLOW)
return result, fast
def _transform_arg(
arg: Any,
attribute_name: Literal["_fsproxy_slow", "_fsproxy_fast"],
seen: set[int],
) -> Any:
"""
Transform "arg" into its corresponding slow (or fast) type.
"""
import numpy as np
if isinstance(arg, (_FastSlowProxy, _FastSlowProxyMeta, _FunctionProxy)):
typ = getattr(arg, attribute_name)
if typ is _Unusable:
raise Exception("Cannot transform _Unusable")
return typ
elif isinstance(arg, types.ModuleType) and attribute_name in arg.__dict__:
return arg.__dict__[attribute_name]
elif isinstance(arg, list):
return type(arg)(_transform_arg(a, attribute_name, seen) for a in arg)
elif isinstance(arg, tuple):
# This attempts to handle arbitrary subclasses of tuple by
# assuming that if you've subclassed tuple with some special
# behaviour you'll also make the object pickleable by
# implementing the custom pickle protocol interface (either
# __getnewargs_ex__ or __getnewargs__). Perhaps this should
# use __reduce_ex__ instead...
if type(arg) is tuple:
# Must come first to avoid infinite recursion
if (
len(arg) > 0
and isinstance(arg[0], _MethodProxy)
and arg[0]._customqualname in _SPECIAL_FUNCTIONS_ARGS_MAP
):
indices_map = _SPECIAL_FUNCTIONS_ARGS_MAP[
arg[0]._customqualname
]
method_proxy, original_args, original_kwargs = arg
original_args = tuple(
_transform_arg(a, "_fsproxy_slow", seen)
if i - 1 in indices_map
else _transform_arg(a, attribute_name, seen)
for i, a in enumerate(original_args)
)
original_kwargs = _transform_arg(
original_kwargs, attribute_name, seen
)
return tuple(
(
_transform_arg(method_proxy, attribute_name, seen),
original_args,
original_kwargs,
)
)
else:
return tuple(
_transform_arg(a, attribute_name, seen) for a in arg
)
elif hasattr(arg, "__getnewargs_ex__"):
# Partial implementation of to reconstruct with
# transformed pieces
# This handles scipy._lib._bunch._make_tuple_bunch
args, kwargs = (
_transform_arg(a, attribute_name, seen)
for a in arg.__getnewargs_ex__()
)
obj = type(arg).__new__(type(arg), *args, **kwargs)
if hasattr(obj, "__setstate__"):
raise NotImplementedError(
"Transforming tuple-like with __getnewargs_ex__ and "
"__setstate__ not implemented"
)
if not hasattr(obj, "__dict__") and kwargs:
raise NotImplementedError(
"Transforming tuple-like with kwargs from "
"__getnewargs_ex__ and no __dict__ not implemented"
)
obj.__dict__.update(kwargs)
return obj
elif hasattr(arg, "__getnewargs__"):
# This handles namedtuple, and would catch tuple if we
# didn't handle it above.
args = _transform_arg(arg.__getnewargs__(), attribute_name, seen)
return type(arg).__new__(type(arg), *args)
else:
# Hope we can just call the constructor with transformed entries.
return type(arg)(
_transform_arg(a, attribute_name, seen) for a in args
)
elif isinstance(arg, dict):
return {
_transform_arg(k, attribute_name, seen): _transform_arg(
a, attribute_name, seen
)
for k, a in arg.items()
}
elif isinstance(arg, np.ndarray) and arg.dtype == "O":
transformed: list[Any] = [ # type: ignore[var-annotated]
_transform_arg(a, attribute_name, seen) for a in arg.flat
]
# Keep the same memory layout as arg (the default is C_CONTIGUOUS)
if arg.flags["F_CONTIGUOUS"] and not arg.flags["C_CONTIGUOUS"]:
order = "F"
else:
order = "C"
result = np.empty( # type: ignore[call-overload]
int(np.prod(arg.shape)), dtype=np.object_, order=order
)
result[...] = transformed
return result.reshape(arg.shape)
elif isinstance(arg, Iterator) and attribute_name == "_fsproxy_fast":
# this may include consumable objects like generators or
# IOBase objects, which we don't want unavailable to the slow
# path in case of fallback. So, we raise here and ensure the
# slow path is taken:
raise Exception()
elif isinstance(arg, types.FunctionType):
if id(arg) in seen:
# `arg` is mutually recursive with another function. We
# can't handle these cases yet:
return arg
seen.add(id(arg))
return _replace_closurevars(arg, attribute_name, seen)
else:
return arg
def _fast_arg(arg: Any) -> Any:
"""
Transform "arg" into its corresponding fast type.
"""
seen: set[int] = set()
return _transform_arg(arg, "_fsproxy_fast", seen)
def _slow_arg(arg: Any) -> Any:
"""
Transform "arg" into its corresponding slow type.
"""
seen: set[int] = set()
return _transform_arg(arg, "_fsproxy_slow", seen)
def _maybe_wrap_result(result: Any, func: Callable, /, *args, **kwargs) -> Any:
"""
Wraps "result" in a fast-slow proxy if is a "proxiable" object.
"""
if isinstance(result, (int, str, float, bool, type(None))):
return result
elif _is_final_type(result):
typ = get_final_type_map()[type(result)]
return typ._fsproxy_wrap(result, func)
elif _is_intermediate_type(result):
typ = get_intermediate_type_map()[type(result)]
return typ._fsproxy_wrap(result, method_chain=(func, args, kwargs))
elif _is_final_class(result):
return get_final_type_map()[result]
elif isinstance(result, list):
return type(result)(
[
_maybe_wrap_result(r, operator.getitem, result, i)
for i, r in enumerate(result)
]
)
elif isinstance(result, tuple):
wrapped = (
_maybe_wrap_result(r, operator.getitem, result, i)
for i, r in enumerate(result)
)
if hasattr(result, "_make"):
# namedtuple
return type(result)._make(wrapped)
else:
return type(result)(wrapped)
elif isinstance(result, Iterator):
return (_maybe_wrap_result(r, lambda x: x, r) for r in result)
else:
return result
def _is_final_type(result: Any) -> bool:
return type(result) in get_final_type_map()
def _is_final_class(result: Any) -> bool:
if not isinstance(result, type):
return False
return result in get_final_type_map()
def _is_intermediate_type(result: Any) -> bool:
return type(result) in get_intermediate_type_map()
def _is_function_or_method(obj: Any) -> bool:
res = isinstance(
obj,
(
types.FunctionType,
types.BuiltinFunctionType,
types.MethodType,
types.WrapperDescriptorType,
types.MethodWrapperType,
types.MethodDescriptorType,
types.BuiltinMethodType,
),
)
if not res:
try:
return "cython_function_or_method" in str(type(obj))
except Exception:
return False
return res
def _replace_closurevars(
f: types.FunctionType,
attribute_name: Literal["_fsproxy_slow", "_fsproxy_fast"],
seen: set[int],
) -> Callable[..., Any]:
"""
Return a copy of `f` with its closure variables replaced with
their corresponding slow (or fast) types.
"""
if f.__closure__:
# GH #254: If empty cells are present - which can happen in
# situations like when `f` is a method that invokes the
# "empty" `super()` - the call to `getclosurevars` below will
# fail. For now, we just return `f` in this case. If needed,
# we can consider populating empty cells with a placeholder
# value to allow the call to `getclosurevars` to succeed.
if any(c == types.CellType() for c in f.__closure__):
return f
f_nonlocals, f_globals, _, _ = inspect.getclosurevars(f)
g_globals = _transform_arg(f_globals, attribute_name, seen)
g_nonlocals = _transform_arg(f_nonlocals, attribute_name, seen)
# if none of the globals/nonlocals were transformed, we
# can just return f:
if all(f_globals[k] is g_globals[k] for k in f_globals) and all(
g_nonlocals[k] is f_nonlocals[k] for k in f_nonlocals
):
return f
g_closure = tuple(types.CellType(val) for val in g_nonlocals.values())
# https://github.com/rapidsai/cudf/issues/15548
new_g_globals = f.__globals__.copy()
new_g_globals.update(g_globals)
g = types.FunctionType(
f.__code__,
new_g_globals,
name=f.__name__,
argdefs=f.__defaults__,
closure=g_closure,
)
return functools.update_wrapper(
g,
f,
assigned=(*functools.WRAPPER_ASSIGNMENTS, "__kwdefaults__"),
)
def is_proxy_object(obj: Any) -> bool:
"""Determine if an object is proxy object
Parameters
----------
obj : object
Any python object.
"""
if _FastSlowProxyMeta in type(type(obj)).__mro__:
return True
return False
def _get_proxy_base_class(cls):
"""Returns the proxy base class if one exists"""
for proxy_class in PROXY_BASE_CLASSES:
if proxy_class in cls.__mro__:
return proxy_class
return object
def as_proxy_object(obj: Any) -> Any:
"""
Wraps a cudf or pandas object in a proxy object if applicable.
There will be no memory transfer, i.e., GPU objects stay on GPU and
CPU objects stay on CPU. The object will be wrapped in a
proxy object. This is useful for ensuring that the object is
compatible with the fast-slow proxy system.
Parameters
----------
obj : Any
The object to wrap.
Returns
-------
Any
The wrapped proxy object if applicable, otherwise the original object.
"""
if _is_final_type(obj):
typ = get_final_type_map()[type(obj)]
return typ._fsproxy_wrap(obj, None)
return obj
def is_proxy_instance(obj, type):
return is_proxy_object(obj) and obj.__class__.__name__ == type.__name__
PROXY_BASE_CLASSES: set[type] = {
ProxyNDarrayBase,
datetime.datetime,
datetime.timedelta,
}
NUMPY_TYPES: set[type[np.generic]] = set(np.sctypeDict.values())
_SPECIAL_METHODS: set[str] = {
"__abs__",
"__add__",
"__and__",
"__bool__",
"__call__",
"__getattr__",
"__complex__",
"__contains__",
"__copy__",
"__dataframe__",
"__deepcopy__",
"__delitem__",
"__delslice__",
"__divmod__",
"__enter__",
"__eq__",
"__exit__",
"__float__",
"__floordiv__",
"__format__",
"__ge__",
"__getitem__",
"__getslice__",
"__gt__",
# Added on a per-proxy basis
# https://github.com/rapidsai/xdf/pull/306#pullrequestreview-1636155428
# "__hash__",
"__iadd__",
"__iand__",
"__iconcat__",
"__ifloordiv__",
"__ilshift__",
"__imatmul__",
"__imod__",
"__imul__",
"__int__",
"__invert__",
"__ior__",
"__ipow__",
"__irshift__",
"__isub__",
"__iter__",
"__itruediv__",
"__ixor__",
"__le__",
"__len__",
"__lshift__",
"__lt__",
"__matmul__",
"__mod__",
"__mul__",
"__ne__",
"__neg__",
"__next__",
"__or__",
"__pos__",
"__pow__",
"__radd__",
"__rand__",
"__rdivmod__",
"__repr__",
"__rfloordiv__",
"__rlshift__",
"__rmatmul__",
"__rmod__",
"__rmul__",
"__ror__",
"__round__",
"__rpow__",
"__rrshift__",
"__rshift__",
"__rsub__",
"__rtruediv__",
"__rxor__",
"__setitem__",
"__setslice__",
"__str__",
"__sub__",
"__truediv__",
"__xor__",
}
| TypeFallbackError |
python | great-expectations__great_expectations | great_expectations/render/components.py | {
"start": 12564,
"end": 13763
} | class ____(RenderedComponentContent):
def __init__(
self, tabs, header=None, subheader=None, styling=None, content_block_type="tabs"
) -> None:
super().__init__(content_block_type=content_block_type, styling=styling)
self.tabs = tabs
self.header = header
self.subheader = subheader
@override
def to_json_dict(self) -> dict[str, JSONValues]:
"""Returns a JSON-serializable dict representation of this RenderedTabsContent.
Returns:
A JSON-serializable dict representation of this RenderedTabsContent.
"""
d = super().to_json_dict()
d["tabs"] = RenderedContent.rendered_content_list_to_json(self.tabs, check_dicts=True)
if self.header is not None:
if isinstance(self.header, RenderedContent):
d["header"] = self.header.to_json_dict()
else:
d["header"] = self.header
if self.subheader is not None:
if isinstance(self.subheader, RenderedContent):
d["subheader"] = self.subheader.to_json_dict()
else:
d["subheader"] = self.subheader
return d
| RenderedTabsContent |
python | jazzband__django-waffle | waffle/tests/test_mixin.py | {
"start": 613,
"end": 1722
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.request = get()
def test_flag_must_be_active(self):
view = views.FlagView
self.assertRaises(Http404, process_request, self.request, view)
Flag.objects.create(name='foo', everyone=True)
response = process_request(self.request, view)
self.assertEqual(b'foo', response.content)
def test_flag_must_be_inactive(self):
view = views.FlagOffView
response = process_request(self.request, view)
self.assertEqual(b'foo', response.content)
Flag.objects.create(name='foo', everyone=True)
self.assertRaises(Http404, process_request, self.request, view)
def test_override_with_cookie(self):
Flag.objects.create(name='foo', percent='0.1')
self.request.COOKIES['dwf_foo'] = 'True'
response = process_request(self.request, views.FlagView)
self.assertEqual(b'foo', response.content)
self.assertIn('dwf_foo', response.cookies)
self.assertEqual('True', response.cookies['dwf_foo'].value)
| WaffleFlagMixinTest |
python | ray-project__ray | python/ray/data/_internal/datasource/iceberg_datasource.py | {
"start": 2137,
"end": 8660
} | class ____(
_ExprVisitor["BooleanExpression | UnboundTerm[Any] | Literal[Any]"]
):
"""
Visitor that converts Ray Data expressions to PyIceberg expressions.
This enables Ray Data users to write filters using the familiar col() syntax
while leveraging Iceberg's native filtering capabilities.
Example:
>>> from ray.data.expressions import col
>>> ray_expr = (col("date") >= "2024-01-01") & (col("status") == "active")
>>> iceberg_expr = _IcebergExpressionVisitor().visit(ray_expr)
>>> # iceberg_expr can now be used with PyIceberg's filter APIs
"""
def visit_column(self, expr: "ColumnExpr") -> "UnboundTerm[Any]":
"""Convert a column reference to an Iceberg reference."""
return Reference(expr.name)
def visit_literal(self, expr: "LiteralExpr") -> "Literal[Any]":
"""Convert a literal value to an Iceberg literal."""
return literal(expr.value)
def visit_binary(self, expr: "BinaryExpr") -> "BooleanExpression":
"""Convert a binary operation to an Iceberg expression."""
# Handle IN/NOT_IN specially since they don't visit the right operand
# (the right operand is a list literal that can't be converted)
if expr.op in (Operation.IN, Operation.NOT_IN):
left = self.visit(expr.left)
if not isinstance(expr.right, LiteralExpr):
raise ValueError(
f"{expr.op.name} operation requires right operand to be a literal list, "
f"got {type(expr.right).__name__}"
)
return RAY_DATA_OPERATION_TO_ICEBERG[expr.op](left, expr.right.value)
# For all other operations, visit both operands
left = self.visit(expr.left)
right = self.visit(expr.right)
if expr.op in RAY_DATA_OPERATION_TO_ICEBERG:
return RAY_DATA_OPERATION_TO_ICEBERG[expr.op](left, right)
else:
# Arithmetic operations are not supported in filter expressions
raise ValueError(
f"Unsupported binary operation for Iceberg filters: {expr.op}. "
f"Iceberg filters support: {RAY_DATA_OPERATION_TO_ICEBERG.keys()}. "
f"Arithmetic operations (ADD, SUB, MUL, DIV) cannot be used in filters."
)
def visit_unary(self, expr: "UnaryExpr") -> "BooleanExpression":
"""Convert a unary operation to an Iceberg expression."""
operand = self.visit(expr.operand)
if expr.op in RAY_DATA_OPERATION_TO_ICEBERG:
return RAY_DATA_OPERATION_TO_ICEBERG[expr.op](operand)
else:
raise ValueError(
f"Unsupported unary operation for Iceberg: {expr.op}. "
f"Supported operations: {RAY_DATA_OPERATION_TO_ICEBERG.keys()}"
)
def visit_alias(
self, expr: "AliasExpr"
) -> "BooleanExpression | UnboundTerm[Any] | Literal[Any]":
"""Convert an aliased expression (just unwrap the alias)."""
return self.visit(expr.expr)
def visit_udf(
self, expr: "UDFExpr"
) -> "BooleanExpression | UnboundTerm[Any] | Literal[Any]":
"""UDF expressions cannot be converted to Iceberg expressions."""
raise TypeError(
"UDF expressions cannot be converted to Iceberg expressions. "
"Iceberg filters must use simple column comparisons and boolean operations."
)
def visit_download(
self, expr: "DownloadExpr"
) -> "BooleanExpression | UnboundTerm[Any] | Literal[Any]":
"""Download expressions cannot be converted to Iceberg expressions."""
raise TypeError(
"Download expressions cannot be converted to Iceberg expressions."
)
def visit_star(
self, expr: "StarExpr"
) -> "BooleanExpression | UnboundTerm[Any] | Literal[Any]":
"""Star expressions cannot be converted to Iceberg expressions."""
raise TypeError(
"Star expressions cannot be converted to Iceberg filter expressions."
)
def _get_read_task(
tasks: Iterable["FileScanTask"],
table_io: "FileIO",
table_metadata: "TableMetadata",
row_filter: "BooleanExpression",
case_sensitive: bool,
limit: Optional[int],
schema: "Schema",
column_rename_map: Optional[Dict[str, str]],
) -> Iterable[Block]:
# Determine the PyIceberg version to handle backward compatibility
import pyiceberg
from ray.data.datasource.datasource import _DatasourceProjectionPushdownMixin
def _generate_tables() -> Iterable[pa.Table]:
"""Inner generator that yields tables without renaming."""
if version.parse(pyiceberg.__version__) >= version.parse("0.9.0"):
# Modern implementation using ArrowScan (PyIceberg 0.9.0+)
from pyiceberg.io.pyarrow import ArrowScan
# Initialize scanner with Iceberg metadata and query parameters
scanner = ArrowScan(
table_metadata=table_metadata,
io=table_io,
row_filter=row_filter,
projected_schema=schema,
case_sensitive=case_sensitive,
limit=limit,
)
# Convert scanned data to Arrow Table format
result_table = scanner.to_table(tasks=tasks)
# Stream results as RecordBatches for memory efficiency
for batch in result_table.to_batches():
yield pa.Table.from_batches([batch])
else:
# Legacy implementation using project_table (PyIceberg <0.9.0)
from pyiceberg.io import pyarrow as pyi_pa_io
# Use the PyIceberg API to read only a single task (specifically, a
# FileScanTask) - note that this is not as simple as reading a single
# parquet file, as there might be delete files, etc. associated, so we
# must use the PyIceberg API for the projection.
table = pyi_pa_io.project_table(
tasks=tasks,
table_metadata=table_metadata,
io=table_io,
row_filter=row_filter,
projected_schema=schema,
case_sensitive=case_sensitive,
limit=limit,
)
yield table
# Apply renames to all tables from the generator
yield from _DatasourceProjectionPushdownMixin._apply_rename_to_tables(
_generate_tables(), column_rename_map
)
@DeveloperAPI
| _IcebergExpressionVisitor |
python | huggingface__transformers | src/transformers/models/moshi/modeling_moshi.py | {
"start": 41753,
"end": 60099
} | class ____(MoshiPreTrainedModel, GenerationMixin):
"""
Transformer depth decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MoshiTransformerLayer`]
Args:
config: MoshiConfig
"""
config: MoshiDepthConfig
def __init__(self, config: MoshiDepthConfig):
super().__init__(config)
self.text_embed_tokens = nn.Embedding(config.vocab_size + 1, config.hidden_size)
# the last codebook is never used as input
self.embed_tokens = nn.ModuleList(
[nn.Embedding(config.audio_vocab_size + 1, config.hidden_size) for _ in range(config.num_codebooks - 1)]
)
self.input_projections = MoshiFlexibleLinear(config.input_size, config.hidden_size, config.num_codebooks)
self.layers = nn.ModuleList(
[
MoshiDecoderLayer(config, layer_idx, use_flexible_linear=True, use_rope=False)
for layer_idx in range(config.num_hidden_layers)
]
)
self.lm_heads = MoshiFlexibleLinear(config.hidden_size, config.audio_vocab_size, config.num_codebooks)
self._attn_implementation = config._attn_implementation
self.gradient_checkpointing = False
self.config = config
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
last_hidden_state: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.BoolTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
position_ids: Optional[torch.LongTensor] = None,
labels: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[tuple, BaseModelOutputWithPast]:
"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens. The first element of the sequence must the text token associated to the audio codebooks.
The rest of the elements must be flatten audio codebooks. The `cache_position` argument can be used to indicate to which index is associated each token.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the main decoder. Used to contextualize `input_ids`
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
`past_key_values`).
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more
information on the default strategy.
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`Cache`, *optional*):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert the inputs into associated vectors than the
model's internal embedding lookup matrix.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.n_positions - 1]`.
[What are position IDs?](../glossary#position-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
)
use_cache = False
if use_cache and past_key_values is None and not self.training:
past_key_values = DynamicCache(config=self.config)
past_seen_tokens = 0 if past_key_values is None else past_key_values.get_seq_length()
if cache_position is None:
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + input_ids.shape[1], device=input_ids.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
# If inputs_embeds is provided, it has the priority over input_ids, which won't be used
if inputs_embeds is None:
inputs_embeds = []
for position_idx in cache_position:
position_idx = position_idx.item()
if position_idx == 0:
inputs_embeds.append(self.text_embed_tokens(input_ids[:, [position_idx]]))
else:
inputs_embeds.append(
self.embed_tokens[(position_idx - 1)](input_ids[:, [position_idx - past_seen_tokens]])
)
inputs_embeds = torch.cat(inputs_embeds, dim=1)
inputs_embeds += self.input_projections(last_hidden_state, cache_position)
causal_mask = None
if attention_mask is not None:
causal_mask = self._update_causal_mask(
attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
hidden_states = inputs_embeds
for decoder_layer in self.layers:
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
logits = self.lm_heads(hidden_states, cache_position)
loss = None
if labels is not None:
# Upcast to float if we need to compute the loss to avoid potential precision issues
logits = logits.float()
loss_fct = CrossEntropyLoss()
labels = labels.masked_fill(labels == self.config.audio_vocab_size, -100).reshape(-1)
labels = labels.to(logits.device)
loss = loss_fct(logits.reshape(-1, self.config.audio_vocab_size), labels)
if not return_dict:
return tuple(
v for v in [loss, logits, past_key_values, all_hidden_states, all_self_attns] if v is not None
)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=past_key_values,
hidden_states=past_key_values,
attentions=all_self_attns,
)
def _update_causal_mask(
self,
attention_mask: Union[torch.Tensor, "BlockMask"],
input_tensor: torch.Tensor,
cache_position: torch.Tensor,
past_key_values: Cache,
output_attentions: bool = False,
):
if self.config._attn_implementation == "flash_attention_2":
if attention_mask is not None and past_key_values is not None:
is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
if is_padding_right:
raise ValueError(
"You are attempting to perform batched generation with padding_side='right'"
" this may lead to unexpected behaviour for Flash Attention version of Moshi. Make sure to "
" call `tokenizer.padding_side = 'left'` before tokenizing the input. "
)
if attention_mask is not None and 0.0 in attention_mask:
return attention_mask
return None
if self.config._attn_implementation == "flex_attention":
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask)
return attention_mask
# For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
# order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
# to infer the attention mask.
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
using_static_cache = isinstance(past_key_values, StaticCache)
# When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
if AttentionMaskConverter._ignore_causal_mask_sdpa(
attention_mask,
inputs_embeds=input_tensor,
past_key_values_length=past_seen_tokens,
sliding_window=self.config.sliding_window,
is_training=self.training,
):
return None
dtype = input_tensor.dtype
min_dtype = torch.finfo(dtype).min
sequence_length = input_tensor.shape[1]
# StaticCache
if using_static_cache:
target_length = past_key_values.get_max_cache_shape()
# DynamicCache or no cache
else:
target_length = (
attention_mask.shape[-1]
if isinstance(attention_mask, torch.Tensor)
else past_seen_tokens + sequence_length + 1
)
# In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
attention_mask,
sequence_length=sequence_length,
target_length=target_length,
dtype=dtype,
cache_position=cache_position,
batch_size=input_tensor.shape[0],
config=self.config,
past_key_values=past_key_values,
)
if (
self.config._attn_implementation == "sdpa"
and attention_mask is not None
and attention_mask.device.type in ["cuda", "xpu", "npu"]
and not output_attentions
):
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
# Details: https://github.com/pytorch/pytorch/issues/110213
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(
attention_mask: torch.Tensor,
sequence_length: int,
target_length: int,
dtype: torch.dtype,
cache_position: torch.Tensor,
batch_size: int,
config: MoshiDepthConfig,
past_key_values: Cache,
):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
config (`MoshiDepthConfig`):
The model's configuration class
past_key_values (`Cache`):
The cache class that is being used currently to generate
"""
if attention_mask is not None and attention_mask.dim() == 4:
# In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full(
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
)
diagonal_attend_mask = torch.arange(target_length, device=cache_position.device) > cache_position.reshape(
-1, 1
)
text_config = config.get_text_config()
if getattr(text_config, "use_sliding_window", True) and text_config.sliding_window is not None:
# if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
# the check is needed to verify is current checkpoint was trained with sliding window or not
is_static_sliding_cache = isinstance(past_key_values, StaticCache) and all(past_key_values.is_sliding)
if not is_static_sliding_cache or sequence_length > target_length:
sliding_attend_mask = torch.arange(target_length, device=cache_position.device) <= (
cache_position.reshape(-1, 1) - text_config.sliding_window
)
diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
causal_mask *= diagonal_attend_mask
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
if attention_mask.shape[-1] > target_length:
attention_mask = attention_mask[:, :target_length]
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
causal_mask.device
)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
padding_mask, min_dtype
)
return causal_mask
@auto_docstring
| MoshiDepthDecoder |
python | run-llama__llama_index | llama-index-core/llama_index/core/prompts/utils.py | {
"start": 174,
"end": 1929
} | class ____:
"""Safe string formatter that does not raise KeyError if key is missing."""
def __init__(self, format_dict: Optional[Dict[str, str]] = None):
self.format_dict = format_dict or {}
def format(self, format_string: str) -> str:
return re.sub(r"\{([^{}]+)\}", self._replace_match, format_string)
def parse(self, format_string: str) -> List[str]:
return re.findall(
r"\{([a-zA-Z_][a-zA-Z0-9_]*(?:\.[a-zA-Z_][a-zA-Z0-9_]*)*)\}", format_string
)
def _replace_match(self, match: re.Match) -> str:
key = match.group(1)
return str(self.format_dict.get(key, match.group(0)))
def format_string(string_to_format: str, **kwargs: str) -> str:
"""Format a string with kwargs."""
formatter = SafeFormatter(format_dict=kwargs)
return formatter.format(string_to_format)
def format_content_blocks(
content_blocks: List[ContentBlock], **kwargs: str
) -> List[ContentBlock]:
"""Format content blocks with kwargs."""
formatter = SafeFormatter(format_dict=kwargs)
formatted_blocks: List[ContentBlock] = []
for block in content_blocks:
if isinstance(block, TextBlock):
formatted_blocks.append(TextBlock(text=formatter.format(block.text)))
else:
formatted_blocks.append(block)
return formatted_blocks
def get_template_vars(template_str: str) -> List[str]:
"""Get template variables from a template string."""
variables = []
formatter = SafeFormatter()
for variable_name in formatter.parse(template_str):
if variable_name:
variables.append(variable_name)
return variables
def is_chat_model(llm: BaseLLM) -> bool:
return llm.metadata.is_chat_model
| SafeFormatter |
python | wandb__wandb | wandb/apis/public/api.py | {
"start": 2777,
"end": 5223
} | class ____:
"""A GraphQL client that retries requests on failure.
<!-- lazydoc-ignore-class: internal -->
"""
INFO_QUERY = gql(
"""
query ServerInfo{
serverInfo {
cliVersionInfo
latestLocalVersionInfo {
outOfDate
latestVersionString
versionOnThisInstanceString
}
}
}
"""
)
def __init__(self, client: Client):
self._server_info = None
self._client = client
self._execute_decorated: Callable[..., Any] | None = None
def execute(self, *args, **kwargs):
if self._execute_decorated is None:
self._execute_decorated = self._build_execute_wrapper()
return self._execute_decorated(*args, **kwargs)
def _build_execute_wrapper(self) -> Callable[..., Any]:
import requests
@retry.retriable(
retry_timedelta=RETRY_TIMEDELTA,
check_retry_fn=util.no_retry_auth,
retryable_exceptions=(RetryError, requests.RequestException),
)
def _wrapped(*args, **kwargs):
try:
return self._client.execute(*args, **kwargs)
except requests.exceptions.ReadTimeout:
if "timeout" not in kwargs:
timeout = self._client.transport.default_timeout
wandb.termwarn(
f"A graphql request initiated by the public wandb API timed out (timeout={timeout} sec). "
f"Create a new API with an integer timeout larger than {timeout}, e.g., "
f"`api = wandb.Api(timeout={timeout + 10})` to increase the graphql timeout."
)
raise
return _wrapped
@property
def app_url(self):
return util.app_url(self._client.transport.url.replace("/graphql", "")) + "/"
@property
def server_info(self):
if self._server_info is None:
self._server_info = self.execute(self.INFO_QUERY).get("serverInfo")
return self._server_info
def version_supported(
self, min_version: str
) -> bool: # User not encouraged to use this class directly
from packaging.version import parse
return parse(min_version) <= parse(
self.server_info["cliVersionInfo"]["max_cli_version"]
)
| RetryingClient |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/psycopg.py | {
"start": 10890,
"end": 10971
} | class ____(_PGExecutionContext_common_psycopg):
pass
| PGExecutionContext_psycopg |
python | getsentry__sentry | tests/sentry/tasks/test_weekly_reports.py | {
"start": 2250,
"end": 51752
} | class ____(OutcomesSnubaTest, SnubaTestCase, PerformanceIssueTestCase):
def setUp(self) -> None:
    """Pin common reference timestamps used by the report-window assertions below."""
    super().setUp()
    self.now = timezone.now()
    # Start of today (UTC) as an epoch timestamp; reports are keyed on day boundaries.
    self.timestamp = floor_to_utc_day(self.now).timestamp()
    self.two_days_ago = self.now - timedelta(days=2)
    self.three_days_ago = self.now - timedelta(days=3)

# Fixed batch UUID shared by tests that don't care about duplicate-send tracking.
_dummy_batch_id = "20bd6c5b-7fac-4f31-9548-d6f8bb63226d"
def store_event_outcomes(
    self,
    organization_id,
    project_id,
    timestamp,
    num_times,
    outcome=Outcome.ACCEPTED,
    category=DataCategory.ERROR,
):
    """Record *num_times* identical outcome rows in Snuba for the given org/project."""
    payload = {
        "org_id": organization_id,
        "project_id": project_id,
        "outcome": outcome,
        "category": category,
        "timestamp": timestamp,
        "key_id": 1,
    }
    self.store_outcomes(payload, num_times=num_times)
@freeze_time(before_now(days=2).replace(hour=0, minute=0, second=0, microsecond=0))
def test_integration(self) -> None:
    """End-to-end: scheduling organizations emails one report per team member."""
    # Start from a clean slate so only the project created below exists.
    with unguarded_write(using=router.db_for_write(Project)):
        Project.objects.all().delete()
    project = self.create_project(
        organization=self.organization,
        teams=[self.team],
        date_added=self.now - timedelta(days=90),
    )
    member_set = set(project.teams.get().member_set.all())
    self.store_event(
        data={
            "timestamp": before_now(days=1).isoformat(),
        },
        project_id=project.id,
    )
    with self.tasks():
        schedule_organizations(timestamp=self.now.timestamp())
    # One email per member; the org name appears in the subject line.
    assert len(mail.outbox) == len(member_set) == 1
    message = mail.outbox[0]
    assert self.organization.name in message.subject
@freeze_time(before_now(days=2).replace(hour=0, minute=0, second=0, microsecond=0))
def test_with_empty_string_user_option(self) -> None:
    """An empty-string 'reports:disabled-organizations' option must not block delivery."""
    project = self.create_project(
        organization=self.organization,
        teams=[self.team],
        date_added=self.now - timedelta(days=90),
    )
    self.store_event(data={"timestamp": before_now(days=1).isoformat()}, project_id=project.id)
    member_set = set(project.teams.get().member_set.all())
    for member in member_set:
        # some users have an empty string value set for this key, presumably cleared.
        user_option_service.set_option(
            user_id=member.user_id, key="reports:disabled-organizations", value=""
        )
    with self.tasks():
        schedule_organizations(timestamp=self.now.timestamp())
    assert len(mail.outbox) == len(member_set) == 1
    message = mail.outbox[0]
    assert self.organization.name in message.subject
@with_feature("system:multi-region")
@freeze_time(before_now(days=2).replace(hour=0, minute=0, second=0, microsecond=0))
def test_message_links_customer_domains(self) -> None:
    """With multi-region enabled, report links use the org's customer domain."""
    with unguarded_write(using=router.db_for_write(Project)):
        Project.objects.all().delete()
    project = self.create_project(
        organization=self.organization,
        teams=[self.team],
        date_added=self.now - timedelta(days=90),
    )
    self.store_event(
        data={
            "timestamp": before_now(days=1).isoformat(),
        },
        project_id=project.id,
    )
    with self.tasks():
        schedule_organizations(timestamp=self.now.timestamp())
    assert len(mail.outbox) == 1
    message = mail.outbox[0]
    assert isinstance(message, EmailMultiAlternatives)
    assert self.organization.name in message.subject
    # The HTML alternative must link via the org-slug subdomain.
    html = message.alternatives[0][0]
    assert isinstance(html, str)
    assert (
        f"http://{self.organization.slug}.testserver/issues/?referrer=weekly_report" in html
    )
def _set_option_value(self, value):
    """Upsert the current user's org-scoped "reports" notification setting to *value*."""
    with assume_test_silo_mode(SiloMode.CONTROL):
        NotificationSettingOption.objects.update_or_create(
            user_id=self.user.id,
            scope_type="organization",
            scope_identifier=self.organization.id,
            type="reports",
            defaults={"value": value},
        )
def _set_timezone(self, user, value):
    """Set *user*'s timezone user-option (control silo) to the tz name *value*."""
    with assume_test_silo_mode(SiloMode.CONTROL):
        UserOption.objects.set_value(user=user, key="timezone", value=value)
@mock.patch("sentry.tasks.summaries.weekly_reports.prepare_template_context")
@mock.patch("sentry.tasks.summaries.weekly_reports.OrganizationReportBatch.send_email")
def test_deliver_reports_respects_settings(
    self, mock_send_email, mock_prepare_template_context
):
    """Delivery honors the 'reports' notification setting: 'never' skips, 'always' sends."""
    self.store_event_outcomes(
        self.organization.id, self.project.id, self.two_days_ago, num_times=2
    )
    ctx = OrganizationReportContext(0, 0, organization=self.organization)
    user_project_ownership(ctx)
    template_context = prepare_template_context(ctx, [self.user.id])
    mock_prepare_template_context.return_value = template_context
    batch_id = "77a1d368-33d5-47cd-88cf-d66c97b38333"
    # disabled
    self._set_option_value("never")
    OrganizationReportBatch(ctx, batch_id).deliver_reports()
    assert mock_send_email.call_count == 0
    # enabled
    self._set_option_value("always")
    OrganizationReportBatch(ctx, batch_id).deliver_reports()
    assert mock_send_email.call_count == 1
    mock_send_email.assert_called_once_with(
        template_ctx=template_context[0].get("context"),
        user_id=template_context[0].get("user_id"),
    )
@mock.patch("sentry.tasks.summaries.weekly_reports.OrganizationReportBatch.send_email")
def test_member_disabled(self, mock_send_email: mock.MagicMock) -> None:
    """Members restricted by the member-limit flag receive no report email."""
    self.store_event_outcomes(
        self.organization.id, self.project.id, self.two_days_ago, num_times=2
    )
    ctx = OrganizationReportContext(0, 0, self.organization)
    user_project_ownership(ctx)
    with unguarded_write(using=router.db_for_write(Project)):
        # Flag the member as restricted by the org's member limit.
        OrganizationMember.objects.get(user_id=self.user.id).update(
            flags=F("flags").bitor(OrganizationMember.flags["member-limit:restricted"])
        )
    # disabled
    OrganizationReportBatch(ctx, self._dummy_batch_id).deliver_reports()
    assert mock_send_email.call_count == 0
@mock.patch("sentry.tasks.summaries.weekly_reports.OrganizationReportBatch.send_email")
def test_user_inactive(self, mock_send_email: mock.MagicMock) -> None:
    """Inactive users receive no report email."""
    self.store_event_outcomes(
        self.organization.id, self.project.id, self.two_days_ago, num_times=2
    )
    ctx = OrganizationReportContext(0, 0, self.organization)
    user_project_ownership(ctx)
    with assume_test_silo_mode(SiloMode.CONTROL), outbox_runner():
        self.user.update(is_active=False)
    # disabled
    OrganizationReportBatch(ctx, self._dummy_batch_id).deliver_reports()
    assert mock_send_email.call_count == 0
@mock.patch("sentry.tasks.summaries.weekly_reports.OrganizationReportBatch.send_email")
def test_invited_member(self, mock_send_email: mock.MagicMock) -> None:
    """A pending invite (member with no user) is skipped; existing members still get email."""
    self.store_event_outcomes(
        self.organization.id, self.project.id, self.two_days_ago, num_times=2
    )
    ctx = OrganizationReportContext(0, 0, self.organization)
    user_project_ownership(ctx)
    # create a member without a user
    OrganizationMember.objects.create(
        organization=self.organization, email="different.email@example.com", token="abc"
    )
    OrganizationReportBatch(ctx, self._dummy_batch_id).deliver_reports()
    assert mock_send_email.call_count == 1
@mock.patch("sentry.tasks.summaries.weekly_reports.MessageBuilder")
@freeze_time(before_now(days=2).replace(hour=0, minute=0, second=0, microsecond=0))
def test_transferred_project(self, message_builder: mock.MagicMock) -> None:
    """A project transferred out of the org still results in exactly one report email."""
    self.login_as(user=self.user)
    project = self.create_project(
        organization=self.organization, teams=[self.team], name="new-project"
    )
    self.store_event_outcomes(
        self.organization.id, self.project.id, self.three_days_ago, num_times=2
    )
    self.store_event_outcomes(
        self.organization.id, project.id, self.three_days_ago, num_times=2
    )
    # Move the new project to a different organization before building the report.
    project.transfer_to(organization=self.create_organization())
    prepare_organization_report(
        self.now.timestamp(), ONE_DAY * 7, self.organization.id, self._dummy_batch_id
    )
    assert message_builder.call_count == 1
@with_feature("organizations:escalating-issues")
@freeze_time(before_now(days=2).replace(hour=0, minute=0, second=0, microsecond=0))
def test_organization_project_issue_substatus_summaries(self) -> None:
    """Substatus summary counts one NEW and one ONGOING group for the project."""
    self.login_as(user=self.user)
    min_ago = (self.now - timedelta(minutes=1)).isoformat()
    event1 = self.store_event(
        data={
            "event_id": "a" * 32,
            "message": "message",
            "timestamp": min_ago,
            "fingerprint": ["group-1"],
        },
        project_id=self.project.id,
        default_event_type=EventType.DEFAULT,
    )
    event1.group.substatus = GroupSubStatus.ONGOING
    event1.group.save()
    event2 = self.store_event(
        data={
            "event_id": "b" * 32,
            "message": "message",
            "timestamp": min_ago,
            "fingerprint": ["group-2"],
        },
        project_id=self.project.id,
        default_event_type=EventType.DEFAULT,
    )
    event2.group.substatus = GroupSubStatus.NEW
    event2.group.save()
    timestamp = self.now.timestamp()
    self.store_event_outcomes(
        self.organization.id, self.project.id, self.two_days_ago, num_times=2
    )
    ctx = OrganizationReportContext(timestamp, ONE_DAY * 7, self.organization)
    user_project_ownership(ctx)
    organization_project_issue_substatus_summaries(ctx)
    project_ctx = cast(ProjectContext, ctx.projects_context_map[self.project.id])
    assert project_ctx.new_substatus_count == 1
    assert project_ctx.escalating_substatus_count == 0
    assert project_ctx.ongoing_substatus_count == 1
    assert project_ctx.regression_substatus_count == 0
    assert project_ctx.total_substatus_count == 2
@freeze_time(before_now(days=2).replace(hour=0, minute=0, second=0, microsecond=0))
def test_organization_project_issue_status(self) -> None:
    """project_key_errors excludes resolved groups: only the unresolved group is returned."""
    self.login_as(user=self.user)
    self.project.first_event = self.now - timedelta(days=3)
    min_ago = (self.now - timedelta(minutes=1)).isoformat()
    event1 = self.store_event(
        data={
            "event_id": "a" * 32,
            "message": "message",
            "timestamp": min_ago,
            "fingerprint": ["group-1"],
        },
        project_id=self.project.id,
        default_event_type=EventType.DEFAULT,
    )
    event2 = self.store_event(
        data={
            "event_id": "b" * 32,
            "message": "message",
            "timestamp": min_ago,
            "fingerprint": ["group-2"],
        },
        project_id=self.project.id,
        default_event_type=EventType.DEFAULT,
    )
    # Resolve the second group so it should not appear in key errors.
    group2 = event2.group
    group2.status = GroupStatus.RESOLVED
    group2.substatus = None
    group2.resolved_at = self.now - timedelta(minutes=1)
    group2.save()
    timestamp = self.now.timestamp()
    ctx = OrganizationReportContext(timestamp, ONE_DAY * 7, self.organization)
    user_project_ownership(ctx)
    key_errors = project_key_errors(ctx, self.project, Referrer.REPORTS_KEY_ERRORS.value)
    assert key_errors == [{"events.group_id": event1.group.id, "count()": 1}]
@mock.patch("sentry.analytics.record")
@mock.patch("sentry.tasks.summaries.weekly_reports.MessageBuilder")
def test_message_builder_simple(
    self, message_builder: mock.MagicMock, record: mock.MagicMock
) -> None:
    """Happy path: report context carries issue summary, trends, and an analytics event."""
    user = self.create_user()
    self.create_member(teams=[self.team], user=user, organization=self.organization)
    event1 = self.store_event(
        data={
            "event_id": "a" * 32,
            "message": "message",
            "timestamp": self.three_days_ago.isoformat(),
            "fingerprint": ["group-1"],
        },
        project_id=self.project.id,
        default_event_type=EventType.DEFAULT,
    )
    event2 = self.store_event(
        data={
            "event_id": "b" * 32,
            "message": "message",
            "timestamp": self.three_days_ago.isoformat(),
            "fingerprint": ["group-2"],
        },
        project_id=self.project.id,
        default_event_type=EventType.DEFAULT,
    )
    self.store_event_outcomes(
        self.organization.id, self.project.id, self.three_days_ago, num_times=2
    )
    self.store_event_outcomes(
        self.organization.id,
        self.project.id,
        self.three_days_ago,
        num_times=10,
        category=DataCategory.TRANSACTION,
    )
    # Resolve both error groups so key_errors is empty but totals still count.
    group1 = event1.group
    group2 = event2.group
    group1.status = GroupStatus.RESOLVED
    group1.substatus = None
    group1.resolved_at = self.two_days_ago
    group1.save()
    group2.status = GroupStatus.RESOLVED
    group2.substatus = None
    group2.resolved_at = self.two_days_ago
    group2.save()
    perf_event_1 = self.create_performance_issue(
        fingerprint=f"{PerformanceNPlusOneGroupType.type_id}-group1"
    )
    perf_event_2 = self.create_performance_issue(
        fingerprint=f"{PerformanceNPlusOneGroupType.type_id}-group2"
    )
    assert perf_event_1.group is not None
    assert perf_event_2.group is not None
    perf_event_1.group.update(substatus=GroupSubStatus.ONGOING)
    perf_event_2.group.update(substatus=GroupSubStatus.ONGOING)
    # store a crons issue just to make sure it's not counted in key_performance_issues
    self.create_group(type=MonitorIncidentType.type_id)
    prepare_organization_report(
        self.now.timestamp(), ONE_DAY * 7, self.organization.id, self._dummy_batch_id
    )
    for call_args in message_builder.call_args_list:
        message_params = call_args.kwargs
        context = message_params["context"]
        assert message_params["template"] == "sentry/emails/reports/body.txt"
        assert message_params["html_template"] == "sentry/emails/reports/body.html"
        assert context["organization"] == self.organization
        assert context["issue_summary"] == {
            "escalating_substatus_count": 0,
            "new_substatus_count": 0,
            "ongoing_substatus_count": 2,
            "regression_substatus_count": 0,
            "total_substatus_count": 2,
        }
        assert len(context["key_errors"]) == 0
        assert len(context["key_performance_issues"]) == 2
        assert context["trends"]["total_error_count"] == 2
        assert context["trends"]["total_transaction_count"] == 10
        assert "Weekly Report for" in message_params["subject"]
        assert isinstance(context["notification_uuid"], str)
    assert_any_analytics_event(
        record,
        WeeklyReportSent(
            user_id=user.id,
            organization_id=self.organization.id,
            notification_uuid="mock.ANY",
            user_project_count=1,
        ),
        exclude_fields=["notification_uuid"],
    )
@mock.patch("sentry.analytics.record")
@mock.patch("sentry.tasks.summaries.weekly_reports.MessageBuilder")
def test_message_builder_filter_resolved(
    self, message_builder: mock.MagicMock, record: mock.MagicMock
) -> None:
    """Test that we filter resolved issues out of key errors"""
    user = self.create_user()
    self.create_member(teams=[self.team], user=user, organization=self.organization)
    self.store_event(
        data={
            "event_id": "a" * 32,
            "message": "message",
            "timestamp": self.three_days_ago.isoformat(),
            "fingerprint": ["group-1"],
        },
        project_id=self.project.id,
        default_event_type=EventType.DEFAULT,
    )
    self.store_event(
        data={
            "event_id": "b" * 32,
            "message": "message",
            "timestamp": self.three_days_ago.isoformat(),
            "fingerprint": ["group-2"],
        },
        project_id=self.project.id,
        default_event_type=EventType.DEFAULT,
    )
    self.store_event_outcomes(
        self.organization.id, self.project.id, self.three_days_ago, num_times=2
    )
    self.store_event_outcomes(
        self.organization.id,
        self.project.id,
        self.three_days_ago,
        num_times=10,
        category=DataCategory.TRANSACTION,
    )
    self.create_performance_issue(fingerprint=f"{PerformanceNPlusOneGroupType.type_id}-group1")
    self.create_performance_issue(fingerprint=f"{PerformanceNPlusOneGroupType.type_id}-group2")
    # store a crons issue just to make sure it's not counted in key_performance_issues
    self.create_group(type=MonitorIncidentType.type_id)
    prepare_organization_report(
        self.now.timestamp(), ONE_DAY * 7, self.organization.id, self._dummy_batch_id
    )
    for call_args in message_builder.call_args_list:
        message_params = call_args.kwargs
        context = message_params["context"]
        assert message_params["template"] == "sentry/emails/reports/body.txt"
        assert message_params["html_template"] == "sentry/emails/reports/body.html"
        assert context["organization"] == self.organization
        # Nothing is resolved here, so both error groups remain key errors.
        assert context["issue_summary"] == {
            "escalating_substatus_count": 0,
            "new_substatus_count": 4,
            "ongoing_substatus_count": 0,
            "regression_substatus_count": 0,
            "total_substatus_count": 4,
        }
        assert len(context["key_errors"]) == 2
        assert len(context["key_performance_issues"]) == 2
        assert context["trends"]["total_error_count"] == 2
        assert context["trends"]["total_transaction_count"] == 10
        assert "Weekly Report for" in message_params["subject"]
        assert isinstance(context["notification_uuid"], str)
    assert_any_analytics_event(
        record,
        WeeklyReportSent(
            user_id=user.id,
            organization_id=self.organization.id,
            notification_uuid="mock.ANY",
            user_project_count=1,
        ),
        exclude_fields=["notification_uuid"],
    )
@mock.patch("sentry.tasks.summaries.weekly_reports.MessageBuilder")
def test_message_builder_filter_to_error_level(self, message_builder: mock.MagicMock) -> None:
    """Test that we filter non-error level issues out of key errors"""
    user = self.create_user()
    self.create_member(teams=[self.team], user=user, organization=self.organization)
    # info-level event: should be excluded from key errors.
    self.store_event(
        data={
            "event_id": "a" * 32,
            "message": "message",
            "timestamp": self.three_days_ago.isoformat(),
            "fingerprint": ["group-1"],
            "level": "info",
        },
        project_id=self.project.id,
        default_event_type=EventType.DEFAULT,
    )
    # error-level event: should be the only key error.
    self.store_event(
        data={
            "event_id": "b" * 32,
            "message": "message",
            "timestamp": self.three_days_ago.isoformat(),
            "fingerprint": ["group-2"],
            "level": "error",
        },
        project_id=self.project.id,
        default_event_type=EventType.DEFAULT,
    )
    self.store_event_outcomes(
        self.organization.id, self.project.id, self.three_days_ago, num_times=2
    )
    self.store_event_outcomes(
        self.organization.id,
        self.project.id,
        self.three_days_ago,
        num_times=10,
        category=DataCategory.TRANSACTION,
    )
    prepare_organization_report(
        self.now.timestamp(), ONE_DAY * 7, self.organization.id, self._dummy_batch_id
    )
    for call_args in message_builder.call_args_list:
        message_params = call_args.kwargs
        context = message_params["context"]
        assert context["organization"] == self.organization
        assert context["issue_summary"] == {
            "escalating_substatus_count": 0,
            "new_substatus_count": 2,
            "ongoing_substatus_count": 0,
            "regression_substatus_count": 0,
            "total_substatus_count": 2,
        }
        assert len(context["key_errors"]) == 1
@mock.patch("sentry.analytics.record")
@mock.patch("sentry.tasks.summaries.weekly_reports.MessageBuilder")
def test_message_builder_multiple_users_prevent_resend(
    self, message_builder: mock.MagicMock, record: mock.MagicMock
) -> None:
    """A failed batch sends nothing; a retried batch then delivers to every member once."""
    user = self.create_user()
    self.create_member(teams=[self.team], user=user, organization=self.organization)
    user2 = self.create_user()
    self.create_member(teams=[self.team], user=user2, organization=self.organization)
    event1 = self.store_event(
        data={
            "event_id": "a" * 32,
            "message": "message",
            "timestamp": self.three_days_ago.isoformat(),
            "fingerprint": ["group-1"],
        },
        project_id=self.project.id,
        default_event_type=EventType.DEFAULT,
    )
    event2 = self.store_event(
        data={
            "event_id": "b" * 32,
            "message": "message",
            "timestamp": self.three_days_ago.isoformat(),
            "fingerprint": ["group-2"],
        },
        project_id=self.project.id,
        default_event_type=EventType.DEFAULT,
    )
    self.store_event_outcomes(
        self.organization.id, self.project.id, self.three_days_ago, num_times=2
    )
    self.store_event_outcomes(
        self.organization.id,
        self.project.id,
        self.three_days_ago,
        num_times=10,
        category=DataCategory.TRANSACTION,
    )
    group1 = event1.group
    group2 = event2.group
    group1.status = GroupStatus.RESOLVED
    group1.substatus = None
    group1.resolved_at = self.two_days_ago
    group1.save()
    group2.status = GroupStatus.RESOLVED
    group2.substatus = None
    group2.resolved_at = self.two_days_ago
    group2.save()
    # TODO(RyanSkonnord): Make sure this doesn't cause false negatives after
    #  batch IDs are also used to prevent duplicate sends
    batch_id = "ea18c80c-d44f-48a4-8973-b0daa3169c44"
    # First attempt: template preparation blows up, so no email must go out.
    with (
        mock.patch(
            "sentry.tasks.summaries.weekly_reports.prepare_template_context",
            side_effect=ValueError("oh no!"),
        ),
        mock.patch(
            "sentry.tasks.summaries.weekly_reports.OrganizationReportBatch.send_email"
        ) as mock_send_email,
    ):
        with pytest.raises(Exception):
            prepare_organization_report(
                self.now.timestamp(), ONE_DAY * 7, self.organization.id, batch_id
            )
        mock_send_email.assert_not_called()
    # Retry with the same batch id succeeds and delivers.
    prepare_organization_report(
        self.now.timestamp(), ONE_DAY * 7, self.organization.id, batch_id
    )
    for call_args in message_builder.call_args_list:
        message_params = call_args.kwargs
        context = message_params["context"]
        assert message_params["template"] == "sentry/emails/reports/body.txt"
        assert message_params["html_template"] == "sentry/emails/reports/body.html"
        assert context["organization"] == self.organization
        assert context["issue_summary"] == {
            "escalating_substatus_count": 0,
            "new_substatus_count": 0,
            "ongoing_substatus_count": 0,
            "regression_substatus_count": 0,
            "total_substatus_count": 0,
        }
        assert len(context["key_errors"]) == 0
        assert context["trends"]["total_error_count"] == 2
        assert context["trends"]["total_transaction_count"] == 10
        assert "Weekly Report for" in message_params["subject"]
        assert isinstance(context["notification_uuid"], str)
    # Both members get their own analytics event.
    assert_any_analytics_event(
        record,
        WeeklyReportSent(
            user_id=user.id,
            organization_id=self.organization.id,
            notification_uuid="mock.ANY",
            user_project_count=1,
        ),
        exclude_fields=["notification_uuid"],
    )
    assert_any_analytics_event(
        record,
        WeeklyReportSent(
            user_id=user2.id,
            organization_id=self.organization.id,
            notification_uuid="mock.ANY",
            user_project_count=1,
        ),
        exclude_fields=["notification_uuid"],
    )
@mock.patch("sentry.tasks.summaries.weekly_reports.MessageBuilder")
@with_feature("organizations:escalating-issues")
def test_message_builder_substatus_simple(self, message_builder: mock.MagicMock) -> None:
    """Issue summary in the email context reflects one NEW and one ONGOING group."""
    self.create_member(
        teams=[self.team], user=self.create_user(), organization=self.organization
    )
    event1 = self.store_event(
        data={
            "event_id": "a" * 32,
            "message": "message",
            "timestamp": self.three_days_ago.isoformat(),
            "fingerprint": ["group-1"],
        },
        project_id=self.project.id,
        default_event_type=EventType.DEFAULT,
    )
    group1 = event1.group
    group1.substatus = GroupSubStatus.NEW
    group1.save()
    event2 = self.store_event(
        data={
            "event_id": "b" * 32,
            "message": "message",
            "timestamp": self.three_days_ago.isoformat(),
            "fingerprint": ["group-2"],
        },
        project_id=self.project.id,
        default_event_type=EventType.DEFAULT,
    )
    group2 = event2.group
    group2.substatus = GroupSubStatus.ONGOING
    group2.save()
    prepare_organization_report(
        self.now.timestamp(), ONE_DAY * 7, self.organization.id, self._dummy_batch_id
    )
    for call_args in message_builder.call_args_list:
        message_params = call_args.kwargs
        context = message_params["context"]
        assert message_params["template"] == "sentry/emails/reports/body.txt"
        assert message_params["html_template"] == "sentry/emails/reports/body.html"
        assert context["organization"] == self.organization
        assert context["issue_summary"] == {
            "escalating_substatus_count": 0,
            "new_substatus_count": 1,
            "ongoing_substatus_count": 1,
            "regression_substatus_count": 0,
            "total_substatus_count": 2,
        }
@mock.patch("sentry.tasks.summaries.weekly_reports.MessageBuilder")
def test_message_builder_advanced(self, message_builder: mock.MagicMock) -> None:
    """Trends legend aggregates accepted vs dropped counts per category; FILTERED is ignored."""
    for outcome, category, num in [
        (Outcome.ACCEPTED, DataCategory.ERROR, 1),
        (Outcome.RATE_LIMITED, DataCategory.ERROR, 2),
        (Outcome.ACCEPTED, DataCategory.TRANSACTION, 3),
        (Outcome.RATE_LIMITED, DataCategory.TRANSACTION, 4),
        # Filtered should be ignored in these emails
        (Outcome.FILTERED, DataCategory.TRANSACTION, 5),
    ]:
        self.store_event_outcomes(
            self.organization.id,
            self.project.id,
            self.two_days_ago,
            num_times=num,
            outcome=outcome,
            category=category,
        )
    event1 = self.store_event(
        data={
            "event_id": "a" * 32,
            "message": "message",
            "timestamp": self.three_days_ago.isoformat(),
            "fingerprint": ["group-1"],
        },
        project_id=self.project.id,
        default_event_type=EventType.DEFAULT,
    )
    group1 = event1.group
    group1.status = GroupStatus.RESOLVED
    group1.substatus = None
    group1.resolved_at = self.two_days_ago
    group1.save()
    prepare_organization_report(
        self.timestamp, ONE_DAY * 7, self.organization.id, self._dummy_batch_id
    )
    message_params = message_builder.call_args.kwargs
    ctx = message_params["context"]
    # dropped_transaction_count = 4 rate-limited + 5 filtered = 9.
    assert ctx["trends"]["legend"][0] == {
        "slug": "bar",
        "url": f"http://testserver/organizations/baz/issues/?referrer=weekly_report&notification_uuid={ctx['notification_uuid']}&project={self.project.id}",
        "color": "#422C6E",
        "dropped_error_count": 2,
        "accepted_error_count": 1,
        "accepted_replay_count": 0,
        "dropped_replay_count": 0,
        "dropped_transaction_count": 9,
        "accepted_transaction_count": 3,
    }
    assert ctx["trends"]["series"][-2][1][0] == {
        "color": "#422C6E",
        "error_count": 1,
        "replay_count": 0,
        "transaction_count": 3,
    }
@mock.patch("sentry.tasks.summaries.weekly_reports.OrganizationReportBatch.send_email")
def test_empty_report(self, mock_send_email: mock.MagicMock) -> None:
    """No email is sent when all activity falls outside the 7-day report window."""
    # date is out of range
    ten_days_ago = self.now - timedelta(days=10)
    self.store_event(
        data={
            "event_id": "a" * 32,
            "message": "message",
            "timestamp": ten_days_ago.isoformat(),
            "fingerprint": ["group-1"],
        },
        project_id=self.project.id,
        default_event_type=EventType.DEFAULT,
    )
    prepare_organization_report(
        self.now.timestamp(), ONE_DAY * 7, self.organization.id, self._dummy_batch_id
    )
    assert mock_send_email.call_count == 0
@with_feature("organizations:session-replay")
@with_feature("organizations:session-replay-weekly_report")
@mock.patch("sentry.tasks.summaries.weekly_reports.MessageBuilder")
def test_message_builder_replays(self, message_builder: mock.MagicMock) -> None:
    """With replay features enabled, replay outcomes show up in legend and series."""
    for outcome, category, num in [
        (Outcome.ACCEPTED, DataCategory.REPLAY, 6),
        (Outcome.RATE_LIMITED, DataCategory.REPLAY, 7),
    ]:
        self.store_event_outcomes(
            self.organization.id,
            self.project.id,
            self.two_days_ago,
            num_times=num,
            outcome=outcome,
            category=category,
        )
    prepare_organization_report(
        self.timestamp, ONE_DAY * 7, self.organization.id, self._dummy_batch_id
    )
    message_params = message_builder.call_args.kwargs
    ctx = message_params["context"]
    assert ctx["trends"]["legend"][0] == {
        "slug": "bar",
        "url": f"http://testserver/organizations/baz/issues/?referrer=weekly_report&notification_uuid={ctx['notification_uuid']}&project={self.project.id}",
        "color": "#422C6E",
        "dropped_error_count": 0,
        "accepted_error_count": 0,
        "accepted_replay_count": 6,
        "dropped_replay_count": 7,
        "dropped_transaction_count": 0,
        "accepted_transaction_count": 0,
    }
    assert ctx["trends"]["series"][-2][1][0] == {
        "color": "#422C6E",
        "error_count": 0,
        "replay_count": 6,
        "transaction_count": 0,
    }
@mock.patch("sentry.tasks.summaries.weekly_reports.MessageBuilder")
def test_message_builder_timezone(self, message_builder: mock.MagicMock) -> None:
    """Report start/end dates in the email are rendered in the user's timezone."""
    # fill with data so report not skipped
    self.store_event_outcomes(
        self.organization.id, self.project.id, self.two_days_ago, num_times=2
    )
    self._set_timezone(self.user, "US/Pacific")
    prepare_organization_report(
        self.timestamp,
        ONE_DAY * 7,
        self.organization.id,
        self._dummy_batch_id,
        dry_run=False,
        target_user=self.user.id,
    )
    # Compute the expected localized window boundaries independently.
    utc_start = to_datetime(self.timestamp - ONE_DAY * 7)
    utc_end = to_datetime(self.timestamp)
    local_timezone = zoneinfo.ZoneInfo("US/Pacific")
    local_start = date_format(utc_start.astimezone(local_timezone))
    local_end = date_format(utc_end.astimezone(local_timezone))
    for call_args in message_builder.call_args_list:
        message_params = call_args.kwargs
        context = message_params["context"]
        assert context["organization"] == self.organization
        assert context["user_project_count"] == 1
        assert context["start"] == local_start
        assert context["end"] == local_end
        assert f"Weekly Report for {self.organization.name}" in message_params["subject"]
        assert local_start in message_params["subject"]
def test_group_status_to_color_obj_correct_length(self) -> None:
    """group_status_to_color must define exactly one entry per distinct status value."""
    # Compare by value, not by name: GroupHistoryStatus.UNRESOLVED and
    # GroupHistoryStatus.ONGOING share the same underlying value.
    distinct_values = {
        getattr(GroupHistoryStatus, attr_name)
        for attr_name in dir(GroupHistoryStatus)
        if not attr_name.startswith("__")
        and not callable(getattr(GroupHistoryStatus, attr_name))
    }
    assert len(group_status_to_color) == len(distinct_values)
@mock.patch("sentry.analytics.record")
@mock.patch("sentry.tasks.summaries.weekly_reports.MessageBuilder")
def test_email_override_simple(
    self, message_builder: mock.MagicMock, record: mock.MagicMock
) -> None:
    """email_override reroutes delivery to the override address and skips analytics."""
    user = self.create_user(email="itwasme@dio.xyz")
    user_id = user.id
    self.create_member(teams=[self.team], user=user, organization=self.organization)
    extra_team = self.create_team(organization=self.organization)
    # create an extra project to ensure our email only gets the user's project
    self.create_project(teams=[extra_team])
    # fill with data so report not skipped
    self.store_event_outcomes(
        self.organization.id, self.project.id, self.two_days_ago, num_times=2
    )
    prepare_organization_report(
        self.timestamp,
        ONE_DAY * 7,
        self.organization.id,
        self._dummy_batch_id,
        dry_run=False,
        target_user=user_id,
        email_override="joseph@speedwagon.org",
    )
    for call_args in message_builder.call_args_list:
        message_params = call_args.kwargs
        context = message_params["context"]
        assert context["organization"] == self.organization
        assert context["user_project_count"] == 1
        assert f"Weekly Report for {self.organization.name}" in message_params["subject"]
    # No analytics event should be recorded for overridden sends.
    with pytest.raises(AssertionError):
        assert_any_analytics_event(
            record,
            WeeklyReportSent(
                user_id=user.id,
                organization_id=self.organization.id,
                notification_uuid="mock.ANY",
                user_project_count=1,
            ),
        )
    message_builder.return_value.send.assert_called_with(to=("joseph@speedwagon.org",))
@mock.patch("sentry.analytics.record")
@mock.patch("sentry.tasks.summaries.weekly_reports.MessageBuilder")
def test_user_with_team_and_no_projects(
    self, message_builder: mock.MagicMock, record: mock.MagicMock
) -> None:
    """A member whose team owns no projects still gets a report with zero projects."""
    organization = self.create_organization()
    project = self.create_project(organization=organization)
    user = self.create_user(email="itwasme@dio.xyz")
    user_id = user.id
    # Team with no projects and no other members.
    extra_team = self.create_team(organization=organization, members=[])
    self.create_member(teams=[extra_team], user=user, organization=organization)
    self.store_event_outcomes(organization.id, project.id, self.two_days_ago, num_times=2)
    prepare_organization_report(
        self.timestamp,
        ONE_DAY * 7,
        organization.id,
        self._dummy_batch_id,
        dry_run=False,
        target_user=user_id,
    )
    for call_args in message_builder.call_args_list:
        message_params = call_args.kwargs
        context = message_params["context"]
        assert context["organization"] == organization
        assert context["user_project_count"] == 0
        assert f"Weekly Report for {organization.name}" in message_params["subject"]
@mock.patch("sentry.analytics.record")
@mock.patch("sentry.tasks.summaries.weekly_reports.MessageBuilder")
def test_email_override_no_target_user(
    self, message_builder: mock.MagicMock, record: mock.MagicMock
) -> None:
    """Override with no target user produces an org-wide (all projects) report."""
    # create some extra projects; we expect to receive a report with all projects included
    self.create_project(organization=self.organization)
    self.create_project(organization=self.organization)
    # fill with data so report not skipped
    self.store_event_outcomes(
        self.organization.id, self.project.id, self.two_days_ago, num_times=2
    )
    prepare_organization_report(
        self.timestamp,
        ONE_DAY * 7,
        self.organization.id,
        self._dummy_batch_id,
        dry_run=False,
        target_user=None,
        email_override="jonathan@speedwagon.org",
    )
    for call_args in message_builder.call_args_list:
        message_params = call_args.kwargs
        context = message_params["context"]
        assert context["organization"] == self.organization
        assert context["user_project_count"] == 3
    # No analytics event should be recorded for overridden sends.
    with pytest.raises(AssertionError):
        assert_any_analytics_event(
            record,
            WeeklyReportSent(
                user_id=None,
                organization_id=self.organization.id,
                notification_uuid="mock.ANY",
                user_project_count=1,
            ),
        )
    message_builder.return_value.send.assert_called_with(to=("jonathan@speedwagon.org",))
@mock.patch("sentry.tasks.summaries.weekly_reports.logger")
def test_email_override_invalid_target_user(self, logger: mock.MagicMock) -> None:
    """A non-numeric target_user with email_override logs an error instead of sending."""
    org = self.create_organization()
    proj = self.create_project(organization=org)
    # fill with data so report not skipped
    self.store_event_outcomes(org.id, proj.id, self.two_days_ago, num_times=2)
    batch_id = "ef61f1d1-41a3-4530-8160-615466937076"
    prepare_organization_report(
        self.timestamp,
        ONE_DAY * 7,
        org.id,
        batch_id=batch_id,
        dry_run=False,
        target_user="dummy",
        email_override="doesntmatter@smad.com",
    )
    logger.error.assert_called_with(
        "Target user must have an ID",
        extra={
            "batch_id": str(batch_id),
            "organization": org.id,
            "target_user": "dummy",
            "email_override": "doesntmatter@smad.com",
        },
    )
@mock.patch("sentry.analytics.record")
@mock.patch("sentry.tasks.summaries.weekly_reports.MessageBuilder")
def test_dry_run_simple(self, message_builder: mock.MagicMock, record: mock.MagicMock) -> None:
    """dry_run=True must neither send any email nor record analytics."""
    org = self.create_organization()
    proj = self.create_project(organization=org)
    # fill with data so report not skipped
    self.store_event_outcomes(org.id, proj.id, self.two_days_ago, num_times=2)
    prepare_organization_report(
        self.timestamp,
        ONE_DAY * 7,
        org.id,
        self._dummy_batch_id,
        dry_run=True,
        target_user=None,
        email_override="doesntmatter@smad.com",
    )
    with pytest.raises(AssertionError):
        assert_any_analytics_event(
            record,
            WeeklyReportSent(
                user_id=None,
                organization_id=self.organization.id,
                notification_uuid="mock.ANY",
                user_project_count=1,
            ),
        )
    message_builder.return_value.send.assert_not_called()
@mock.patch("sentry.tasks.summaries.weekly_reports.logger")
@mock.patch("sentry.tasks.summaries.weekly_reports.prepare_template_context")
@mock.patch("sentry.tasks.summaries.weekly_reports.OrganizationReportBatch.send_email")
def test_duplicate_detection(
    self,
    mock_send_email: mock.MagicMock,
    mock_prepare_template_context: mock.MagicMock,
    mock_logger: mock.MagicMock,
) -> None:
    """A second batch for the same report date is skipped and logged, not re-sent."""
    self.store_event_outcomes(
        self.organization.id, self.project.id, self.two_days_ago, num_times=2
    )
    # Fix: the context was previously constructed twice in a row, with the first
    # instance immediately discarded; build it once.
    ctx = OrganizationReportContext(0, 0, organization=self.organization)
    user_project_ownership(ctx)
    template_context = prepare_template_context(ctx, [self.user.id])
    mock_prepare_template_context.return_value = template_context
    batch1_id = "abe8ba3e-90af-4a98-b925-5f30250ae6a0"
    batch2_id = "abe8ba3e-90af-4a98-b925-5f30250ae6a1"
    self._set_option_value("always")
    # First send
    OrganizationReportBatch(ctx, batch1_id).deliver_reports()
    assert mock_send_email.call_count == 1
    mock_logger.error.assert_not_called()
    # Duplicate send: same user/date, different batch id — must be detected and skipped.
    OrganizationReportBatch(ctx, batch2_id).deliver_reports()
    assert mock_send_email.call_count == 1
    assert mock_logger.error.call_count == 1
    mock_logger.error.assert_called_once_with(
        "weekly_report.delivery_record.duplicate_detected",
        extra={
            "batch_id": str(batch2_id),
            "organization": self.organization.id,
            "user_id": self.user.id,
            "has_email_override": False,
            "report_date": "1970-01-01",
        },
    )
@mock.patch("sentry.tasks.summaries.weekly_reports.prepare_organization_report")
def test_schedule_organizations_with_redis_tracking(
self, mock_prepare_organization_report: mock.MagicMock
) -> None:
"""Test that schedule_organizations uses Redis to track minimum organization ID."""
timestamp = self.timestamp
redis_cluster = redis.clusters.get("default").get_local_client_for_key(
"weekly_reports_org_id_min"
)
# Create multiple organizations
org1 = self.organization # Use existing organization
org2 = self.create_organization(name="Another Org")
org3 = self.create_organization(name="Third Org")
# Set initial Redis value to simulate a previous run that was interrupted
redis_cluster.set(f"weekly_reports_org_id_min:{timestamp}", org1.id)
# Run the task
schedule_organizations(timestamp=timestamp)
# Verify that prepare_organization_report was called for org2 and org3 but not org1
# because we started from org1.id
mock_prepare_organization_report.delay.assert_any_call(
timestamp, ONE_DAY * 7, org2.id, mock.ANY, dry_run=False
)
mock_prepare_organization_report.delay.assert_any_call(
timestamp, ONE_DAY * 7, org3.id, mock.ANY, dry_run=False
)
# Verify that Redis key was deleted after completion
assert redis_cluster.get(f"weekly_reports_org_id_min:{timestamp}") is None
# Reset call counts for the next test
mock_prepare_organization_report.reset_mock()
# Run again with no Redis value set
schedule_organizations(timestamp=timestamp)
# Verify that prepare_organization_report was called for all organizations
assert mock_prepare_organization_report.delay.call_count == 3
@mock.patch("sentry.tasks.summaries.weekly_reports.prepare_organization_report")
def test_schedule_organizations_updates_redis_during_processing(
self, mock_prepare_organization_report
):
"""Test that schedule_organizations updates Redis with the current organization ID during processing."""
timestamp = self.timestamp
# Create multiple organizations
orgs = [
self.organization,
self.create_organization(name="Org 2"),
self.create_organization(name="Org 3"),
]
# Sort organizations by ID
orgs.sort(key=lambda org: org.id)
# Use a spy to track Redis set calls
with mock.patch("redis.client.Redis.set") as mock_redis_set:
# Run the task
schedule_organizations(timestamp=timestamp)
# Verify that redis.set was called for each organization
expected_key = f"weekly_reports_org_id_min:{timestamp}"
# Check that set was called at least once for each organization except the last one
assert mock_redis_set.call_count > 0, "Redis set was not called"
# Get the keys that were set
set_keys = [args[0] for args, _ in mock_redis_set.call_args_list]
# Verify that the expected key was used
assert expected_key in set_keys, f"Expected key {expected_key} not found in {set_keys}"
@mock.patch("sentry.tasks.summaries.weekly_reports.prepare_organization_report")
def test_schedule_organizations_starts_from_beginning_when_no_redis_key(
self, mock_prepare_organization_report
):
"""Test that schedule_organizations starts from the beginning when no Redis key exists."""
timestamp = self.timestamp
redis_cluster = redis.clusters.get("default").get_local_client_for_key(
"weekly_reports_org_id_min"
)
# Ensure Redis key doesn't exist
redis_cluster.delete(f"weekly_reports_org_id_min:{timestamp}")
# Create multiple organizations
orgs = [
self.organization,
self.create_organization(name="Org 2"),
self.create_organization(name="Org 3"),
]
# Sort organizations by ID
orgs.sort(key=lambda org: org.id)
# Run the task
schedule_organizations(timestamp=timestamp)
# Verify that prepare_organization_report was called for all organizations
assert mock_prepare_organization_report.delay.call_count == len(orgs)
# Verify that each organization was processed
for org in orgs:
mock_prepare_organization_report.delay.assert_any_call(
timestamp, ONE_DAY * 7, org.id, mock.ANY, dry_run=False
)
@mock.patch("sentry.tasks.summaries.weekly_reports.MessageBuilder")
def test_user_does_not_see_deleted_team_data(self, message_builder: mock.MagicMock) -> None:
user = self.create_user(email="test@example.com")
self.create_member(teams=[self.team], user=user, organization=self.organization)
self.team.status = TeamStatus.PENDING_DELETION
self.team.save()
self.store_event_outcomes(
self.organization.id, self.project.id, self.two_days_ago, num_times=2
)
prepare_organization_report(
self.timestamp,
ONE_DAY * 7,
self.organization.id,
self._dummy_batch_id,
dry_run=False,
target_user=user.id,
)
# Verify the report is empty as the user's team is pending deletion
for call_args in message_builder.call_args_list:
message_params = call_args.kwargs
context = message_params["context"]
assert len(context["trends"]["legend"]) == 0
| WeeklyReportsTest |
python | django__django | tests/migrations/migrations_test_apps/alter_fk/book_app/migrations/0001_initial.py | {
"start": 43,
"end": 624
} | class ____(migrations.Migration):
dependencies = [
("author_app", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="Book",
fields=[
(
"id",
models.AutoField(
serialize=False, auto_created=True, primary_key=True
),
),
("title", models.CharField(max_length=50)),
("author", models.ForeignKey("author_app.Author", models.CASCADE)),
],
),
]
| Migration |
python | allegroai__clearml | clearml/binding/frameworks/tensorflow_bind.py | {
"start": 2220,
"end": 10606
} | class ____(object):
def __init__(
self,
logger: Any,
report_freq: int = 100,
histogram_update_freq_multiplier: int = 10,
histogram_granularity: int = 50,
) -> None:
self.logger = logger
self.report_freq = report_freq
self._histogram_granularity = histogram_granularity
self._histogram_update_freq_multiplier = histogram_update_freq_multiplier
self._histogram_update_call_counter = 0
self._hist_report_cache = {}
self._hist_x_granularity = 50
@staticmethod
def _sample_histograms(_hist_iters: numpy.ndarray, _histogram_granularity: int) -> numpy.ndarray:
# re-sample history based on distribution of samples across time (steps)
ratio = (
(
(_hist_iters[-1] - _hist_iters[_histogram_granularity])
/ (_hist_iters[_histogram_granularity - 1] - _hist_iters[0])
)
if _hist_iters.size > _histogram_granularity
else 0.0
)
cur_idx_below = np.arange(0, min(_hist_iters.size, _histogram_granularity - 1))
np.random.shuffle(cur_idx_below)
cur_idx_below = cur_idx_below[: int(_histogram_granularity * (1.0 - ratio / (1 + ratio)) + 0.5)]
if ratio > 0.0:
cur_idx_above = np.arange(_histogram_granularity - 1, _hist_iters.size)
np.random.shuffle(cur_idx_above)
cur_idx_above = cur_idx_above[: int(_histogram_granularity * ratio / (1 + ratio))]
else:
cur_idx_above = np.array([])
_cur_idx = np.unique(np.sort(np.concatenate((cur_idx_below, cur_idx_above)).astype(np.int64)))
return _cur_idx
def add_histograms(self, histograms: List[Dict[str, Any]]) -> None:
for index, histogram in enumerate(histograms):
self.add_histogram(
histogram.get("title"),
histogram.get("series"),
histogram.get("step"),
histogram.get("hist_data"),
increase_histogram_update_call_counter=(index == len(histograms) - 1),
)
def add_histogram(
self,
title: str,
series: str,
step: int,
hist_data: Union[dict, np.ndarray],
increase_histogram_update_call_counter: bool = True,
) -> None:
# only collect histogram every specific interval
offset = 1 if increase_histogram_update_call_counter else 0
if (self._histogram_update_call_counter + offset) % self.report_freq != 0 or (
self._histogram_update_call_counter + offset
) < self.report_freq - 1:
self._histogram_update_call_counter += offset
return
if isinstance(hist_data, dict):
pass
elif isinstance(hist_data, np.ndarray) and len(hist_data.shape) == 2 and np.atleast_2d(hist_data).shape[1] == 3:
# prepare the dictionary, assume numpy
# hist_data['bucketLimit'] is the histogram bucket right side limit, meaning X axis
# hist_data['bucket'] is the histogram height, meaning the Y axis
# notice hist_data[:, 1] is the right side limit, for backwards compatibility we take the left side
hist_data = {
"bucketLimit": hist_data[:, 0].tolist(),
"bucket": hist_data[:, 2].tolist(),
}
else:
# assume we have to do the histogram on the data
hist_data = np.histogram(hist_data, bins=32)
hist_data = {
"bucketLimit": hist_data[1].tolist(),
"bucket": hist_data[0].tolist(),
}
self._add_histogram(
title=title,
series=series,
step=step,
hist_data=hist_data,
increase_histogram_update_call_counter=increase_histogram_update_call_counter,
)
def _add_histogram(
self,
title: str,
series: str,
step: int,
hist_data: dict,
increase_histogram_update_call_counter: bool = True,
) -> None:
# only collect histogram every specific interval
if increase_histogram_update_call_counter:
self._histogram_update_call_counter += 1
if (
self._histogram_update_call_counter % self.report_freq != 0
or self._histogram_update_call_counter < self.report_freq - 1
):
return
# generate forward matrix of the histograms
# Y-axis (rows) is iteration (from 0 to current Step)
# X-axis averaged bins (conformed sample 'bucketLimit')
# Z-axis actual value (interpolated 'bucket')
step = EventTrainsWriter._fix_step_counter(title, series, step)
# get histograms from cache
hist_list, hist_iters, minmax = self._hist_report_cache.get((title, series), ([], np.array([]), None))
# resample data so we are always constrained in number of histogram we keep
if hist_iters.size >= self._histogram_granularity**2:
idx = self._sample_histograms(hist_iters, self._histogram_granularity)
hist_iters = hist_iters[idx]
hist_list = [hist_list[i] for i in idx]
# check if current sample is not already here (actually happens some times)
if step in hist_iters:
return None
# add current sample, if not already here
hist_iters = np.append(hist_iters, step)
# hist_data['bucketLimit'] is the histogram bucket right side limit, meaning X axis
# hist_data['bucket'] is the histogram height, meaning the Y axis
hist = np.array(list(zip(hist_data["bucketLimit"], hist_data["bucket"])), dtype=np.float32)
hist = hist[~np.isinf(hist[:, 0]), :]
hist_list.append(hist)
# keep track of min/max values of histograms (for later re-binning)
if minmax is None:
minmax = hist[:, 0].min(), hist[:, 0].max()
else:
# noinspection PyUnresolvedReferences
minmax = min(minmax[0], hist[:, 0].min()), max(minmax[1], hist[:, 0].max())
# update the cache
self._hist_report_cache[(title, series)] = hist_list, hist_iters, minmax
# only report histogram every specific interval, but do report the first few, so you know there are histograms
if hist_iters.size < 1 or (
hist_iters.size >= self._histogram_update_freq_multiplier
and hist_iters.size % self._histogram_update_freq_multiplier != 0
):
return
# resample histograms on a unified bin axis +- epsilon
_epsilon = abs((minmax[1] - minmax[0]) / float(self._hist_x_granularity))
if _epsilon == 0:
_epsilon = 0.01
_minmax = minmax[0] - _epsilon, minmax[1] + _epsilon
prev_xedge = np.arange(
start=_minmax[0],
step=(_minmax[1] - _minmax[0]) / float(self._hist_x_granularity - 2),
stop=_minmax[1],
)
# uniformly select histograms and the last one
cur_idx = self._sample_histograms(hist_iters, self._histogram_granularity)
report_hist = np.zeros(shape=(len(cur_idx), prev_xedge.size), dtype=np.float32)
for i, n in enumerate(cur_idx):
h = hist_list[n]
report_hist[i, :] = np.interp(prev_xedge, h[:, 0], h[:, 1], right=0, left=0)
yedges = hist_iters[cur_idx]
xedges = prev_xedge
# if only a single line make, add another zero line, for the scatter plot to draw
if report_hist.shape[0] < 2:
report_hist = np.vstack((np.zeros_like(report_hist), report_hist))
# create 3d line (scatter) of histograms
skipx = max(1, int(xedges.size / 10))
skipy = max(1, int(yedges.size / 10))
xlabels = ["%.2f" % v if i % skipx == 0 else "" for i, v in enumerate(xedges[:-1])]
ylabels = [str(int(v)) if i % skipy == 0 else "" for i, v in enumerate(yedges)]
self.logger.report_surface(
title=title,
series=series,
iteration=0,
xaxis=" ",
yaxis="iteration",
xlabels=xlabels,
ylabels=ylabels,
matrix=report_hist,
camera=(-0.1, +1.3, 1.4),
)
# noinspection PyMethodMayBeStatic,PyProtectedMember,SpellCheckingInspection
| WeightsGradientHistHelper |
python | eventlet__eventlet | tests/wsgi_test.py | {
"start": 6572,
"end": 8026
} | class ____(tests.LimitedTestCase):
def setUp(self):
super().setUp()
self.site = Site()
self.killer = None
self.set_site()
self.spawn_server()
def tearDown(self):
greenthread.kill(self.killer)
eventlet.sleep(0)
super().tearDown()
def spawn_server(self, **kwargs):
"""Spawns a new wsgi server with the given arguments using
:meth:`spawn_thread`.
Sets `self.server_addr` to (host, port) tuple suitable for `socket.connect`.
"""
self.logfile = io.StringIO()
new_kwargs = dict(max_size=128,
log=self.logfile,
site=self.site)
new_kwargs.update(kwargs)
if 'sock' not in new_kwargs:
new_kwargs['sock'] = eventlet.listen(('localhost', 0))
self.server_addr = new_kwargs['sock'].getsockname()
self.spawn_thread(wsgi.server, **new_kwargs)
def spawn_thread(self, target, **kwargs):
"""Spawns a new greenthread using specified target and arguments.
Kills any previously-running server and sets self.killer to the
greenthread running the target.
"""
eventlet.sleep(0) # give previous server a chance to start
if self.killer:
greenthread.kill(self.killer)
self.killer = eventlet.spawn(target, **kwargs)
def set_site(self):
raise NotImplementedError
| _TestBase |
python | bottlepy__bottle | test/test_wsgi.py | {
"start": 17544,
"end": 18455
} | class ____(ServerTestBase):
def setUp(self):
ServerTestBase.setUp(self)
def testWithStatement(self):
default = bottle.default_app()
inner_app = bottle.Bottle()
self.assertEqual(default, bottle.default_app())
with inner_app:
self.assertEqual(inner_app, bottle.default_app())
self.assertEqual(default, bottle.default_app())
def assertWraps(self, test, other):
self.assertEqual(test.__doc__, other.__doc__)
def test_module_shortcuts(self):
for name in '''route get post put delete error mount
hook install uninstall'''.split():
short = getattr(bottle, name)
original = getattr(bottle.app(), name)
self.assertWraps(short, original)
def test_module_shortcuts_with_different_name(self):
self.assertWraps(bottle.url, bottle.app().get_url)
| TestAppShortcuts |
python | scipy__scipy | scipy/special/tests/test_exponential_integrals.py | {
"start": 3745,
"end": 3863
} | class ____:
def test_out_of_domain(self):
assert all(np.isnan([sc.expn(-1, 1.0), sc.expn(1, -1.0)]))
| TestExpn |
python | ray-project__ray | python/ray/llm/_internal/common/callbacks/cloud_downloader.py | {
"start": 184,
"end": 893
} | class ____(BaseModel):
"""Model for validating CloudDownloader configuration."""
paths: List[Tuple[str, str]]
@field_validator("paths")
@classmethod
def validate_paths(cls, v: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
# Supported cloud storage URI schemes
valid_schemes = ("s3://", "gs://", "abfss://", "azure://")
for i, (cloud_uri, _) in enumerate(v):
if not any(cloud_uri.startswith(scheme) for scheme in valid_schemes):
raise ValueError(
f"paths[{i}][0] (cloud_uri) must start with one of {valid_schemes}, "
f"got '{cloud_uri}'"
)
return v
| CloudDownloaderConfig |
python | ansible__ansible | test/lib/ansible_test/_internal/config.py | {
"start": 9302,
"end": 11180
} | class ____(TestConfig):
"""Configuration for the integration command."""
def __init__(self, args: t.Any, command: str) -> None:
super().__init__(args, command)
self.start_at: str = args.start_at
self.start_at_task: str = args.start_at_task
self.allow_destructive: bool = args.allow_destructive
self.allow_root: bool = args.allow_root
self.allow_disabled: bool = args.allow_disabled
self.allow_unstable: bool = args.allow_unstable
self.allow_unstable_changed: bool = args.allow_unstable_changed
self.allow_unsupported: bool = args.allow_unsupported
self.retry_on_error: bool = args.retry_on_error
self.continue_on_error: bool = args.continue_on_error
self.debug_strategy: bool = args.debug_strategy
self.changed_all_target: str = args.changed_all_target
self.changed_all_mode: str = args.changed_all_mode
self.list_targets: bool = args.list_targets
self.tags = args.tags
self.skip_tags = args.skip_tags
self.diff = args.diff
self.no_temp_workdir: bool = args.no_temp_workdir
self.no_temp_unicode: bool = args.no_temp_unicode
if self.list_targets:
self.explain = True
self.display_stderr = True
def get_ansible_config(self) -> str:
"""Return the path to the Ansible config for the given config."""
ansible_config_relative_path = os.path.join(data_context().content.integration_path, '%s.cfg' % self.command)
ansible_config_path = os.path.join(data_context().content.root, ansible_config_relative_path)
if not os.path.exists(ansible_config_path):
# use the default empty configuration unless one has been provided
ansible_config_path = super().get_ansible_config()
return ansible_config_path
| IntegrationConfig |
python | scipy__scipy | benchmarks/benchmarks/interpolate.py | {
"start": 4276,
"end": 4726
} | class ____(Benchmark):
param_names = ['n_samples', 'method']
params = [
[10, 50, 100],
['linear', 'cubic', 'quintic'],
]
def setup(self, n_samples, method):
r_samples = n_samples / 2.
self.x = np.arange(-r_samples, r_samples, 0.25)
self.y = np.arange(-r_samples, r_samples, 0.25)
self.xx, self.yy = np.meshgrid(self.x, self.y)
self.z = np.sin(self.xx**2+self.yy**2)
| Interpolate2d |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 187737,
"end": 187838
} | class ____(
_DateTimeTZRangeTests, _RangeTypeCompilation
):
pass
| DateTimeTZRangeCompilationTest |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeatures.py | {
"start": 16885,
"end": 17429
} | class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
# The minimum can only be zero if there are no nominal features,
# otherwise it is at least one
# TODO: shouldn't this rather be two?
minimum = None
for unique in helper_functions.get_value("NumSymbols"):
if unique > 0 and (minimum is None or unique < minimum):
minimum = unique
return minimum if minimum is not None else 0
@metafeatures.define("SymbolsMax", dependency="NumSymbols")
| SymbolsMin |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.